Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place. Commercial Alternative to JupyterHub.
Path: blob/next/scripts/general.sh
Views: 3960
#!/bin/bash1#2# Copyright (c) 2015 Igor Pecovnik, igor.pecovnik@gma**.com3#4# This file is licensed under the terms of the GNU General Public5# License version 2. This program is licensed "as is" without any6# warranty of any kind, whether express or implied.789# Functions:10# cleaning11# exit_with_error12# get_package_list_hash13# create_sources_list14# clean_up_git15# waiter_local_git16# fetch_from_repo17# improved_git18# display_alert19# fingerprint_image20# distro_menu21# addtorepo22# repo-remove-old-packages23# wait_for_package_manager24# install_pkg_deb25# prepare_host_basic26# prepare_host27# webseed28# download_and_verify29# show_developer_warning30# show_checklist_variables313233# cleaning <target>34#35# target: what to clean36# "make" - "make clean" for selected kernel and u-boot37# "debs" - delete output/debs for board&branch38# "ubootdebs" - delete output/debs for uboot&board&branch39# "alldebs" - delete output/debs40# "cache" - delete output/cache41# "oldcache" - remove old output/cache42# "images" - delete output/images43# "sources" - delete output/sources44#4546cleaning()47{48case $1 in49debs) # delete ${DEB_STORAGE} for current branch and family50if [[ -d "${DEB_STORAGE}" ]]; then51display_alert "Cleaning ${DEB_STORAGE} for" "$BOARD $BRANCH" "info"52# easier than dealing with variable expansion and escaping dashes in file names53find "${DEB_STORAGE}" -name "${CHOSEN_UBOOT}_*.deb" -delete54find "${DEB_STORAGE}" \( -name "${CHOSEN_KERNEL}_*.deb" -o \55-name "orangepi-*.deb" -o \56-name "plymouth-theme-orangepi_*.deb" -o \57-name "${CHOSEN_KERNEL/image/dtb}_*.deb" -o \58-name "${CHOSEN_KERNEL/image/headers}_*.deb" -o \59-name "${CHOSEN_KERNEL/image/source}_*.deb" -o \60-name "${CHOSEN_KERNEL/image/firmware-image}_*.deb" \) -delete61[[ -n $RELEASE ]] && rm -f "${DEB_STORAGE}/${RELEASE}/${CHOSEN_ROOTFS}"_*.deb62[[ -n $RELEASE ]] && rm -f "${DEB_STORAGE}/${RELEASE}/orangepi-desktop-${RELEASE}"_*.deb63fi64;;6566ubootdebs) # delete ${DEB_STORAGE} for uboot, current branch and family67if [[ -d "${DEB_STORAGE}" ]]; then68display_alert "Cleaning ${DEB_STORAGE} for u-boot" "$BOARD $BRANCH" "info"69# easier than dealing with variable expansion and escaping dashes in file names70find "${DEB_STORAGE}" -name "${CHOSEN_UBOOT}_*.deb" -delete71fi72;;7374extras) # delete ${DEB_STORAGE}/extra/$RELEASE for all architectures75if [[ -n $RELEASE && -d ${DEB_STORAGE}/extra/$RELEASE ]]; then76display_alert "Cleaning ${DEB_STORAGE}/extra for" "$RELEASE" "info"77rm -rf "${DEB_STORAGE}/extra/${RELEASE}"78fi79;;8081alldebs) # delete output/debs82[[ -d "${DEB_STORAGE}" ]] && display_alert "Cleaning" "${DEB_STORAGE}" "info" && rm -rf "${DEB_STORAGE}"/*83;;8485cache) # delete output/cache86[[ -d $EXTER/cache/rootfs ]] && display_alert "Cleaning" "rootfs cache (all)" "info" && find $EXTER/cache/rootfs -type f -delete87;;8889images) # delete output/images90[[ -d "${DEST}"/images ]] && display_alert "Cleaning" "output/images" "info" && rm -rf "${DEST}"/images/*91;;9293sources) # delete output/sources and output/buildpkg94[[ -d $EXTER/cache/sources ]] && display_alert "Cleaning" "sources" "info" && rm -rf $EXTER/cache/sources/* "${DEST}"/buildpkg/*95;;9697oldcache) # remove old `cache/rootfs` except for the newest 8 files98if [[ -d $EXTER/cache/rootfs && $(ls -1 $EXTER/cache/rootfs/*.lz4 2> /dev/null | wc -l) -gt "${ROOTFS_CACHE_MAX}" ]]; then99display_alert "Cleaning" "rootfs cache (old)" "info"100(cd $EXTER/cache/rootfs; ls -t *.lz4 | sed -e "1,${ROOTFS_CACHE_MAX}d" | xargs -d '\n' rm -f)101# Remove signatures if they are present. We use them for internal purpose102(cd $EXTER/cache/rootfs; ls -t *.asc | sed -e "1,${ROOTFS_CACHE_MAX}d" | xargs -d '\n' rm -f)103fi104;;105esac106}107108# exit_with_error <message> <highlight>109#110# a way to terminate build process111# with verbose error message112#113114exit_with_error()115{116local _file117local _line=${BASH_LINENO[0]}118local _function=${FUNCNAME[1]}119local _description=$1120local _highlight=$2121_file=$(basename "${BASH_SOURCE[1]}")122local stacktrace="$(get_extension_hook_stracktrace "${BASH_SOURCE[*]}" "${BASH_LINENO[*]}")"123124display_alert "ERROR in function $_function" "$stacktrace" "err"125display_alert "$_description" "$_highlight" "err"126display_alert "Process terminated" "" "info"127128if [[ "${ERROR_DEBUG_SHELL}" == "yes" ]]; then129display_alert "MOUNT" "${MOUNT}" "err"130display_alert "SDCARD" "${SDCARD}" "err"131display_alert "Here's a shell." "debug it" "err"132bash < /dev/tty || true133fi134135# TODO: execute run_after_build here?136overlayfs_wrapper "cleanup"137# unlock loop device access in case of starvation138exec {FD}>/var/lock/orangepi-debootstrap-losetup139flock -u "${FD}"140141exit 255142}143144# get_package_list_hash145#146# returns md5 hash for current package list and rootfs cache version147148get_package_list_hash()149{150local package_arr exclude_arr151local list_content152read -ra package_arr <<< "${DEBOOTSTRAP_LIST} ${PACKAGE_LIST}"153read -ra exclude_arr <<< "${PACKAGE_LIST_EXCLUDE}"154( ( printf "%s\n" "${package_arr[@]}"; printf -- "-%s\n" "${exclude_arr[@]}" ) | sort -u; echo "${1}" ) \155| md5sum | cut -d' ' -f 1156}157158# create_sources_list <release> <basedir>159#160# <release>: buster|bullseye|bookworm|bionic|focal|jammy|noble|hirsute|sid161# <basedir>: path to root directory162#163create_sources_list()164{165local release=$1166local basedir=$2167[[ -z $basedir ]] && exit_with_error "No basedir passed to create_sources_list"168169case $release in170stretch|buster)171cat <<-EOF > "${basedir}"/etc/apt/sources.list172deb http://${DEBIAN_MIRROR} $release main contrib non-free173#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free174175deb http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free176#deb-src http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free177178deb http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free179#deb-src http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free180181deb http://${DEBIAN_SECURTY} ${release}/updates main contrib non-free182#deb-src http://${DEBIAN_SECURTY} ${release}/updates main contrib non-free183EOF184;;185186bullseye)187cat <<-EOF > "${basedir}"/etc/apt/sources.list188deb https://${DEBIAN_MIRROR} $release main contrib non-free189#deb-src https://${DEBIAN_MIRROR} $release main contrib non-free190191deb https://${DEBIAN_MIRROR} ${release}-updates main contrib non-free192#deb-src https://${DEBIAN_MIRROR} ${release}-updates main contrib non-free193194deb https://${DEBIAN_MIRROR} ${release}-backports main contrib non-free195#deb-src https://${DEBIAN_MIRROR} ${release}-backports main contrib non-free196197deb https://${DEBIAN_SECURTY} ${release}-security main contrib non-free198#deb-src https://${DEBIAN_SECURTY} ${release}-security main contrib non-free199EOF200;;201202bookworm)203cat <<- EOF > "${basedir}"/etc/apt/sources.list204deb http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware205#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware206207deb http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free non-free-firmware208#deb-src http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free non-free-firmware209210deb http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free non-free-firmware211#deb-src http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free non-free-firmware212213deb http://${DEBIAN_SECURTY} ${release}-security main contrib non-free non-free-firmware214#deb-src http://${DEBIAN_SECURTY} ${release}-security main contrib non-free non-free-firmware215EOF216;;217218sid) # sid is permanent unstable development and has no such thing as updates or security219cat <<- EOF > "${basedir}"/etc/apt/sources.list220deb https://snapshot.debian.org/archive/debian-ports/20221225T084846Z unstable main221#deb http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware222#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware223224#deb http://${DEBIAN_MIRROR} unstable main contrib non-free non-free-firmware225#deb-src http://${DEBIAN_MIRROR} unstable main contrib non-free non-free-firmware226EOF227;;228229xenial|bionic|focal|hirsute|impish|jammy|noble)230cat <<-EOF > "${basedir}"/etc/apt/sources.list231deb http://${UBUNTU_MIRROR} $release main restricted universe multiverse232#deb-src http://${UBUNTU_MIRROR} $release main restricted universe multiverse233234deb http://${UBUNTU_MIRROR} ${release}-security main restricted universe multiverse235#deb-src http://${UBUNTU_MIRROR} ${release}-security main restricted universe multiverse236237deb http://${UBUNTU_MIRROR} ${release}-updates main restricted universe multiverse238#deb-src http://${UBUNTU_MIRROR} ${release}-updates main restricted universe multiverse239240deb http://${UBUNTU_MIRROR} ${release}-backports main restricted universe multiverse241#deb-src http://${UBUNTU_MIRROR} ${release}-backports main restricted universe multiverse242EOF243;;244245raspi)246cat <<-EOF > "${basedir}"/etc/apt/sources.list247deb http://${DEBIAN_MIRROR} bullseye main contrib non-free248#deb-src http://${DEBIAN_MIRROR} bullseye main contrib non-free249250deb http://${DEBIAN_MIRROR} bullseye-updates main contrib non-free251#deb-src http://${DEBIAN_MIRROR} bullseye-updates main contrib non-free252253deb http://${DEBIAN_MIRROR} bullseye-backports main contrib non-free254#deb-src http://${DEBIAN_MIRROR} bullseye-backports main contrib non-free255256deb http://${DEBIAN_SECURTY} bullseye-security main contrib non-free257#deb-src http://${DEBIAN_SECURTY} bullseye-security main contrib non-free258EOF259260cat <<-EOF > "${basedir}"/etc/apt/sources.list.d/raspi.list261deb http://${RASPI_MIRROR} bullseye main262# Uncomment line below then 'apt-get update' to enable 'apt-get source'263#deb-src http://archive.raspberrypi.org/debian/ bullseye main264EOF265266if [ -n "$APT_PROXY" ]; then267install -m 644 files/51cache "${APT_PROXY}/etc/apt/apt.conf.d/51cache"268sed "${basedir}/etc/apt/apt.conf.d/51cache" -i -e "s|APT_PROXY|${APT_PROXY}|"269else270rm -f "${basedir}/etc/apt/apt.conf.d/51cache"271fi272273cat ${EXTER}/packages/raspi/stage0/00-configure-apt/files/raspberrypi.gpg.key | gpg --dearmor > "${basedir}/raspberrypi-archive-stable.gpg"274install -m 644 "${basedir}/raspberrypi-archive-stable.gpg" "${basedir}/etc/apt/trusted.gpg.d/"275;;276esac277278# stage: add armbian repository and install key279#if [[ $DOWNLOAD_MIRROR == "china" ]]; then280# echo "deb https://mirrors.tuna.tsinghua.edu.cn/armbian $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list281#elif [[ $DOWNLOAD_MIRROR == "bfsu" ]]; then282# echo "deb http://mirrors.bfsu.edu.cn/armbian $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list283#else284# echo "deb http://"$([[ $BETA == yes ]] && echo "beta" || echo "apt" )".armbian.com $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list285#fi286287# replace local package server if defined. Suitable for development288#[[ -n $LOCAL_MIRROR ]] && echo "deb http://$LOCAL_MIRROR $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list289290#display_alert "Adding Armbian repository and authentication key" "/etc/apt/sources.list.d/armbian.list" "info"291#cp "${EXTER}"/config/armbian.key "${SDCARD}"292#chroot "${SDCARD}" /bin/bash -c "cat armbian.key | apt-key add - > /dev/null 2>&1"293#rm "${SDCARD}"/armbian.key294}295296297#298# This function retries Git operations to avoid failure in case remote is borked299# If the git team needs to call a remote server, use this function.300#301improved_git()302{303304local realgit=$(command -v git)305local retries=3306local delay=10307local count=1308while [ $count -lt $retries ]; do309$realgit "$@"310if [[ $? -eq 0 || -f .git/index.lock ]]; then311retries=0312break313fi314let count=$count+1315sleep $delay316done317318}319320clean_up_git ()321{322local target_dir=$1323324# Files that are not tracked by git and were added325# when the patch was applied must be removed.326git -C $target_dir clean -qdf327328# Return the files that are tracked by git to the initial state.329git -C $target_dir checkout -qf HEAD330}331332# used : waiter_local_git arg1='value' arg2:'value'333# waiter_local_git \334# url='https://github.com/megous/linux' \335# name='megous' \336# dir='linux-mainline/5.14' \337# branch='orange-pi-5.14' \338# obj=<tag|commit> or tag:$tag ...339# An optional parameter for switching to a git object such as a tag, commit,340# or a specific branch. The object must exist in the local repository.341# This optional parameter takes precedence. If it is specified, then342# the commit state corresponding to the specified git object will be extracted343# to the working directory. Otherwise, the commit corresponding to the top of344# the branch will be extracted.345# The settings for the kernel variables of the original kernel346# VAR_SHALLOW_ORIGINAL=var_origin_kernel must be in the main script347# before calling the function348waiter_local_git ()349{350for arg in $@;do351352case $arg in353url=*|https://*|git://*) eval "local url=${arg/url=/}"354;;355dir=*|/*/*/*) eval "local dir=${arg/dir=/}"356;;357*=*|*:*) eval "local ${arg/:/=}"358;;359esac360361done362363# Required variables cannot be empty.364for var in url name dir branch; do365[ "${var#*=}" == "" ] && exit_with_error "Error in configuration"366done367368local reachability369370# The 'offline' variable must always be set to 'true' or 'false'371if [ "$OFFLINE_WORK" == "yes" ]; then372local offline=true373else374local offline=false375fi376377local work_dir="$(realpath ${EXTER}/cache/sources)/$dir"378mkdir -p $work_dir379cd $work_dir || exit_with_error380381display_alert "Checking git sources" "$dir $url$name/$branch" "info"382383if [ "$(git rev-parse --git-dir 2>/dev/null)" != ".git" ]; then384git init -q .385386# Run in the sub shell to avoid mixing environment variables.387if [ -n "$VAR_SHALLOW_ORIGINAL" ]; then388(389$VAR_SHALLOW_ORIGINAL390391display_alert "Add original git sources" "$dir $name/$branch" "info"392if [ "$(improved_git ls-remote -h $url $branch | \393awk -F'/' '{if (NR == 1) print $NF}')" != "$branch" ];then394display_alert "Bad $branch for $url in $VAR_SHALLOW_ORIGINAL"395exit 177396fi397398git remote add -t $branch $name $url399400# Handle an exception if the initial tag is the top of the branch401# As v5.16 == HEAD402if [ "${start_tag}.1" == "$(improved_git ls-remote -t $url ${start_tag}.1 | \403awk -F'/' '{ print $NF }')" ]404then405improved_git fetch --shallow-exclude=$start_tag $name406else407improved_git fetch --depth 1 $name408fi409improved_git fetch --deepen=1 $name410# For a shallow clone, this works quickly and saves space.411git gc412)413414[ "$?" == "177" ] && exit415fi416fi417418files_for_clean="$(git status -s | wc -l)"419if [ "$files_for_clean" != "0" ];then420display_alert " Cleaning .... " "$files_for_clean files"421clean_up_git $work_dir422fi423424if [ "$name" != "$(git remote show | grep $name)" ];then425git remote add -t $branch $name $url426fi427428if ! $offline; then429for t_name in $(git remote show);do430improved_git fetch $t_name431done432fi433434# When switching, we use the concept of only "detached branch". Therefore,435# we extract the hash from the tag, the branch name, or from the hash itself.436# This serves as a check of the reachability of the extraction.437# We do not use variables that characterize the current state of the git,438# such as `HEAD` and `FETCH_HEAD`.439reachability=false440for var in obj tag commit branch;do441eval pval=\$$var442443if [ -n "$pval" ] && [ "$pval" != *HEAD ]; then444case $var in445obj|tag|commit) obj=$pval ;;446branch) obj=${name}/$branch ;;447esac448449if t_hash=$(git rev-parse $obj 2>/dev/null);then450reachability=true451break452else453display_alert "Variable $var=$obj unreachable for extraction"454fi455fi456done457458if $reachability && [ "$t_hash" != "$(git rev-parse @ 2>/dev/null)" ];then459# Switch "detached branch" as hash460display_alert "Switch $obj = $t_hash"461git checkout -qf $t_hash462else463# the working directory corresponds to the target commit,464# nothing needs to be done465display_alert "Up to date"466fi467}468469# fetch_from_repo <url> <directory> <ref> <ref_subdir>470# <url>: remote repository URL471# <directory>: local directory; subdir for branch/tag will be created472# <ref>:473# branch:name474# tag:name475# head(*)476# commit:hash477#478# *: Implies ref_subdir=no479#480# <ref_subdir>: "yes" to create subdirectory for tag or branch name481#482fetch_from_repo()483{484local url=$1485local dir=$2486local ref=$3487local ref_subdir=$4488489# Set GitHub mirror before anything else touches $url490url=${url//'https://github.com/'/$GITHUB_SOURCE'/'}491492# The 'offline' variable must always be set to 'true' or 'false'493if [ "$OFFLINE_WORK" == "yes" ]; then494local offline=true495else496local offline=false497fi498499[[ -z $ref || ( $ref != tag:* && $ref != branch:* && $ref != head && $ref != commit:* ) ]] && exit_with_error "Error in configuration"500local ref_type=${ref%%:*}501if [[ $ref_type == head ]]; then502local ref_name=HEAD503else504local ref_name=${ref##*:}505fi506507display_alert "Checking git sources" "$dir $ref_name" "info"508509# get default remote branch name without cloning510# local ref_name=$(git ls-remote --symref $url HEAD | grep -o 'refs/heads/\S*' | sed 's%refs/heads/%%')511# for git:// protocol comparing hashes of "git ls-remote -h $url" and "git ls-remote --symref $url HEAD" is needed512513if [[ $ref_subdir == yes ]]; then514local workdir=$dir/$ref_name515else516local workdir=$dir517fi518519mkdir -p "${workdir}" 2>/dev/null || \520exit_with_error "No path or no write permission" "${workdir}"521522cd "${workdir}" || exit523524# check if existing remote URL for the repo or branch does not match current one525# may not be supported by older git versions526# Check the folder as a git repository.527# Then the target URL matches the local URL.528529if [[ "$(git rev-parse --git-dir 2>/dev/null)" == ".git" && \530"$url" != *"$(git remote get-url origin | sed 's/^.*@//' | sed 's/^.*\/\///' 2>/dev/null)" ]]; then531display_alert "Remote URL does not match, removing existing local copy"532rm -rf .git ./*533fi534535if [[ "$(git rev-parse --git-dir 2>/dev/null)" != ".git" ]]; then536display_alert "Creating local copy"537git init -q .538git remote add origin "${url}"539# Here you need to upload from a new address540offline=false541fi542543local changed=false544545# when we work offline we simply return the sources to their original state546if ! $offline; then547local local_hash548local_hash=$(git rev-parse @ 2>/dev/null)549550case $ref_type in551branch)552# TODO: grep refs/heads/$name553local remote_hash554remote_hash=$(improved_git ls-remote -h "${url}" "$ref_name" | head -1 | cut -f1)555[[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true556;;557558tag)559local remote_hash560remote_hash=$(improved_git ls-remote -t "${url}" "$ref_name" | cut -f1)561if [[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]]; then562remote_hash=$(improved_git ls-remote -t "${url}" "$ref_name^{}" | cut -f1)563[[ -z $remote_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true564fi565;;566567head)568local remote_hash569remote_hash=$(improved_git ls-remote "${url}" HEAD | cut -f1)570[[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true571;;572573commit)574[[ -z $local_hash || $local_hash == "@" ]] && changed=true575;;576esac577578fi # offline579580if [[ $changed == true ]]; then581582# remote was updated, fetch and check out updates583display_alert "Fetching updates"584case $ref_type in585branch) improved_git fetch --depth 200 origin "${ref_name}" ;;586tag) improved_git fetch --depth 200 origin tags/"${ref_name}" ;;587head) improved_git fetch --depth 200 origin HEAD ;;588esac589590# commit type needs support for older git servers that doesn't support fetching id directly591if [[ $ref_type == commit ]]; then592593improved_git fetch --depth 200 origin "${ref_name}"594595# cover old type596if [[ $? -ne 0 ]]; then597598display_alert "Commit checkout not supported on this repository. Doing full clone." "" "wrn"599improved_git pull600git checkout -fq "${ref_name}"601display_alert "Checkout out to" "$(git --no-pager log -2 --pretty=format:"$ad%s [%an]" | head -1)" "info"602603else604605display_alert "Checking out"606git checkout -f -q FETCH_HEAD607git clean -qdf608609fi610else611612display_alert "Checking out"613git checkout -f -q FETCH_HEAD614git clean -qdf615616fi617elif [[ -n $(git status -uno --porcelain --ignore-submodules=all) ]]; then618# working directory is not clean619display_alert " Cleaning .... " "$(git status -s | wc -l) files"620621# Return the files that are tracked by git to the initial state.622git checkout -f -q HEAD623624# Files that are not tracked by git and were added625# when the patch was applied must be removed.626git clean -qdf627else628# working directory is clean, nothing to do629display_alert "Up to date"630fi631632if [[ -f .gitmodules ]]; then633display_alert "Updating submodules" "" "ext"634# FML: http://stackoverflow.com/a/17692710635for i in $(git config -f .gitmodules --get-regexp path | awk '{ print $2 }'); do636cd "${workdir}" || exit637local surl sref638surl=$(git config -f .gitmodules --get "submodule.$i.url")639sref=$(git config -f .gitmodules --get "submodule.$i.branch")640if [[ -n $sref ]]; then641sref="branch:$sref"642else643sref="head"644fi645fetch_from_repo "$surl" "$workdir/$i" "$sref"646done647fi648} #############################################################################649650#--------------------------------------------------------------------------------------------------------------------------------651# Let's have unique way of displaying alerts652#--------------------------------------------------------------------------------------------------------------------------------653display_alert()654{655# log function parameters to install.log656[[ -n "${DEST}" ]] && echo "Displaying message: $@" >> "${DEST}"/${LOG_SUBPATH}/output.log657658local tmp=""659[[ -n $2 ]] && tmp="[\e[0;33m $2 \x1B[0m]"660661case $3 in662err)663echo -e "[\e[0;31m error \x1B[0m] $1 $tmp"664;;665666wrn)667echo -e "[\e[0;35m warn \x1B[0m] $1 $tmp"668;;669670ext)671echo -e "[\e[0;32m o.k. \x1B[0m] \e[1;32m$1\x1B[0m $tmp"672;;673674info)675echo -e "[\e[0;32m o.k. \x1B[0m] $1 $tmp"676;;677678*)679echo -e "[\e[0;32m .... \x1B[0m] $1 $tmp"680;;681esac682}683684#--------------------------------------------------------------------------------------------------------------------------------685# fingerprint_image <out_txt_file> [image_filename]686# Saving build summary to the image687#--------------------------------------------------------------------------------------------------------------------------------688fingerprint_image()689{690cat <<-EOF > "${1}"691--------------------------------------------------------------------------------692Title: ${VENDOR} $REVISION ${BOARD^} $DISTRIBUTION $RELEASE $BRANCH693Kernel: Linux $VER694Build date: $(date +'%d.%m.%Y')695Maintainer: $MAINTAINER <$MAINTAINERMAIL>696Sources: https://github.com/orangepi-xunlong/orangepi-build697Support: http://www.orangepi.org/698EOF699700if [ -n "$2" ]; then701cat <<-EOF >> "${1}"702--------------------------------------------------------------------------------703Partitioning configuration: $IMAGE_PARTITION_TABLE offset: $OFFSET704Boot partition type: ${BOOTFS_TYPE:-(none)} ${BOOTSIZE:+"(${BOOTSIZE} MB)"}705Root partition type: $ROOTFS_TYPE ${FIXED_IMAGE_SIZE:+"(${FIXED_IMAGE_SIZE} MB)"}706707CPU configuration: $CPUMIN - $CPUMAX with $GOVERNOR708--------------------------------------------------------------------------------709Verify GPG signature:710gpg --verify $2.img.asc711712Verify image file integrity:713sha256sum --check $2.img.sha714715Prepare SD card (four methodes):716zcat $2.img.gz | pv | dd of=/dev/sdX bs=1M717dd if=$2.img of=/dev/sdX bs=1M718balena-etcher $2.img.gz -d /dev/sdX719balena-etcher $2.img -d /dev/sdX720EOF721fi722723cat <<-EOF >> "${1}"724--------------------------------------------------------------------------------725$(cat "${SRC}"/LICENSE)726--------------------------------------------------------------------------------727EOF728}729730731#--------------------------------------------------------------------------------------------------------------------------------732# Create kernel boot logo from packages/blobs/splash/logo.png and packages/blobs/splash/spinner.gif (animated)733# and place to the file /lib/firmware/bootsplash734#--------------------------------------------------------------------------------------------------------------------------------735function boot_logo ()736{737display_alert "Building kernel splash logo" "$RELEASE" "info"738739LOGO=${EXTER}/packages/blobs/splash/logo.png740LOGO_WIDTH=$(identify $LOGO | cut -d " " -f 3 | cut -d x -f 1)741LOGO_HEIGHT=$(identify $LOGO | cut -d " " -f 3 | cut -d x -f 2)742THROBBER=${EXTER}/packages/blobs/splash/spinner.gif743THROBBER_WIDTH=$(identify $THROBBER | head -1 | cut -d " " -f 3 | cut -d x -f 1)744THROBBER_HEIGHT=$(identify $THROBBER | head -1 | cut -d " " -f 3 | cut -d x -f 2)745convert -alpha remove -background "#000000" $LOGO "${SDCARD}"/tmp/logo.rgb746convert -alpha remove -background "#000000" $THROBBER "${SDCARD}"/tmp/throbber%02d.rgb747${EXTER}/packages/blobs/splash/bootsplash-packer \748--bg_red 0x00 \749--bg_green 0x00 \750--bg_blue 0x00 \751--frame_ms 48 \752--picture \753--pic_width $LOGO_WIDTH \754--pic_height $LOGO_HEIGHT \755--pic_position 0 \756--blob "${SDCARD}"/tmp/logo.rgb \757--picture \758--pic_width $THROBBER_WIDTH \759--pic_height $THROBBER_HEIGHT \760--pic_position 0x05 \761--pic_position_offset 200 \762--pic_anim_type 1 \763--pic_anim_loop 0 \764--blob "${SDCARD}"/tmp/throbber00.rgb \765--blob "${SDCARD}"/tmp/throbber01.rgb \766--blob "${SDCARD}"/tmp/throbber02.rgb \767--blob "${SDCARD}"/tmp/throbber03.rgb \768--blob "${SDCARD}"/tmp/throbber04.rgb \769--blob "${SDCARD}"/tmp/throbber05.rgb \770--blob "${SDCARD}"/tmp/throbber06.rgb \771--blob "${SDCARD}"/tmp/throbber07.rgb \772--blob "${SDCARD}"/tmp/throbber08.rgb \773--blob "${SDCARD}"/tmp/throbber09.rgb \774--blob "${SDCARD}"/tmp/throbber10.rgb \775--blob "${SDCARD}"/tmp/throbber11.rgb \776--blob "${SDCARD}"/tmp/throbber12.rgb \777--blob "${SDCARD}"/tmp/throbber13.rgb \778--blob "${SDCARD}"/tmp/throbber14.rgb \779--blob "${SDCARD}"/tmp/throbber15.rgb \780--blob "${SDCARD}"/tmp/throbber16.rgb \781--blob "${SDCARD}"/tmp/throbber17.rgb \782--blob "${SDCARD}"/tmp/throbber18.rgb \783--blob "${SDCARD}"/tmp/throbber19.rgb \784--blob "${SDCARD}"/tmp/throbber20.rgb \785--blob "${SDCARD}"/tmp/throbber21.rgb \786--blob "${SDCARD}"/tmp/throbber22.rgb \787--blob "${SDCARD}"/tmp/throbber23.rgb \788--blob "${SDCARD}"/tmp/throbber24.rgb \789--blob "${SDCARD}"/tmp/throbber25.rgb \790--blob "${SDCARD}"/tmp/throbber26.rgb \791--blob "${SDCARD}"/tmp/throbber27.rgb \792--blob "${SDCARD}"/tmp/throbber28.rgb \793--blob "${SDCARD}"/tmp/throbber29.rgb \794--blob "${SDCARD}"/tmp/throbber30.rgb \795--blob "${SDCARD}"/tmp/throbber31.rgb \796--blob "${SDCARD}"/tmp/throbber32.rgb \797--blob "${SDCARD}"/tmp/throbber33.rgb \798--blob "${SDCARD}"/tmp/throbber34.rgb \799--blob "${SDCARD}"/tmp/throbber35.rgb \800--blob "${SDCARD}"/tmp/throbber36.rgb \801--blob "${SDCARD}"/tmp/throbber37.rgb \802--blob "${SDCARD}"/tmp/throbber38.rgb \803--blob "${SDCARD}"/tmp/throbber39.rgb \804--blob "${SDCARD}"/tmp/throbber40.rgb \805--blob "${SDCARD}"/tmp/throbber41.rgb \806--blob "${SDCARD}"/tmp/throbber42.rgb \807--blob "${SDCARD}"/tmp/throbber43.rgb \808--blob "${SDCARD}"/tmp/throbber44.rgb \809--blob "${SDCARD}"/tmp/throbber45.rgb \810--blob "${SDCARD}"/tmp/throbber46.rgb \811--blob "${SDCARD}"/tmp/throbber47.rgb \812--blob "${SDCARD}"/tmp/throbber48.rgb \813--blob "${SDCARD}"/tmp/throbber49.rgb \814--blob "${SDCARD}"/tmp/throbber50.rgb \815--blob "${SDCARD}"/tmp/throbber51.rgb \816--blob "${SDCARD}"/tmp/throbber52.rgb \817--blob "${SDCARD}"/tmp/throbber53.rgb \818--blob "${SDCARD}"/tmp/throbber54.rgb \819--blob "${SDCARD}"/tmp/throbber55.rgb \820--blob "${SDCARD}"/tmp/throbber56.rgb \821--blob "${SDCARD}"/tmp/throbber57.rgb \822--blob "${SDCARD}"/tmp/throbber58.rgb \823--blob "${SDCARD}"/tmp/throbber59.rgb \824--blob "${SDCARD}"/tmp/throbber60.rgb \825--blob "${SDCARD}"/tmp/throbber61.rgb \826--blob "${SDCARD}"/tmp/throbber62.rgb \827--blob "${SDCARD}"/tmp/throbber63.rgb \828--blob "${SDCARD}"/tmp/throbber64.rgb \829--blob "${SDCARD}"/tmp/throbber65.rgb \830--blob "${SDCARD}"/tmp/throbber66.rgb \831--blob "${SDCARD}"/tmp/throbber67.rgb \832--blob "${SDCARD}"/tmp/throbber68.rgb \833--blob "${SDCARD}"/tmp/throbber69.rgb \834--blob "${SDCARD}"/tmp/throbber70.rgb \835--blob "${SDCARD}"/tmp/throbber71.rgb \836--blob "${SDCARD}"/tmp/throbber72.rgb \837--blob "${SDCARD}"/tmp/throbber73.rgb \838--blob "${SDCARD}"/tmp/throbber74.rgb \839"${SDCARD}"/lib/firmware/bootsplash.orangepi >/dev/null 2>&1840if [[ $BOOT_LOGO == yes || $BOOT_LOGO == desktop && $BUILD_DESKTOP == yes && $RELEASE != buster ]]; then841[[ -f "${SDCARD}"/boot/orangepiEnv.txt ]] && grep -q '^bootlogo' "${SDCARD}"/boot/orangepiEnv.txt && \842sed -i 's/^bootlogo.*/bootlogo=true/' "${SDCARD}"/boot/orangepiEnv.txt || echo 'bootlogo=true' >> "${SDCARD}"/boot/orangepiEnv.txt843[[ -f "${SDCARD}"/boot/boot.ini ]] && sed -i 's/^setenv bootlogo.*/setenv bootlogo "true"/' "${SDCARD}"/boot/boot.ini844fi845# enable additional services846chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-ask-password-console.path >/dev/null 2>&1"847chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-hide-when-booted.service >/dev/null 2>&1"848chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-show-on-shutdown.service >/dev/null 2>&1"849}850851852853DISTRIBUTIONS_DESC_DIR="external/config/distributions"854855function distro_menu ()856{857# create a select menu for choosing a distribution based EXPERT status858859local distrib_dir="${1}"860861if [[ -d "${distrib_dir}" && -f "${distrib_dir}/support" ]]; then862local support_level="$(cat "${distrib_dir}/support")"863if [[ "${support_level}" != "supported" && $EXPERT != "yes" ]]; then864:865else866local distro_codename="$(basename "${distrib_dir}")"867local distro_fullname="$(cat "${distrib_dir}/name")"868local expert_infos=""869[[ $EXPERT == "yes" ]] && expert_infos="(${support_level})"870871if [[ "${BRANCH}" == "legacy" ]]; then872DISTRIB_TYPE="${DISTRIB_TYPE_LEGACY}"873[[ -z "${DISTRIB_TYPE_LEGACY}" ]] && DISTRIB_TYPE="buster bionic focal"874elif [[ "${BRANCH}" == "current" ]]; then875DISTRIB_TYPE="${DISTRIB_TYPE_CURRENT}"876[[ -z "${DISTRIB_TYPE_CURRENT}" ]] && DISTRIB_TYPE="bullseye bookworm focal jammy noble"877elif [[ "${BRANCH}" == "next" ]]; then878if [[ -n "${DISTRIB_TYPE_NEXT}" ]]; then879DISTRIB_TYPE="${DISTRIB_TYPE_NEXT}"880else881DISTRIB_TYPE="${DISTRIB_TYPE_CURRENT}"882[[ -z "${DISTRIB_TYPE_CURRENT}" ]] && DISTRIB_TYPE="bullseye bookworm focal jammy noble"883fi884fi885886if [[ "${DISTRIB_TYPE}" =~ "${distro_codename}" ]]; then887options+=("${distro_codename}" "${distro_fullname} ${expert_infos}")888fi889fi890fi891}892893function distros_options() {894for distrib_dir in "${DISTRIBUTIONS_DESC_DIR}/"*; do895distro_menu "${distrib_dir}"896done897}898899function set_distribution_status() {900901local distro_support_desc_filepath="${SRC}/${DISTRIBUTIONS_DESC_DIR}/${RELEASE}/support"902if [[ ! -f "${distro_support_desc_filepath}" ]]; then903exit_with_error "Distribution ${distribution_name} does not exist"904else905DISTRIBUTION_STATUS="$(cat "${distro_support_desc_filepath}")"906fi907908[[ "${DISTRIBUTION_STATUS}" != "supported" ]] && [[ "${EXPERT}" != "yes" ]] && exit_with_error "Orange Pi ${RELEASE} is unsupported and, therefore, only available to experts (EXPERT=yes)"909910}911912adding_packages()913{914# add deb files to repository if they are not already there915916display_alert "Checking and adding to repository $release" "$3" "ext"917for f in "${DEB_STORAGE}${2}"/*.deb918do919local name version arch920name=$(dpkg-deb -I "${f}" | grep Package | awk '{print $2}')921version=$(dpkg-deb -I "${f}" | grep Version | awk '{print $2}')922arch=$(dpkg-deb -I "${f}" | grep Architecture | awk '{print $2}')923# add if not already there924aptly repo search -architectures="${arch}" -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${1}" 'Name (% '${name}'), $Version (='${version}'), $Architecture (='${arch}')' &>/dev/null925if [[ $? -ne 0 ]]; then926display_alert "Adding ${1}" "$name" "info"927aptly repo add -force-replace=true -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${1}" "${f}" &>/dev/null928fi929done930931}932933934935936addtorepo()937{938# create repository939# parameter "remove" dumps all and creates new940# parameter "delete" remove incoming directory if publishing is succesful941# function: cycle trough distributions942943local distributions=("stretch" "bionic" "buster" "bullseye" "bookworm" "focal" "hirsute" "jammy" "noble" "sid")944#local distributions=($(grep -rw config/distributions/*/ -e 'supported' | cut -d"/" -f3))945local errors=0946947for release in "${distributions[@]}"; do948949local forceoverwrite=""950951# let's drop from publish if exits952if [[ -n $(aptly publish list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}") ]]; then953aptly publish drop -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" > /dev/null 2>&1954fi955956# create local repository if not exist957if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}") ]]; then958display_alert "Creating section" "main" "info"959aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="main" \960-comment="Armbian main repository" "${release}" >/dev/null961fi962963if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "^utils") ]]; then964aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="utils" \965-comment="Armbian utilities (backwards compatibility)" utils >/dev/null966fi967if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}-utils") ]]; then968aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="${release}-utils" \969-comment="Armbian ${release} utilities" "${release}-utils" >/dev/null970fi971if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}-desktop") ]]; then972aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="${release}-desktop" \973-comment="Armbian ${release} desktop" "${release}-desktop" >/dev/null974fi975976977# adding main978if find "${DEB_STORAGE}"/ -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then979adding_packages "$release" "" "main"980else981aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "${SCRIPTPATH}config/templates/example.deb" >/dev/null982fi983984local COMPONENTS="main"985986# adding main distribution packages987if find "${DEB_STORAGE}/${release}" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then988adding_packages "${release}-utils" "/${release}" "release packages"989else990# workaround - add dummy package to not trigger error991aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "${SCRIPTPATH}config/templates/example.deb" >/dev/null992fi993994# adding release-specific utils995if find "${DEB_STORAGE}/extra/${release}-utils" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then996adding_packages "${release}-utils" "/extra/${release}-utils" "release utils"997else998aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-utils" "${SCRIPTPATH}config/templates/example.deb" >/dev/null999fi1000COMPONENTS="${COMPONENTS} ${release}-utils"10011002# adding desktop1003if find "${DEB_STORAGE}/extra/${release}-desktop" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then1004adding_packages "${release}-desktop" "/extra/${release}-desktop" "desktop"1005else1006# workaround - add dummy package to not trigger error1007aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" "${SCRIPTPATH}config/templates/example.deb" >/dev/null1008fi1009COMPONENTS="${COMPONENTS} ${release}-desktop"10101011local mainnum utilnum desknum1012mainnum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | grep "Number of packages" | awk '{print $NF}')1013utilnum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | grep "Number of packages" | awk '{print $NF}')1014desknum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-utils" | grep "Number of packages" | awk '{print $NF}')10151016if [ $mainnum -gt 0 ] && [ $utilnum -gt 0 ] && [ $desknum -gt 0 ]; then10171018# publish1019aptly publish \1020-acquire-by-hash \1021-passphrase="${GPG_PASS}" \1022-origin="Armbian" \1023-label="Armbian" \1024-config="${SCRIPTPATH}config/${REPO_CONFIG}" \1025-component="${COMPONENTS// /,}" \1026-distribution="${release}" repo "${release}" ${COMPONENTS//main/} >/dev/null10271028if [[ $? -ne 0 ]]; then1029display_alert "Publishing failed" "${release}" "err"1030errors=$((errors+1))1031exit 01032fi1033else1034errors=$((errors+1))1035local err_txt=": All components must be present: main, utils and desktop for first build"1036fi10371038done10391040# cleanup1041display_alert "Cleaning repository" "${DEB_STORAGE}" "info"1042aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}"10431044# display what we have1045echo ""1046display_alert "List of local repos" "local" "info"1047(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}") | grep -E packages10481049# remove debs if no errors found1050if [[ $errors -eq 0 ]]; then1051if [[ "$2" == "delete" ]]; then1052display_alert "Purging incoming debs" "all" "ext"1053find "${DEB_STORAGE}" -name "*.deb" -type f -delete1054fi1055else1056display_alert "There were some problems $err_txt" "leaving incoming directory intact" "err"1057fi10581059}10601061106210631064repo-manipulate()1065{1066# repository manipulation1067# "show" displays packages in each repository1068# "server" serve repository - useful for local diagnostics1069# "unique" manually select which package should be removed from all repositories1070# "update" search for new files in output/debs* to add them to repository1071# "purge" leave only last 5 versions10721073local DISTROS=("stretch" "bionic" "buster" "bullseye" "bookworm" "focal" "hirsute" "jammy" "noble" "sid")1074#local DISTROS=($(grep -rw config/distributions/*/ -e 'supported' | cut -d"/" -f3))10751076case $@ in10771078serve)1079# display repository content1080display_alert "Serving content" "common utils" "ext"1081aptly serve -listen=$(ip -f inet addr | grep -Po 'inet \K[\d.]+' | grep -v 127.0.0.1 | head -1):80 -config="${SCRIPTPATH}config/${REPO_CONFIG}"1082exit 01083;;10841085show)1086# display repository content1087for release in "${DISTROS[@]}"; do1088display_alert "Displaying repository contents for" "$release" "ext"1089aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | tail -n +71090aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | tail -n +71091done1092display_alert "Displaying repository contents for" "common utils" "ext"1093aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" utils | tail -n +71094echo "done."1095exit 01096;;10971098unique)1099# which package should be removed from all repositories1100IFS=$'\n'1101while true; do1102LIST=()1103for release in "${DISTROS[@]}"; do1104LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | tail -n +7) )1105LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | tail -n +7) )1106done1107LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" utils | tail -n +7) )1108LIST=( $(echo "${LIST[@]}" | tr ' ' '\n' | sort -u))1109new_list=()1110# create a human readable menu1111for ((n=0;n<$((${#LIST[@]}));n++));1112do1113new_list+=( "${LIST[$n]}" )1114new_list+=( "" )1115done1116LIST=("${new_list[@]}")1117LIST_LENGTH=$((${#LIST[@]}/2));1118exec 3>&11119TARGET_VERSION=$(dialog --cancel-label "Cancel" --backtitle "BACKTITLE" --no-collapse --title "Remove packages from repositories" --clear --menu "Delete" $((9+${LIST_LENGTH})) 82 65 "${LIST[@]}" 2>&1 1>&3)1120exitstatus=$?;1121exec 3>&-1122if [[ $exitstatus -eq 0 ]]; then1123for release in "${DISTROS[@]}"; do1124aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "$TARGET_VERSION"1125aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" "$TARGET_VERSION"1126done1127aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "utils" "$TARGET_VERSION"1128else1129exit 11130fi1131aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}" > /dev/null 2>&11132done1133;;11341135update)1136# display full help test1137# run repository update1138addtorepo "update" ""1139# add a key to repo1140cp "${SCRIPTPATH}"config/armbian.key "${REPO_STORAGE}"/public/1141exit 01142;;11431144purge)1145for release in "${DISTROS[@]}"; do1146repo-remove-old-packages "$release" "armhf" "5"1147repo-remove-old-packages "$release" "arm64" "5"1148repo-remove-old-packages "$release" "amd64" "5"1149repo-remove-old-packages "$release" "all" "5"1150aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&11151done1152exit 01153;;11541155purgeedge)1156for release in "${DISTROS[@]}"; do1157repo-remove-old-packages "$release" "armhf" "3" "edge"1158repo-remove-old-packages "$release" "arm64" "3" "edge"1159repo-remove-old-packages "$release" "amd64" "3" "edge"1160repo-remove-old-packages "$release" "all" "3" "edge"1161aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&11162done1163exit 01164;;116511661167purgesource)1168for release in "${DISTROS[@]}"; do1169aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" 'Name (% *-source*)'1170aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&11171done1172aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}" > /dev/null 2>&11173exit 01174;;1175*)11761177echo -e "Usage: repository show | serve | unique | create | update | purge | purgesource\n"1178echo -e "\n show = display repository content"1179echo -e "\n serve = publish your repositories on current server over HTTP"1180echo -e "\n unique = manually select which package should be removed from all repositories"1181echo -e "\n update = updating repository"1182echo -e "\n purge = removes all but last 5 versions"1183echo -e "\n purgeedge = removes all but last 3 edge versions"1184echo -e "\n purgesource = removes all sources\n\n"1185exit 01186;;11871188esac11891190}11911192119311941195# Removes old packages in the received repo1196#1197# $1: Repository1198# $2: Architecture1199# $3: Amount of packages to keep1200# $4: Additional search pattern1201repo-remove-old-packages() {1202local repo=$11203local arch=$21204local keep=$31205for pkg in $(aptly repo search -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Architecture ($arch)" | grep -v "ERROR: no results" | sort -t '.' -nk4 | grep -e "$4"); do1206local pkg_name1207count=01208pkg_name=$(echo "${pkg}" | cut -d_ -f1)1209for subpkg in $(aptly repo search -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Name ($pkg_name)" | grep -v "ERROR: no results" | sort -rt '.' -nk4); do1210((count+=1))1211if [[ $count -gt $keep ]]; then1212pkg_version=$(echo "${subpkg}" | cut -d_ -f2)1213aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Name ($pkg_name), Version (= $pkg_version)"1214fi1215done1216done1217}12181219122012211222# wait_for_package_manager1223#1224# * installation will break if we try to install when package manager is running1225#1226wait_for_package_manager()1227{1228# exit if package manager is running in the back1229while true; do1230if [[ "$(fuser /var/lib/dpkg/lock 2>/dev/null; echo $?)" != 1 && "$(fuser /var/lib/dpkg/lock-frontend 2>/dev/null; echo $?)" != 1 ]]; then1231display_alert "Package manager is running in the background." "Please wait! Retrying in 30 sec" "wrn"1232sleep 301233else1234break1235fi1236done1237}1238123912401241# Installing debian packages in the orangepi build system.1242# The function accepts four optional parameters:1243# autoupdate - If the installation list is not empty then update first.1244# upgrade, clean - the same name for apt1245# verbose - detailed log for the function1246#1247# list="pkg1 pkg2 pkg3 pkgbadname pkg-1.0 | pkg-2.0 pkg5 (>= 9)"1248# install_pkg_deb upgrade verbose $list1249# or1250# install_pkg_deb autoupdate $list1251#1252# If the package has a bad name, we will see it in the log file.1253# If there is an LOG_OUTPUT_FILE variable and it has a value as1254# the full real path to the log file, then all the information will be there.1255#1256# The LOG_OUTPUT_FILE variable must be defined in the calling function1257# before calling the install_pkg_deb function and unset after.1258#1259install_pkg_deb ()1260{1261local list=""1262local log_file1263local for_install1264local need_autoup=false1265local need_upgrade=false1266local need_clean=false1267local need_verbose=false1268local _line=${BASH_LINENO[0]}1269local _function=${FUNCNAME[1]}1270local _file=$(basename "${BASH_SOURCE[1]}")1271local tmp_file=$(mktemp /tmp/install_log_XXXXX)1272export DEBIAN_FRONTEND=noninteractive12731274list=$(1275for p in $*;do1276case $p in1277autoupdate) need_autoup=true; continue ;;1278upgrade) need_upgrade=true; continue ;;1279clean) need_clean=true; continue ;;1280verbose) need_verbose=true; continue ;;1281\||\(*|*\)) continue ;;1282esac1283echo " $p"1284done1285)12861287if [ -d $(dirname $LOG_OUTPUT_FILE) ]; then1288log_file=${LOG_OUTPUT_FILE}1289else1290log_file="${SRC}/output/${LOG_SUBPATH}/install.log"1291fi12921293# This is necessary first when there is no apt cache.1294if $need_upgrade; then1295apt-get -q update || echo "apt cannot update" >>$tmp_file1296apt-get -y upgrade || echo "apt cannot upgrade" >>$tmp_file1297fi12981299# If the package is not installed, check the latest1300# up-to-date version in the apt cache.1301# Exclude bad package names and send a message to the log.1302for_install=$(1303for p in $list;do1304if $(dpkg-query -W -f '${db:Status-Abbrev}' $p |& awk '/ii/{exit 1}');then1305apt-cache show $p -o APT::Cache::AllVersions=no |& \1306awk -v p=$p -v tmp_file=$tmp_file \1307'/^Package:/{print $2} /^E:/{print "Bad package name: ",p >>tmp_file}'1308fi1309done1310)13111312# This information should be logged.1313if [ -s $tmp_file ]; then1314echo -e "\nInstalling packages in function: $_function" "[$_file:$_line]" \1315>>$log_file1316echo -e "\nIncoming list:" >>$log_file1317printf "%-30s %-30s %-30s %-30s\n" $list >>$log_file1318echo "" >>$log_file1319cat $tmp_file >>$log_file1320fi13211322if [ -n "$for_install" ]; then1323if $need_autoup; then1324apt-get -q update1325apt-get -y upgrade1326fi1327apt-get install -qq -y --no-install-recommends $for_install1328echo -e "\nPackages installed:" >>$log_file1329dpkg-query -W \1330-f '${binary:Package;-27} ${Version;-23}\n' \1331$for_install >>$log_file13321333fi13341335# We will show the status after installation all listed1336if $need_verbose; then1337echo -e "\nstatus after installation:" >>$log_file1338dpkg-query -W \1339-f '${binary:Package;-27} ${Version;-23} [ ${Status} ]\n' \1340$list >>$log_file1341fi13421343if $need_clean;then apt-get clean; fi1344rm $tmp_file1345}1346134713481349# prepare_host_basic1350#1351# * installs only basic packages1352#1353prepare_host_basic()1354{13551356# command:package1 package2 ...1357# list of commands that are neeeded:packages where this command is1358local check_pack install_pack1359local checklist=(1360"whiptail:whiptail"1361"dialog:dialog"1362"fuser:psmisc"1363"getfacl:acl"1364"uuid:uuid uuid-runtime"1365"curl:curl"1366"gpg:gnupg"1367"gawk:gawk"1368"git:git"1369)13701371for check_pack in "${checklist[@]}"; do1372if ! which ${check_pack%:*} >/dev/null; then local install_pack+=${check_pack#*:}" "; fi1373done13741375if [[ -n $install_pack ]]; then1376display_alert "Installing basic packages" "$install_pack"1377sudo bash -c "apt-get -qq update && apt-get install -qq -y --no-install-recommends $install_pack"1378fi13791380}13811382138313841385# prepare_host1386#1387# * checks and installs necessary packages1388# * creates directory structure1389# * changes system settings1390#1391prepare_host()1392{1393display_alert "Preparing" "host" "info"13941395# The 'offline' variable must always be set to 'true' or 'false'1396if [ "$OFFLINE_WORK" == "yes" ]; then1397local offline=true1398else1399local offline=false1400fi14011402# wait until package manager finishes possible system maintanace1403wait_for_package_manager14041405# fix for Locales settings1406if ! grep -q "^en_US.UTF-8 UTF-8" /etc/locale.gen; then1407sudo sed -i 's/# en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen1408sudo locale-gen1409fi14101411export LC_ALL="en_US.UTF-8"14121413# packages list for host1414# NOTE: please sync any changes here with the Dockerfile and Vagrantfile14151416local hostdeps="acl aptly aria2 bc binfmt-support bison btrfs-progs \1417build-essential ca-certificates ccache cpio cryptsetup curl \1418debian-archive-keyring debian-keyring debootstrap device-tree-compiler \1419dialog dirmngr dosfstools dwarves f2fs-tools fakeroot flex gawk \1420gcc-arm-linux-gnueabihf gdisk gpg imagemagick jq kmod libbison-dev \1421libc6-dev-armhf-cross libelf-dev libfdt-dev libfile-fcntllock-perl \1422libfl-dev liblz4-tool libncurses-dev libpython2.7-dev libssl-dev \1423libusb-1.0-0-dev linux-base locales lzop ncurses-base ncurses-term \1424nfs-kernel-server ntpdate p7zip-full parted patchutils pigz pixz \1425pkg-config pv python3-dev python3-distutils qemu-user-static rsync swig \1426systemd-container u-boot-tools udev unzip uuid-dev wget whiptail zip \1427zlib1g-dev gcc-riscv64-linux-gnu"14281429if [[ $(dpkg --print-architecture) == amd64 ]]; then14301431hostdeps+=" distcc lib32ncurses-dev lib32stdc++6 libc6-i386"1432grep -q i386 <(dpkg --print-foreign-architectures) || dpkg --add-architecture i38614331434elif [[ $(dpkg --print-architecture) == arm64 ]]; then14351436hostdeps+=" gcc-arm-linux-gnueabi gcc-arm-none-eabi libc6 libc6-amd64-cross qemu"14371438else14391440display_alert "Please read documentation to set up proper compilation environment"1441display_alert "https://www.armbian.com/using-armbian-tools/"1442exit_with_error "Running this tool on non x86_64 build host is not supported"14431444fi14451446# Add support for Ubuntu 20.04, 21.04 and Mint 20.x1447if [[ $HOSTRELEASE =~ ^(focal|hirsute|jammy|noble|noble|ulyana|ulyssa|bullseye|bookworm|uma)$ ]]; then1448hostdeps+=" python2 python3"1449ln -fs /usr/bin/python2.7 /usr/bin/python21450ln -fs /usr/bin/python2.7 /usr/bin/python1451else1452hostdeps+=" python libpython-dev"1453fi14541455display_alert "Build host OS release" "${HOSTRELEASE:-(unknown)}" "info"14561457# Ubuntu 21.04.x (Hirsute) x86_64 is the only fully supported host OS release1458# Using Docker/VirtualBox/Vagrant is the only supported way to run the build script on other Linux distributions1459#1460# NO_HOST_RELEASE_CHECK overrides the check for a supported host system1461# Disable host OS check at your own risk. Any issues reported with unsupported releases will be closed without discussion1462if [[ -z $HOSTRELEASE || "focal jammy noble" != *"$HOSTRELEASE"* ]]; then1463if [[ $NO_HOST_RELEASE_CHECK == yes ]]; then1464display_alert "You are running on an unsupported system" "${HOSTRELEASE:-(unknown)}" "wrn"1465display_alert "Do not report any errors, warnings or other issues encountered beyond this point" "" "wrn"1466else1467exit_with_error "It seems you ignore documentation and run an unsupported build system: ${HOSTRELEASE:-(unknown)}"1468fi1469fi14701471if grep -qE "(Microsoft|WSL)" /proc/version; then1472if [ -f /.dockerenv ]; then1473display_alert "Building images using Docker on WSL2 may fail" "" "wrn"1474else1475exit_with_error "Windows subsystem for Linux is not a supported build environment"1476fi1477fi14781479if systemd-detect-virt -q -c; then1480display_alert "Running in container" "$(systemd-detect-virt)" "info"1481# disable apt-cacher unless NO_APT_CACHER=no is not specified explicitly1482if [[ $NO_APT_CACHER != no ]]; then1483display_alert "apt-cacher is disabled in containers, set NO_APT_CACHER=no to override" "" "wrn"1484NO_APT_CACHER=yes1485fi1486CONTAINER_COMPAT=yes1487# trying to use nested containers is not a good idea, so don't permit EXTERNAL_NEW=compile1488if [[ $EXTERNAL_NEW == compile ]]; then1489display_alert "EXTERNAL_NEW=compile is not available when running in container, setting to prebuilt" "" "wrn"1490EXTERNAL_NEW=prebuilt1491fi1492SYNC_CLOCK=no1493fi149414951496# Skip verification if you are working offline1497if ! $offline; then14981499# warning: apt-cacher-ng will fail if installed and used both on host and in1500# container/chroot environment with shared network1501# set NO_APT_CACHER=yes to prevent installation errors in such case1502if [[ $NO_APT_CACHER != yes ]]; then hostdeps+=" apt-cacher-ng"; fi15031504export EXTRA_BUILD_DEPS=""1505call_extension_method "add_host_dependencies" <<- 'ADD_HOST_DEPENDENCIES'1506*run before installing host dependencies*1507you can add packages to install, space separated, to ${EXTRA_BUILD_DEPS} here.1508ADD_HOST_DEPENDENCIES15091510if [ -n "${EXTRA_BUILD_DEPS}" ]; then hostdeps+=" ${EXTRA_BUILD_DEPS}"; fi15111512display_alert "Installing build dependencies"1513# don't prompt for apt cacher selection1514sudo echo "apt-cacher-ng apt-cacher-ng/tunnelenable boolean false" | sudo debconf-set-selections15151516LOG_OUTPUT_FILE="${DEST}"/${LOG_SUBPATH}/hostdeps.log1517install_pkg_deb "autoupdate $hostdeps"1518unset LOG_OUTPUT_FILE15191520update-ccache-symlinks15211522export FINAL_HOST_DEPS="$hostdeps ${EXTRA_BUILD_DEPS}"1523call_extension_method "host_dependencies_ready" <<- 'HOST_DEPENDENCIES_READY'1524*run after all host dependencies are installed*1525At this point we can read `${FINAL_HOST_DEPS}`, but changing won't have any effect.1526All the dependencies, including the default/core deps and the ones added via `${EXTRA_BUILD_DEPS}`1527are installed at this point. The system clock has not yet been synced.1528HOST_DEPENDENCIES_READY15291530# sync clock1531if [[ $SYNC_CLOCK != no ]]; then1532display_alert "Syncing clock" "${NTP_SERVER:-pool.ntp.org}" "info"1533ntpdate -s "${NTP_SERVER:-pool.ntp.org}"1534fi15351536# create directory structure1537mkdir -p $SRC/output $EXTER/cache $USERPATCHES_PATH1538if [[ -n $SUDO_USER ]]; then1539chgrp --quiet sudo cache output "${USERPATCHES_PATH}"1540# SGID bit on cache/sources breaks kernel dpkg packaging1541chmod --quiet g+w,g+s output "${USERPATCHES_PATH}"1542# fix existing permissions1543find "${SRC}"/output "${USERPATCHES_PATH}" -type d ! -group sudo -exec chgrp --quiet sudo {} \;1544find "${SRC}"/output "${USERPATCHES_PATH}" -type d ! -perm -g+w,g+s -exec chmod --quiet g+w,g+s {} \;1545fi1546mkdir -p $DEST/debs/{extra,u-boot} $DEST/{config,debug,patch,images} $USERPATCHES_PATH/overlay $EXTER/cache/{debs,sources,hash} $SRC/toolchains $SRC/.tmp15471548# build aarch641549if [[ $(dpkg --print-architecture) == amd64 ]]; then1550if [[ "${SKIP_EXTERNAL_TOOLCHAINS}" != "yes" ]]; then15511552# bind mount toolchain if defined1553if [[ -d "${ARMBIAN_CACHE_TOOLCHAIN_PATH}" ]]; then1554mountpoint -q "${SRC}"/cache/toolchain && umount -l "${SRC}"/cache/toolchain1555mount --bind "${ARMBIAN_CACHE_TOOLCHAIN_PATH}" "${SRC}"/cache/toolchain1556fi15571558display_alert "Checking for external GCC compilers" "" "info"1559# download external Linaro compiler and missing special dependencies since they are needed for certain sources15601561local toolchains=(1562"ky-toolchain-linux-glibc-x86_64-v1.0.1.tar.xz"1563"gcc-linaro-aarch64-none-elf-4.8-2013.11_linux.tar.xz"1564"gcc-linaro-arm-none-eabi-4.8-2014.04_linux.tar.xz"1565"gcc-linaro-arm-linux-gnueabihf-4.8-2014.04_linux.tar.xz"1566"gcc-linaro-4.9.4-2017.01-x86_64_arm-linux-gnueabi.tar.xz"1567"gcc-linaro-4.9.4-2017.01-x86_64_aarch64-linux-gnu.tar.xz"1568"gcc-linaro-5.5.0-2017.10-x86_64_arm-linux-gnueabihf.tar.xz"1569"gcc-linaro-7.4.1-2019.02-x86_64_arm-linux-gnueabi.tar.xz"1570"gcc-linaro-7.4.1-2019.02-x86_64_aarch64-linux-gnu.tar.xz"1571"gcc-arm-9.2-2019.12-x86_64-arm-none-linux-gnueabihf.tar.xz"1572"gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz"1573"gcc-arm-11.2-2022.02-x86_64-arm-none-linux-gnueabihf.tar.xz"1574"gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz"1575)15761577USE_TORRENT_STATUS=${USE_TORRENT}1578USE_TORRENT="no"1579for toolchain in ${toolchains[@]}; do1580download_and_verify "_toolchain" "${toolchain##*/}"1581done1582USE_TORRENT=${USE_TORRENT_STATUS}15831584rm -rf $SRC/toolchains/*.tar.xz*1585local existing_dirs=( $(ls -1 $SRC/toolchains) )1586for dir in ${existing_dirs[@]}; do1587local found=no1588for toolchain in ${toolchains[@]}; do1589local filename=${toolchain##*/}1590local dirname=${filename//.tar.xz}1591[[ $dir == $dirname ]] && found=yes1592done1593if [[ $found == no ]]; then1594display_alert "Removing obsolete toolchain" "$dir"1595rm -rf $SRC/toolchains/$dir1596fi1597done1598else1599display_alert "Ignoring toolchains" "SKIP_EXTERNAL_TOOLCHAINS: ${SKIP_EXTERNAL_TOOLCHAINS}" "info"1600fi1601fi16021603fi # check offline16041605# enable arm binary format so that the cross-architecture chroot environment will work1606if [[ $BUILD_OPT == "image" || $BUILD_OPT == "rootfs" ]]; then1607modprobe -q binfmt_misc1608mountpoint -q /proc/sys/fs/binfmt_misc/ || mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc1609if [[ "$(arch)" != "aarch64" ]]; then1610test -e /proc/sys/fs/binfmt_misc/qemu-arm || update-binfmts --enable qemu-arm1611test -e /proc/sys/fs/binfmt_misc/qemu-aarch64 || update-binfmts --enable qemu-aarch641612fi1613fi16141615[[ ! -f "${USERPATCHES_PATH}"/customize-image.sh ]] && cp "${EXTER}"/config/templates/customize-image.sh.template "${USERPATCHES_PATH}"/customize-image.sh16161617if [[ ! -f "${USERPATCHES_PATH}"/README ]]; then1618rm -f "${USERPATCHES_PATH}"/readme.txt1619echo 'Please read documentation about customizing build configuration' > "${USERPATCHES_PATH}"/README1620echo 'https:/www.orangepi.org' >> "${USERPATCHES_PATH}"/README16211622# create patches directory structure under USERPATCHES_PATH1623find $EXTER/patch -maxdepth 2 -type d ! -name . | sed "s%/.*patch%/$USERPATCHES_PATH%" | xargs mkdir -p1624fi16251626# check free space (basic)1627local freespace=$(findmnt --target "${SRC}" -n -o AVAIL -b 2>/dev/null) # in bytes1628if [[ -n $freespace && $(( $freespace / 1073741824 )) -lt 10 ]]; then1629display_alert "Low free space left" "$(( $freespace / 1073741824 )) GiB" "wrn"1630# pause here since dialog-based menu will hide this message otherwise1631echo -e "Press \e[0;33m<Ctrl-C>\x1B[0m to abort compilation, \e[0;33m<Enter>\x1B[0m to ignore and continue"1632read1633fi1634}16351636163716381639function webseed ()1640{1641# list of mirrors that host our files1642unset text1643# Hardcoded to EU mirrors since1644local CCODE=$(curl -s redirect.armbian.com/geoip | jq '.continent.code' -r)1645WEBSEED=($(curl -s https://redirect.armbian.com/mirrors | jq -r '.'${CCODE}' | .[] | values'))1646# aria2 simply split chunks based on sources count not depending on download speed1647# when selecting china mirrors, use only China mirror, others are very slow there1648if [[ $DOWNLOAD_MIRROR == china ]]; then1649WEBSEED=(1650https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/1651)1652elif [[ $DOWNLOAD_MIRROR == bfsu ]]; then1653WEBSEED=(1654https://mirrors.bfsu.edu.cn/armbian-releases/1655)1656fi1657for toolchain in ${WEBSEED[@]}; do1658text="${text} ${toolchain}${1}"1659done1660text="${text:1}"1661echo "${text}"1662}16631664166516661667download_and_verify()1668{16691670local remotedir=$11671local filename=$21672local localdir=$SRC/toolchains1673local dirname=${filename//.tar.xz}16741675if [[ $DOWNLOAD_MIRROR == china ]]; then1676local server="https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/"1677elif [[ $DOWNLOAD_MIRROR == bfsu ]]; then1678local server="https://mirrors.bfsu.edu.cn/armbian-releases/"1679else1680local server=${ARMBIAN_MIRROR}1681fi16821683if [[ -f ${localdir}/${dirname}/.download-complete ]]; then1684return1685fi16861687if [[ ${filename} == *ky* ]]; then1688server="http://www.iplaystore.cn/"1689remotedir=""1690fi16911692# switch to china mirror if US timeouts1693timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} 2>&1 >/dev/null1694if [[ $? -ne 7 && $? -ne 22 && $? -ne 0 ]]; then1695display_alert "Timeout from $server" "retrying" "info"1696server="https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/"16971698# switch to another china mirror if tuna timeouts1699timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} 2>&1 >/dev/null1700if [[ $? -ne 7 && $? -ne 22 && $? -ne 0 ]]; then1701display_alert "Timeout from $server" "retrying" "info"1702server="https://mirrors.bfsu.edu.cn/armbian-releases/"1703fi1704fi170517061707# check if file exists on remote server before running aria2 downloader1708[[ ! `timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename}` ]] && return17091710cd "${localdir}" || exit17111712# use local control file1713if [[ -f "${EXTER}"/config/torrents/${filename}.asc ]]; then1714local torrent="${EXTER}"/config/torrents/${filename}.torrent1715ln -sf "${EXTER}/config/torrents/${filename}.asc" "${localdir}/${filename}.asc"1716elif [[ ! `timeout 10 curl --head --fail --silent "${server}${remotedir}/${filename}.asc"` ]]; then1717return1718else1719# download control file1720local torrent=${server}$remotedir/${filename}.torrent1721aria2c --download-result=hide --disable-ipv6=true --summary-interval=0 --console-log-level=error --auto-file-renaming=false \1722--continue=false --allow-overwrite=true --dir="${localdir}" ${server}${remotedir}/${filename}.asc $(webseed "$remotedir/${filename}.asc") -o "${filename}.asc"1723[[ $? -ne 0 ]] && display_alert "Failed to download control file" "" "wrn"1724fi17251726# download torrent first1727if [[ ${USE_TORRENT} == "yes" ]]; then17281729display_alert "downloading using torrent network" "$filename"1730local ariatorrent="--summary-interval=0 --auto-save-interval=0 --seed-time=0 --bt-stop-timeout=120 --console-log-level=error \1731--allow-overwrite=true --download-result=hide --rpc-save-upload-metadata=false --auto-file-renaming=false \1732--file-allocation=trunc --continue=true ${torrent} \1733--dht-file-path=$EXTER/cache/.aria2/dht.dat --disable-ipv6=true --stderr --follow-torrent=mem --dir=${localdir}"17341735# exception. It throws error if dht.dat file does not exists. Error suppress needed only at first download.1736if [[ -f $EXTER/cache/.aria2/dht.dat ]]; then1737# shellcheck disable=SC20861738aria2c ${ariatorrent}1739else1740# shellcheck disable=SC20351741aria2c ${ariatorrent} &> "${DEST}"/${LOG_SUBPATH}/torrent.log1742fi1743# mark complete1744[[ $? -eq 0 ]] && touch "${localdir}/${filename}.complete"17451746fi174717481749# direct download if torrent fails1750if [[ ! -f "${localdir}/${filename}.complete" ]]; then1751if [[ ! `timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} 2>&1 >/dev/null` ]]; then1752display_alert "downloading using http(s) network" "$filename"1753aria2c --download-result=hide --rpc-save-upload-metadata=false --console-log-level=error \1754--dht-file-path="${SRC}"/cache/.aria2/dht.dat --disable-ipv6=true --summary-interval=0 --auto-file-renaming=false --dir="${localdir}" ${server}${remotedir}/${filename} $(webseed "${remotedir}/${filename}") -o "${filename}"1755# mark complete1756[[ $? -eq 0 ]] && touch "${localdir}/${filename}.complete" && echo ""17571758fi1759fi17601761if [[ -f ${localdir}/${filename}.asc ]]; then17621763if grep -q 'BEGIN PGP SIGNATURE' "${localdir}/${filename}.asc"; then17641765if [[ ! -d $EXTER/cache/.gpg ]]; then1766mkdir -p $EXTER/cache/.gpg1767chmod 700 $EXTER/cache/.gpg1768touch $EXTER/cache/.gpg/gpg.conf1769chmod 600 $EXTER/cache/.gpg/gpg.conf1770fi17711772# Verify archives with Linaro and Armbian GPG keys17731774if [ x"" != x"${http_proxy}" ]; then1775(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\1776|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \1777--keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy="${http_proxy}" \1778--recv-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)17791780(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\1781|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \1782--keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy="${http_proxy}" \1783--recv-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)1784else1785(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\1786|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \1787--keyserver hkp://keyserver.ubuntu.com:80 \1788--recv-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)17891790(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\1791|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \1792--keyserver hkp://keyserver.ubuntu.com:80 \1793--recv-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)1794fi17951796gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --verify \1797--trust-model always -q "${localdir}/${filename}.asc" >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&11798[[ ${PIPESTATUS[0]} -eq 0 ]] && verified=true && display_alert "Verified" "PGP" "info"17991800else18011802md5sum -c --status "${localdir}/${filename}.asc" && verified=true && display_alert "Verified" "MD5" "info"18031804fi18051806if [[ $verified == true ]]; then1807if [[ "${filename:(-6)}" == "tar.xz" ]]; then18081809display_alert "decompressing"1810pv -p -b -r -c -N "[ .... ] ${filename}" "${filename}" | xz -dc | tar xp --xattrs --no-same-owner --overwrite1811[[ $? -eq 0 ]] && touch "${localdir}/${dirname}/.download-complete"1812fi1813else1814exit_with_error "verification failed"1815fi18161817fi1818}18191820182118221823show_developer_warning()1824{1825local temp_rc1826temp_rc=$(mktemp)1827cat <<-'EOF' > "${temp_rc}"1828screen_color = (WHITE,RED,ON)1829EOF1830local warn_text="You are switching to the \Z1EXPERT MODE\Zn18311832This allows building experimental configurations that are provided1833\Z1AS IS\Zn to developers and expert users,1834\Z1WITHOUT ANY RESPONSIBILITIES\Zn from the Armbian team:18351836- You are using these configurations \Z1AT YOUR OWN RISK\Zn1837- Bug reports related to the dev kernel, CSC, WIP and EOS boards1838\Z1will be closed without a discussion\Zn1839- Forum posts related to dev kernel, CSC, WIP and EOS boards1840should be created in the \Z2\"Community forums\"\Zn section1841"1842DIALOGRC=$temp_rc dialog --title "Expert mode warning" --backtitle "${backtitle}" --colors --defaultno --no-label "I do not agree" \1843--yes-label "I understand and agree" --yesno "$warn_text" "${TTY_Y}" "${TTY_X}"1844[[ $? -ne 0 ]] && exit_with_error "Error switching to the expert mode"1845SHOW_WARNING=no1846}18471848# is a formatted output of the values of variables1849# from the list at the place of the function call.1850#1851# The LOG_OUTPUT_FILE variable must be defined in the calling function1852# before calling the `show_checklist_variables` function and unset after.1853#1854show_checklist_variables ()1855{1856local checklist=$*1857local var pval1858local log_file=${LOG_OUTPUT_FILE:-"${SRC}"/output/${LOG_SUBPATH}/trash.log}1859local _line=${BASH_LINENO[0]}1860local _function=${FUNCNAME[1]}1861local _file=$(basename "${BASH_SOURCE[1]}")18621863echo -e "Show variables in function: $_function" "[$_file:$_line]\n" >>$log_file18641865for var in $checklist;do1866eval pval=\$$var1867echo -e "\n$var =:" >>$log_file1868if [ $(echo "$pval" | awk -F"/" '{print NF}') -ge 4 ];then1869printf "%s\n" $pval >>$log_file1870else1871printf "%-30s %-30s %-30s %-30s\n" $pval >>$log_file1872fi1873done1874}18751876install_wiringop()1877{1878install_deb_chroot "$EXTER/cache/debs/${ARCH}/wiringpi-2.58-1.deb"1879chroot "${SDCARD}" /bin/bash -c "apt-mark hold wiringpi" >> "${DEST}"/${LOG_SUBPATH}/install.log 2>&118801881if [[ ${IGNORE_UPDATES} != yes ]]; then18821883fetch_from_repo "https://github.com/orangepi-xunlong/wiringOP.git" "${EXTER}/cache/sources/wiringOP" "branch:next" "yes"1884fetch_from_repo "https://github.com/orangepi-xunlong/wiringOP-Python.git" "${EXTER}/cache/sources/wiringOP-Python" "branch:next" "yes"18851886fi18871888cp ${EXTER}/cache/sources/wiringOP/next ${SDCARD}/usr/src/wiringOP -rfa1889cp ${EXTER}/cache/sources/wiringOP-Python/next ${SDCARD}/usr/src/wiringOP-Python -rfa18901891rm $SDCARD/root/*.deb >/dev/null 2>&11892}189318941895install_310b-npu-driver()1896{1897local driver_path="$EXTER/cache/sources/ascend-driver"1898local driver_name="Ascend-hdk-310b-npu-driver_23.0.5_linux-aarch64-opiaimax.run"1899local driver=${driver_path}/${driver_name}19001901if [[ -f "${driver}" ]]; then1902display_alert "Installing" "$driver_name" "info"1903cp "${driver}" "${SDCARD}/opt/"1904chmod +x "${SDCARD}/opt/Ascend-hdk-310b-npu-driver_23.0.5_linux-aarch64-opiaimax.run"1905chroot "${SDCARD}" /bin/bash -c "/opt/${driver_name} --chroot --full --install-username=orangepi --install-usergroup=orangepi --install-for-all"1906fi1907}190819091910install_docker() {19111912[[ $install_docker != yes ]] && return19131914display_alert "Installing" "docker" "info"1915chroot "${SDCARD}" /bin/bash -c "apt-get install -y -qq apt-transport-https ca-certificates curl gnupg2 software-properties-common >/dev/null 2>&1"19161917case ${RELEASE} in1918buster|bullseye|bookworm)1919distributor_id="debian"1920;;1921xenial|bionic|focal|jammy|noble)1922distributor_id="ubuntu"1923;;1924esac19251926#if [[ ${SELECTED_CONFIGURATION} == desktop ]]; then1927mirror_url=https://repo.huaweicloud.com1928#else1929# mirror_url=https://mirrors.aliyun.com1930#fi19311932chroot "${SDCARD}" /bin/bash -c "curl -fsSL ${mirror_url}/docker-ce/linux/${distributor_id}/gpg | apt-key add -"1933echo "deb [arch=${ARCH}] ${mirror_url}/docker-ce/linux/${distributor_id} ${RELEASE} stable" > "${SDCARD}"/etc/apt/sources.list.d/docker.list19341935chroot "${SDCARD}" /bin/bash -c "apt-get update"1936chroot "${SDCARD}" /bin/bash -c "apt-get install -y -qq docker-ce docker-ce-cli containerd.io"1937chroot "${SDCARD}" /bin/bash -c "sudo groupadd docker"19381939run_on_sdcard "systemctl --no-reload disable docker.service"1940}194119421943#function run_after_build()1944#{1945# chown -R $(logname).$(logname) $BOOTSOURCEDIR1946# chown -R $(logname).$(logname) $LINUXSOURCEDIR1947# chown -R $(logname).$(logname) $USERPATCHES_PATH1948# chown -R $(logname).$(logname) $DEST/{config,debs,debug,images,patch}1949#1950# if [[ $DEBUG_DEB == yes && $BUILD_OPT =~ u-boot|kernel ]]; then1951#1952# [[ -z $REMOTEIP ]] && exit_with_error "The remote IP address has not been set" ""1953# [[ -z $PASS_ROOT ]] && PASS_ROOT="orangepi"1954# [[ -z $MMC_DEV ]] && MMC_DEV="tfcard"1955#1956# #ssh-keygen -f "~/.ssh/known_hosts" -R ${REMOTEIP}1957# local num=01958# while true;1959# do1960# ping ${REMOTEIP} -c 1 > /dev/null 2>&11961#1962# if [[ $? == 0 ]]; then1963# echo " "1964# break1965# fi1966#1967# if [[ $num == 0 ]]; then1968# display_alert "${BOARD} network cannot be connected" "${REMOTEIP}" "wrn"1969# ((num++))1970# fi1971#1972# echo -e ".\c"1973# done1974# display_alert "${BOARD} network is connected" "${REMOTEIP}" "info"1975#1976# if [[ $BUILD_OPT == u-boot ]]; then1977# sshpass -p ${PASS_ROOT} scp ${DEB_STORAGE}/u-boot/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb root@${REMOTEIP}:/root1978# display_alert "Uninstall deb package" "linux-u-boot-${BOARD}-${BRANCH}" "info"1979# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "apt-get purge -y linux-u-boot-${BOARD}-${BRANCH}"1980# display_alert "Install deb package" "${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb" "info"1981# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dpkg -i /root/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb"1982#1983# if [[ $MMC_DEV == emmc ]]; then1984# display_alert "Burn the U-Boot into EMMC" "" "info"1985# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dd bs=1k seek=8 if=/usr/lib/linux-u-boot-legacy-orangepi400_2.1.0_arm64/boot0_sdcard.fex of=/dev/mmcblk0"1986# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dd bs=1k seek=16400 if=/usr/lib/linux-u-boot-legacy-orangepi400_2.1.0_arm64/boot_package.fex of=/dev/mmcblk0"1987# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"1988# else1989# display_alert "Burn the U-Boot into TF card" "" "info"1990# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "nand-sata-install DEBUG_UBOOT"1991# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"1992# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "reboot"1993# fi1994# fi1995#1996# if [[ $BUILD_OPT == kernel ]]; then1997# sshpass -p ${PASS_ROOT} scp ${DEB_STORAGE}/linux-image-${BRANCH}-${LINUXFAMILY}_${REVISION}_${ARCH}.deb root@${REMOTEIP}:/root1998# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "apt-get purge -y linux-image-${BRANCH}-${LINUXFAMILY}"1999# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dpkg -i /root/linux-image-${BRANCH}-${LINUXFAMILY}_${REVISION}_${ARCH}.deb"2000# if [[ $BRANCH == current && $BOARD =~ orangepizero2|orangepi400 ]]; then2001# sshpass -p ${PASS_ROOT} scp ${LINUXSOURCEDIR}/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-*.dtb root@${REMOTEIP}:/boot/dtb/allwinner/2002# fi2003#2004# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"2005# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "reboot"2006# fi2007# fi2008#2009# if [[ $DEBUG_DEB == yes && $BUILD_OPT == image ]]; then2010# scp ${destimg}/*.img ${PC_NAME}@${PC_IP}:${PC_DIR}2011# fi2012#}201320142015