
GitHub Repository: orangepi-xunlong/orangepi-build
Path: blob/next/scripts/general.sh
1
#!/bin/bash
2
#
3
# Copyright (c) 2015 Igor Pecovnik, igor.pecovnik@gma**.com
4
#
5
# This file is licensed under the terms of the GNU General Public
6
# License version 2. This program is licensed "as is" without any
7
# warranty of any kind, whether express or implied.
8
9
10
# Functions:
11
# cleaning
12
# exit_with_error
13
# get_package_list_hash
14
# create_sources_list
15
# clean_up_git
16
# waiter_local_git
17
# fetch_from_repo
18
# improved_git
19
# display_alert
20
# fingerprint_image
21
# distro_menu
22
# addtorepo
23
# repo-remove-old-packages
24
# wait_for_package_manager
25
# install_pkg_deb
26
# prepare_host_basic
27
# prepare_host
28
# webseed
29
# download_and_verify
30
# show_developer_warning
31
# show_checklist_variables
32
33
34
# cleaning <target>
35
#
36
# target: what to clean
37
# "make" - "make clean" for selected kernel and u-boot
38
# "debs" - delete output/debs for board&branch
39
# "ubootdebs" - delete output/debs for uboot&board&branch
40
# "alldebs" - delete output/debs
41
# "cache" - delete output/cache
42
# "oldcache" - remove old output/cache
43
# "images" - delete output/images
44
# "sources" - delete output/sources
45
#
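# Illustrative calls (targets as listed above):
#   cleaning "debs"      # drop u-boot/kernel/board debs for the current board and branch
#   cleaning "oldcache"  # trim cached rootfs archives down to ROOTFS_CACHE_MAX entries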
46
47
cleaning()
48
{
49
case $1 in
50
debs) # delete ${DEB_STORAGE} for current branch and family
51
if [[ -d "${DEB_STORAGE}" ]]; then
52
display_alert "Cleaning ${DEB_STORAGE} for" "$BOARD $BRANCH" "info"
53
# easier than dealing with variable expansion and escaping dashes in file names
54
find "${DEB_STORAGE}" -name "${CHOSEN_UBOOT}_*.deb" -delete
55
find "${DEB_STORAGE}" \( -name "${CHOSEN_KERNEL}_*.deb" -o \
56
-name "orangepi-*.deb" -o \
57
-name "plymouth-theme-orangepi_*.deb" -o \
58
-name "${CHOSEN_KERNEL/image/dtb}_*.deb" -o \
59
-name "${CHOSEN_KERNEL/image/headers}_*.deb" -o \
60
-name "${CHOSEN_KERNEL/image/source}_*.deb" -o \
61
-name "${CHOSEN_KERNEL/image/firmware-image}_*.deb" \) -delete
62
[[ -n $RELEASE ]] && rm -f "${DEB_STORAGE}/${RELEASE}/${CHOSEN_ROOTFS}"_*.deb
63
[[ -n $RELEASE ]] && rm -f "${DEB_STORAGE}/${RELEASE}/orangepi-desktop-${RELEASE}"_*.deb
64
fi
65
;;
66
67
ubootdebs) # delete ${DEB_STORAGE} for uboot, current branch and family
68
if [[ -d "${DEB_STORAGE}" ]]; then
69
display_alert "Cleaning ${DEB_STORAGE} for u-boot" "$BOARD $BRANCH" "info"
70
# easier than dealing with variable expansion and escaping dashes in file names
71
find "${DEB_STORAGE}" -name "${CHOSEN_UBOOT}_*.deb" -delete
72
fi
73
;;
74
75
extras) # delete ${DEB_STORAGE}/extra/$RELEASE for all architectures
76
if [[ -n $RELEASE && -d ${DEB_STORAGE}/extra/$RELEASE ]]; then
77
display_alert "Cleaning ${DEB_STORAGE}/extra for" "$RELEASE" "info"
78
rm -rf "${DEB_STORAGE}/extra/${RELEASE}"
79
fi
80
;;
81
82
alldebs) # delete output/debs
83
[[ -d "${DEB_STORAGE}" ]] && display_alert "Cleaning" "${DEB_STORAGE}" "info" && rm -rf "${DEB_STORAGE}"/*
84
;;
85
86
cache) # delete output/cache
87
[[ -d $EXTER/cache/rootfs ]] && display_alert "Cleaning" "rootfs cache (all)" "info" && find $EXTER/cache/rootfs -type f -delete
88
;;
89
90
images) # delete output/images
91
[[ -d "${DEST}"/images ]] && display_alert "Cleaning" "output/images" "info" && rm -rf "${DEST}"/images/*
92
;;
93
94
sources) # delete output/sources and output/buildpkg
95
[[ -d $EXTER/cache/sources ]] && display_alert "Cleaning" "sources" "info" && rm -rf $EXTER/cache/sources/* "${DEST}"/buildpkg/*
96
;;
97
98
oldcache) # remove old `cache/rootfs` except for the newest 8 files
99
if [[ -d $EXTER/cache/rootfs && $(ls -1 $EXTER/cache/rootfs/*.lz4 2> /dev/null | wc -l) -gt "${ROOTFS_CACHE_MAX}" ]]; then
100
display_alert "Cleaning" "rootfs cache (old)" "info"
101
(cd $EXTER/cache/rootfs; ls -t *.lz4 | sed -e "1,${ROOTFS_CACHE_MAX}d" | xargs -d '\n' rm -f)
102
# Remove signatures if they are present. We use them for internal purposes
103
(cd $EXTER/cache/rootfs; ls -t *.asc | sed -e "1,${ROOTFS_CACHE_MAX}d" | xargs -d '\n' rm -f)
104
fi
105
;;
106
esac
107
}
108
109
# exit_with_error <message> <highlight>
110
#
111
# a way to terminate build process
112
# with verbose error message
113
#
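# Illustrative call ($some_file is a hypothetical variable used only for this example):
#   [[ ! -f $some_file ]] && exit_with_error "File not found" "$some_file"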
114
115
exit_with_error()
116
{
117
local _file
118
local _line=${BASH_LINENO[0]}
119
local _function=${FUNCNAME[1]}
120
local _description=$1
121
local _highlight=$2
122
_file=$(basename "${BASH_SOURCE[1]}")
123
local stacktrace="$(get_extension_hook_stracktrace "${BASH_SOURCE[*]}" "${BASH_LINENO[*]}")"
124
125
display_alert "ERROR in function $_function" "$stacktrace" "err"
126
display_alert "$_description" "$_highlight" "err"
127
display_alert "Process terminated" "" "info"
128
129
if [[ "${ERROR_DEBUG_SHELL}" == "yes" ]]; then
130
display_alert "MOUNT" "${MOUNT}" "err"
131
display_alert "SDCARD" "${SDCARD}" "err"
132
display_alert "Here's a shell." "debug it" "err"
133
bash < /dev/tty || true
134
fi
135
136
# TODO: execute run_after_build here?
137
overlayfs_wrapper "cleanup"
138
# unlock loop device access in case of starvation
139
exec {FD}>/var/lock/orangepi-debootstrap-losetup
140
flock -u "${FD}"
141
142
exit 255
143
}
144
145
# get_package_list_hash
146
#
147
# returns md5 hash for current package list and rootfs cache version
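#
# Illustrative call (the optional argument is mixed into the hash as a cache version tag;
# ROOTFS_CACHE_VERSION is an assumed variable name, used here for illustration only):
#   local cache_hash=$(get_package_list_hash "$ROOTFS_CACHE_VERSION")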
148
149
get_package_list_hash()
150
{
151
local package_arr exclude_arr
152
local list_content
153
read -ra package_arr <<< "${DEBOOTSTRAP_LIST} ${PACKAGE_LIST}"
154
read -ra exclude_arr <<< "${PACKAGE_LIST_EXCLUDE}"
155
( ( printf "%s\n" "${package_arr[@]}"; printf -- "-%s\n" "${exclude_arr[@]}" ) | sort -u; echo "${1}" ) \
156
| md5sum | cut -d' ' -f 1
157
}
158
159
# create_sources_list <release> <basedir>
160
#
161
# <release>: buster|bullseye|bookworm|bionic|focal|jammy|noble|hirsute|sid
162
# <basedir>: path to root directory
163
#
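# Illustrative call, writing apt sources into the rootfs mounted at $SDCARD:
#   create_sources_list "bookworm" "${SDCARD}"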
164
create_sources_list()
165
{
166
local release=$1
167
local basedir=$2
168
[[ -z $basedir ]] && exit_with_error "No basedir passed to create_sources_list"
169
170
case $release in
171
stretch|buster)
172
cat <<-EOF > "${basedir}"/etc/apt/sources.list
173
deb http://${DEBIAN_MIRROR} $release main contrib non-free
174
#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free
175
176
deb http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free
177
#deb-src http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free
178
179
deb http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free
180
#deb-src http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free
181
182
deb http://${DEBIAN_SECURTY} ${release}/updates main contrib non-free
183
#deb-src http://${DEBIAN_SECURTY} ${release}/updates main contrib non-free
184
EOF
185
;;
186
187
bullseye)
188
cat <<-EOF > "${basedir}"/etc/apt/sources.list
189
deb https://${DEBIAN_MIRROR} $release main contrib non-free
190
#deb-src https://${DEBIAN_MIRROR} $release main contrib non-free
191
192
deb https://${DEBIAN_MIRROR} ${release}-updates main contrib non-free
193
#deb-src https://${DEBIAN_MIRROR} ${release}-updates main contrib non-free
194
195
deb https://${DEBIAN_MIRROR} ${release}-backports main contrib non-free
196
#deb-src https://${DEBIAN_MIRROR} ${release}-backports main contrib non-free
197
198
deb https://${DEBIAN_SECURTY} ${release}-security main contrib non-free
199
#deb-src https://${DEBIAN_SECURTY} ${release}-security main contrib non-free
200
EOF
201
;;
202
203
bookworm)
204
cat <<- EOF > "${basedir}"/etc/apt/sources.list
205
deb http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware
206
#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware
207
208
deb http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free non-free-firmware
209
#deb-src http://${DEBIAN_MIRROR} ${release}-updates main contrib non-free non-free-firmware
210
211
deb http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free non-free-firmware
212
#deb-src http://${DEBIAN_MIRROR} ${release}-backports main contrib non-free non-free-firmware
213
214
deb http://${DEBIAN_SECURTY} ${release}-security main contrib non-free non-free-firmware
215
#deb-src http://${DEBIAN_SECURTY} ${release}-security main contrib non-free non-free-firmware
216
EOF
217
;;
218
219
sid) # sid is permanent unstable development and has no such thing as updates or security
220
cat <<- EOF > "${basedir}"/etc/apt/sources.list
221
deb https://snapshot.debian.org/archive/debian-ports/20221225T084846Z unstable main
222
#deb http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware
223
#deb-src http://${DEBIAN_MIRROR} $release main contrib non-free non-free-firmware
224
225
#deb http://${DEBIAN_MIRROR} unstable main contrib non-free non-free-firmware
226
#deb-src http://${DEBIAN_MIRROR} unstable main contrib non-free non-free-firmware
227
EOF
228
;;
229
230
xenial|bionic|focal|hirsute|impish|jammy|noble)
231
cat <<-EOF > "${basedir}"/etc/apt/sources.list
232
deb http://${UBUNTU_MIRROR} $release main restricted universe multiverse
233
#deb-src http://${UBUNTU_MIRROR} $release main restricted universe multiverse
234
235
deb http://${UBUNTU_MIRROR} ${release}-security main restricted universe multiverse
236
#deb-src http://${UBUNTU_MIRROR} ${release}-security main restricted universe multiverse
237
238
deb http://${UBUNTU_MIRROR} ${release}-updates main restricted universe multiverse
239
#deb-src http://${UBUNTU_MIRROR} ${release}-updates main restricted universe multiverse
240
241
deb http://${UBUNTU_MIRROR} ${release}-backports main restricted universe multiverse
242
#deb-src http://${UBUNTU_MIRROR} ${release}-backports main restricted universe multiverse
243
EOF
244
;;
245
246
raspi)
247
cat <<-EOF > "${basedir}"/etc/apt/sources.list
248
deb http://${DEBIAN_MIRROR} bullseye main contrib non-free
249
#deb-src http://${DEBIAN_MIRROR} bullseye main contrib non-free
250
251
deb http://${DEBIAN_MIRROR} bullseye-updates main contrib non-free
252
#deb-src http://${DEBIAN_MIRROR} bullseye-updates main contrib non-free
253
254
deb http://${DEBIAN_MIRROR} bullseye-backports main contrib non-free
255
#deb-src http://${DEBIAN_MIRROR} bullseye-backports main contrib non-free
256
257
deb http://${DEBIAN_SECURTY} bullseye-security main contrib non-free
258
#deb-src http://${DEBIAN_SECURTY} bullseye-security main contrib non-free
259
EOF
260
261
cat <<-EOF > "${basedir}"/etc/apt/sources.list.d/raspi.list
262
deb http://${RASPI_MIRROR} bullseye main
263
# Uncomment line below then 'apt-get update' to enable 'apt-get source'
264
#deb-src http://archive.raspberrypi.org/debian/ bullseye main
265
EOF
266
267
if [ -n "$APT_PROXY" ]; then
268
install -m 644 files/51cache "${basedir}/etc/apt/apt.conf.d/51cache"
269
sed "${basedir}/etc/apt/apt.conf.d/51cache" -i -e "s|APT_PROXY|${APT_PROXY}|"
270
else
271
rm -f "${basedir}/etc/apt/apt.conf.d/51cache"
272
fi
273
274
cat ${EXTER}/packages/raspi/stage0/00-configure-apt/files/raspberrypi.gpg.key | gpg --dearmor > "${basedir}/raspberrypi-archive-stable.gpg"
275
install -m 644 "${basedir}/raspberrypi-archive-stable.gpg" "${basedir}/etc/apt/trusted.gpg.d/"
276
;;
277
esac
278
279
# stage: add armbian repository and install key
280
#if [[ $DOWNLOAD_MIRROR == "china" ]]; then
281
# echo "deb https://mirrors.tuna.tsinghua.edu.cn/armbian $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list
282
#elif [[ $DOWNLOAD_MIRROR == "bfsu" ]]; then
283
# echo "deb http://mirrors.bfsu.edu.cn/armbian $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list
284
#else
285
# echo "deb http://"$([[ $BETA == yes ]] && echo "beta" || echo "apt" )".armbian.com $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list
286
#fi
287
288
# replace local package server if defined. Suitable for development
289
#[[ -n $LOCAL_MIRROR ]] && echo "deb http://$LOCAL_MIRROR $RELEASE main ${RELEASE}-utils ${RELEASE}-desktop" > "${SDCARD}"/etc/apt/sources.list.d/armbian.list
290
291
#display_alert "Adding Armbian repository and authentication key" "/etc/apt/sources.list.d/armbian.list" "info"
292
#cp "${EXTER}"/config/armbian.key "${SDCARD}"
293
#chroot "${SDCARD}" /bin/bash -c "cat armbian.key | apt-key add - > /dev/null 2>&1"
294
#rm "${SDCARD}"/armbian.key
295
}
296
297
298
#
299
# This function retries Git operations to avoid failure in case remote is borked
300
# If a git call needs to reach a remote server, use this function.
301
#
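# Illustrative call (any git subcommand that talks to a remote):
#   improved_git fetch --depth 1 origin master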
302
improved_git()
303
{
304
305
local realgit=$(command -v git)
306
local retries=3
307
local delay=10
308
local count=1
309
while [ $count -le $retries ]; do # make up to $retries attempts
310
$realgit "$@"
311
if [[ $? -eq 0 || -f .git/index.lock ]]; then
312
retries=0
313
break
314
fi
315
let count=$count+1
316
sleep $delay
317
done
318
319
}
320
321
clean_up_git ()
322
{
323
local target_dir=$1
324
325
# Files that are not tracked by git and were added
326
# when the patch was applied must be removed.
327
git -C $target_dir clean -qdf
328
329
# Return the files that are tracked by git to the initial state.
330
git -C $target_dir checkout -qf HEAD
331
}
332
333
# used : waiter_local_git arg1='value' arg2:'value'
334
# waiter_local_git \
335
# url='https://github.com/megous/linux' \
336
# name='megous' \
337
# dir='linux-mainline/5.14' \
338
# branch='orange-pi-5.14' \
339
# obj=<tag|commit> or tag:$tag ...
340
# An optional parameter for switching to a git object such as a tag, commit,
341
# or a specific branch. The object must exist in the local repository.
342
# This optional parameter takes precedence. If it is specified, then
343
# the commit state corresponding to the specified git object will be extracted
344
# to the working directory. Otherwise, the commit corresponding to the top of
345
# the branch will be extracted.
346
# The settings for the kernel variables of the original kernel
347
# VAR_SHALLOW_ORIGINAL=var_origin_kernel must be in the main script
348
# before calling the function
349
waiter_local_git ()
350
{
351
for arg in $@;do
352
353
case $arg in
354
url=*|https://*|git://*) eval "local url=${arg/url=/}"
355
;;
356
dir=*|/*/*/*) eval "local dir=${arg/dir=/}"
357
;;
358
*=*|*:*) eval "local ${arg/:/=}"
359
;;
360
esac
361
362
done
363
364
# Required variables cannot be empty.
365
for var in url name dir branch; do
366
[ "${var#*=}" == "" ] && exit_with_error "Error in configuration"
367
done
368
369
local reachability
370
371
# The 'offline' variable must always be set to 'true' or 'false'
372
if [ "$OFFLINE_WORK" == "yes" ]; then
373
local offline=true
374
else
375
local offline=false
376
fi
377
378
local work_dir="$(realpath ${EXTER}/cache/sources)/$dir"
379
mkdir -p $work_dir
380
cd $work_dir || exit_with_error
381
382
display_alert "Checking git sources" "$dir $url$name/$branch" "info"
383
384
if [ "$(git rev-parse --git-dir 2>/dev/null)" != ".git" ]; then
385
git init -q .
386
387
# Run in the sub shell to avoid mixing environment variables.
388
if [ -n "$VAR_SHALLOW_ORIGINAL" ]; then
389
(
390
$VAR_SHALLOW_ORIGINAL
391
392
display_alert "Add original git sources" "$dir $name/$branch" "info"
393
if [ "$(improved_git ls-remote -h $url $branch | \
394
awk -F'/' '{if (NR == 1) print $NF}')" != "$branch" ];then
395
display_alert "Bad $branch for $url in $VAR_SHALLOW_ORIGINAL"
396
exit 177
397
fi
398
399
git remote add -t $branch $name $url
400
401
# Handle an exception if the initial tag is the top of the branch
402
# As v5.16 == HEAD
403
if [ "${start_tag}.1" == "$(improved_git ls-remote -t $url ${start_tag}.1 | \
404
awk -F'/' '{ print $NF }')" ]
405
then
406
improved_git fetch --shallow-exclude=$start_tag $name
407
else
408
improved_git fetch --depth 1 $name
409
fi
410
improved_git fetch --deepen=1 $name
411
# For a shallow clone, this works quickly and saves space.
412
git gc
413
)
414
415
[ "$?" == "177" ] && exit
416
fi
417
fi
418
419
files_for_clean="$(git status -s | wc -l)"
420
if [ "$files_for_clean" != "0" ];then
421
display_alert " Cleaning .... " "$files_for_clean files"
422
clean_up_git $work_dir
423
fi
424
425
if [ "$name" != "$(git remote show | grep $name)" ];then
426
git remote add -t $branch $name $url
427
fi
428
429
if ! $offline; then
430
for t_name in $(git remote show);do
431
improved_git fetch $t_name
432
done
433
fi
434
435
# When switching, we use the concept of only "detached branch". Therefore,
436
# we extract the hash from the tag, the branch name, or from the hash itself.
437
# This serves as a check of the reachability of the extraction.
438
# We do not use variables that characterize the current state of the git,
439
# such as `HEAD` and `FETCH_HEAD`.
440
reachability=false
441
for var in obj tag commit branch;do
442
eval pval=\$$var
443
444
if [ -n "$pval" ] && [ "$pval" != *HEAD ]; then
445
case $var in
446
obj|tag|commit) obj=$pval ;;
447
branch) obj=${name}/$branch ;;
448
esac
449
450
if t_hash=$(git rev-parse $obj 2>/dev/null);then
451
reachability=true
452
break
453
else
454
display_alert "Variable $var=$obj unreachable for extraction"
455
fi
456
fi
457
done
458
459
if $reachability && [ "$t_hash" != "$(git rev-parse @ 2>/dev/null)" ];then
460
# Switch "detached branch" as hash
461
display_alert "Switch $obj = $t_hash"
462
git checkout -qf $t_hash
463
else
464
# the working directory corresponds to the target commit,
465
# nothing needs to be done
466
display_alert "Up to date"
467
fi
468
}
469
470
# fetch_from_repo <url> <directory> <ref> <ref_subdir>
471
# <url>: remote repository URL
472
# <directory>: local directory; subdir for branch/tag will be created
473
# <ref>:
474
# branch:name
475
# tag:name
476
# head(*)
477
# commit:hash
478
#
479
# *: Implies ref_subdir=no
480
#
481
# <ref_subdir>: "yes" to create subdirectory for tag or branch name
482
#
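# Illustrative call, checking out a tag into its own subdirectory
# (URL and tag are examples only):
#   fetch_from_repo "https://github.com/u-boot/u-boot" "u-boot" "tag:v2022.07" "yes"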
483
fetch_from_repo()
484
{
485
local url=$1
486
local dir=$2
487
local ref=$3
488
local ref_subdir=$4
489
490
# Set GitHub mirror before anything else touches $url
491
url=${url//'https://github.com/'/$GITHUB_SOURCE'/'}
492
493
# The 'offline' variable must always be set to 'true' or 'false'
494
if [ "$OFFLINE_WORK" == "yes" ]; then
495
local offline=true
496
else
497
local offline=false
498
fi
499
500
[[ -z $ref || ( $ref != tag:* && $ref != branch:* && $ref != head && $ref != commit:* ) ]] && exit_with_error "Error in configuration"
501
local ref_type=${ref%%:*}
502
if [[ $ref_type == head ]]; then
503
local ref_name=HEAD
504
else
505
local ref_name=${ref##*:}
506
fi
507
508
display_alert "Checking git sources" "$dir $ref_name" "info"
509
510
# get default remote branch name without cloning
511
# local ref_name=$(git ls-remote --symref $url HEAD | grep -o 'refs/heads/\S*' | sed 's%refs/heads/%%')
512
# for git:// protocol comparing hashes of "git ls-remote -h $url" and "git ls-remote --symref $url HEAD" is needed
513
514
if [[ $ref_subdir == yes ]]; then
515
local workdir=$dir/$ref_name
516
else
517
local workdir=$dir
518
fi
519
520
mkdir -p "${workdir}" 2>/dev/null || \
521
exit_with_error "No path or no write permission" "${workdir}"
522
523
cd "${workdir}" || exit
524
525
# check if existing remote URL for the repo or branch does not match current one
526
# may not be supported by older git versions
527
# Check that the folder is a git repository,
528
# and that the target URL matches the configured local one.
529
530
if [[ "$(git rev-parse --git-dir 2>/dev/null)" == ".git" && \
531
"$url" != *"$(git remote get-url origin | sed 's/^.*@//' | sed 's/^.*\/\///' 2>/dev/null)" ]]; then
532
display_alert "Remote URL does not match, removing existing local copy"
533
rm -rf .git ./*
534
fi
535
536
if [[ "$(git rev-parse --git-dir 2>/dev/null)" != ".git" ]]; then
537
display_alert "Creating local copy"
538
git init -q .
539
git remote add origin "${url}"
540
# a fresh copy must be fetched from the new remote, even in offline mode
541
offline=false
542
fi
543
544
local changed=false
545
546
# when we work offline we simply return the sources to their original state
547
if ! $offline; then
548
local local_hash
549
local_hash=$(git rev-parse @ 2>/dev/null)
550
551
case $ref_type in
552
branch)
553
# TODO: grep refs/heads/$name
554
local remote_hash
555
remote_hash=$(improved_git ls-remote -h "${url}" "$ref_name" | head -1 | cut -f1)
556
[[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true
557
;;
558
559
tag)
560
local remote_hash
561
remote_hash=$(improved_git ls-remote -t "${url}" "$ref_name" | cut -f1)
562
if [[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]]; then
563
remote_hash=$(improved_git ls-remote -t "${url}" "$ref_name^{}" | cut -f1)
564
[[ -z $remote_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true
565
fi
566
;;
567
568
head)
569
local remote_hash
570
remote_hash=$(improved_git ls-remote "${url}" HEAD | cut -f1)
571
[[ -z $local_hash || "${local_hash}" != "${remote_hash}" ]] && changed=true
572
;;
573
574
commit)
575
[[ -z $local_hash || $local_hash == "@" ]] && changed=true
576
;;
577
esac
578
579
fi # offline
580
581
if [[ $changed == true ]]; then
582
583
# remote was updated, fetch and check out updates
584
display_alert "Fetching updates"
585
case $ref_type in
586
branch) improved_git fetch --depth 200 origin "${ref_name}" ;;
587
tag) improved_git fetch --depth 200 origin tags/"${ref_name}" ;;
588
head) improved_git fetch --depth 200 origin HEAD ;;
589
esac
590
591
# the commit ref type needs a fallback for older git servers that don't support fetching a commit id directly
592
if [[ $ref_type == commit ]]; then
593
594
improved_git fetch --depth 200 origin "${ref_name}"
595
596
# fall back for older git servers
597
if [[ $? -ne 0 ]]; then
598
599
display_alert "Commit checkout not supported on this repository. Doing full clone." "" "wrn"
600
improved_git pull
601
git checkout -fq "${ref_name}"
602
display_alert "Checkout out to" "$(git --no-pager log -2 --pretty=format:"$ad%s [%an]" | head -1)" "info"
603
604
else
605
606
display_alert "Checking out"
607
git checkout -f -q FETCH_HEAD
608
git clean -qdf
609
610
fi
611
else
612
613
display_alert "Checking out"
614
git checkout -f -q FETCH_HEAD
615
git clean -qdf
616
617
fi
618
elif [[ -n $(git status -uno --porcelain --ignore-submodules=all) ]]; then
619
# working directory is not clean
620
display_alert " Cleaning .... " "$(git status -s | wc -l) files"
621
622
# Return the files that are tracked by git to the initial state.
623
git checkout -f -q HEAD
624
625
# Files that are not tracked by git and were added
626
# when the patch was applied must be removed.
627
git clean -qdf
628
else
629
# working directory is clean, nothing to do
630
display_alert "Up to date"
631
fi
632
633
if [[ -f .gitmodules ]]; then
634
display_alert "Updating submodules" "" "ext"
635
# FML: http://stackoverflow.com/a/17692710
636
for i in $(git config -f .gitmodules --get-regexp path | awk '{ print $2 }'); do
637
cd "${workdir}" || exit
638
local surl sref
639
surl=$(git config -f .gitmodules --get "submodule.$i.url")
640
sref=$(git config -f .gitmodules --get "submodule.$i.branch")
641
if [[ -n $sref ]]; then
642
sref="branch:$sref"
643
else
644
sref="head"
645
fi
646
fetch_from_repo "$surl" "$workdir/$i" "$sref"
647
done
648
fi
649
} #############################################################################
650
651
#--------------------------------------------------------------------------------------------------------------------------------
652
# Let's have a unique way of displaying alerts
653
#--------------------------------------------------------------------------------------------------------------------------------
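# Illustrative calls (second argument is the highlighted part, third selects the level):
#   display_alert "Compiling u-boot" "$BOARD $BRANCH" "info"
#   display_alert "Download failed" "$url" "err"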
654
display_alert()
655
{
656
# log function parameters to output.log
657
[[ -n "${DEST}" ]] && echo "Displaying message: $@" >> "${DEST}"/${LOG_SUBPATH}/output.log
658
659
local tmp=""
660
[[ -n $2 ]] && tmp="[\e[0;33m $2 \x1B[0m]"
661
662
case $3 in
663
err)
664
echo -e "[\e[0;31m error \x1B[0m] $1 $tmp"
665
;;
666
667
wrn)
668
echo -e "[\e[0;35m warn \x1B[0m] $1 $tmp"
669
;;
670
671
ext)
672
echo -e "[\e[0;32m o.k. \x1B[0m] \e[1;32m$1\x1B[0m $tmp"
673
;;
674
675
info)
676
echo -e "[\e[0;32m o.k. \x1B[0m] $1 $tmp"
677
;;
678
679
*)
680
echo -e "[\e[0;32m .... \x1B[0m] $1 $tmp"
681
;;
682
esac
683
}
684
685
#--------------------------------------------------------------------------------------------------------------------------------
686
# fingerprint_image <out_txt_file> [image_filename]
687
# Saving build summary to the image
688
#--------------------------------------------------------------------------------------------------------------------------------
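# Illustrative call ($version is a hypothetical image name used only for this example):
#   fingerprint_image "${DEST}/images/${version}.txt" "${version}"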
689
fingerprint_image()
690
{
691
cat <<-EOF > "${1}"
692
--------------------------------------------------------------------------------
693
Title: ${VENDOR} $REVISION ${BOARD^} $DISTRIBUTION $RELEASE $BRANCH
694
Kernel: Linux $VER
695
Build date: $(date +'%d.%m.%Y')
696
Maintainer: $MAINTAINER <$MAINTAINERMAIL>
697
Sources: https://github.com/orangepi-xunlong/orangepi-build
698
Support: http://www.orangepi.org/
699
EOF
700
701
if [ -n "$2" ]; then
702
cat <<-EOF >> "${1}"
703
--------------------------------------------------------------------------------
704
Partitioning configuration: $IMAGE_PARTITION_TABLE offset: $OFFSET
705
Boot partition type: ${BOOTFS_TYPE:-(none)} ${BOOTSIZE:+"(${BOOTSIZE} MB)"}
706
Root partition type: $ROOTFS_TYPE ${FIXED_IMAGE_SIZE:+"(${FIXED_IMAGE_SIZE} MB)"}
707
708
CPU configuration: $CPUMIN - $CPUMAX with $GOVERNOR
709
--------------------------------------------------------------------------------
710
Verify GPG signature:
711
gpg --verify $2.img.asc
712
713
Verify image file integrity:
714
sha256sum --check $2.img.sha
715
716
Prepare SD card (four methods):
717
zcat $2.img.gz | pv | dd of=/dev/sdX bs=1M
718
dd if=$2.img of=/dev/sdX bs=1M
719
balena-etcher $2.img.gz -d /dev/sdX
720
balena-etcher $2.img -d /dev/sdX
721
EOF
722
fi
723
724
cat <<-EOF >> "${1}"
725
--------------------------------------------------------------------------------
726
$(cat "${SRC}"/LICENSE)
727
--------------------------------------------------------------------------------
728
EOF
729
}
730
731
732
#--------------------------------------------------------------------------------------------------------------------------------
733
# Create kernel boot logo from packages/blobs/splash/logo.png and packages/blobs/splash/spinner.gif (animated)
734
# and place to the file /lib/firmware/bootsplash
735
#--------------------------------------------------------------------------------------------------------------------------------
736
function boot_logo ()
737
{
738
display_alert "Building kernel splash logo" "$RELEASE" "info"
739
740
LOGO=${EXTER}/packages/blobs/splash/logo.png
741
LOGO_WIDTH=$(identify $LOGO | cut -d " " -f 3 | cut -d x -f 1)
742
LOGO_HEIGHT=$(identify $LOGO | cut -d " " -f 3 | cut -d x -f 2)
743
THROBBER=${EXTER}/packages/blobs/splash/spinner.gif
744
THROBBER_WIDTH=$(identify $THROBBER | head -1 | cut -d " " -f 3 | cut -d x -f 1)
745
THROBBER_HEIGHT=$(identify $THROBBER | head -1 | cut -d " " -f 3 | cut -d x -f 2)
746
convert -alpha remove -background "#000000" $LOGO "${SDCARD}"/tmp/logo.rgb
747
convert -alpha remove -background "#000000" $THROBBER "${SDCARD}"/tmp/throbber%02d.rgb
748
${EXTER}/packages/blobs/splash/bootsplash-packer \
749
--bg_red 0x00 \
750
--bg_green 0x00 \
751
--bg_blue 0x00 \
752
--frame_ms 48 \
753
--picture \
754
--pic_width $LOGO_WIDTH \
755
--pic_height $LOGO_HEIGHT \
756
--pic_position 0 \
757
--blob "${SDCARD}"/tmp/logo.rgb \
758
--picture \
759
--pic_width $THROBBER_WIDTH \
760
--pic_height $THROBBER_HEIGHT \
761
--pic_position 0x05 \
762
--pic_position_offset 200 \
763
--pic_anim_type 1 \
764
--pic_anim_loop 0 \
765
--blob "${SDCARD}"/tmp/throbber00.rgb \
766
--blob "${SDCARD}"/tmp/throbber01.rgb \
767
--blob "${SDCARD}"/tmp/throbber02.rgb \
768
--blob "${SDCARD}"/tmp/throbber03.rgb \
769
--blob "${SDCARD}"/tmp/throbber04.rgb \
770
--blob "${SDCARD}"/tmp/throbber05.rgb \
771
--blob "${SDCARD}"/tmp/throbber06.rgb \
772
--blob "${SDCARD}"/tmp/throbber07.rgb \
773
--blob "${SDCARD}"/tmp/throbber08.rgb \
774
--blob "${SDCARD}"/tmp/throbber09.rgb \
775
--blob "${SDCARD}"/tmp/throbber10.rgb \
776
--blob "${SDCARD}"/tmp/throbber11.rgb \
777
--blob "${SDCARD}"/tmp/throbber12.rgb \
778
--blob "${SDCARD}"/tmp/throbber13.rgb \
779
--blob "${SDCARD}"/tmp/throbber14.rgb \
780
--blob "${SDCARD}"/tmp/throbber15.rgb \
781
--blob "${SDCARD}"/tmp/throbber16.rgb \
782
--blob "${SDCARD}"/tmp/throbber17.rgb \
783
--blob "${SDCARD}"/tmp/throbber18.rgb \
784
--blob "${SDCARD}"/tmp/throbber19.rgb \
785
--blob "${SDCARD}"/tmp/throbber20.rgb \
786
--blob "${SDCARD}"/tmp/throbber21.rgb \
787
--blob "${SDCARD}"/tmp/throbber22.rgb \
788
--blob "${SDCARD}"/tmp/throbber23.rgb \
789
--blob "${SDCARD}"/tmp/throbber24.rgb \
790
--blob "${SDCARD}"/tmp/throbber25.rgb \
791
--blob "${SDCARD}"/tmp/throbber26.rgb \
792
--blob "${SDCARD}"/tmp/throbber27.rgb \
793
--blob "${SDCARD}"/tmp/throbber28.rgb \
794
--blob "${SDCARD}"/tmp/throbber29.rgb \
795
--blob "${SDCARD}"/tmp/throbber30.rgb \
796
--blob "${SDCARD}"/tmp/throbber31.rgb \
797
--blob "${SDCARD}"/tmp/throbber32.rgb \
798
--blob "${SDCARD}"/tmp/throbber33.rgb \
799
--blob "${SDCARD}"/tmp/throbber34.rgb \
800
--blob "${SDCARD}"/tmp/throbber35.rgb \
801
--blob "${SDCARD}"/tmp/throbber36.rgb \
802
--blob "${SDCARD}"/tmp/throbber37.rgb \
803
--blob "${SDCARD}"/tmp/throbber38.rgb \
804
--blob "${SDCARD}"/tmp/throbber39.rgb \
805
--blob "${SDCARD}"/tmp/throbber40.rgb \
806
--blob "${SDCARD}"/tmp/throbber41.rgb \
807
--blob "${SDCARD}"/tmp/throbber42.rgb \
808
--blob "${SDCARD}"/tmp/throbber43.rgb \
809
--blob "${SDCARD}"/tmp/throbber44.rgb \
810
--blob "${SDCARD}"/tmp/throbber45.rgb \
811
--blob "${SDCARD}"/tmp/throbber46.rgb \
812
--blob "${SDCARD}"/tmp/throbber47.rgb \
813
--blob "${SDCARD}"/tmp/throbber48.rgb \
814
--blob "${SDCARD}"/tmp/throbber49.rgb \
815
--blob "${SDCARD}"/tmp/throbber50.rgb \
816
--blob "${SDCARD}"/tmp/throbber51.rgb \
817
--blob "${SDCARD}"/tmp/throbber52.rgb \
818
--blob "${SDCARD}"/tmp/throbber53.rgb \
819
--blob "${SDCARD}"/tmp/throbber54.rgb \
820
--blob "${SDCARD}"/tmp/throbber55.rgb \
821
--blob "${SDCARD}"/tmp/throbber56.rgb \
822
--blob "${SDCARD}"/tmp/throbber57.rgb \
823
--blob "${SDCARD}"/tmp/throbber58.rgb \
824
--blob "${SDCARD}"/tmp/throbber59.rgb \
825
--blob "${SDCARD}"/tmp/throbber60.rgb \
826
--blob "${SDCARD}"/tmp/throbber61.rgb \
827
--blob "${SDCARD}"/tmp/throbber62.rgb \
828
--blob "${SDCARD}"/tmp/throbber63.rgb \
829
--blob "${SDCARD}"/tmp/throbber64.rgb \
830
--blob "${SDCARD}"/tmp/throbber65.rgb \
831
--blob "${SDCARD}"/tmp/throbber66.rgb \
832
--blob "${SDCARD}"/tmp/throbber67.rgb \
833
--blob "${SDCARD}"/tmp/throbber68.rgb \
834
--blob "${SDCARD}"/tmp/throbber69.rgb \
835
--blob "${SDCARD}"/tmp/throbber70.rgb \
836
--blob "${SDCARD}"/tmp/throbber71.rgb \
837
--blob "${SDCARD}"/tmp/throbber72.rgb \
838
--blob "${SDCARD}"/tmp/throbber73.rgb \
839
--blob "${SDCARD}"/tmp/throbber74.rgb \
840
"${SDCARD}"/lib/firmware/bootsplash.orangepi >/dev/null 2>&1
841
if [[ $BOOT_LOGO == yes || $BOOT_LOGO == desktop && $BUILD_DESKTOP == yes && $RELEASE != buster ]]; then
842
[[ -f "${SDCARD}"/boot/orangepiEnv.txt ]] && grep -q '^bootlogo' "${SDCARD}"/boot/orangepiEnv.txt && \
843
sed -i 's/^bootlogo.*/bootlogo=true/' "${SDCARD}"/boot/orangepiEnv.txt || echo 'bootlogo=true' >> "${SDCARD}"/boot/orangepiEnv.txt
844
[[ -f "${SDCARD}"/boot/boot.ini ]] && sed -i 's/^setenv bootlogo.*/setenv bootlogo "true"/' "${SDCARD}"/boot/boot.ini
845
fi
846
# enable additional services
847
chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-ask-password-console.path >/dev/null 2>&1"
848
chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-hide-when-booted.service >/dev/null 2>&1"
849
chroot "${SDCARD}" /bin/bash -c "systemctl --no-reload enable bootsplash-show-on-shutdown.service >/dev/null 2>&1"
850
}
851
852
853
854
DISTRIBUTIONS_DESC_DIR="external/config/distributions"
855
856
function distro_menu ()
857
{
858
# create a select menu for choosing a distribution based on EXPERT status
859
860
local distrib_dir="${1}"
861
862
if [[ -d "${distrib_dir}" && -f "${distrib_dir}/support" ]]; then
863
local support_level="$(cat "${distrib_dir}/support")"
864
if [[ "${support_level}" != "supported" && $EXPERT != "yes" ]]; then
865
:
866
else
867
local distro_codename="$(basename "${distrib_dir}")"
868
local distro_fullname="$(cat "${distrib_dir}/name")"
869
local expert_infos=""
870
[[ $EXPERT == "yes" ]] && expert_infos="(${support_level})"
871
872
if [[ "${BRANCH}" == "legacy" ]]; then
873
DISTRIB_TYPE="${DISTRIB_TYPE_LEGACY}"
874
[[ -z "${DISTRIB_TYPE_LEGACY}" ]] && DISTRIB_TYPE="buster bionic focal"
875
elif [[ "${BRANCH}" == "current" ]]; then
876
DISTRIB_TYPE="${DISTRIB_TYPE_CURRENT}"
877
[[ -z "${DISTRIB_TYPE_CURRENT}" ]] && DISTRIB_TYPE="bullseye bookworm focal jammy noble"
878
elif [[ "${BRANCH}" == "next" ]]; then
879
if [[ -n "${DISTRIB_TYPE_NEXT}" ]]; then
880
DISTRIB_TYPE="${DISTRIB_TYPE_NEXT}"
881
else
882
DISTRIB_TYPE="${DISTRIB_TYPE_CURRENT}"
883
[[ -z "${DISTRIB_TYPE_CURRENT}" ]] && DISTRIB_TYPE="bullseye bookworm focal jammy noble"
884
fi
885
fi
886
887
if [[ "${DISTRIB_TYPE}" =~ "${distro_codename}" ]]; then
888
options+=("${distro_codename}" "${distro_fullname} ${expert_infos}")
889
fi
890
fi
891
fi
892
}
893
894
function distros_options() {
895
for distrib_dir in "${DISTRIBUTIONS_DESC_DIR}/"*; do
896
distro_menu "${distrib_dir}"
897
done
898
}
899
900
function set_distribution_status() {
901
902
local distro_support_desc_filepath="${SRC}/${DISTRIBUTIONS_DESC_DIR}/${RELEASE}/support"
903
if [[ ! -f "${distro_support_desc_filepath}" ]]; then
904
exit_with_error "Distribution ${distribution_name} does not exist"
905
else
906
DISTRIBUTION_STATUS="$(cat "${distro_support_desc_filepath}")"
907
fi
908
909
[[ "${DISTRIBUTION_STATUS}" != "supported" ]] && [[ "${EXPERT}" != "yes" ]] && exit_with_error "Orange Pi ${RELEASE} is unsupported and, therefore, only available to experts (EXPERT=yes)"
910
911
}
912
913
adding_packages()
914
{
915
# add deb files to repository if they are not already there
916
917
display_alert "Checking and adding to repository $release" "$3" "ext"
918
for f in "${DEB_STORAGE}${2}"/*.deb
919
do
920
local name version arch
921
name=$(dpkg-deb -I "${f}" | grep Package | awk '{print $2}')
922
version=$(dpkg-deb -I "${f}" | grep Version | awk '{print $2}')
923
arch=$(dpkg-deb -I "${f}" | grep Architecture | awk '{print $2}')
924
# add if not already there
925
aptly repo search -architectures="${arch}" -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${1}" 'Name (% '${name}'), $Version (='${version}'), $Architecture (='${arch}')' &>/dev/null
926
if [[ $? -ne 0 ]]; then
927
display_alert "Adding ${1}" "$name" "info"
928
aptly repo add -force-replace=true -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${1}" "${f}" &>/dev/null
929
fi
930
done
931
932
}
933
934
935
936
937
addtorepo()
938
{
939
# create repository
940
# parameter "remove" dumps all and creates new
941
# parameter "delete" remove incoming directory if publishing is succesful
942
# function: cycles through distributions
943
944
local distributions=("stretch" "bionic" "buster" "bullseye" "bookworm" "focal" "hirsute" "jammy" "noble" "sid")
945
#local distributions=($(grep -rw config/distributions/*/ -e 'supported' | cut -d"/" -f3))
946
local errors=0
947
948
for release in "${distributions[@]}"; do
949
950
local forceoverwrite=""
951
952
# drop the published repo for this release if it already exists
953
if [[ -n $(aptly publish list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}") ]]; then
954
aptly publish drop -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" > /dev/null 2>&1
955
fi
956
957
# create local repository if it does not exist
958
if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}") ]]; then
959
display_alert "Creating section" "main" "info"
960
aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="main" \
961
-comment="Armbian main repository" "${release}" >/dev/null
962
fi
963
964
if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "^utils") ]]; then
965
aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="utils" \
966
-comment="Armbian utilities (backwards compatibility)" utils >/dev/null
967
fi
968
if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}-utils") ]]; then
969
aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="${release}-utils" \
970
-comment="Armbian ${release} utilities" "${release}-utils" >/dev/null
971
fi
972
if [[ -z $(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}" -raw | awk '{print $(NF)}' | grep "${release}-desktop") ]]; then
973
aptly repo create -config="${SCRIPTPATH}config/${REPO_CONFIG}" -distribution="${release}" -component="${release}-desktop" \
974
-comment="Armbian ${release} desktop" "${release}-desktop" >/dev/null
975
fi
976
977
978
# adding main
979
if find "${DEB_STORAGE}"/ -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then
980
adding_packages "$release" "" "main"
981
else
982
aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "${SCRIPTPATH}config/templates/example.deb" >/dev/null
983
fi
984
985
local COMPONENTS="main"
986
987
# adding main distribution packages
988
if find "${DEB_STORAGE}/${release}" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then
989
adding_packages "${release}-utils" "/${release}" "release packages"
990
else
991
# workaround - add dummy package to not trigger error
992
aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "${SCRIPTPATH}config/templates/example.deb" >/dev/null
993
fi
994
995
# adding release-specific utils
996
if find "${DEB_STORAGE}/extra/${release}-utils" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then
997
adding_packages "${release}-utils" "/extra/${release}-utils" "release utils"
998
else
999
aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-utils" "${SCRIPTPATH}config/templates/example.deb" >/dev/null
1000
fi
1001
COMPONENTS="${COMPONENTS} ${release}-utils"
1002
1003
# adding desktop
1004
if find "${DEB_STORAGE}/extra/${release}-desktop" -maxdepth 1 -type f -name "*.deb" 2>/dev/null | grep -q .; then
1005
adding_packages "${release}-desktop" "/extra/${release}-desktop" "desktop"
1006
else
1007
# workaround - add dummy package to not trigger error
1008
aptly repo add -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" "${SCRIPTPATH}config/templates/example.deb" >/dev/null
1009
fi
1010
COMPONENTS="${COMPONENTS} ${release}-desktop"
1011
1012
local mainnum utilnum desknum
1013
mainnum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | grep "Number of packages" | awk '{print $NF}')
1014
utilnum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-utils" | grep "Number of packages" | awk '{print $NF}')
1015
desknum=$(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | grep "Number of packages" | awk '{print $NF}')
1016
1017
if [ $mainnum -gt 0 ] && [ $utilnum -gt 0 ] && [ $desknum -gt 0 ]; then
1018
1019
# publish
1020
aptly publish \
1021
-acquire-by-hash \
1022
-passphrase="${GPG_PASS}" \
1023
-origin="Armbian" \
1024
-label="Armbian" \
1025
-config="${SCRIPTPATH}config/${REPO_CONFIG}" \
1026
-component="${COMPONENTS// /,}" \
1027
-distribution="${release}" repo "${release}" ${COMPONENTS//main/} >/dev/null
1028
1029
if [[ $? -ne 0 ]]; then
1030
display_alert "Publishing failed" "${release}" "err"
1031
errors=$((errors+1))
1032
exit 0
1033
fi
1034
else
1035
errors=$((errors+1))
1036
local err_txt=": All components must be present: main, utils and desktop for first build"
1037
fi
1038
1039
done
1040
1041
# cleanup
1042
display_alert "Cleaning repository" "${DEB_STORAGE}" "info"
1043
aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}"
1044
1045
# display what we have
1046
echo ""
1047
display_alert "List of local repos" "local" "info"
1048
(aptly repo list -config="${SCRIPTPATH}config/${REPO_CONFIG}") | grep -E packages
1049
1050
# remove debs if no errors found
1051
if [[ $errors -eq 0 ]]; then
1052
if [[ "$2" == "delete" ]]; then
1053
display_alert "Purging incoming debs" "all" "ext"
1054
find "${DEB_STORAGE}" -name "*.deb" -type f -delete
1055
fi
1056
else
1057
display_alert "There were some problems $err_txt" "leaving incoming directory intact" "err"
1058
fi
1059
1060
}
1061
1062
1063
1064
1065
repo-manipulate()
1066
{
1067
# repository manipulation
1068
# "show" displays packages in each repository
1069
# "server" serve repository - useful for local diagnostics
1070
# "unique" manually select which package should be removed from all repositories
1071
# "update" search for new files in output/debs* to add them to repository
1072
# "purge" leave only last 5 versions
1073
1074
local DISTROS=("stretch" "bionic" "buster" "bullseye" "bookworm" "focal" "hirsute" "jammy" "noble" "sid")
1075
#local DISTROS=($(grep -rw config/distributions/*/ -e 'supported' | cut -d"/" -f3))
1076
1077
case $@ in
1078
1079
serve)
1080
# serve repository content
1081
display_alert "Serving content" "common utils" "ext"
1082
aptly serve -listen=$(ip -f inet addr | grep -Po 'inet \K[\d.]+' | grep -v 127.0.0.1 | head -1):80 -config="${SCRIPTPATH}config/${REPO_CONFIG}"
1083
exit 0
1084
;;
1085
1086
show)
1087
# display repository content
1088
for release in "${DISTROS[@]}"; do
1089
display_alert "Displaying repository contents for" "$release" "ext"
1090
aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | tail -n +7
1091
aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | tail -n +7
1092
done
1093
display_alert "Displaying repository contents for" "common utils" "ext"
1094
aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" utils | tail -n +7
1095
echo "done."
1096
exit 0
1097
;;
1098
1099
unique)
1100
# which package should be removed from all repositories
1101
IFS=$'\n'
1102
while true; do
1103
LIST=()
1104
for release in "${DISTROS[@]}"; do
1105
LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" | tail -n +7) )
1106
LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" | tail -n +7) )
1107
done
1108
LIST+=( $(aptly repo show -with-packages -config="${SCRIPTPATH}config/${REPO_CONFIG}" utils | tail -n +7) )
1109
LIST=( $(echo "${LIST[@]}" | tr ' ' '\n' | sort -u))
1110
new_list=()
1111
# create a human readable menu
1112
for ((n=0;n<$((${#LIST[@]}));n++));
1113
do
1114
new_list+=( "${LIST[$n]}" )
1115
new_list+=( "" )
1116
done
1117
LIST=("${new_list[@]}")
1118
LIST_LENGTH=$((${#LIST[@]}/2));
1119
exec 3>&1
1120
TARGET_VERSION=$(dialog --cancel-label "Cancel" --backtitle "BACKTITLE" --no-collapse --title "Remove packages from repositories" --clear --menu "Delete" $((9+${LIST_LENGTH})) 82 65 "${LIST[@]}" 2>&1 1>&3)
1121
exitstatus=$?;
1122
exec 3>&-
1123
if [[ $exitstatus -eq 0 ]]; then
1124
for release in "${DISTROS[@]}"; do
1125
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" "$TARGET_VERSION"
1126
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}-desktop" "$TARGET_VERSION"
1127
done
1128
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "utils" "$TARGET_VERSION"
1129
else
1130
exit 1
1131
fi
1132
aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}" > /dev/null 2>&1
1133
done
1134
;;
1135
1136
update)
1137
# add new packages from output/debs* and republish the repositories
1138
# run repository update
1139
addtorepo "update" ""
1140
# add a key to repo
1141
cp "${SCRIPTPATH}"config/armbian.key "${REPO_STORAGE}"/public/
1142
exit 0
1143
;;
1144
1145
purge)
1146
for release in "${DISTROS[@]}"; do
1147
repo-remove-old-packages "$release" "armhf" "5"
1148
repo-remove-old-packages "$release" "arm64" "5"
1149
repo-remove-old-packages "$release" "amd64" "5"
1150
repo-remove-old-packages "$release" "all" "5"
1151
aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&1
1152
done
1153
exit 0
1154
;;
1155
1156
purgeedge)
1157
for release in "${DISTROS[@]}"; do
1158
repo-remove-old-packages "$release" "armhf" "3" "edge"
1159
repo-remove-old-packages "$release" "arm64" "3" "edge"
1160
repo-remove-old-packages "$release" "amd64" "3" "edge"
1161
repo-remove-old-packages "$release" "all" "3" "edge"
1162
aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&1
1163
done
1164
exit 0
1165
;;
1166
1167
1168
purgesource)
1169
for release in "${DISTROS[@]}"; do
1170
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${release}" 'Name (% *-source*)'
1171
aptly -config="${SCRIPTPATH}config/${REPO_CONFIG}" -passphrase="${GPG_PASS}" publish update "${release}" > /dev/null 2>&1
1172
done
1173
aptly db cleanup -config="${SCRIPTPATH}config/${REPO_CONFIG}" > /dev/null 2>&1
1174
exit 0
1175
;;
1176
*)
1177
1178
echo -e "Usage: repository show | serve | unique | create | update | purge | purgesource\n"
1179
echo -e "\n show = display repository content"
1180
echo -e "\n serve = publish your repositories on current server over HTTP"
1181
echo -e "\n unique = manually select which package should be removed from all repositories"
1182
echo -e "\n update = updating repository"
1183
echo -e "\n purge = removes all but last 5 versions"
1184
echo -e "\n purgeedge = removes all but last 3 edge versions"
1185
echo -e "\n purgesource = removes all sources\n\n"
1186
exit 0
1187
;;
1188
1189
esac
1190
1191
}
1192
1193
1194
1195
1196
# Removes old packages from the given repo
1197
#
1198
# $1: Repository
1199
# $2: Architecture
1200
# $3: Amount of packages to keep
1201
# $4: Additional search pattern
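#
# Illustrative call, keeping the 5 newest arm64 packages in the jammy repo:
#   repo-remove-old-packages "jammy" "arm64" "5"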
1202
repo-remove-old-packages() {
1203
local repo=$1
1204
local arch=$2
1205
local keep=$3
1206
for pkg in $(aptly repo search -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Architecture ($arch)" | grep -v "ERROR: no results" | sort -t '.' -nk4 | grep -e "$4"); do
1207
local pkg_name
1208
count=0
1209
pkg_name=$(echo "${pkg}" | cut -d_ -f1)
1210
for subpkg in $(aptly repo search -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Name ($pkg_name)" | grep -v "ERROR: no results" | sort -rt '.' -nk4); do
1211
((count+=1))
1212
if [[ $count -gt $keep ]]; then
1213
pkg_version=$(echo "${subpkg}" | cut -d_ -f2)
1214
aptly repo remove -config="${SCRIPTPATH}config/${REPO_CONFIG}" "${repo}" "Name ($pkg_name), Version (= $pkg_version)"
1215
fi
1216
done
1217
done
1218
}
1219
1220
1221
1222
1223
# wait_for_package_manager
1224
#
1225
# * installation will break if we try to install when package manager is running
1226
#
1227
wait_for_package_manager()
1228
{
1229
# wait while the package manager is running in the background
1230
while true; do
1231
if [[ "$(fuser /var/lib/dpkg/lock 2>/dev/null; echo $?)" != 1 && "$(fuser /var/lib/dpkg/lock-frontend 2>/dev/null; echo $?)" != 1 ]]; then
1232
display_alert "Package manager is running in the background." "Please wait! Retrying in 30 sec" "wrn"
1233
sleep 30
1234
else
1235
break
1236
fi
1237
done
1238
}
1239
1240
1241
1242
# Installs Debian packages in the orangepi build system.
1243
# The function accepts four optional parameters:
1244
# autoupdate - If the installation list is not empty then update first.
1245
# upgrade, clean - passed through to the apt commands of the same name
1246
# verbose - detailed log for the function
1247
#
1248
# list="pkg1 pkg2 pkg3 pkgbadname pkg-1.0 | pkg-2.0 pkg5 (>= 9)"
1249
# install_pkg_deb upgrade verbose $list
1250
# or
1251
# install_pkg_deb autoupdate $list
1252
#
1253
# If the package has a bad name, we will see it in the log file.
1254
# If a LOG_OUTPUT_FILE variable is set and its value is
1255
# the full real path to the log file, then all the information will be there.
1256
#
1257
# The LOG_OUTPUT_FILE variable must be defined in the calling function
1258
# before calling the install_pkg_deb function and unset after.
1259
#
1260
install_pkg_deb ()
1261
{
1262
local list=""
1263
local log_file
1264
local for_install
1265
local need_autoup=false
1266
local need_upgrade=false
1267
local need_clean=false
1268
local need_verbose=false
1269
local _line=${BASH_LINENO[0]}
1270
local _function=${FUNCNAME[1]}
1271
local _file=$(basename "${BASH_SOURCE[1]}")
1272
local tmp_file=$(mktemp /tmp/install_log_XXXXX)
1273
export DEBIAN_FRONTEND=noninteractive
1274
1275
list=$(
1276
for p in $*;do
1277
case $p in
1278
autoupdate) need_autoup=true; continue ;;
1279
upgrade) need_upgrade=true; continue ;;
1280
clean) need_clean=true; continue ;;
1281
verbose) need_verbose=true; continue ;;
1282
\||\(*|*\)) continue ;;
1283
esac
1284
echo " $p"
1285
done
1286
)
1287
1288
if [ -d $(dirname $LOG_OUTPUT_FILE) ]; then
1289
log_file=${LOG_OUTPUT_FILE}
1290
else
1291
log_file="${SRC}/output/${LOG_SUBPATH}/install.log"
1292
fi
1293
1294
# This is necessary when there is no apt cache yet.
1295
if $need_upgrade; then
1296
apt-get -q update || echo "apt cannot update" >>$tmp_file
1297
apt-get -y upgrade || echo "apt cannot upgrade" >>$tmp_file
1298
fi
1299
1300
# If the package is not installed, check the latest
1301
# up-to-date version in the apt cache.
1302
# Exclude bad package names and send a message to the log.
1303
for_install=$(
1304
for p in $list;do
1305
if $(dpkg-query -W -f '${db:Status-Abbrev}' $p |& awk '/ii/{exit 1}');then
1306
apt-cache show $p -o APT::Cache::AllVersions=no |& \
1307
awk -v p=$p -v tmp_file=$tmp_file \
1308
'/^Package:/{print $2} /^E:/{print "Bad package name: ",p >>tmp_file}'
1309
fi
1310
done
1311
)
1312
1313
# This information should be logged.
1314
if [ -s $tmp_file ]; then
1315
echo -e "\nInstalling packages in function: $_function" "[$_file:$_line]" \
1316
>>$log_file
1317
echo -e "\nIncoming list:" >>$log_file
1318
printf "%-30s %-30s %-30s %-30s\n" $list >>$log_file
1319
echo "" >>$log_file
1320
cat $tmp_file >>$log_file
1321
fi
1322
1323
if [ -n "$for_install" ]; then
1324
if $need_autoup; then
1325
apt-get -q update
1326
apt-get -y upgrade
1327
fi
1328
apt-get install -qq -y --no-install-recommends $for_install
1329
echo -e "\nPackages installed:" >>$log_file
1330
dpkg-query -W \
1331
-f '${binary:Package;-27} ${Version;-23}\n' \
1332
$for_install >>$log_file
1333
1334
fi
1335
1336
# Show the status of all listed packages after installation
1337
if $need_verbose; then
1338
echo -e "\nstatus after installation:" >>$log_file
1339
dpkg-query -W \
1340
-f '${binary:Package;-27} ${Version;-23} [ ${Status} ]\n' \
1341
$list >>$log_file
1342
fi
1343
1344
if $need_clean;then apt-get clean; fi
1345
rm $tmp_file
1346
}
1347
1348
1349
1350
# prepare_host_basic
1351
#
1352
# * installs only basic packages
1353
#
1354
prepare_host_basic()
1355
{
1356
1357
# command:package1 package2 ...
1358
# list of commands that are needed : packages that provide the command
1359
local check_pack install_pack
1360
local checklist=(
1361
"whiptail:whiptail"
1362
"dialog:dialog"
1363
"fuser:psmisc"
1364
"getfacl:acl"
1365
"uuid:uuid uuid-runtime"
1366
"curl:curl"
1367
"gpg:gnupg"
1368
"gawk:gawk"
1369
"git:git"
1370
)
1371
1372
for check_pack in "${checklist[@]}"; do
1373
if ! which ${check_pack%:*} >/dev/null; then local install_pack+=${check_pack#*:}" "; fi
1374
done
1375
1376
if [[ -n $install_pack ]]; then
1377
display_alert "Installing basic packages" "$install_pack"
1378
sudo bash -c "apt-get -qq update && apt-get install -qq -y --no-install-recommends $install_pack"
1379
fi
1380
1381
}
1382
1383
1384
1385
1386
# prepare_host
1387
#
1388
# * checks and installs necessary packages
1389
# * creates directory structure
1390
# * changes system settings
1391
#
1392
prepare_host()
1393
{
1394
display_alert "Preparing" "host" "info"
1395
1396
# The 'offline' variable must always be set to 'true' or 'false'
1397
if [ "$OFFLINE_WORK" == "yes" ]; then
1398
local offline=true
1399
else
1400
local offline=false
1401
fi
1402
1403
# wait until package manager finishes possible system maintenance
1404
wait_for_package_manager
1405
1406
# fix for Locales settings
1407
if ! grep -q "^en_US.UTF-8 UTF-8" /etc/locale.gen; then
1408
sudo sed -i 's/# en_US.UTF-8/en_US.UTF-8/' /etc/locale.gen
1409
sudo locale-gen
1410
fi
1411
1412
export LC_ALL="en_US.UTF-8"
1413
1414
# packages list for host
1415
# NOTE: please sync any changes here with the Dockerfile and Vagrantfile
1416
1417
local hostdeps="acl aptly aria2 bc binfmt-support bison btrfs-progs \
1418
build-essential ca-certificates ccache cpio cryptsetup curl \
1419
debian-archive-keyring debian-keyring debootstrap device-tree-compiler \
1420
dialog dirmngr dosfstools dwarves f2fs-tools fakeroot flex gawk \
1421
gcc-arm-linux-gnueabihf gdisk gpg imagemagick jq kmod libbison-dev \
1422
libc6-dev-armhf-cross libelf-dev libfdt-dev libfile-fcntllock-perl \
1423
libfl-dev liblz4-tool libncurses-dev libpython2.7-dev libssl-dev \
1424
libusb-1.0-0-dev linux-base locales lzop ncurses-base ncurses-term \
1425
nfs-kernel-server ntpdate p7zip-full parted patchutils pigz pixz \
1426
pkg-config pv python3-dev python3-distutils qemu-user-static rsync swig \
1427
systemd-container u-boot-tools udev unzip uuid-dev wget whiptail zip \
1428
zlib1g-dev gcc-riscv64-linux-gnu"
1429
1430
if [[ $(dpkg --print-architecture) == amd64 ]]; then
1431
1432
hostdeps+=" distcc lib32ncurses-dev lib32stdc++6 libc6-i386"
1433
grep -q i386 <(dpkg --print-foreign-architectures) || dpkg --add-architecture i386
1434
1435
elif [[ $(dpkg --print-architecture) == arm64 ]]; then
1436
1437
hostdeps+=" gcc-arm-linux-gnueabi gcc-arm-none-eabi libc6 libc6-amd64-cross qemu"
1438
1439
else
1440
1441
display_alert "Please read documentation to set up proper compilation environment"
1442
display_alert "https://www.armbian.com/using-armbian-tools/"
1443
exit_with_error "Running this tool on non x86_64 build host is not supported"
1444
1445
fi
1446
1447
# Add python2/python3 handling for recent Ubuntu, Debian and Mint releases
1448
if [[ $HOSTRELEASE =~ ^(focal|hirsute|jammy|noble|ulyana|ulyssa|bullseye|bookworm|uma)$ ]]; then
1449
hostdeps+=" python2 python3"
1450
ln -fs /usr/bin/python2.7 /usr/bin/python2
1451
ln -fs /usr/bin/python2.7 /usr/bin/python
1452
else
1453
hostdeps+=" python libpython-dev"
1454
fi
1455
1456
display_alert "Build host OS release" "${HOSTRELEASE:-(unknown)}" "info"
1457
1458
# Ubuntu Focal/Jammy/Noble x86_64 are the only fully supported host OS releases
1459
# Using Docker/VirtualBox/Vagrant is the only supported way to run the build script on other Linux distributions
1460
#
1461
# NO_HOST_RELEASE_CHECK overrides the check for a supported host system
1462
# Disable host OS check at your own risk. Any issues reported with unsupported releases will be closed without discussion
1463
if [[ -z $HOSTRELEASE || "focal jammy noble" != *"$HOSTRELEASE"* ]]; then
1464
if [[ $NO_HOST_RELEASE_CHECK == yes ]]; then
1465
display_alert "You are running on an unsupported system" "${HOSTRELEASE:-(unknown)}" "wrn"
1466
display_alert "Do not report any errors, warnings or other issues encountered beyond this point" "" "wrn"
1467
else
1468
exit_with_error "It seems you ignore documentation and run an unsupported build system: ${HOSTRELEASE:-(unknown)}"
1469
fi
1470
fi
1471
1472
if grep -qE "(Microsoft|WSL)" /proc/version; then
1473
if [ -f /.dockerenv ]; then
1474
display_alert "Building images using Docker on WSL2 may fail" "" "wrn"
1475
else
1476
exit_with_error "Windows subsystem for Linux is not a supported build environment"
1477
fi
1478
fi
1479
1480
if systemd-detect-virt -q -c; then
1481
display_alert "Running in container" "$(systemd-detect-virt)" "info"
1482
# disable apt-cacher unless NO_APT_CACHER=no is specified explicitly
1483
if [[ $NO_APT_CACHER != no ]]; then
1484
display_alert "apt-cacher is disabled in containers, set NO_APT_CACHER=no to override" "" "wrn"
1485
NO_APT_CACHER=yes
1486
fi
1487
CONTAINER_COMPAT=yes
1488
# trying to use nested containers is not a good idea, so don't permit EXTERNAL_NEW=compile
1489
if [[ $EXTERNAL_NEW == compile ]]; then
1490
display_alert "EXTERNAL_NEW=compile is not available when running in container, setting to prebuilt" "" "wrn"
1491
EXTERNAL_NEW=prebuilt
1492
fi
1493
SYNC_CLOCK=no
1494
fi
1495
1496
# Skip host dependency installation and toolchain downloads when working offline
if ! $offline; then

# warning: apt-cacher-ng will fail if installed and used both on host and in
# container/chroot environment with shared network
# set NO_APT_CACHER=yes to prevent installation errors in such case
if [[ $NO_APT_CACHER != yes ]]; then hostdeps+=" apt-cacher-ng"; fi

export EXTRA_BUILD_DEPS=""
call_extension_method "add_host_dependencies" <<- 'ADD_HOST_DEPENDENCIES'
*run before installing host dependencies*
you can add packages to install, space separated, to ${EXTRA_BUILD_DEPS} here.
ADD_HOST_DEPENDENCIES
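# Illustrative sketch only: an extension implementing this hook could append extra
# host packages, for example (package name chosen arbitrarily):
#   EXTRA_BUILD_DEPS="${EXTRA_BUILD_DEPS} ccache"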

if [ -n "${EXTRA_BUILD_DEPS}" ]; then hostdeps+=" ${EXTRA_BUILD_DEPS}"; fi

display_alert "Installing build dependencies"
# don't prompt for apt cacher selection
echo "apt-cacher-ng apt-cacher-ng/tunnelenable boolean false" | sudo debconf-set-selections

LOG_OUTPUT_FILE="${DEST}"/${LOG_SUBPATH}/hostdeps.log
install_pkg_deb "autoupdate $hostdeps"
unset LOG_OUTPUT_FILE

update-ccache-symlinks

export FINAL_HOST_DEPS="$hostdeps ${EXTRA_BUILD_DEPS}"
call_extension_method "host_dependencies_ready" <<- 'HOST_DEPENDENCIES_READY'
*run after all host dependencies are installed*
At this point we can read `${FINAL_HOST_DEPS}`, but changing it won't have any effect.
All the dependencies, including the default/core deps and the ones added via `${EXTRA_BUILD_DEPS}`
are installed at this point. The system clock has not yet been synced.
HOST_DEPENDENCIES_READY

# sync clock
if [[ $SYNC_CLOCK != no ]]; then
display_alert "Syncing clock" "${NTP_SERVER:-pool.ntp.org}" "info"
ntpdate -s "${NTP_SERVER:-pool.ntp.org}"
fi

# create directory structure
mkdir -p $SRC/output $EXTER/cache $USERPATCHES_PATH
if [[ -n $SUDO_USER ]]; then
chgrp --quiet sudo cache output "${USERPATCHES_PATH}"
# SGID bit on cache/sources breaks kernel dpkg packaging
chmod --quiet g+w,g+s output "${USERPATCHES_PATH}"
# fix existing permissions
find "${SRC}"/output "${USERPATCHES_PATH}" -type d ! -group sudo -exec chgrp --quiet sudo {} \;
find "${SRC}"/output "${USERPATCHES_PATH}" -type d ! -perm -g+w,g+s -exec chmod --quiet g+w,g+s {} \;
fi
mkdir -p $DEST/debs/{extra,u-boot} $DEST/{config,debug,patch,images} $USERPATCHES_PATH/overlay $EXTER/cache/{debs,sources,hash} $SRC/toolchains $SRC/.tmp

# external toolchains are only fetched on x86_64 build hosts
if [[ $(dpkg --print-architecture) == amd64 ]]; then
if [[ "${SKIP_EXTERNAL_TOOLCHAINS}" != "yes" ]]; then

# bind mount toolchain if defined
if [[ -d "${ARMBIAN_CACHE_TOOLCHAIN_PATH}" ]]; then
mountpoint -q "${SRC}"/cache/toolchain && umount -l "${SRC}"/cache/toolchain
mount --bind "${ARMBIAN_CACHE_TOOLCHAIN_PATH}" "${SRC}"/cache/toolchain
fi

display_alert "Checking for external GCC compilers" "" "info"
# download external Linaro/Arm compilers and other special dependencies needed to build certain sources

local toolchains=(
"ky-toolchain-linux-glibc-x86_64-v1.0.1.tar.xz"
"gcc-linaro-aarch64-none-elf-4.8-2013.11_linux.tar.xz"
"gcc-linaro-arm-none-eabi-4.8-2014.04_linux.tar.xz"
"gcc-linaro-arm-linux-gnueabihf-4.8-2014.04_linux.tar.xz"
"gcc-linaro-4.9.4-2017.01-x86_64_arm-linux-gnueabi.tar.xz"
"gcc-linaro-4.9.4-2017.01-x86_64_aarch64-linux-gnu.tar.xz"
"gcc-linaro-5.5.0-2017.10-x86_64_arm-linux-gnueabihf.tar.xz"
"gcc-linaro-7.4.1-2019.02-x86_64_arm-linux-gnueabi.tar.xz"
"gcc-linaro-7.4.1-2019.02-x86_64_aarch64-linux-gnu.tar.xz"
"gcc-arm-9.2-2019.12-x86_64-arm-none-linux-gnueabihf.tar.xz"
"gcc-arm-9.2-2019.12-x86_64-aarch64-none-linux-gnu.tar.xz"
"gcc-arm-11.2-2022.02-x86_64-arm-none-linux-gnueabihf.tar.xz"
"gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz"
)

USE_TORRENT_STATUS=${USE_TORRENT}
USE_TORRENT="no"
for toolchain in ${toolchains[@]}; do
download_and_verify "_toolchain" "${toolchain##*/}"
done
USE_TORRENT=${USE_TORRENT_STATUS}

rm -rf $SRC/toolchains/*.tar.xz*
local existing_dirs=( $(ls -1 $SRC/toolchains) )
for dir in ${existing_dirs[@]}; do
local found=no
for toolchain in ${toolchains[@]}; do
local filename=${toolchain##*/}
local dirname=${filename//.tar.xz}
[[ $dir == $dirname ]] && found=yes
done
if [[ $found == no ]]; then
display_alert "Removing obsolete toolchain" "$dir"
rm -rf $SRC/toolchains/$dir
fi
done
else
display_alert "Ignoring toolchains" "SKIP_EXTERNAL_TOOLCHAINS: ${SKIP_EXTERNAL_TOOLCHAINS}" "info"
fi
fi

fi # check offline

# enable arm binary format so that the cross-architecture chroot environment will work
if [[ $BUILD_OPT == "image" || $BUILD_OPT == "rootfs" ]]; then
modprobe -q binfmt_misc
mountpoint -q /proc/sys/fs/binfmt_misc/ || mount binfmt_misc -t binfmt_misc /proc/sys/fs/binfmt_misc
if [[ "$(arch)" != "aarch64" ]]; then
test -e /proc/sys/fs/binfmt_misc/qemu-arm || update-binfmts --enable qemu-arm
test -e /proc/sys/fs/binfmt_misc/qemu-aarch64 || update-binfmts --enable qemu-aarch64
fi
fi
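# Illustrative check only: once registered, a handler shows up as a file under
# /proc/sys/fs/binfmt_misc/ and can be inspected manually with e.g.:
#   update-binfmts --display qemu-aarch64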

[[ ! -f "${USERPATCHES_PATH}"/customize-image.sh ]] && cp "${EXTER}"/config/templates/customize-image.sh.template "${USERPATCHES_PATH}"/customize-image.sh

if [[ ! -f "${USERPATCHES_PATH}"/README ]]; then
rm -f "${USERPATCHES_PATH}"/readme.txt
echo 'Please read documentation about customizing build configuration' > "${USERPATCHES_PATH}"/README
echo 'https://www.orangepi.org' >> "${USERPATCHES_PATH}"/README

# create patches directory structure under USERPATCHES_PATH
find $EXTER/patch -maxdepth 2 -type d ! -name . | sed "s%/.*patch%/$USERPATCHES_PATH%" | xargs mkdir -p
fi

# check free space (basic); findmnt reports bytes, 1073741824 bytes = 1 GiB
local freespace=$(findmnt --target "${SRC}" -n -o AVAIL -b 2>/dev/null) # in bytes
if [[ -n $freespace && $(( $freespace / 1073741824 )) -lt 10 ]]; then
display_alert "Low free space left" "$(( $freespace / 1073741824 )) GiB" "wrn"
# pause here since dialog-based menu will hide this message otherwise
echo -e "Press \e[0;33m<Ctrl-C>\x1B[0m to abort compilation, \e[0;33m<Enter>\x1B[0m to ignore and continue"
read
fi
}


function webseed ()
{
# list of mirrors that host our files
unset text
# pick the mirror list for the continent of the build host (via geoip lookup)
local CCODE=$(curl -s redirect.armbian.com/geoip | jq '.continent.code' -r)
WEBSEED=($(curl -s https://redirect.armbian.com/mirrors | jq -r '.'${CCODE}' | .[] | values'))
# aria2 simply splits chunks based on the number of sources, regardless of download speed
# when China mirrors are selected, use only the China mirror; the others are very slow from there
if [[ $DOWNLOAD_MIRROR == china ]]; then
WEBSEED=(
https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/
)
elif [[ $DOWNLOAD_MIRROR == bfsu ]]; then
WEBSEED=(
https://mirrors.bfsu.edu.cn/armbian-releases/
)
fi
for toolchain in ${WEBSEED[@]}; do
text="${text} ${toolchain}${1}"
done
text="${text:1}"
echo "${text}"
}
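
# Illustrative example (the actual mirror list depends on geoip / DOWNLOAD_MIRROR):
#   webseed "_toolchain/gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz"
# prints a space separated list of mirror URLs with that path appended, which
# download_and_verify passes to aria2c as additional download sources.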


download_and_verify()
{

local remotedir=$1
local filename=$2
local localdir=$SRC/toolchains
local dirname=${filename//.tar.xz}
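# the script assumes each archive unpacks into a directory named after the file
# minus its ".tar.xz" suffix, e.g. "gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz"
# ends up in "${localdir}/gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu"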

if [[ $DOWNLOAD_MIRROR == china ]]; then
local server="https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/"
elif [[ $DOWNLOAD_MIRROR == bfsu ]]; then
local server="https://mirrors.bfsu.edu.cn/armbian-releases/"
else
local server=${ARMBIAN_MIRROR}
fi

if [[ -f ${localdir}/${dirname}/.download-complete ]]; then
return
fi

if [[ ${filename} == *ky* ]]; then
server="http://www.iplaystore.cn/"
remotedir=""
fi

# switch to a China mirror if the default server times out
# (curl exit codes: 0 = reachable, 7 = connection failed, 22 = HTTP error with --fail;
#  any other code, e.g. a timeout, triggers the mirror fallback below)
timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} 2>&1 >/dev/null
if [[ $? -ne 7 && $? -ne 22 && $? -ne 0 ]]; then
display_alert "Timeout from $server" "retrying" "info"
server="https://mirrors.tuna.tsinghua.edu.cn/armbian-releases/"

# switch to another China mirror if tuna times out
timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} 2>&1 >/dev/null
if [[ $? -ne 7 && $? -ne 22 && $? -ne 0 ]]; then
display_alert "Timeout from $server" "retrying" "info"
server="https://mirrors.bfsu.edu.cn/armbian-releases/"
fi
fi


# check if file exists on remote server before running aria2 downloader
[[ ! `timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename}` ]] && return

cd "${localdir}" || exit

# use local control file
if [[ -f "${EXTER}"/config/torrents/${filename}.asc ]]; then
local torrent="${EXTER}"/config/torrents/${filename}.torrent
ln -sf "${EXTER}/config/torrents/${filename}.asc" "${localdir}/${filename}.asc"
elif [[ ! `timeout 10 curl --head --fail --silent "${server}${remotedir}/${filename}.asc"` ]]; then
return
else
# download control file
local torrent=${server}$remotedir/${filename}.torrent
aria2c --download-result=hide --disable-ipv6=true --summary-interval=0 --console-log-level=error --auto-file-renaming=false \
--continue=false --allow-overwrite=true --dir="${localdir}" ${server}${remotedir}/${filename}.asc $(webseed "$remotedir/${filename}.asc") -o "${filename}.asc"
[[ $? -ne 0 ]] && display_alert "Failed to download control file" "" "wrn"
fi

# download torrent first
if [[ ${USE_TORRENT} == "yes" ]]; then

display_alert "downloading using torrent network" "$filename"
local ariatorrent="--summary-interval=0 --auto-save-interval=0 --seed-time=0 --bt-stop-timeout=120 --console-log-level=error \
--allow-overwrite=true --download-result=hide --rpc-save-upload-metadata=false --auto-file-renaming=false \
--file-allocation=trunc --continue=true ${torrent} \
--dht-file-path=$EXTER/cache/.aria2/dht.dat --disable-ipv6=true --stderr --follow-torrent=mem --dir=${localdir}"

# aria2c throws an error if the dht.dat file does not exist yet, so output only needs to be suppressed on the first download
if [[ -f $EXTER/cache/.aria2/dht.dat ]]; then
# shellcheck disable=SC2086
aria2c ${ariatorrent}
else
# shellcheck disable=SC2035
aria2c ${ariatorrent} &> "${DEST}"/${LOG_SUBPATH}/torrent.log
fi
# mark complete
[[ $? -eq 0 ]] && touch "${localdir}/${filename}.complete"

fi


# direct download if torrent fails
if [[ ! -f "${localdir}/${filename}.complete" ]]; then
if [[ ! `timeout 10 curl --head --fail --silent ${server}${remotedir}/${filename} 2>&1 >/dev/null` ]]; then
display_alert "downloading using http(s) network" "$filename"
aria2c --download-result=hide --rpc-save-upload-metadata=false --console-log-level=error \
--dht-file-path="${SRC}"/cache/.aria2/dht.dat --disable-ipv6=true --summary-interval=0 --auto-file-renaming=false --dir="${localdir}" ${server}${remotedir}/${filename} $(webseed "${remotedir}/${filename}") -o "${filename}"
# mark complete
[[ $? -eq 0 ]] && touch "${localdir}/${filename}.complete" && echo ""

fi
fi

if [[ -f ${localdir}/${filename}.asc ]]; then

if grep -q 'BEGIN PGP SIGNATURE' "${localdir}/${filename}.asc"; then

if [[ ! -d $EXTER/cache/.gpg ]]; then
mkdir -p $EXTER/cache/.gpg
chmod 700 $EXTER/cache/.gpg
touch $EXTER/cache/.gpg/gpg.conf
chmod 600 $EXTER/cache/.gpg/gpg.conf
fi

# Verify archives with Linaro and Armbian GPG keys

if [[ -n "${http_proxy}" ]]; then
(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\
|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \
--keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy="${http_proxy}" \
--recv-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)

(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\
|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \
--keyserver hkp://keyserver.ubuntu.com:80 --keyserver-options http-proxy="${http_proxy}" \
--recv-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)
else
(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\
|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 8F427EAF >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)

(gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --list-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1\
|| gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 9F0E78D5 >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1)
fi

gpg --homedir "${EXTER}"/cache/.gpg --no-permission-warning --verify \
--trust-model always -q "${localdir}/${filename}.asc" >> "${DEST}"/${LOG_SUBPATH}/output.log 2>&1
[[ ${PIPESTATUS[0]} -eq 0 ]] && verified=true && display_alert "Verified" "PGP" "info"

else

md5sum -c --status "${localdir}/${filename}.asc" && verified=true && display_alert "Verified" "MD5" "info"

fi

if [[ $verified == true ]]; then
if [[ "${filename:(-6)}" == "tar.xz" ]]; then

display_alert "decompressing"
pv -p -b -r -c -N "[ .... ] ${filename}" "${filename}" | xz -dc | tar xp --xattrs --no-same-owner --overwrite
[[ $? -eq 0 ]] && touch "${localdir}/${dirname}/.download-complete"
fi
else
exit_with_error "verification failed"
fi

fi
}
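
# Example (as used for the toolchain archives above):
#   download_and_verify "_toolchain" "gcc-arm-11.2-2022.02-x86_64-aarch64-none-linux-gnu.tar.xz"
# fetches the archive into $SRC/toolchains, verifies it (PGP or MD5) and unpacks it there.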


show_developer_warning()
{
local temp_rc
temp_rc=$(mktemp)
cat <<-'EOF' > "${temp_rc}"
screen_color = (WHITE,RED,ON)
EOF
local warn_text="You are switching to the \Z1EXPERT MODE\Zn

This allows building experimental configurations that are provided
\Z1AS IS\Zn to developers and expert users,
\Z1WITHOUT ANY RESPONSIBILITIES\Zn from the Armbian team:

- You are using these configurations \Z1AT YOUR OWN RISK\Zn
- Bug reports related to the dev kernel, CSC, WIP and EOS boards
\Z1will be closed without a discussion\Zn
- Forum posts related to dev kernel, CSC, WIP and EOS boards
should be created in the \Z2\"Community forums\"\Zn section
"
DIALOGRC=$temp_rc dialog --title "Expert mode warning" --backtitle "${backtitle}" --colors --defaultno --no-label "I do not agree" \
--yes-label "I understand and agree" --yesno "$warn_text" "${TTY_Y}" "${TTY_X}"
[[ $? -ne 0 ]] && exit_with_error "Error switching to the expert mode"
SHOW_WARNING=no
}

# Prints a formatted dump of the values of the variables
# named in the list passed at the call site.
#
# The LOG_OUTPUT_FILE variable must be defined in the calling function
# before calling `show_checklist_variables` and unset afterwards.
#
show_checklist_variables ()
{
local checklist=$*
local var pval
local log_file=${LOG_OUTPUT_FILE:-"${SRC}"/output/${LOG_SUBPATH}/trash.log}
local _line=${BASH_LINENO[0]}
local _function=${FUNCNAME[1]}
local _file=$(basename "${BASH_SOURCE[1]}")

echo -e "Show variables in function: $_function" "[$_file:$_line]\n" >>$log_file

for var in $checklist; do
eval pval=\$$var
echo -e "\n$var =:" >>$log_file
if [ $(echo "$pval" | awk -F"/" '{print NF}') -ge 4 ]; then
printf "%s\n" $pval >>$log_file
else
printf "%-30s %-30s %-30s %-30s\n" $pval >>$log_file
fi
done
}
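
# Illustrative usage (variable names are just examples):
#   LOG_OUTPUT_FILE="${DEST}"/${LOG_SUBPATH}/somestep.log
#   show_checklist_variables "BOARD BRANCH RELEASE"
#   unset LOG_OUTPUT_FILE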

install_wiringop()
{
install_deb_chroot "$EXTER/cache/debs/${ARCH}/wiringpi-2.58-1.deb"
chroot "${SDCARD}" /bin/bash -c "apt-mark hold wiringpi" >> "${DEST}"/${LOG_SUBPATH}/install.log 2>&1

if [[ ${IGNORE_UPDATES} != yes ]]; then

fetch_from_repo "https://github.com/orangepi-xunlong/wiringOP.git" "${EXTER}/cache/sources/wiringOP" "branch:next" "yes"
fetch_from_repo "https://github.com/orangepi-xunlong/wiringOP-Python.git" "${EXTER}/cache/sources/wiringOP-Python" "branch:next" "yes"

fi

cp ${EXTER}/cache/sources/wiringOP/next ${SDCARD}/usr/src/wiringOP -rfa
cp ${EXTER}/cache/sources/wiringOP-Python/next ${SDCARD}/usr/src/wiringOP-Python -rfa

rm $SDCARD/root/*.deb >/dev/null 2>&1
}


install_310b-npu-driver()
{
local driver_path="$EXTER/cache/sources/ascend-driver"
local driver_name="Ascend-hdk-310b-npu-driver_23.0.5_linux-aarch64-opiaimax.run"
local driver=${driver_path}/${driver_name}

if [[ -f "${driver}" ]]; then
display_alert "Installing" "$driver_name" "info"
cp "${driver}" "${SDCARD}/opt/"
chmod +x "${SDCARD}/opt/${driver_name}"
chroot "${SDCARD}" /bin/bash -c "/opt/${driver_name} --chroot --full --install-username=orangepi --install-usergroup=orangepi --install-for-all"
fi
}


install_docker() {

[[ $install_docker != yes ]] && return

display_alert "Installing" "docker" "info"
chroot "${SDCARD}" /bin/bash -c "apt-get install -y -qq apt-transport-https ca-certificates curl gnupg2 software-properties-common >/dev/null 2>&1"

case ${RELEASE} in
buster|bullseye|bookworm)
distributor_id="debian"
;;
xenial|bionic|focal|jammy|noble)
distributor_id="ubuntu"
;;
esac

#if [[ ${SELECTED_CONFIGURATION} == desktop ]]; then
mirror_url=https://repo.huaweicloud.com
#else
# mirror_url=https://mirrors.aliyun.com
#fi

chroot "${SDCARD}" /bin/bash -c "curl -fsSL ${mirror_url}/docker-ce/linux/${distributor_id}/gpg | apt-key add -"
echo "deb [arch=${ARCH}] ${mirror_url}/docker-ce/linux/${distributor_id} ${RELEASE} stable" > "${SDCARD}"/etc/apt/sources.list.d/docker.list

chroot "${SDCARD}" /bin/bash -c "apt-get update"
chroot "${SDCARD}" /bin/bash -c "apt-get install -y -qq docker-ce docker-ce-cli containerd.io"
chroot "${SDCARD}" /bin/bash -c "sudo groupadd docker"

run_on_sdcard "systemctl --no-reload disable docker.service"
}


#function run_after_build()
#{
# chown -R $(logname).$(logname) $BOOTSOURCEDIR
# chown -R $(logname).$(logname) $LINUXSOURCEDIR
# chown -R $(logname).$(logname) $USERPATCHES_PATH
# chown -R $(logname).$(logname) $DEST/{config,debs,debug,images,patch}
#
# if [[ $DEBUG_DEB == yes && $BUILD_OPT =~ u-boot|kernel ]]; then
#
# [[ -z $REMOTEIP ]] && exit_with_error "The remote IP address has not been set" ""
# [[ -z $PASS_ROOT ]] && PASS_ROOT="orangepi"
# [[ -z $MMC_DEV ]] && MMC_DEV="tfcard"
#
# #ssh-keygen -f "~/.ssh/known_hosts" -R ${REMOTEIP}
# local num=0
# while true;
# do
# ping ${REMOTEIP} -c 1 > /dev/null 2>&1
#
# if [[ $? == 0 ]]; then
# echo " "
# break
# fi
#
# if [[ $num == 0 ]]; then
# display_alert "${BOARD} network cannot be connected" "${REMOTEIP}" "wrn"
# ((num++))
# fi
#
# echo -e ".\c"
# done
# display_alert "${BOARD} network is connected" "${REMOTEIP}" "info"
#
# if [[ $BUILD_OPT == u-boot ]]; then
# sshpass -p ${PASS_ROOT} scp ${DEB_STORAGE}/u-boot/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb root@${REMOTEIP}:/root
# display_alert "Uninstall deb package" "linux-u-boot-${BOARD}-${BRANCH}" "info"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "apt-get purge -y linux-u-boot-${BOARD}-${BRANCH}"
# display_alert "Install deb package" "${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb" "info"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dpkg -i /root/${CHOSEN_UBOOT}_${REVISION}_${ARCH}.deb"
#
# if [[ $MMC_DEV == emmc ]]; then
# display_alert "Burn the U-Boot into EMMC" "" "info"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dd bs=1k seek=8 if=/usr/lib/linux-u-boot-legacy-orangepi400_2.1.0_arm64/boot0_sdcard.fex of=/dev/mmcblk0"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dd bs=1k seek=16400 if=/usr/lib/linux-u-boot-legacy-orangepi400_2.1.0_arm64/boot_package.fex of=/dev/mmcblk0"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"
# else
# display_alert "Burn the U-Boot into TF card" "" "info"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "nand-sata-install DEBUG_UBOOT"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "reboot"
# fi
# fi
#
# if [[ $BUILD_OPT == kernel ]]; then
# sshpass -p ${PASS_ROOT} scp ${DEB_STORAGE}/linux-image-${BRANCH}-${LINUXFAMILY}_${REVISION}_${ARCH}.deb root@${REMOTEIP}:/root
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "apt-get purge -y linux-image-${BRANCH}-${LINUXFAMILY}"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "dpkg -i /root/linux-image-${BRANCH}-${LINUXFAMILY}_${REVISION}_${ARCH}.deb"
# if [[ $BRANCH == current && $BOARD =~ orangepizero2|orangepi400 ]]; then
# sshpass -p ${PASS_ROOT} scp ${LINUXSOURCEDIR}/arch/arm64/boot/dts/allwinner/sun50i-h616-orangepi-*.dtb root@${REMOTEIP}:/boot/dtb/allwinner/
# fi
#
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "sync"
# sshpass -p ${PASS_ROOT} ssh root@${REMOTEIP} "reboot"
# fi
# fi
#
# if [[ $DEBUG_DEB == yes && $BUILD_OPT == image ]]; then
# scp ${destimg}/*.img ${PC_NAME}@${PC_IP}:${PC_DIR}
# fi
#}