#!/usr/bin/env bash

# This script is a wrapper to the other download backends.
# Its role is to ensure atomicity when saving downloaded files
# back to BR2_DL_DIR, and not clutter BR2_DL_DIR with partial,
# failed downloads.
#
# To avoid cluttering BR2_DL_DIR, we download to a trashable
# location, namely in $(BUILD_DIR).
# Then, we move the downloaded file to a temporary file in the
# same directory as the final output file.
# This allows us to finally atomically rename it to its final
# name.
# If anything goes wrong, we just remove all the temporaries
# created so far.

# We want to catch any unexpected failure, and exit immediately.
set -e

# shellcheck source=helpers source-path=SCRIPTDIR
. "${0%/*}/helpers"
export BR_BACKEND_DL_GETOPTS=":hc:d:o:n:N:H:lru:qf:e"
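
# Illustrative invocation only (the real command line is generated by the
# package download infrastructure; the package name, version and URL below
# are made up):
#   support/download/dl-wrapper \
#       -c 1.2.3 -d "${BR2_DL_DIR}/foo" -D "${BR2_DL_DIR}" \
#       -f foo-1.2.3.tar.gz -o "${BR2_DL_DIR}/foo/foo-1.2.3.tar.gz" \
#       -n foo-1.2.3 -N foo -H package/foo/foo.hash \
#       -u wget+https://example.com/foo-1.2.3.tar.gz \
#       -- --passed-to-the-backend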
main() {
    local OPT OPTARG
    local backend output large_file recurse quiet rc
    local -a uris hfiles backend_opts post_process_opts

    # Parse our options; anything after '--' is for the backend
    while getopts ":c:d:D:o:n:N:H:lrf:u:qp:P:" OPT; do
        case "${OPT}" in
        c)  cset="${OPTARG}";;
        d)  dl_dir="${OPTARG}";;
        D)  old_dl_dir="${OPTARG}";;
        o)  output="${OPTARG}";;
        n)  raw_base_name="${OPTARG}";;
        N)  base_name="${OPTARG}";;
        H)  hfiles+=( "${OPTARG}" );;
        l)  large_file="-l";;
        r)  recurse="-r";;
        f)  filename="${OPTARG}";;
        u)  uris+=( "${OPTARG}" );;
        p)  post_process="${OPTARG}";;
        P)  post_process_opts+=( "${OPTARG}" );;
        q)  quiet="-q";;
        :)  error "option '%s' expects a mandatory argument\n" "${OPTARG}";;
        \?) error "unknown option '%s'\n" "${OPTARG}";;
        esac
    done

    # Forget our options, and keep only those for the backend
    shift $((OPTIND-1))

    if [ -z "${output}" ]; then
        error "no output specified, use -o\n"
    fi

    # Legacy handling: check if the file already exists in the global
    # download directory. If it does, hard-link it. If it turns out it
    # was an incorrect download, we'd still check it below anyway.
    # If we can neither link nor copy, fall back to doing a download.
    # NOTE! This is not atomic and is subject to TOCTTOU, but the whole
    # dl-wrapper runs under a flock, so we're safe.
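    # A hard-link can fail (e.g. when the old and new locations are on
    # different filesystems), hence the 'cp' fallback; the final 'true'
    # keeps a failed link/copy from aborting the script under 'set -e'.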
    # shellcheck disable=SC2166 # -a works in this case
    if [ ! -e "${output}" -a -e "${old_dl_dir}/${filename}" ]; then
        ln "${old_dl_dir}/${filename}" "${output}" || \
            cp "${old_dl_dir}/${filename}" "${output}" || \
            true
    fi

    # If the output file already exists and:
    # - there's no .hash file: do not download it again and exit promptly
    # - matches all its hashes: do not download it again and exit promptly
    # - fails at least one of its hashes: force a re-download
    # - there's no hash (but a .hash file): consider it a hard error
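    # (The branches below assume the check-hash exit-code convention: 0 when
    # there is no .hash file or all hashes match, 2 on a hash mismatch, and 3
    # when a .hash file exists but carries no hash for this file; see the
    # check-hash script itself for the authoritative list.)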
    if [ -e "${output}" ]; then
        if support/download/check-hash ${quiet} "${output}" "${output##*/}" "${hfiles[@]}"; then
            exit 0
        elif [ ${?} -ne 2 ]; then
            # Do not remove the file, otherwise it might get re-downloaded
            # from a later location (i.e. primary -> upstream -> mirror).
            # Do not print a message, check-hash already did.
            exit 1
        fi
        rm -f "${output}"
        warn "Re-downloading '%s'...\n" "${output##*/}"
    fi

    # Look through all the uris that we were given to download the package
    # source
    download_and_check=0
    rc=1
    for uri in "${uris[@]}"; do
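        # Each URI is of the form '<backend>[|urlencode]+<url>', for example
        # 'git+https://example.com/foo.git' or
        # 'wget|urlencode+http://example.com/foo-1.2.3.tar.gz' (URLs made up
        # for illustration); the prefix is stripped off below to select the
        # backend.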
        backend_urlencode="${uri%%+*}"
        backend="${backend_urlencode%|*}"
        case "${backend}" in
        git|svn|cvs|bzr|file|scp|hg|sftp) ;;
        ftp|ftps) backend="curl" ;;
        *) backend="wget" ;;
        esac
        uri=${uri#*+}

        urlencode=${backend_urlencode#*|}
        # urlencode must be "urlencode"
        [ "${urlencode}" != "urlencode" ] && urlencode=""

        # tmpd is a temporary directory in which backends may store
        # intermediate by-products of the download.
        # tmpf is the file in which the backends should put the downloaded
        # content.
        # tmpd is located in $(BUILD_DIR), so as not to clutter the (precious)
        # $(BR2_DL_DIR)
        # We let the backends create tmpf, so they are able to set whatever
        # permission bits they want (although we're only really interested in
        # the executable bit.)
        tmpd="$(mktemp -d "${BUILD_DIR}/.${output##*/}.XXXXXX")"
        tmpf="${tmpd}/output"

        # shellcheck disable=SC2206 # all three are dash-options
        backend_opts=( ${quiet} ${large_file} ${recurse} )
        if [ "${urlencode}" ]; then
            backend_opts+=( -e )
        fi

        # Helpers expect to run in a directory that is *really* trashable, so
        # they are free to create whatever files and/or sub-dirs they might need.
        # Doing the 'cd' here rather than in all backends is easier.
        cd "${tmpd}"

        # If the backend fails, we can just remove the contents of the temporary
        # directory to get rid of all the cruft it may have left behind, and try
        # the next URI until one succeeds. Once out of URIs to try, we need to
        # clean up and exit.
        if ! "${OLDPWD}/support/download/${backend}" \
                -c "${cset}" \
                -d "${dl_dir}" \
                -n "${raw_base_name}" \
                -N "${base_name}" \
                -f "${filename}" \
                -u "${uri}" \
                -o "${tmpf}" \
                "${backend_opts[@]}" \
                -- \
                "${@}"
        then
            # cd back to keep path coherence
            cd "${OLDPWD}"
            rm -rf "${tmpd}"
            continue
        fi
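
        # When -p was given, run the matching
        # 'support/download/<post_process>-post-process' helper (e.g. the
        # vendoring helpers for language package managers, assuming such
        # helpers exist in this tree), which is expected to leave its result
        # back in ${tmpf}.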
        if [ -n "${post_process}" ] ; then
            if ! "${OLDPWD}/support/download/${post_process}-post-process" \
                    -o "${tmpf}" \
                    -n "${raw_base_name}" \
                    "${post_process_opts[@]}"
            then
                # cd back to keep path coherence
                cd "${OLDPWD}"
                rm -rf "${tmpd}"
                continue
            fi
        fi

        # cd back to free the temp-dir, so we can remove it later
        cd "${OLDPWD}"

        # Check if the downloaded file is sane, and matches the stored hashes
        # for that file
        if support/download/check-hash ${quiet} "${tmpf}" "${output##*/}" "${hfiles[@]}"; then
            rc=0
        else
            if [ ${?} -ne 3 ]; then
                rm -rf "${tmpd}"
                continue
            fi
            # the hash file exists and there was no hash to check the file
            # against
            rc=1
        fi
        download_and_check=1
        break
    done

    # We tried every URI possible, none seems to work or to check against the
    # available hash. *ABORT MISSION*
    if [ "${download_and_check}" -eq 0 ]; then
        rm -rf "${tmpd}"
        exit 1
    fi

    # tmp_output is in the same directory as the final output, so we can
    # later move it atomically.
    tmp_output="$(mktemp "${output}.XXXXXX")"

    # 'mktemp' creates files with 'go=-rwx', so the files are not accessible
    # to users other than the one doing the download (and root, of course).
    # This can be problematic when a shared BR2_DL_DIR is used by different
    # users (e.g. on a build server), where all users may write to the shared
    # location, since other users would not be allowed to read the files
    # another user downloaded.
    # So, we restore the 'go' access rights to a more sensible value, while
    # still abiding by the current user's umask. We must do that before the
    # final 'mv', so just do it now.
    # Some backends (cp and scp) may create executable files, so we need to
    # carry the executable bit if needed.
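    # For example: with umask 022, 0644 and 0755 are kept as-is; with
    # umask 077, 0644 becomes 0600 and 0755 becomes 0700.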
    [ -x "${tmpf}" ] && new_mode=755 || new_mode=644
    new_mode="$(printf "%04o" $((0${new_mode} & ~0$(umask))))"
    chmod "${new_mode}" "${tmp_output}"

    # We must *not* unlink tmp_output, otherwise there is a small window
    # during which another download process may create the same tmp_output
    # name (very, very unlikely; but not impossible.)
    # Using 'cp' is not reliable, since 'cp' may unlink the destination file
    # if it is unable to open it with O_WRONLY|O_TRUNC; see:
    #   http://pubs.opengroup.org/onlinepubs/9699919799/utilities/cp.html
    # Since the destination filesystem can be anything, it might not support
    # O_TRUNC, so 'cp' would unlink it first.
    # Use 'cat' and append-redirection '>>' to save to the final location,
    # since that is the only way we can be 100% sure of the behaviour.
    if ! cat "${tmpf}" >>"${tmp_output}"; then
        rm -rf "${tmpd}" "${tmp_output}"
        exit 1
    fi
    rm -rf "${tmpd}"

    # tmp_output and output are on the same filesystem, so POSIX guarantees
    # that 'mv' is atomic, because it then uses rename() that POSIX mandates
    # to be atomic, see:
    #   http://pubs.opengroup.org/onlinepubs/9699919799/functions/rename.html
    if ! mv -f "${tmp_output}" "${output}"; then
        rm -f "${tmp_output}"
        exit 1
    fi

    return ${rc}
}
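
# my_name is presumably used by the warn/error helpers sourced from ./helpers
# to prefix their messages.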
my_name="${0##*/}"

main "${@}"