
#!/usr/bin/env python3
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Uploads files to Google Storage, addressed by their content (SHA-1 sum)."""

import hashlib
import optparse
import os
import queue
import re
import stat
import sys
import tarfile
import threading
import time

from download_from_google_storage import get_sha1
from download_from_google_storage import Gsutil
from download_from_google_storage import PrinterThread
from download_from_google_storage import GSUTIL_DEFAULT_PATH

USAGE_STRING = """%prog [options] target [target2 ...].
Target is the file intended to be uploaded to Google Storage.
If target is "-", then a list of files will be taken from standard input.

This script will generate a file (original filename).sha1 containing the
sha1 sum of the uploaded file.
It is recommended that the .sha1 file is checked into the repository,
the original file removed from the repository, and a hook added to the
DEPS file to call download_from_google_storage.py.

Example usages
--------------

Scan the current directory and upload all files larger than 1MB:
find . -name .svn -prune -o -size +1000k -type f -print0 | %prog -0 -b bkt -
(Replace "bkt" with the name of a writable bucket.)
"""


def get_md5(filename):
    md5_calculator = hashlib.md5()
    with open(filename, 'rb') as f:
        while True:
            # Read in 1 MiB chunks so arbitrarily large files fit in memory.
            chunk = f.read(1024 * 1024)
            if not chunk:
                break
            md5_calculator.update(chunk)
    return md5_calculator.hexdigest()


def get_md5_cached(filename):
    """Don't calculate the MD5 if we can find a .md5 file."""
    # See if we can find an existing MD5 sum stored in a file.
    if os.path.exists('%s.md5' % filename):
        with open('%s.md5' % filename, 'rb') as f:
            md5_match = re.search('([a-z0-9]{32})', f.read().decode())
        if md5_match:
            return md5_match.group(1)
    # No usable cached sum: calculate it and store it for next time.
    md5_hash = get_md5(filename)
    with open('%s.md5' % filename, 'wb') as f:
        f.write(md5_hash.encode())
    return md5_hash
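
# Note: get_md5_cached() trusts an existing .md5 sidecar as-is; if the target
# file changes without the sidecar being regenerated, the cached value goes
# stale. That is presumably why MD5 caching is opt-in (--use_md5).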


def _upload_worker(thread_num, upload_queue, base_url, gsutil, md5_lock, force,
                   use_md5, stdout_queue, ret_codes, gzip):
    while True:
        filename, sha1_sum = upload_queue.get()
        if not filename:
            # A (None, None) entry is the sentinel marking the end of work.
            break
        file_url = '%s/%s' % (base_url, sha1_sum)
        if gsutil.check_call('ls', file_url)[0] == 0 and not force:
            # File exists, check MD5 hash.
            _, out, _ = gsutil.check_call_with_retries('ls', '-L', file_url)
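            # A GCS ETag equals the object's MD5 only for non-composite
            # objects; for gzip-transcoded uploads (-z) it reflects the
            # compressed bytes, so this check may miss and force a re-upload.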
            etag_match = re.search(r'ETag:\s+\S+', out)
            if etag_match:
                stdout_queue.put('%d> File with url %s already exists' %
                                 (thread_num, file_url))
                remote_md5 = etag_match.group(0).split()[1]
                # Calculate the MD5 checksum to match it to Google Storage's
                # ETag.
                with md5_lock:
                    if use_md5:
                        local_md5 = get_md5_cached(filename)
                    else:
                        local_md5 = get_md5(filename)
                if local_md5 == remote_md5:
                    stdout_queue.put(
                        '%d> File %s already exists and MD5 matches, upload '
                        'skipped' % (thread_num, filename))
                    continue
        stdout_queue.put('%d> Uploading %s...' % (thread_num, filename))
        gsutil_args = ['-h', 'Cache-Control:public, max-age=31536000']
        # Mark executable files with the header "x-goog-meta-executable: 1"
        # which the download script will check for to preserve the executable
        # bit.
        if not sys.platform.startswith('win'):
            if os.stat(filename).st_mode & stat.S_IEXEC:
                gsutil_args += ['-h', 'x-goog-meta-executable:1']
        gsutil_args += ['cp']
        if gzip:
            gsutil_args.extend(['-z', gzip])
        gsutil_args.extend([filename, file_url])
        code, _, err = gsutil.check_call_with_retries(*gsutil_args)
        if code != 0:
            ret_codes.put((code,
                           'Encountered error on uploading %s to %s\n%s' %
                           (filename, file_url, err)))
            continue


def get_targets(args, parser, use_null_terminator):
    if not args:
        parser.error('Missing target.')
    if len(args) == 1 and args[0] == '-':
        # Take stdin as a newline or null separated list of files.
        if use_null_terminator:
            return sys.stdin.read().split('\0')
        return sys.stdin.read().splitlines()
    return args


def upload_to_google_storage(input_filenames, base_url, gsutil, force, use_md5,
                             num_threads, skip_hashing, gzip):
    # We only want one MD5 calculation happening at a time to avoid HD
    # thrashing.
    md5_lock = threading.Lock()

    # Start up all the worker threads plus the printer thread.
    all_threads = []
    ret_codes = queue.Queue()
    ret_codes.put((0, None))
    upload_queue = queue.Queue()
    upload_timer = time.time()
    stdout_queue = queue.Queue()
    printer_thread = PrinterThread(stdout_queue)
    printer_thread.daemon = True
    printer_thread.start()
    for thread_num in range(num_threads):
        t = threading.Thread(target=_upload_worker,
                             args=[
                                 thread_num, upload_queue, base_url, gsutil,
                                 md5_lock, force, use_md5, stdout_queue,
                                 ret_codes, gzip
                             ])
        t.daemon = True
        t.start()
        all_threads.append(t)

    # We want to hash everything in a single thread since it's faster.
    # The bottleneck is in disk IO, not CPU.
    hashing_start = time.time()
    has_missing_files = False
    for filename in input_filenames:
        if not os.path.exists(filename):
            stdout_queue.put('Main> Error: %s not found, skipping.' % filename)
            has_missing_files = True
            continue
        if os.path.exists('%s.sha1' % filename) and skip_hashing:
            stdout_queue.put(
                'Main> Found hash for %s, sha1 calculation skipped.' %
                filename)
            with open(filename + '.sha1', 'rb') as f:
                sha1_file = f.read(1024)
            if not re.match('^([a-z0-9]{40})$', sha1_file.decode()):
                print('Invalid sha1 hash file %s.sha1' % filename,
                      file=sys.stderr)
                return 1
            upload_queue.put((filename, sha1_file.decode()))
            continue
        stdout_queue.put('Main> Calculating hash for %s...' % filename)
        sha1_sum = get_sha1(filename)
        with open(filename + '.sha1', 'wb') as f:
            f.write(sha1_sum.encode())
        stdout_queue.put('Main> Done calculating hash for %s.' % filename)
        upload_queue.put((filename, sha1_sum))
    hashing_duration = time.time() - hashing_start

    # Wait for everything to finish.
    for _ in all_threads:
        upload_queue.put((None, None))  # To mark the end of the work queue.
    for t in all_threads:
        t.join()
    stdout_queue.put(None)
    printer_thread.join()

    # Print timing information.
    print('Hashing %s files took %.1f seconds' %
          (len(input_filenames), hashing_duration))
    print('Uploading took %.1f seconds' % (time.time() - upload_timer))

    # See if we ran into any errors.
    max_ret_code = 0
    for ret_code, message in ret_codes.queue:
        max_ret_code = max(ret_code, max_ret_code)
        if message:
            print(message, file=sys.stderr)
    if has_missing_files:
        print('One or more input files missing', file=sys.stderr)
        max_ret_code = max(1, max_ret_code)

    if not max_ret_code:
        print('Success!')

    return max_ret_code


def create_archives(dirs):
    archive_names = []
    for name in dirs:
        tarname = '%s.tar.gz' % name
        with tarfile.open(tarname, 'w:gz') as tar:
            tar.add(name)
        archive_names.append(tarname)
    return archive_names
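
# With --archive, each directory argument is packed into <name>.tar.gz by
# create_archives() and the tarball then flows through the normal
# content-addressed upload path, producing a <name>.tar.gz.sha1 file.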


def validate_archive_dirs(dirs):
    for d in dirs:
        # We don't allow .. in paths in our archives.
        if d == '..':
            return False
        # We only allow dirs.
        if not os.path.isdir(d):
            return False
        # We don't allow sym links in our archives.
        if os.path.islink(d):
            return False
        # We require that the subdirectories we are archiving are all just
        # below cwd.
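        # next(os.walk('.'))[1] is the list of immediate subdirectory names
        # of the current working directory.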
        if d not in next(os.walk('.'))[1]:
            return False
    return True


def main():
    parser = optparse.OptionParser(USAGE_STRING)
    parser.add_option('-b',
                      '--bucket',
                      help='Google Storage bucket to upload to.')
    parser.add_option('-e', '--boto', help='Specify a custom boto file.')
    parser.add_option('-a',
                      '--archive',
                      action='store_true',
                      help='Archive directory as a tar.gz file')
    parser.add_option('-f',
                      '--force',
                      action='store_true',
                      help='Force upload even if remote file exists.')
    parser.add_option('-g',
                      '--gsutil_path',
                      default=GSUTIL_DEFAULT_PATH,
                      help='Path to the gsutil script.')
    parser.add_option('-m',
                      '--use_md5',
                      action='store_true',
                      help='Generate MD5 files when scanning, and don\'t check '
                      'the MD5 checksum if a .md5 file is found.')
    parser.add_option('-t',
                      '--num_threads',
                      default=1,
                      type='int',
                      help='Number of uploader threads to run.')
    parser.add_option('-s',
                      '--skip_hashing',
                      action='store_true',
                      help='Skip hashing if .sha1 file exists.')
    parser.add_option('-0',
                      '--use_null_terminator',
                      action='store_true',
                      help='Use \\0 instead of \\n when parsing '
                      'the file list from stdin. This is useful if the input '
                      'is coming from "find ... -print0".')
    parser.add_option('-z',
                      '--gzip',
                      metavar='ext',
                      help='Gzip files which end in ext. '
                      'ext is a comma-separated list')
    (options, args) = parser.parse_args()

    # Enumerate our inputs.
    input_filenames = get_targets(args, parser, options.use_null_terminator)

    if options.archive:
        if not validate_archive_dirs(input_filenames):
            parser.error(
                'Only directories just below cwd are valid entries when '
                'using the --archive argument. Entries cannot contain .. '
                'and entries cannot be symlinks. Entries were %s' %
                input_filenames)
            return 1
        input_filenames = create_archives(input_filenames)

    # Make sure we can find a working instance of gsutil.
    if os.path.exists(GSUTIL_DEFAULT_PATH):
        gsutil = Gsutil(GSUTIL_DEFAULT_PATH, boto_path=options.boto)
    else:
        gsutil = None
        for path in os.environ["PATH"].split(os.pathsep):
            if os.path.exists(path) and 'gsutil' in os.listdir(path):
                gsutil = Gsutil(os.path.join(path, 'gsutil'),
                                boto_path=options.boto)
        if not gsutil:
            parser.error('gsutil not found in %s, bad depot_tools checkout?' %
                         GSUTIL_DEFAULT_PATH)

    base_url = 'gs://%s' % options.bucket

    return upload_to_google_storage(input_filenames, base_url, gsutil,
                                    options.force, options.use_md5,
                                    options.num_threads, options.skip_hashing,
                                    options.gzip)


if __name__ == '__main__':
    try:
        sys.exit(main())
    except KeyboardInterrupt:
        sys.stderr.write('interrupted\n')
        sys.exit(1)