# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Generic utils."""

import errno
import logging
import os
import Queue
import re
import stat
import sys
import tempfile
import threading
import time

import subprocess2


class Error(Exception):
  """gclient exception class."""
  pass


def SplitUrlRevision(url):
  """Splits url and returns a two-tuple: url, rev"""
  if url.startswith('ssh:'):
    # Make sure ssh://[email protected]/~/test.git@stable works
    regex = r'(ssh://(?:[-\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?'
    components = re.search(regex, url).groups()
  else:
    components = url.split('@', 1)
  if len(components) == 1:
    components += [None]
  return tuple(components)


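# Example for SplitUrlRevision() (illustrative sketch; the URLs below are made
# up):
#   SplitUrlRevision('http://src.example.com/svn/trunk@12345')
#     -> ('http://src.example.com/svn/trunk', '12345')
#   SplitUrlRevision('ssh://user@example.com/~/repo.git@stable')
#     -> ('ssh://user@example.com/~/repo.git', 'stable')
#   SplitUrlRevision('http://src.example.com/svn/trunk')
#     -> ('http://src.example.com/svn/trunk', None)
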
def IsDateRevision(revision):
  """Returns true if the given revision is of the form "{ ... }"."""
  return bool(revision and re.match(r'^\{.+\}$', str(revision)))


def MakeDateRevision(date):
  """Returns a revision representing the latest revision before the given
  date."""
  return "{" + date + "}"


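# Example for IsDateRevision()/MakeDateRevision() (illustrative sketch):
#   MakeDateRevision('2011-10-01')  ->  '{2011-10-01}'
#   IsDateRevision('{2011-10-01}')  ->  True
#   IsDateRevision('12345')         ->  False
#   IsDateRevision(None)            ->  False
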
def SyntaxErrorToError(filename, e):
  """Raises a gclient_utils.Error exception with a human readable message."""
  try:
    # Try to construct a human readable error message
    if filename:
      error_message = 'There is a syntax error in %s\n' % filename
    else:
      error_message = 'There is a syntax error\n'
    error_message += 'Line #%s, character %s: "%s"' % (
        e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
  except:
    # Something went wrong, re-raise the original exception
    raise e
  else:
    raise Error(error_message)


class PrintableObject(object):
  def __str__(self):
    output = ''
    for i in dir(self):
      if i.startswith('__'):
        continue
      output += '%s = %s\n' % (i, str(getattr(self, i, '')))
    return output


def FileRead(filename, mode='rU'):
  content = None
  f = open(filename, mode)
  try:
    content = f.read()
  finally:
    f.close()
  return content


def FileWrite(filename, content, mode='w'):
  f = open(filename, mode)
  try:
    f.write(content)
  finally:
    f.close()


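# Example for FileRead()/FileWrite() (illustrative sketch; the file name and
# content are made up):
#   FileWrite('DEPS.tmp', 'deps = {}\n')
#   assert FileRead('DEPS.tmp') == 'deps = {}\n'
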
def rmtree(path):
  """shutil.rmtree() on steroids.

  Recursively removes a directory, even if it's marked read-only.

  shutil.rmtree() doesn't work on Windows if any of the files or directories
  are read-only, which svn repositories and some .svn files are. We need to
  be able to force the files to be writable (i.e., deletable) as we traverse
  the tree.

  Even with all this, Windows still sometimes fails to delete a file, citing
  a permission error (maybe something to do with antivirus scans or disk
  indexing). The best suggestion any of the user forums had was to wait a
  bit and try again, so we do that too. It's hand-waving, but sometimes it
  works. :/

  On POSIX systems, things are a little bit simpler. The modes of the files
  to be deleted don't matter, only the modes of the directories containing
  them are significant. As the directory tree is traversed, each directory
  has its mode set appropriately before descending into it. This should
  result in the entire tree being removed, with the possible exception of
  *path itself, because nothing attempts to change the mode of its parent.
  Doing so would be hazardous, as it's not a directory slated for removal.
  In the ordinary case, this is not a problem: for our purposes, the user
  will never lack write permission on *path's parent.
  """
  if not os.path.exists(path):
    return

  if os.path.islink(path) or not os.path.isdir(path):
    raise Error('Called rmtree(%s) in non-directory' % path)

  if sys.platform == 'win32':
    # Some people don't have the APIs installed. In that case we'll do without.
    win32api = None
    win32con = None
    try:
      # Unable to import 'XX'
      # pylint: disable=F0401
      import win32api, win32con
    except ImportError:
      pass
  else:
    # On POSIX systems, we need the x-bit set on the directory to access it,
    # the r-bit to see its contents, and the w-bit to remove files from it.
    # The actual modes of the files within the directory are irrelevant.
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)

  def remove(func, subpath):
    if sys.platform == 'win32':
      os.chmod(subpath, stat.S_IWRITE)
      if win32api and win32con:
        win32api.SetFileAttributes(subpath, win32con.FILE_ATTRIBUTE_NORMAL)
    try:
      func(subpath)
    except OSError, e:
      if e.errno != errno.EACCES or sys.platform != 'win32':
        raise
      # Failed to delete, try again after a 100ms sleep.
      time.sleep(0.1)
      func(subpath)

  for fn in os.listdir(path):
    # If fullpath is a symbolic link that points to a directory, isdir will
    # be True, but we don't want to descend into that as a directory, we just
    # want to remove the link. Check islink and treat links as ordinary files
    # would be treated regardless of what they reference.
    fullpath = os.path.join(path, fn)
    if os.path.islink(fullpath) or not os.path.isdir(fullpath):
      remove(os.remove, fullpath)
    else:
      # Recurse.
      rmtree(fullpath)

  remove(os.rmdir, path)

# TODO(maruel): Rename the references.
RemoveDirectory = rmtree


def safe_makedirs(tree):
  """Creates the directory in a safe manner.

  Because multiple threads can create these directories concurrently, trap the
  exception and pass on.
  """
  count = 0
  while not os.path.exists(tree):
    count += 1
    try:
      os.makedirs(tree)
    except OSError, e:
      # 17 POSIX, 183 Windows
      if e.errno not in (17, 183):
        raise
      if count > 40:
        # Give up.
        raise


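# Example for safe_makedirs() (illustrative sketch; the path is made up).
# Several worker threads may race to create the same directory; the
# EEXIST (17) / ERROR_ALREADY_EXISTS (183) errors raised by the losers of the
# race are swallowed:
#   safe_makedirs(os.path.join('src', 'out', 'Release'))
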
def CheckCallAndFilterAndHeader(args, always=False, **kwargs):
  """Adds 'header' support to CheckCallAndFilter.

  If |always| is True, a message indicating what is being done is printed to
  stdout even if the call generates no output. Otherwise the message header
  is printed only if the call generated any output.
  """
  stdout = kwargs.get('stdout', None) or sys.stdout
  if always:
    stdout.write('\n________ running \'%s\' in \'%s\'\n'
        % (' '.join(args), kwargs.get('cwd', '.')))
  else:
    filter_fn = kwargs.get('filter_fn', None)
    def filter_msg(line):
      if line is None:
        stdout.write('\n________ running \'%s\' in \'%s\'\n'
            % (' '.join(args), kwargs.get('cwd', '.')))
      elif filter_fn:
        filter_fn(line)
    kwargs['filter_fn'] = filter_msg
    kwargs['call_filter_on_first_line'] = True
  # The header is useless without the actual output, so force it on.
  kwargs['print_stdout'] = True
  return CheckCallAndFilter(args, **kwargs)


class Wrapper(object):
  """Wraps an object, acting as a transparent proxy for all properties by
  default.
  """
  def __init__(self, wrapped):
    self._wrapped = wrapped

  def __getattr__(self, name):
    return getattr(self._wrapped, name)


class AutoFlush(Wrapper):
  """Creates a file object clone to automatically flush after N seconds."""
  def __init__(self, wrapped, delay):
    super(AutoFlush, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      self.lock = threading.Lock()
    self.__last_flushed_at = time.time()
    self.delay = delay

  @property
  def autoflush(self):
    return self

  def write(self, out, *args, **kwargs):
    self._wrapped.write(out, *args, **kwargs)
    should_flush = False
    self.lock.acquire()
    try:
      if self.delay and (time.time() - self.__last_flushed_at) > self.delay:
        should_flush = True
        self.__last_flushed_at = time.time()
    finally:
      self.lock.release()
    if should_flush:
      self.flush()


class Annotated(Wrapper):
  """Creates a file object clone to automatically prepend every line written
  by worker threads with a NN> prefix.
  """
  def __init__(self, wrapped, include_zero=False):
    super(Annotated, self).__init__(wrapped)
    if not hasattr(self, 'lock'):
      self.lock = threading.Lock()
    self.__output_buffers = {}
    self.__include_zero = include_zero

  @property
  def annotated(self):
    return self

  def write(self, out):
    index = getattr(threading.currentThread(), 'index', 0)
    if not index and not self.__include_zero:
      # Unindexed threads aren't buffered.
      return self._wrapped.write(out)

    self.lock.acquire()
    try:
      # Use a dummy array to hold the string so the code can be lockless.
      # Strings are immutable, so the whole dictionary would otherwise have to
      # stay locked. Using an array is faster than using a dummy object.
      if not index in self.__output_buffers:
        obj = self.__output_buffers[index] = ['']
      else:
        obj = self.__output_buffers[index]
    finally:
      self.lock.release()

    # Continue lockless.
    obj[0] += out
    while '\n' in obj[0]:
      line, remaining = obj[0].split('\n', 1)
      if line:
        self._wrapped.write('%d>%s\n' % (index, line))
      obj[0] = remaining

  def flush(self):
    """Flush buffered output."""
    orphans = []
    self.lock.acquire()
    try:
      # Detect threads no longer existing.
      indexes = (getattr(t, 'index', None) for t in threading.enumerate())
      indexes = filter(None, indexes)
      for index in self.__output_buffers:
        if not index in indexes:
          orphans.append((index, self.__output_buffers[index][0]))
      for orphan in orphans:
        del self.__output_buffers[orphan[0]]
    finally:
      self.lock.release()

    # Don't keep the lock while writing; may append a \n when it shouldn't.
    for orphan in orphans:
      if orphan[1]:
        self._wrapped.write('%d>%s\n' % (orphan[0], orphan[1]))
    return self._wrapped.flush()


def MakeFileAutoFlush(fileobj, delay=10):
  autoflush = getattr(fileobj, 'autoflush', None)
  if autoflush:
    autoflush.delay = delay
    return fileobj
  return AutoFlush(fileobj, delay)


def MakeFileAnnotated(fileobj, include_zero=False):
  if getattr(fileobj, 'annotated', None):
    return fileobj
  # Forward include_zero so the main (unindexed) thread can be annotated too
  # when requested.
  return Annotated(fileobj, include_zero)


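# Example for MakeFileAutoFlush()/MakeFileAnnotated() (illustrative sketch).
# A typical use is wrapping sys.stdout before spawning worker threads; any
# thread that sets a numeric 'index' attribute then gets a 'NN>' prefix on
# every line it writes:
#   sys.stdout = MakeFileAnnotated(MakeFileAutoFlush(sys.stdout, delay=10))
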
def CheckCallAndFilter(args, stdout=None, filter_fn=None,
                       print_stdout=None, call_filter_on_first_line=False,
                       **kwargs):
  """Runs a command and calls back a filter function if needed.

  Accepts all subprocess2.Popen() parameters plus:
    print_stdout: If True, the command's stdout is forwarded to stdout.
    filter_fn: A function taking a single string argument called with each line
               of the subprocess2's output. Each line has the trailing newline
               character trimmed.
    stdout: Can be any bufferable output.

  stderr is always redirected to stdout.
  """
  assert print_stdout or filter_fn
  stdout = stdout or sys.stdout
  filter_fn = filter_fn or (lambda x: None)
  kid = subprocess2.Popen(
      args, bufsize=0, stdout=subprocess2.PIPE, stderr=subprocess2.STDOUT,
      **kwargs)

  # Do a flush of stdout before we begin reading from the subprocess2's stdout.
  stdout.flush()

  # Also, we need to forward stdout to prevent weird re-ordering of output.
  # This has to be done on a per byte basis to make sure it is not buffered:
  # normally buffering is done for each line, but if svn requests input, no
  # end-of-line character is output after the prompt and it would not show up.
  try:
    in_byte = kid.stdout.read(1)
    if in_byte:
      if call_filter_on_first_line:
        filter_fn(None)
      in_line = ''
      while in_byte:
        if in_byte != '\r':
          if print_stdout:
            stdout.write(in_byte)
          if in_byte != '\n':
            in_line += in_byte
          else:
            filter_fn(in_line)
            in_line = ''
        else:
          filter_fn(in_line)
          in_line = ''
        in_byte = kid.stdout.read(1)
      # Flush the rest of buffered output. This is only an issue with
      # stdout/stderr not ending with a \n.
      if len(in_line):
        filter_fn(in_line)
    rv = kid.wait()
  except KeyboardInterrupt:
    print >> sys.stderr, 'Failed while running "%s"' % ' '.join(args)
    raise

  if rv:
    raise subprocess2.CalledProcessError(
        rv, args, kwargs.get('cwd', None), None, None)
  return 0


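# Example for CheckCallAndFilter() (illustrative sketch; the command, cwd and
# filter are made up). filter_fn receives each output line without its
# trailing newline, and stderr is folded into stdout:
#   def _filter(line):
#     if line.startswith('Checked out revision'):
#       print 'done: %s' % line
#   CheckCallAndFilter(['svn', 'update'], cwd='src', filter_fn=_filter,
#                      print_stdout=False)
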
def FindGclientRoot(from_dir, filename='.gclient'):
  """Tries to find the gclient root."""
  real_from_dir = os.path.realpath(from_dir)
  path = real_from_dir
  while not os.path.exists(os.path.join(path, filename)):
    split_path = os.path.split(path)
    if not split_path[1]:
      return None
    path = split_path[0]

  # If we did not find the file in the current directory, make sure we are in a
  # sub directory that is controlled by this configuration.
  if path != real_from_dir:
    entries_filename = os.path.join(path, filename + '_entries')
    if not os.path.exists(entries_filename):
      # If .gclient_entries does not exist, a previous call to gclient sync
      # might have failed. In that case, we cannot verify that the .gclient
      # is the one we want to use. In order not to cause too much trouble,
      # just issue a warning and return the path anyway.
      print >> sys.stderr, ("%s file in parent directory %s might not be the "
          "file you want to use" % (filename, path))
      return path
    scope = {}
    try:
      exec(FileRead(entries_filename), scope)
    except SyntaxError, e:
      SyntaxErrorToError(filename, e)
    all_directories = scope['entries'].keys()
    path_to_check = real_from_dir[len(path)+1:]
    while path_to_check:
      if path_to_check in all_directories:
        return path
      path_to_check = os.path.dirname(path_to_check)
    return None

  logging.info('Found gclient root at ' + path)
  return path


def PathDifference(root, subpath):
  """Returns the difference subpath minus root."""
  root = os.path.realpath(root)
  subpath = os.path.realpath(subpath)
  if not subpath.startswith(root):
    return None
  # If the root does not have a trailing \ or /, we add it so the returned
  # path starts immediately after the separator regardless of whether it is
  # provided.
  root = os.path.join(root, '')
  return subpath[len(root):]


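# Example for PathDifference() (illustrative sketch; assumes no symlinks are
# involved since both arguments go through os.path.realpath()):
#   PathDifference('/usr/local', '/usr/local/lib/python')  ->  'lib/python'
#   PathDifference('/usr/local', '/opt/other')             ->  None
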
def FindFileUpwards(filename, path=None):
  """Searches upwards from a directory (default: current) to find a file.

  Returns the nearest upper-level directory containing the passed in file.
  """
  if not path:
    path = os.getcwd()
  path = os.path.realpath(path)
  while True:
    file_path = os.path.join(path, filename)
    if os.path.exists(file_path):
      return path
    (new_path, _) = os.path.split(path)
    if new_path == path:
      return None
    path = new_path


def GetGClientRootAndEntries(path=None):
  """Returns the gclient root and the dict of entries."""
  config_file = '.gclient_entries'
  root = FindFileUpwards(config_file, path)
  if not root:
    print "Can't find %s" % config_file
    return None
  config_path = os.path.join(root, config_file)
  env = {}
  execfile(config_path, env)
  config_dir = os.path.dirname(config_path)
  return config_dir, env['entries']


def lockedmethod(method):
  """Method decorator that holds self.lock for the duration of the call."""
  def inner(self, *args, **kwargs):
    try:
      try:
        self.lock.acquire()
      except KeyboardInterrupt:
        print >> sys.stderr, 'Was deadlocked'
        raise
      return method(self, *args, **kwargs)
    finally:
      self.lock.release()
  return inner


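# Example for lockedmethod() (illustrative sketch). The decorated class only
# needs a 'lock' attribute, like the class-level lock WorkItem defines below:
#   class Counter(object):
#     lock = threading.Lock()
#     value = 0
#     @lockedmethod
#     def bump(self):
#       self.value += 1
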
class WorkItem(object):
  """One work item."""
  # On cygwin, creating a lock starts throwing errors randomly when nearing
  # ~100 locks. As a workaround, use a single lock. Yep you read it right.
  # Single lock for all the 100 objects.
  lock = threading.Lock()

  def __init__(self, name):
    # A unique string representing this work item.
    self._name = name

  def run(self, work_queue):
    """work_queue is passed as keyword argument so it should be
    the last parameter of the function when you override it."""
    pass

  @property
  def name(self):
    return self._name


class ExecutionQueue(object):
  """Runs a set of WorkItems that have interdependencies, where WorkItems are
  added as they are processed.

  In gclient's case, Dependencies sometimes need to be run out of order due to
  the From() keyword. This class makes sure that all the required dependencies
  are run before running each one.

  Methods of this class are thread safe.
  """
  def __init__(self, jobs, progress):
    """jobs specifies the number of concurrent tasks to allow. progress is a
    Progress instance."""
    # Set when a thread is done or a new item is enqueued.
    self.ready_cond = threading.Condition()
    # Maximum number of concurrent tasks.
    self.jobs = jobs
    # List of WorkItem, for gclient, these are Dependency instances.
    self.queued = []
    # List of strings representing each Dependency.name that was run.
    self.ran = []
    # List of items currently running.
    self.running = []
    # Exceptions thrown if any.
    self.exceptions = Queue.Queue()
    # Progress status.
    self.progress = progress
    if self.progress:
      self.progress.update(0)

  def enqueue(self, d):
    """Enqueue one Dependency to be executed later once its requirements are
    satisfied.
    """
    assert isinstance(d, WorkItem)
    self.ready_cond.acquire()
    try:
      self.queued.append(d)
      total = len(self.queued) + len(self.ran) + len(self.running)
      logging.debug('enqueued(%s)' % d.name)
      if self.progress:
        self.progress._total = total + 1
        self.progress.update(0)
      self.ready_cond.notifyAll()
    finally:
      self.ready_cond.release()

  def flush(self, *args, **kwargs):
    """Runs all enqueued items until all are executed."""
    kwargs['work_queue'] = self
    self.ready_cond.acquire()
    try:
      while True:
        # Check for task to run first, then wait.
        while True:
          if not self.exceptions.empty():
            # Systematically flush the queue when an exception is logged.
            self.queued = []
          self._flush_terminated_threads()
          if (not self.queued and not self.running or
              self.jobs == len(self.running)):
            logging.debug('No more worker threads or can\'t queue anything.')
            break

          # Check for new tasks to start.
          for i in xrange(len(self.queued)):
            # Verify its requirements.
            for r in self.queued[i].requirements:
              if not r in self.ran:
                # Requirement not met.
                break
            else:
              # Start one work item: all its requirements are satisfied.
              self._run_one_task(self.queued.pop(i), args, kwargs)
              break
          else:
            # Couldn't find an item that could run. Break out of the outer loop.
            break

        if not self.queued and not self.running:
          # We're done.
          break
        # We need to poll here otherwise Ctrl-C isn't processed.
        try:
          self.ready_cond.wait(10)
        except KeyboardInterrupt:
          # Help debugging by printing some information:
          print >> sys.stderr, (
              ('\nAllowed parallel jobs: %d\n# queued: %d\nRan: %s\n'
               'Running: %d') % (
                  self.jobs,
                  len(self.queued),
                  ', '.join(self.ran),
                  len(self.running)))
          for i in self.queued:
            print >> sys.stderr, '%s: %s' % (i.name, ', '.join(i.requirements))
          raise
        # Something happened: self.enqueue() or a thread terminated. Loop again.
    finally:
      self.ready_cond.release()

    assert not self.running, 'Now guaranteed to be single-threaded'
    if not self.exceptions.empty():
      # To get back the stack location correctly, the raise a, b, c form must
      # be used; passing a tuple as the first argument doesn't work.
      e = self.exceptions.get()
      raise e[0], e[1], e[2]
    if self.progress:
      self.progress.end()

  def _flush_terminated_threads(self):
    """Flush threads that have terminated."""
    running = self.running
    self.running = []
    for t in running:
      if t.isAlive():
        self.running.append(t)
      else:
        t.join()
        sys.stdout.flush()
        if self.progress:
          self.progress.update(1, t.item.name)
        if t.item.name in self.ran:
          raise Error(
              'gclient is confused, "%s" is already in "%s"' % (
                  t.item.name, ', '.join(self.ran)))
        if not t.item.name in self.ran:
          self.ran.append(t.item.name)

  def _run_one_task(self, task_item, args, kwargs):
    if self.jobs > 1:
      # Start the thread.
      index = len(self.ran) + len(self.running) + 1
      new_thread = self._Worker(task_item, index, args, kwargs)
      self.running.append(new_thread)
      new_thread.start()
    else:
      # Run the 'thread' inside the main thread. Don't try to catch any
      # exception.
      task_item.run(*args, **kwargs)
      self.ran.append(task_item.name)
      if self.progress:
        self.progress.update(1, ', '.join(t.item.name for t in self.running))

  class _Worker(threading.Thread):
    """One thread to execute one WorkItem."""
    def __init__(self, item, index, args, kwargs):
      threading.Thread.__init__(self, name=item.name or 'Worker')
      logging.info('_Worker(%s) reqs:%s' % (item.name, item.requirements))
      self.item = item
      self.index = index
      self.args = args
      self.kwargs = kwargs

    def run(self):
      """Runs in its own thread."""
      logging.debug('_Worker.run(%s)' % self.item.name)
      work_queue = self.kwargs['work_queue']
      try:
        self.item.run(*self.args, **self.kwargs)
      except Exception:
        # Catch exception location.
        logging.info('Caught exception in thread %s' % self.item.name)
        logging.info(str(sys.exc_info()))
        work_queue.exceptions.put(sys.exc_info())
      logging.info('_Worker.run(%s) done' % self.item.name)

      work_queue.ready_cond.acquire()
      try:
        work_queue.ready_cond.notifyAll()
      finally:
        work_queue.ready_cond.release()


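# Example for WorkItem/ExecutionQueue (illustrative sketch; gclient passes a
# real Progress object, None keeps the sketch self-contained). 'b' only starts
# once 'a' has finished because of its requirements list:
#   class Step(WorkItem):
#     def __init__(self, name, requirements):
#       WorkItem.__init__(self, name)
#       self.requirements = requirements
#     def run(self, work_queue):
#       print 'running %s' % self.name
#
#   queue = ExecutionQueue(jobs=2, progress=None)
#   queue.enqueue(Step('a', []))
#   queue.enqueue(Step('b', ['a']))
#   queue.flush()
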
def GetEditor(git):
  """Returns the most plausible editor to use."""
  if git:
    editor = os.environ.get('GIT_EDITOR')
  else:
    editor = os.environ.get('SVN_EDITOR')
  if not editor:
    editor = os.environ.get('EDITOR')
  if not editor:
    if sys.platform.startswith('win'):
      editor = 'notepad'
    else:
      editor = 'vim'
  return editor


def RunEditor(content, git):
  """Opens up the default editor in the system to get the CL description."""
  file_handle, filename = tempfile.mkstemp(text=True)
  # Make sure CRLF is handled properly by requiring none.
  if '\r' in content:
    print >> sys.stderr, (
        '!! Please remove \\r from your change description !!')
  fileobj = os.fdopen(file_handle, 'w')
  # Still remove \r if present.
  fileobj.write(re.sub('\r?\n', '\n', content))
  fileobj.close()

  try:
    cmd = '%s %s' % (GetEditor(git), filename)
    if sys.platform == 'win32' and os.environ.get('TERM') == 'msys':
      # Msysgit requires 'env' to be present on the command line.
      cmd = 'env ' + cmd
    try:
      # shell=True to allow the shell to handle all forms of quotes in
      # $EDITOR.
      subprocess2.check_call(cmd, shell=True)
    except subprocess2.CalledProcessError:
      return None
    return FileRead(filename)
  finally:
    os.remove(filename)