[email protected]06617272010-11-04 13:50:501# Copyright (c) 2010 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
[email protected]5f3eee32009-09-17 00:34:304
[email protected]5aeb7dd2009-11-17 18:09:015"""Generic utils."""
6
[email protected]167b9e62009-09-17 17:41:027import errno
[email protected]d9141bf2009-12-23 16:13:328import logging
[email protected]5f3eee32009-09-17 00:34:309import os
[email protected]3742c842010-09-09 19:27:1410import Queue
[email protected]ac915bb2009-11-13 17:03:0111import re
[email protected]8f9c69f2009-09-17 00:48:2812import stat
[email protected]5f3eee32009-09-17 00:34:3013import subprocess
14import sys
[email protected]9e5317a2010-08-13 20:35:1115import threading
[email protected]167b9e62009-09-17 17:41:0216import time
[email protected]5f3eee32009-09-17 00:34:3017import xml.dom.minidom
[email protected]167b9e62009-09-17 17:41:0218import xml.parsers.expat
[email protected]5f3eee32009-09-17 00:34:3019
[email protected]5f3eee32009-09-17 00:34:3020
[email protected]5a376ed2011-03-30 01:18:1521def hack_subprocess():
22 """subprocess functions may throw exceptions when used in multiple threads.
23
24 See http://bugs.python.org/issue1731717 for more information.
25 """
26 subprocess._cleanup = lambda: None
[email protected]06617272010-11-04 13:50:5027
28
[email protected]66c83e62010-09-07 14:18:4529class Error(Exception):
30 """gclient exception class."""
31 pass
32
33
34class CheckCallError(OSError, Error):
[email protected]9a2f37e2009-12-19 16:03:2835 """CheckCall() returned non-0."""
[email protected]66c83e62010-09-07 14:18:4536 def __init__(self, command, cwd, returncode, stdout, stderr=None):
37 OSError.__init__(self, command, cwd, returncode, stdout, stderr)
[email protected]ad80e3b2010-09-09 14:18:2838 Error.__init__(self, command)
[email protected]9a2f37e2009-12-19 16:03:2839 self.command = command
40 self.cwd = cwd
[email protected]66c83e62010-09-07 14:18:4541 self.returncode = returncode
[email protected]9a2f37e2009-12-19 16:03:2842 self.stdout = stdout
[email protected]7be5ef22010-01-30 22:31:5043 self.stderr = stderr
[email protected]9a2f37e2009-12-19 16:03:2844
[email protected]7b194c12010-09-07 20:57:0945 def __str__(self):
46 out = ' '.join(self.command)
47 if self.cwd:
48 out += ' in ' + self.cwd
49 if self.returncode is not None:
50 out += ' returned %d' % self.returncode
51 if self.stdout is not None:
52 out += '\nstdout: %s\n' % self.stdout
53 if self.stderr is not None:
54 out += '\nstderr: %s\n' % self.stderr
55 return out
56
[email protected]9a2f37e2009-12-19 16:03:2857
[email protected]5a376ed2011-03-30 01:18:1558def Popen(args, **kwargs):
59 """Calls subprocess.Popen() with hacks to work around certain behaviors.
60
61 Ensure English outpout for svn and make it work reliably on Windows.
62 """
63 logging.debug(u'%s, cwd=%s' % (u' '.join(args), kwargs.get('cwd', '')))
64 if not 'env' in kwargs:
65 # It's easier to parse the stdout if it is always in English.
66 kwargs['env'] = os.environ.copy()
67 kwargs['env']['LANGUAGE'] = 'en'
68 if not 'shell' in kwargs:
69 # *Sigh*: Windows needs shell=True, or else it won't search %PATH% for the
70 # executable, but shell=True makes subprocess on Linux fail when it's called
71 # with a list because it only tries to execute the first item in the list.
72 kwargs['shell'] = (sys.platform=='win32')
73 try:
74 return subprocess.Popen(args, **kwargs)
75 except OSError, e:
76 if e.errno == errno.EAGAIN and sys.platform == 'cygwin':
77 raise Error(
78 'Visit '
79 'http://code.google.com/p/chromium/wiki/CygwinDllRemappingFailure to '
80 'learn how to fix this error; you need to rebase your cygwin dlls')
81 raise
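
# Minimal usage sketch (not part of the original module; the command below is
# a made-up example): Popen() accepts the same arguments as subprocess.Popen().
#   proc = Popen(['svn', 'info'], cwd='.', stdout=subprocess.PIPE)
#   out, _ = proc.communicate()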


def CheckCall(command, print_error=True, **kwargs):
  """Similar to subprocess.check_call() but redirects stdout and
  returns (stdout, stderr).

  Works on python 2.4
  """
  try:
    stderr = None
    if not print_error:
      stderr = subprocess.PIPE
    process = Popen(command, stdout=subprocess.PIPE, stderr=stderr, **kwargs)
    std_out, std_err = process.communicate()
  except OSError, e:
    raise CheckCallError(command, kwargs.get('cwd', None), e.errno, None)
  if process.returncode:
    raise CheckCallError(command, kwargs.get('cwd', None), process.returncode,
                         std_out, std_err)
  return std_out, std_err
[email protected]9a2f37e2009-12-19 16:03:28102
103
[email protected]ac915bb2009-11-13 17:03:01104def SplitUrlRevision(url):
105 """Splits url and returns a two-tuple: url, rev"""
106 if url.startswith('ssh:'):
[email protected]78b8cd12010-10-26 12:47:07107 # Make sure ssh://[email protected]/~/test.git@stable works
108 regex = r'(ssh://(?:[-\w]+@)?[-\w:\.]+/[-~\w\./]+)(?:@(.+))?'
[email protected]ac915bb2009-11-13 17:03:01109 components = re.search(regex, url).groups()
110 else:
[email protected]116704f2010-06-11 17:34:38111 components = url.split('@', 1)
[email protected]ac915bb2009-11-13 17:03:01112 if len(components) == 1:
113 components += [None]
114 return tuple(components)
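
# Behavior sketch (the URLs are made-up examples):
#   SplitUrlRevision('http://example.com/svn/trunk@1234')
#       -> ('http://example.com/svn/trunk', '1234')
#   SplitUrlRevision('ssh://host/repo.git')
#       -> ('ssh://host/repo.git', None)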


def ParseXML(output):
  try:
    return xml.dom.minidom.parseString(output)
  except xml.parsers.expat.ExpatError:
    return None


def GetNamedNodeText(node, node_name):
  child_nodes = node.getElementsByTagName(node_name)
  if not child_nodes:
    return None
  assert len(child_nodes) == 1 and child_nodes[0].childNodes.length == 1
  return child_nodes[0].firstChild.nodeValue


def GetNodeNamedAttributeText(node, node_name, attribute_name):
  child_nodes = node.getElementsByTagName(node_name)
  if not child_nodes:
    return None
  assert len(child_nodes) == 1
  return child_nodes[0].getAttribute(attribute_name)


def SyntaxErrorToError(filename, e):
  """Raises a gclient_utils.Error exception with the human readable message"""
  try:
    # Try to construct a human readable error message
    if filename:
      error_message = 'There is a syntax error in %s\n' % filename
    else:
      error_message = 'There is a syntax error\n'
    error_message += 'Line #%s, character %s: "%s"' % (
        e.lineno, e.offset, re.sub(r'[\r\n]*$', '', e.text))
  except:
    # Something went wrong, re-raise the original exception
    raise e
  else:
    raise Error(error_message)


[email protected]5f3eee32009-09-17 00:34:30157class PrintableObject(object):
158 def __str__(self):
159 output = ''
160 for i in dir(self):
161 if i.startswith('__'):
162 continue
163 output += '%s = %s\n' % (i, str(getattr(self, i, '')))
164 return output
165
166
[email protected]5aeb7dd2009-11-17 18:09:01167def FileRead(filename, mode='rU'):
[email protected]5f3eee32009-09-17 00:34:30168 content = None
[email protected]5aeb7dd2009-11-17 18:09:01169 f = open(filename, mode)
[email protected]5f3eee32009-09-17 00:34:30170 try:
171 content = f.read()
172 finally:
173 f.close()
174 return content
175
176
[email protected]5aeb7dd2009-11-17 18:09:01177def FileWrite(filename, content, mode='w'):
178 f = open(filename, mode)
[email protected]5f3eee32009-09-17 00:34:30179 try:
180 f.write(content)
181 finally:
182 f.close()
183
184
[email protected]f9040722011-03-09 14:47:51185def rmtree(path):
186 """shutil.rmtree() on steroids.
[email protected]5f3eee32009-09-17 00:34:30187
[email protected]f9040722011-03-09 14:47:51188 Recursively removes a directory, even if it's marked read-only.
[email protected]5f3eee32009-09-17 00:34:30189
190 shutil.rmtree() doesn't work on Windows if any of the files or directories
191 are read-only, which svn repositories and some .svn files are. We need to
192 be able to force the files to be writable (i.e., deletable) as we traverse
193 the tree.
194
195 Even with all this, Windows still sometimes fails to delete a file, citing
196 a permission error (maybe something to do with antivirus scans or disk
197 indexing). The best suggestion any of the user forums had was to wait a
198 bit and try again, so we do that too. It's hand-waving, but sometimes it
199 works. :/
200
201 On POSIX systems, things are a little bit simpler. The modes of the files
202 to be deleted doesn't matter, only the modes of the directories containing
203 them are significant. As the directory tree is traversed, each directory
204 has its mode set appropriately before descending into it. This should
205 result in the entire tree being removed, with the possible exception of
206 *path itself, because nothing attempts to change the mode of its parent.
207 Doing so would be hazardous, as it's not a directory slated for removal.
208 In the ordinary case, this is not a problem: for our purposes, the user
209 will never lack write permission on *path's parent.
210 """
[email protected]f9040722011-03-09 14:47:51211 if not os.path.exists(path):
[email protected]5f3eee32009-09-17 00:34:30212 return
213
[email protected]f9040722011-03-09 14:47:51214 if os.path.islink(path) or not os.path.isdir(path):
215 raise Error('Called rmtree(%s) in non-directory' % path)
[email protected]5f3eee32009-09-17 00:34:30216
[email protected]5f3eee32009-09-17 00:34:30217 if sys.platform == 'win32':
[email protected]5f3eee32009-09-17 00:34:30218 # Some people don't have the APIs installed. In that case we'll do without.
[email protected]1edee692011-03-12 19:39:13219 win32api = None
220 win32con = None
[email protected]5f3eee32009-09-17 00:34:30221 try:
[email protected]1edee692011-03-12 19:39:13222 # Unable to import 'XX'
223 # pylint: disable=F0401
224 import win32api, win32con
[email protected]5f3eee32009-09-17 00:34:30225 except ImportError:
[email protected]f9040722011-03-09 14:47:51226 pass
[email protected]5f3eee32009-09-17 00:34:30227 else:
228 # On POSIX systems, we need the x-bit set on the directory to access it,
229 # the r-bit to see its contents, and the w-bit to remove files from it.
230 # The actual modes of the files within the directory is irrelevant.
[email protected]f9040722011-03-09 14:47:51231 os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
[email protected]5f3eee32009-09-17 00:34:30232
[email protected]f9040722011-03-09 14:47:51233 def remove(func, subpath):
234 if sys.platform == 'win32':
235 os.chmod(subpath, stat.S_IWRITE)
236 if win32api and win32con:
237 win32api.SetFileAttributes(subpath, win32con.FILE_ATTRIBUTE_NORMAL)
238 try:
239 func(subpath)
240 except OSError, e:
241 if e.errno != errno.EACCES or sys.platform != 'win32':
242 raise
243 # Failed to delete, try again after a 100ms sleep.
244 time.sleep(0.1)
245 func(subpath)
246
247 for fn in os.listdir(path):
[email protected]5f3eee32009-09-17 00:34:30248 # If fullpath is a symbolic link that points to a directory, isdir will
249 # be True, but we don't want to descend into that as a directory, we just
250 # want to remove the link. Check islink and treat links as ordinary files
251 # would be treated regardless of what they reference.
[email protected]f9040722011-03-09 14:47:51252 fullpath = os.path.join(path, fn)
[email protected]5f3eee32009-09-17 00:34:30253 if os.path.islink(fullpath) or not os.path.isdir(fullpath):
[email protected]f9040722011-03-09 14:47:51254 remove(os.remove, fullpath)
[email protected]5f3eee32009-09-17 00:34:30255 else:
[email protected]f9040722011-03-09 14:47:51256 # Recurse.
257 rmtree(fullpath)
[email protected]5f3eee32009-09-17 00:34:30258
[email protected]f9040722011-03-09 14:47:51259 remove(os.rmdir, path)
260
261# TODO(maruel): Rename the references.
262RemoveDirectory = rmtree
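
# Usage note (descriptive, not from the original file): call it like
# shutil.rmtree(); read-only files and transient Windows permission errors are
# handled internally.
#   rmtree('/tmp/some_checkout')  # the path is a made-up example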
[email protected]5f3eee32009-09-17 00:34:30263
264
[email protected]17d01792010-09-01 18:07:10265def CheckCallAndFilterAndHeader(args, always=False, **kwargs):
266 """Adds 'header' support to CheckCallAndFilter.
[email protected]5f3eee32009-09-17 00:34:30267
[email protected]17d01792010-09-01 18:07:10268 If |always| is True, a message indicating what is being done
269 is printed to stdout all the time even if not output is generated. Otherwise
270 the message header is printed only if the call generated any ouput.
[email protected]5f3eee32009-09-17 00:34:30271 """
[email protected]17d01792010-09-01 18:07:10272 stdout = kwargs.get('stdout', None) or sys.stdout
273 if always:
[email protected]559c3f82010-08-23 19:26:08274 stdout.write('\n________ running \'%s\' in \'%s\'\n'
[email protected]17d01792010-09-01 18:07:10275 % (' '.join(args), kwargs.get('cwd', '.')))
276 else:
277 filter_fn = kwargs.get('filter_fn', None)
278 def filter_msg(line):
279 if line is None:
280 stdout.write('\n________ running \'%s\' in \'%s\'\n'
281 % (' '.join(args), kwargs.get('cwd', '.')))
282 elif filter_fn:
283 filter_fn(line)
284 kwargs['filter_fn'] = filter_msg
285 kwargs['call_filter_on_first_line'] = True
286 # Obviously.
287 kwargs['print_stdout'] = True
288 return CheckCallAndFilter(args, **kwargs)
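
# Illustrative call (the command is a made-up example): prints the header line,
# then streams the command's output through CheckCallAndFilter().
#   CheckCallAndFilterAndHeader(['svn', 'update'], always=True, cwd='.')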
[email protected]5f3eee32009-09-17 00:34:30289
[email protected]17d01792010-09-01 18:07:10290
[email protected]e0de9cb2010-09-17 15:07:14291def SoftClone(obj):
292 """Clones an object. copy.copy() doesn't work on 'file' objects."""
[email protected]4ed34182010-09-17 15:57:47293 if obj.__class__.__name__ == 'SoftCloned':
294 return obj
[email protected]cb2985f2010-11-03 14:08:31295 class SoftCloned(object):
296 pass
[email protected]4ed34182010-09-17 15:57:47297 new_obj = SoftCloned()
[email protected]e0de9cb2010-09-17 15:07:14298 for member in dir(obj):
299 if member.startswith('_'):
300 continue
301 setattr(new_obj, member, getattr(obj, member))
302 return new_obj
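
# Descriptive note (added): only public attributes are copied; anything whose
# name starts with '_' is skipped, and the clone is a plain SoftCloned instance
# rather than a real file object.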
[email protected]db111f72010-09-08 13:36:53303
[email protected]e0de9cb2010-09-17 15:07:14304
305def MakeFileAutoFlush(fileobj, delay=10):
306 """Creates a file object clone to automatically flush after N seconds."""
307 if hasattr(fileobj, 'last_flushed_at'):
308 # Already patched. Just update delay.
309 fileobj.delay = delay
310 return fileobj
311
[email protected]b17b55b2010-11-03 14:42:37312 # Attribute 'XXX' defined outside __init__
313 # pylint: disable=W0201
[email protected]e0de9cb2010-09-17 15:07:14314 new_fileobj = SoftClone(fileobj)
[email protected]4ed34182010-09-17 15:57:47315 if not hasattr(new_fileobj, 'lock'):
316 new_fileobj.lock = threading.Lock()
[email protected]e0de9cb2010-09-17 15:07:14317 new_fileobj.last_flushed_at = time.time()
318 new_fileobj.delay = delay
[email protected]4ed34182010-09-17 15:57:47319 new_fileobj.old_auto_flush_write = new_fileobj.write
[email protected]e0de9cb2010-09-17 15:07:14320 # Silence pylint.
321 new_fileobj.flush = fileobj.flush
322
323 def auto_flush_write(out):
324 new_fileobj.old_auto_flush_write(out)
[email protected]db111f72010-09-08 13:36:53325 should_flush = False
[email protected]e0de9cb2010-09-17 15:07:14326 new_fileobj.lock.acquire()
[email protected]9c531262010-09-08 13:41:13327 try:
[email protected]e0de9cb2010-09-17 15:07:14328 if (new_fileobj.delay and
329 (time.time() - new_fileobj.last_flushed_at) > new_fileobj.delay):
[email protected]db111f72010-09-08 13:36:53330 should_flush = True
[email protected]e0de9cb2010-09-17 15:07:14331 new_fileobj.last_flushed_at = time.time()
[email protected]9c531262010-09-08 13:41:13332 finally:
[email protected]e0de9cb2010-09-17 15:07:14333 new_fileobj.lock.release()
[email protected]db111f72010-09-08 13:36:53334 if should_flush:
[email protected]e0de9cb2010-09-17 15:07:14335 new_fileobj.flush()
[email protected]db111f72010-09-08 13:36:53336
[email protected]e0de9cb2010-09-17 15:07:14337 new_fileobj.write = auto_flush_write
338 return new_fileobj
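
# Illustrative usage (the delay value is arbitrary): wrap sys.stdout so that a
# write triggers a flush once 5 seconds have passed since the last flush.
#   sys.stdout = MakeFileAutoFlush(sys.stdout, delay=5)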
[email protected]db111f72010-09-08 13:36:53339
340
[email protected]4ed34182010-09-17 15:57:47341def MakeFileAnnotated(fileobj):
342 """Creates a file object clone to automatically prepends every line in worker
343 threads with a NN> prefix."""
344 if hasattr(fileobj, 'output_buffers'):
345 # Already patched.
346 return fileobj
[email protected]cb1e97a2010-09-09 20:09:20347
[email protected]b17b55b2010-11-03 14:42:37348 # Attribute 'XXX' defined outside __init__
349 # pylint: disable=W0201
[email protected]4ed34182010-09-17 15:57:47350 new_fileobj = SoftClone(fileobj)
351 if not hasattr(new_fileobj, 'lock'):
352 new_fileobj.lock = threading.Lock()
353 new_fileobj.output_buffers = {}
354 new_fileobj.old_annotated_write = new_fileobj.write
[email protected]cb1e97a2010-09-09 20:09:20355
[email protected]4ed34182010-09-17 15:57:47356 def annotated_write(out):
357 index = getattr(threading.currentThread(), 'index', None)
358 if index is None:
359 # Undexed threads aren't buffered.
360 new_fileobj.old_annotated_write(out)
361 return
[email protected]cb1e97a2010-09-09 20:09:20362
[email protected]4ed34182010-09-17 15:57:47363 new_fileobj.lock.acquire()
364 try:
365 # Use a dummy array to hold the string so the code can be lockless.
366 # Strings are immutable, requiring to keep a lock for the whole dictionary
367 # otherwise. Using an array is faster than using a dummy object.
368 if not index in new_fileobj.output_buffers:
369 obj = new_fileobj.output_buffers[index] = ['']
370 else:
371 obj = new_fileobj.output_buffers[index]
372 finally:
373 new_fileobj.lock.release()
374
375 # Continue lockless.
376 obj[0] += out
377 while '\n' in obj[0]:
378 line, remaining = obj[0].split('\n', 1)
379 new_fileobj.old_annotated_write('%d>%s\n' % (index, line))
380 obj[0] = remaining
381
382 def full_flush():
383 """Flush buffered output."""
384 orphans = []
385 new_fileobj.lock.acquire()
386 try:
387 # Detect threads no longer existing.
388 indexes = (getattr(t, 'index', None) for t in threading.enumerate())
[email protected]cb2985f2010-11-03 14:08:31389 indexes = filter(None, indexes)
[email protected]4ed34182010-09-17 15:57:47390 for index in new_fileobj.output_buffers:
391 if not index in indexes:
392 orphans.append((index, new_fileobj.output_buffers[index][0]))
393 for orphan in orphans:
394 del new_fileobj.output_buffers[orphan[0]]
395 finally:
396 new_fileobj.lock.release()
397
398 # Don't keep the lock while writting. Will append \n when it shouldn't.
399 for orphan in orphans:
400 new_fileobj.old_annotated_write('%d>%s\n' % (orphan[0], orphan[1]))
401
402 new_fileobj.write = annotated_write
403 new_fileobj.full_flush = full_flush
404 return new_fileobj
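
# Illustrative usage: prefix each worker thread's output lines with "NN>". The
# thread must expose an integer 'index' attribute (ExecutionQueue._Worker below
# sets one); unindexed threads write through unchanged.
#   sys.stdout = MakeFileAnnotated(sys.stdout)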
[email protected]cb1e97a2010-09-09 20:09:20405
406
[email protected]17d01792010-09-01 18:07:10407def CheckCallAndFilter(args, stdout=None, filter_fn=None,
408 print_stdout=None, call_filter_on_first_line=False,
409 **kwargs):
410 """Runs a command and calls back a filter function if needed.
411
412 Accepts all subprocess.Popen() parameters plus:
413 print_stdout: If True, the command's stdout is forwarded to stdout.
414 filter_fn: A function taking a single string argument called with each line
415 of the subprocess's output. Each line has the trailing newline
416 character trimmed.
417 stdout: Can be any bufferable output.
418
419 stderr is always redirected to stdout.
420 """
421 assert print_stdout or filter_fn
422 stdout = stdout or sys.stdout
423 filter_fn = filter_fn or (lambda x: None)
424 assert not 'stderr' in kwargs
[email protected]2b9aa8e2010-08-25 20:01:42425 kid = Popen(args, bufsize=0,
426 stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
427 **kwargs)
[email protected]5f3eee32009-09-17 00:34:30428
[email protected]17d01792010-09-01 18:07:10429 # Do a flush of stdout before we begin reading from the subprocess's stdout
[email protected]559c3f82010-08-23 19:26:08430 stdout.flush()
[email protected]8ad1cee2010-08-16 19:12:27431
[email protected]5f3eee32009-09-17 00:34:30432 # Also, we need to forward stdout to prevent weird re-ordering of output.
433 # This has to be done on a per byte basis to make sure it is not buffered:
434 # normally buffering is done for each line, but if svn requests input, no
435 # end-of-line character is output after the prompt and it would not show up.
436 in_byte = kid.stdout.read(1)
[email protected]17d01792010-09-01 18:07:10437 if in_byte:
438 if call_filter_on_first_line:
439 filter_fn(None)
440 in_line = ''
441 while in_byte:
442 if in_byte != '\r':
443 if print_stdout:
444 stdout.write(in_byte)
445 if in_byte != '\n':
446 in_line += in_byte
447 else:
448 filter_fn(in_line)
449 in_line = ''
[email protected]17d01792010-09-01 18:07:10450 in_byte = kid.stdout.read(1)
451 # Flush the rest of buffered output. This is only an issue with
452 # stdout/stderr not ending with a \n.
453 if len(in_line):
454 filter_fn(in_line)
[email protected]5f3eee32009-09-17 00:34:30455 rv = kid.wait()
[email protected]5f3eee32009-09-17 00:34:30456 if rv:
[email protected]7b194c12010-09-07 20:57:09457 raise CheckCallError(args, kwargs.get('cwd', None), rv, None)
[email protected]17d01792010-09-01 18:07:10458 return 0
[email protected]5f3eee32009-09-17 00:34:30459
460
[email protected]9eda4112010-06-11 18:56:10461def FindGclientRoot(from_dir, filename='.gclient'):
[email protected]a9371762009-12-22 18:27:38462 """Tries to find the gclient root."""
[email protected]20760a52010-09-08 08:47:28463 real_from_dir = os.path.realpath(from_dir)
464 path = real_from_dir
[email protected]9eda4112010-06-11 18:56:10465 while not os.path.exists(os.path.join(path, filename)):
[email protected]3a292682010-08-23 18:54:55466 split_path = os.path.split(path)
467 if not split_path[1]:
[email protected]a9371762009-12-22 18:27:38468 return None
[email protected]3a292682010-08-23 18:54:55469 path = split_path[0]
[email protected]20760a52010-09-08 08:47:28470
471 # If we did not find the file in the current directory, make sure we are in a
472 # sub directory that is controlled by this configuration.
473 if path != real_from_dir:
474 entries_filename = os.path.join(path, filename + '_entries')
475 if not os.path.exists(entries_filename):
476 # If .gclient_entries does not exist, a previous call to gclient sync
477 # might have failed. In that case, we cannot verify that the .gclient
478 # is the one we want to use. In order to not to cause too much trouble,
479 # just issue a warning and return the path anyway.
[email protected]cb2985f2010-11-03 14:08:31480 print >> sys.stderr, ("%s file in parent directory %s might not be the "
[email protected]20760a52010-09-08 08:47:28481 "file you want to use" % (filename, path))
482 return path
483 scope = {}
484 try:
485 exec(FileRead(entries_filename), scope)
486 except SyntaxError, e:
487 SyntaxErrorToError(filename, e)
488 all_directories = scope['entries'].keys()
489 path_to_check = real_from_dir[len(path)+1:]
490 while path_to_check:
491 if path_to_check in all_directories:
492 return path
493 path_to_check = os.path.dirname(path_to_check)
494 return None
[email protected]3742c842010-09-09 19:27:14495
[email protected]d9141bf2009-12-23 16:13:32496 logging.info('Found gclient root at ' + path)
[email protected]a9371762009-12-22 18:27:38497 return path
[email protected]3ccbf7e2009-12-22 20:46:42498
[email protected]9eda4112010-06-11 18:56:10499
[email protected]3ccbf7e2009-12-22 20:46:42500def PathDifference(root, subpath):
501 """Returns the difference subpath minus root."""
502 root = os.path.realpath(root)
503 subpath = os.path.realpath(subpath)
504 if not subpath.startswith(root):
505 return None
506 # If the root does not have a trailing \ or /, we add it so the returned
507 # path starts immediately after the seperator regardless of whether it is
508 # provided.
509 root = os.path.join(root, '')
510 return subpath[len(root):]
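
# Behavior sketch (the paths are made-up examples and assume no symlinks):
#   PathDifference('/a/b', '/a/b/c/d')  -> 'c/d'
#   PathDifference('/a/b', '/x/y')      -> None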
[email protected]f43d0192010-04-15 02:36:04511
512
513def FindFileUpwards(filename, path=None):
514 """Search upwards from the a directory (default: current) to find a file."""
515 if not path:
516 path = os.getcwd()
517 path = os.path.realpath(path)
518 while True:
519 file_path = os.path.join(path, filename)
520 if os.path.isfile(file_path):
521 return file_path
522 (new_path, _) = os.path.split(path)
523 if new_path == path:
524 return None
525 path = new_path
526
527
def GetGClientRootAndEntries(path=None):
  """Returns the gclient root and the dict of entries."""
  config_file = '.gclient_entries'
  config_path = FindFileUpwards(config_file, path)

  if not config_path:
    print "Can't find %s" % config_file
    return None

  env = {}
  execfile(config_path, env)
  config_dir = os.path.dirname(config_path)
  return config_dir, env['entries']


class WorkItem(object):
  """One work item."""
  # A list of strings, each being a WorkItem name.
  requirements = []
  # A unique string representing this work item.
  name = None

  def run(self, work_queue):
    """work_queue is passed as a keyword argument so it should be
    the last parameter of the function when you override it."""
    pass


class ExecutionQueue(object):
  """Runs a set of WorkItems that have interdependencies and where WorkItems
  are added as they are processed.

  In gclient's case, Dependencies sometimes need to be run out of order due to
  the From() keyword. This class ensures that all the required dependencies
  are run before running each one.

  Methods of this class are thread safe.
  """
[email protected]9e5317a2010-08-13 20:35:11566 def __init__(self, jobs, progress):
567 """jobs specifies the number of concurrent tasks to allow. progress is a
568 Progress instance."""
[email protected]5a376ed2011-03-30 01:18:15569 hack_subprocess()
[email protected]9e5317a2010-08-13 20:35:11570 # Set when a thread is done or a new item is enqueued.
571 self.ready_cond = threading.Condition()
572 # Maximum number of concurrent tasks.
573 self.jobs = jobs
574 # List of WorkItem, for gclient, these are Dependency instances.
[email protected]80cbe8b2010-08-13 13:53:07575 self.queued = []
576 # List of strings representing each Dependency.name that was run.
577 self.ran = []
578 # List of items currently running.
579 self.running = []
[email protected]9e5317a2010-08-13 20:35:11580 # Exceptions thrown if any.
[email protected]3742c842010-09-09 19:27:14581 self.exceptions = Queue.Queue()
582 # Progress status
[email protected]80cbe8b2010-08-13 13:53:07583 self.progress = progress
584 if self.progress:
[email protected]3742c842010-09-09 19:27:14585 self.progress.update(0)
[email protected]80cbe8b2010-08-13 13:53:07586
587 def enqueue(self, d):
588 """Enqueue one Dependency to be executed later once its requirements are
589 satisfied.
590 """
591 assert isinstance(d, WorkItem)
[email protected]9e5317a2010-08-13 20:35:11592 self.ready_cond.acquire()
[email protected]80cbe8b2010-08-13 13:53:07593 try:
[email protected]80cbe8b2010-08-13 13:53:07594 self.queued.append(d)
595 total = len(self.queued) + len(self.ran) + len(self.running)
[email protected]9e5317a2010-08-13 20:35:11596 logging.debug('enqueued(%s)' % d.name)
597 if self.progress:
598 self.progress._total = total + 1
599 self.progress.update(0)
600 self.ready_cond.notifyAll()
[email protected]80cbe8b2010-08-13 13:53:07601 finally:
[email protected]9e5317a2010-08-13 20:35:11602 self.ready_cond.release()
[email protected]80cbe8b2010-08-13 13:53:07603
  def flush(self, *args, **kwargs):
    """Runs all enqueued items until all are executed."""
    kwargs['work_queue'] = self
    self.ready_cond.acquire()
    try:
      while True:
        # Check for a task to run first, then wait.
        while True:
          if not self.exceptions.empty():
            # Systematically flush the queue when an exception is logged.
            self.queued = []
          self._flush_terminated_threads()
          if (not self.queued and not self.running or
              self.jobs == len(self.running)):
            # No more worker threads or can't queue anything.
            break

          # Check for new tasks to start.
          for i in xrange(len(self.queued)):
            # Verify its requirements.
            for r in self.queued[i].requirements:
              if not r in self.ran:
                # Requirement not met.
                break
            else:
              # Start one work item: all its requirements are satisfied.
              self._run_one_task(self.queued.pop(i), args, kwargs)
              break
          else:
            # Couldn't find an item that could run. Break out of the outer
            # loop.
            break

        if not self.queued and not self.running:
          # We're done.
          break
        # We need to poll here otherwise Ctrl-C isn't processed.
        self.ready_cond.wait(10)
        # Something happened: self.enqueue() or a thread terminated. Loop again.
    finally:
      self.ready_cond.release()

    assert not self.running, 'Now guaranteed to be single-threaded'
    if not self.exceptions.empty():
      # To get back the stack location correctly, the raise a, b, c form must be
      # used, passing a tuple as the first argument doesn't work.
      e = self.exceptions.get()
      raise e[0], e[1], e[2]
    if self.progress:
      self.progress.end()
[email protected]80cbe8b2010-08-13 13:53:07653
[email protected]3742c842010-09-09 19:27:14654 def _flush_terminated_threads(self):
655 """Flush threads that have terminated."""
656 running = self.running
657 self.running = []
658 for t in running:
659 if t.isAlive():
660 self.running.append(t)
661 else:
662 t.join()
[email protected]97ae58e2011-03-18 00:29:20663 sys.stdout.full_flush() # pylint: disable=E1101
[email protected]3742c842010-09-09 19:27:14664 if self.progress:
[email protected]55a2eb82010-10-06 23:35:18665 self.progress.update(1, t.item.name)
[email protected]acc45672010-09-09 21:21:21666 assert not t.item.name in self.ran
667 if not t.item.name in self.ran:
668 self.ran.append(t.item.name)
[email protected]3742c842010-09-09 19:27:14669
  def _run_one_task(self, task_item, args, kwargs):
    if self.jobs > 1:
      # Start the thread.
      index = len(self.ran) + len(self.running) + 1
      new_thread = self._Worker(task_item, index, args, kwargs)
      self.running.append(new_thread)
      new_thread.start()
    else:
      # Run the 'thread' inside the main thread. Don't try to catch any
      # exception.
      task_item.run(*args, **kwargs)
      self.ran.append(task_item.name)
      if self.progress:
        self.progress.update(1, ', '.join(t.item.name for t in self.running))

[email protected]9e5317a2010-08-13 20:35:11685 class _Worker(threading.Thread):
686 """One thread to execute one WorkItem."""
[email protected]4ed34182010-09-17 15:57:47687 def __init__(self, item, index, args, kwargs):
[email protected]9e5317a2010-08-13 20:35:11688 threading.Thread.__init__(self, name=item.name or 'Worker')
[email protected]3742c842010-09-09 19:27:14689 logging.info(item.name)
[email protected]9e5317a2010-08-13 20:35:11690 self.item = item
[email protected]4ed34182010-09-17 15:57:47691 self.index = index
[email protected]3742c842010-09-09 19:27:14692 self.args = args
693 self.kwargs = kwargs
[email protected]80cbe8b2010-08-13 13:53:07694
[email protected]9e5317a2010-08-13 20:35:11695 def run(self):
696 """Runs in its own thread."""
697 logging.debug('running(%s)' % self.item.name)
[email protected]3742c842010-09-09 19:27:14698 work_queue = self.kwargs['work_queue']
[email protected]9e5317a2010-08-13 20:35:11699 try:
700 self.item.run(*self.args, **self.kwargs)
[email protected]c8d064b2010-08-16 16:46:14701 except Exception:
702 # Catch exception location.
[email protected]3742c842010-09-09 19:27:14703 logging.info('Caught exception in thread %s' % self.item.name)
704 logging.info(str(sys.exc_info()))
705 work_queue.exceptions.put(sys.exc_info())
706 logging.info('Task %s done' % self.item.name)
[email protected]9e5317a2010-08-13 20:35:11707
[email protected]3742c842010-09-09 19:27:14708 work_queue.ready_cond.acquire()
[email protected]9e5317a2010-08-13 20:35:11709 try:
[email protected]3742c842010-09-09 19:27:14710 work_queue.ready_cond.notifyAll()
[email protected]9e5317a2010-08-13 20:35:11711 finally:
[email protected]3742c842010-09-09 19:27:14712 work_queue.ready_cond.release()