#!/usr/bin/env python ''' Yabs - Yet Another Build System. Yabs is a simple build system, in the form of a Python library. Rules are expressed as python functions that are registered with Yabs using the add_rule() function. Targets are built by calling the make() function. Yabs requires python-2.2 or later. Names starting with '_' are for internal use only. ''' from __future__ import generators import commands, errno, exceptions, os, select, signal, string, sys, time, traceback, thread import threading, yabserrors import fcntl import StringIO import warnings warnings.filterwarnings( action='ignore', message='The popen2 module is deprecated. Use the subprocess module', category=DeprecationWarning, module='' ) import popen2 def place( n=1): ''' Useful debugging function - returns representation of source position of caller. ''' tb = traceback.extract_stack( None, 1+n) if len(tb)==0: # this can happen if psyco is being used. return '' filename, line, function, text = tb[0] ret = os.path.split( filename)[1] + ':' + str( line) + ':' + function + ':' if 0: tid = str( threading.currentThread()) ret = '[' + tid + '] ' + ret return ret def callers(): tb = traceback.extract_stack( None, None) ret = 'thread=' + str( threading.currentThread()) + '\n' i = 0 for filename, line, function, text in tb: ret += ' ' + filename + ':' + str( line) + ':' + function + '\n' return ret #if 'server-41' in os.uname()[1]: def debug1( *args): sys.stderr.write( place(2)) for arg in args: sys.stderr.write( ' ' + str(arg)) sys.stderr.write( '\n') if 0: # detailed logging to file. if 1: debug_f = file( 'yabs-debug-log', 'a', 1) else: debug_f = sys.stderr def debug( *args): debug_f.write( place(2)) for arg in args: debug_f.write( ' ' + str(arg)) debug_f.write( '\n') debug( time.ctime()) else: def debug( *args): pass # First check that we are running under a recent-enough version of Python. This # python-version trick was lifted from http://tinyurl.com/3yhry. def _initial_init( s): ''' like int(), but ignores any trailing non-digits, instead of raising `ValueError: invalid literal for int(). Also returns 0 if doesn't start with a digit. ' ''' for i in range( len( s)): if not s[i].isdigit(): if i==0: return 0 return int( s[:i]) return int( s) python_version_text = string.split( sys.version)[ 0] python_version_list = map( _initial_init, string.split( python_version_text, '.')) if python_version_list < [ 2, 2]: raise Exception( 'yabs: python version appears to be ' + python_version_text + '; yabs requires python-2.2 or later.') def _traceback_print_exception_wrapper( etype, value, tb0, limit=None, file=None, prefix=''): ''' Ignores the param. ''' return traceback.print_exception( etype, value, tb0, limit, file) print_exception = _traceback_print_exception_wrapper ''' Override this to print exceptions and tracebacks in different ways. ''' def print_exception_compact( etype, value, tb0, limit=None, file=None, prefix=''): ''' Alternative to traceback.print_exception; prints a more compact backtrace, with one line per frame, each line looking like: ::(): ''' if file is None: file = sys.stderr try: tb = traceback.extract_tb( tb0, limit) for frame in tb: filename, line, fnname, text = frame print >>file, prefix + str( filename) + ':' + str(line) + ':' + str( fnname) + '(): ' + str( text) lines = traceback.format_exception_only( etype, value) for line in lines: print >>file, prefix + line, finally: # clear things to avoid cycles. etype = None value = None tb0 = None tb = None def print_exc( limit=None, file=None, prefix=''): ''' Like traceback.print_exc(), except uses print_exception, so customised backtrace formatting will be used. ''' if file is None: file = sys.stderr try: etype, value, tb = sys.exc_info() print_exception( etype, value, tb, limit, file, prefix) finally: etype = value = tb = None def exception_as_string( etype, value, tb, limit=None, prefix=''): ''' Returns string representation of the exception specified by (etype, value, tb). ''' text = StringIO.StringIO() print_exception( etype, value, tb, limit, text, prefix) return text.getvalue() def current_exception_as_string( limit=None, prefix=''): ''' Returns string representation of the current exception. ''' text = StringIO.StringIO() print_exc( limit, text, prefix) return text.getvalue() def _trace( state, info): ''' Writes `info' to .statefile, if it exists. Used to maintain information about the command/function that Yabs is running, useful when building large projects with echoing turned off. todo: do something sensible when concurrent. ''' if state.statefile: f = open( state.statefile, 'w') try: f.write( str( info).strip() + '\n') finally: f.close() def subprocess( command, echo=None, state=None, fn=None, endtime=None, prefix=None): ''' Runs using , which defaults to .subprocessfn, which in turn defaults to _subprocess_popen4. If specified, should be an absolute time in seconds since epoc, as returned by time.time(), and the command is terminated at this time. This is not supported by all _subprocess_* functions. can be a string, or a function that returns a string; it will be used to prefix all lines returned in (and echoed to screen if is set). If None, is used. Returns (e, text), where is the integer return code, and is the text output by the command. ''' if state is None: state=default_state if prefix is None: prefix = state.echo_prefix if not fn: fn = state.subprocessfn if state.debug>=5: print place(), 'command=', command print place(), 'echo=', echo print place(), 'prefix=', prefix print place(), 'fn=', fn print place(), 'state.subprocessfn=', state.subprocessfn print place(), 'caller=', callers() _trace( state, command) e, text = fn( command, echo=echo, state=state, endtime=endtime, prefix=prefix) return e, text def subprocess_text( command, echo=None, state=None, fn=None, endtime=None, prefix=None): ''' Returns just the text part of the return from subprocess() function. Raises exception if the subprocess() function indicates an error. ''' e, text = subprocess( command, echo, state, fn, endtime, prefix) if e: raise yabserrors.command_error( command, e, text) return text def _subprocess_popen4( command, echo=False, state=None, endtime=None, prefix=None, linebuffering=True): ''' If timeout occurs, returns ( -1, ). ''' #print place(), command, 'echo=', echo, 'prefix=', prefix #print place(), 'callers=', callers() if echo is True: echo = sys.stdout if isinstance( echo, int) and echo != False: # bizarrely, isinstance( False, int) returns True. # Write to low-level fd: def write( text): os.write( echo, text) elif echo: # Assume is a stream with a write() method. def write( text): echo.write( text) else: # Don't output anything. def write( text): pass if prefix is None: prefix = '' if callable( prefix): prefixfn = prefix else: prefixfn = lambda p=prefix: p if state is None: state = default_state if prefix == None: prefix = state.echo_prefix # We use an array of lines/strings rather than a string - appending to a # string gets very slow with long strings. text = [] timedout = False child = popen2.Popen4( command) try: while 1: if endtime: timeout = endtime - time.time() if timeout <= 0: timeout = 0 else: timeout = None while 1: try: r, w, x = select.select( [child.fromchild], [], [], timeout) except select.error: pass else: break if r: # data is available from child.fromchild, or EOF. In either # case, we can read without fear of blocking. if linebuffering: t = child.fromchild.readline() if t: l = prefixfn() + t else: t = child.fromchild.read(100) if t: # Crude - will ommit prefix at start and append extra prefix at end. l = t.replace( '\n', '\n' + prefixfn()) if t=='': break # EOF text.append( t) write( l) else: #print place(), 'timeout' timedout = True if os.uname()[0]=='OpenBSD': os.kill( child.pid, signal.SIGKILL) else: os.kill( child.pid, signal.SIGTERM) break except Exception, e: try: os.kill( child.pid, signal.SIGTERM) except OSError: pass raise ret = child.wait() if timedout: ret = -1 return ret, ''.join(text) def _eintr_repeat( fn, max_its=4): ''' calls (), repeating up to times if it raises EINTR. ''' i = 0 while 1: i += 1 try: return fn() except IOError, e: if i is not handled correctly. ''' #print callers() import pty, select, threading if state is None: state = default_state if prefix == None: prefix = state.echo_prefix #print place(), 'endtime=', endtime #print place(), 'command=', command if state.debug>1: print '_subprocess_pty: ', command pid, fd = pty.fork() #print place(), 'pid,fd=', pid, fd if pid==0: #print 'child:' #args = command.split( ' ') # should handle \ escapes args = [ '/bin/sh', '-c', command] try: os.execvp( '/bin/sh', args) #try: os.execvp( args[0], args) except OSError, e: print e sys.exit( e.errno) # should only get OSError from os.execvp, but just in case: except Exception, e: print 'os.execvp failed:', e sys.exit( 1) else: #print 'parent' fd2 = os.fdopen( fd, 'r') if echo: out = _PrefixOutput( prefix, echo) else: out = _PrefixOutput( prefix, None) def threadfn(): pollblock = select.poll() pollblock.register( fd) #pollblock.register( sys.stdin) kill = False while 1: if endtime: timeout = 1000 * (endtime-time.time()) if timeout<1: timeout = 1 pollresult = pollblock.poll( timeout) else: pollresult = pollblock.poll() #print place(), 'pollresult=', pollresult if len( pollresult)==0: # timeout # terminate child process nicely, and set so # that we kill it brutally next time around. #print place(), 'killing child process...' if kill == True: sig = signal.SIGKILL else: sig = signal.SIGTERM kill = True try: os.kill( pid, sig) except OSError, e: print place(), 'failed to kill child process:', e pass for f, r in pollresult: assert f==fd or f==sys.stdin.fileno() if f==fd: text = '' try: text = fd2.readline() _eintr_repeat( lambda: out.write(text)) except IOError, e: #print place(), 'ignoring error:', e pass if not text: return t = threading.Thread( None, threadfn) t.setDaemon(1) t.start() #print place(), ': waiting for', pid dummypid, status = os.waitpid( pid, 0) if ( os.WIFSIGNALED( status)): print 'subprocess_pty: signal ', os.WTERMSIG( status) e = 1 elif ( os.WIFEXITED( status)): e = os.WEXITSTATUS( status) #print place(), pid, 'exited, dummypid=', dummypid, 'e=', e else: raise Exception( 'subprocess_ptr: child has stopped?') t.join() #os.close( fd) fd2.close() # this will also close fd. #print '_subprocess_pty: returning ', e, text return status, out.buffer def _subprocess_pty0( command, echo=False, state=None, endtime=None, prefix=None): ''' Implementation of subprocess() that uses pty. This implementation handles correctly; _subprocess_pty() above does not. ''' #print callers() import pty, select, threading if state is None: state = default_state if prefix == None: prefix = state.echo_prefix #print place(), 'endtime=', endtime #print place(), 'command=', command if state.debug>1: print '_subprocess_pty: ', command pid, fd = pty.fork() #print place(), 'pid,fd=', pid, fd if pid==0: #print 'child:' #args = command.split( ' ') # should handle \ escapes args = [ '/bin/sh', '-c', command] try: os.execvp( '/bin/sh', args) #try: os.execvp( args[0], args) except OSError, e: print e sys.exit( e.errno) # should only get OSError from os.execvp, but just in case: except Exception, e: print 'os.execvp failed:', e sys.exit( 1) else: #print 'parent' fd2 = os.fdopen( fd, 'r') if echo: out = _PrefixOutput( prefix, echo) else: out = _PrefixOutput( prefix, None) def threadfn(): pollblock = select.poll() pollblock.register( fd, select.POLLIN | select.POLLHUP) pollblock.register( sys.stdin, select.POLLIN | select.POLLHUP) kill = False while 1: if endtime: timeout = 1000 * (endtime-time.time()) if timeout<1: timeout = 1 pollresult = pollblock.poll( timeout) else: pollresult = pollblock.poll() #print place(), 'pollresult=', pollresult if len( pollresult)==0: # timeout # terminate child process nicely, and set so # that we kill it brutally next time around. #print place(), 'killing child process...' if kill == True: sig = signal.SIGKILL else: sig = signal.SIGTERM kill = True try: os.kill( pid, sig) except OSError, e: print place(), 'failed to kill child process:', e pass for f, r in pollresult: assert f==fd or f==sys.stdin.fileno() # we can get select.POLLHUP as well as select.POLLIN. # Seems ok to ignore select.POLLHUP in this case, and read. # the select.POLLHUP appears to be set next time. if r & select.POLLIN: if f==fd: # since upgrading to openbsd-4.6:python-2.6, have seen # interrupted system call errors here. try: text = fd2.read() print place(), 'have read text=%s' % repr(text) _eintr_repeat( lambda: out.write(text)) except IOError, e: print place(), 'ignoring error:', e elif 0 and f==sys.stdin.fileno(): l = sys.stdin.readline() os.write( fd, l) elif r & select.POLLERR or r & select.POLLHUP: #print place(), 'returning, r=', r # Seems that we can get this but there is still text to # read. so we read any left-over text here: try: t = fd2.read() except IOError: t = '' if t: out.write( t) #print place(), 'trailing text is:', t #if text: out.write(text+'\n') return t = threading.Thread( None, threadfn) t.setDaemon(1) t.start() dummypid, status = os.waitpid( pid, 0) if ( os.WIFSIGNALED( status)): print place(), 'signal %s' % os.WTERMSIG( status) e = 1 elif ( os.WIFEXITED( status)): e = os.WEXITSTATUS( status) #print place(), 'e=', e else: raise Exception( 'subprocess_ptr: child has stopped?') t.join() #os.close( fd) fd2.close() # this will also close fd. #print '_subprocess_pty: returning ', e, text return status, out.buffer def _subprocess_ossystem( command, echo=False, state=None, endtime=None, prefix=None): ''' Implementation of subprocess() that uses os.system. Doesn't support , and returns empty output string. ''' #print place(), command if endtime != None: raise Exception( 'yabs._subprocess_ossystem() doesn\'t support ') return os.system( command), '' def _subprocess_popen( command, echo=False, state=None, endtime=None, prefix=None): ''' Implementation of subprocess() that uses os.popen. Doesn't support . Runs specified command, returning (e, t), where is return code and is the text from command's stdout and stderr. if is True, also outputs line-by-line using print. If evaluates as true, outputs line-by-line using print >>echo. ''' #print callers(), command #print place(), command #print '_subprocess_popen: command=', command, 'echo=', echo assert type( command)==str if state is None: state = default_state if prefix is None: prefix = state.echo_prefix if endtime != None: raise Exception( 'yabs._subprocess_popen() doesn\'t support ') if os.name=='nt': # the following works on windows/python-24 - see test12 stdin, out = os.popen4( command) text = '' while True: l = out.readline() if l=='': break text += l if echo is True: print l, elif echo: print >>echo, l, if echo and l!='' and not l.endswith( '\n'): if echo is True: print else: print >>echo stdin.close() e = out.close() if e is None: e = 0 else: # this is based on commands.getstatusoutput(). differences # are that we read a line at a time, so that we can echo # to the screen while the command executes. # there should be a better way of capturing both stdout and # stderr, than using shell redirection as here, but i can't # find one - tried popen2 for example. # maybe python2.4's subcommand will provide a nice way. if echo is True or ( isinstance(echo,int) and echo!=0): out = _PrefixOutput( prefix, sys.stdout) elif echo: echo.write # should also check it is callable. out = _PrefixOutput( prefix, echo) else: out = _PrefixOutput( prefix, None) pipe = os.popen('{ ' + command + '; } 2>&1', 'r') #print place(), pipe e = None while True: try: line = pipe.readline() #print place(), repr( line) except exceptions.KeyboardInterrupt, e: print 'yabs: pipe_readline() returned exception:', e, e.__class__ line='' e = 0x0002 # fake a SIGINT if line=='': break # eof out.write( line) if e is None: e = pipe.close() else: pipe.close() if e is None: e = 0 if 0: # popen2.popen4 doesn't seem to send out stdin to the child. import popen2 # popen2.popen4 doesn't seem to capture stderr, so we do it by hand. p = popen2.Popen4( command + ' 2>&1') text = '' while True: l = p.fromchild.readline() if l=='': break text += l if echo: print l, if echo and l!='' and not l.endswith( '\n'): print e = p.wait() return e, out.buffer # Things for concurrency. Yabs has a single lock. Most of Yabs runs with # this lock held. The lock is released only while running commands to build # targets. There is also a single condition, which is signaled whenever things # have changed, allowing blocked threads to attempt to claim resources or # detect that targets have been built. def mt_exit( state): debug( 'called from:' + callers()) if not state.mt: return state._lock() try: state._mt_exited = True state._notifyAll() finally: debug() state._unlock() def _mt_target_fn( ruleresult, target, state, prefixes, nesting, remake): ''' Run in separate thread by _mt_spawntasks(). builds , then releases the 's resource and calls state.notifyAll(). ''' state._lock() try: try: if state.debug>=2: print state.prefix(), 'building:', ruleresult.target _run_ruleresult( ruleresult, state, prefixes, nesting, remake) except Exception, e: print_exc() os.abort() finally: debug( 'target=', target) if state.debug>=2: print prefix(), 'built: ', ruleresult.target # release the resource and tell everyone that they may be able to claim # a new resource: debug( 'finished, calling notifyAll, target=', target) ruleresult.resource.release() state._notifyAll() del state._mt_threads[ threading.currentThread()] state._unlock() def _mt_spawntasks( state): ''' Repeatedly attempts to claim resources required to build targets in .mt_targets. When a target's resource is acquired, we spawn a new thread to build that target. We wait on state._mt_condition. We always scan targets from the first target, so if more than target share a resource, the earliest target in .mt_targets will always be built first. Currently there is no attempt to reuse threads. We return when ._mt_exited is set. ''' state._lock() try: try: while 1: if state._mt_exited: if state.debug>=2: print place(), '_mt_exited set, exitting' debug( '_mt_exited set, exitting') break i = 0 debug( 'mt_targets=', state._mt_targets) while i < len( state._mt_targets): target, ruleresult, prefixes, nesting, remake = state._mt_targets[i] try: r = ruleresult.resource.tryacquire() except Exception, e: #print place(), 'resource acquisition failed:', e #print place(), 'ruleresult=', ruleresult # Resource acquisition failed badly. Mark target as failed. state.targetcache[ ruleresult.target] = ruleresult.rule, e del state._mt_targets[i] state._notifyAll() continue if r is True: # we can spawn a new thread to run the rule result: del state._mt_targets[i] t = threading.Thread( target=_mt_target_fn, name=ruleresult.target, args=( ruleresult, target, state, prefixes, nesting, remake)) state._mt_threads[ t] = ruleresult.target, len( state._mt_threads) if state.debug>=4: print 'starting new thread: ' + str( len( state._mt_threads))\ + ': ' + t.getName() if state.debug>=5: print 'new thread is for:', ruleresult.target debug( 'starting new thread:', len( state._mt_threads), target) t.setDaemon(1) t.start() i = 0 else: if r is False: debug( 'couldn\'t acquire resource:', ruleresult.resource) #print place(), 'couldn\'t acquire resource:', ruleresult.resource, type(r) else: debug( 'couldn\'t acquire resource immediately:', ruleresult.resource) # tryagain later: if state._mt_wakeup_time==0 or r < state._mt_wakeup_time: state._mt_wakeup_time = r i += 1 debug( 'calling state.wait()') state._wait() #print place() + 'state.wait() returned' debug( 'state.wait() returned') finally: state._unlock() except Exception, e: print_exc() assert 0, str( e) class mt_Resource: ''' Simple default resource for use by concurrent yabs. The resource can have up to users. If is specified, the resource is also only available if the system's current load average (os.getloadavg()[0]) is less than . We are protected by state's mutex. Note that resources are not owned by specific threads. ''' def __init__( self, n=1, max_load_average=None): #print place(), 'n=%s max_load_average=%s' % ( str(n), str(max_load_average)) self.n = n self.max_load_average = max_load_average self.count = 0 def tryacquire( self): ''' Returns True if resource was acquired, else returns False. ''' try: assert self.count <= self.n allow = False if self.count < self.n: if self.max_load_average is None: allow = True else: try: current_load_average = os.getloadavg()[0] except OSError: current_load_average = 0 if current_load_average < self.max_load_average: allow = True if 0: print place(), 'allow=%i current_load_average=%f self.max_load_average=%f\n' % ( allow, current_load_average, self.max_load_average) if allow: self.count += 1 if 0: print 'giving access to default resource, count=%s n=%s max_load_average=%s' % ( repr(self.count), repr(self.n), repr(self.max_load_average)) return True #print 'refusing access to default resource, count=', self.count if self.max_load_average is None: return False else: # return an absolute time so that we periodically re-check the # system's load-average. return time.time() + 5 finally: pass def release( self, n=0): try: assert self.count > 0 self.count -= 1 #print 'releasing resource, count=', self.count finally: pass def __str__( self): return 'n=%i, count=%i' % ( self.n, self.count) return 'n=%i, count=%i, owners=%s' % ( self.n, self.count, str( self.owners)) # _tdb_* - thread debugging. # # <_tdb_threads> is a mapping of thread id to the mutex/condition that the # thread is blocked on. # # <_tdb_mutexes> is mapping of mutex to thread that owns the mutex. # # <_tdb_lock> protects and . # _tdb_threads = {} _tdb_mutexes = {} _tdb_lock = threading.Lock() def _tdb_str(): ret = '' ret += '\nblocked threads:\n' for t, ( m, c) in _tdb_threads.items(): ret += ' t=' + str( t) + ', m=' + str( m) + '\n' ret += c #for cc in c: # ret += str( cc) + '\n' ret += '\nowned mutexes:\n' for m, t in _tdb_mutexes.items(): ret += ' m=' + str( m) + ', t=' + str( t) + '\n' return ret return '\nblocked threads:' + str( _tdb_threads) + '\nowned mutexes:' + str( _tdb_mutexes) _tdb_rlock_type = None def _tdb_willdeadlock( m): ''' If attempting to acquire mutex will deadlock, returns list of ,,,... that describe the deadlock. Otherwise returns False. ''' ret = [] ret.append( m) i = 0 while 1: #print 'ret=', ret t = _tdb_mutexes.get( m, None) if t is None: return False ret.append( t) if t == threading.currentThread(): if i==0 and type( m) == _tdb_rlock_type: # ok break #return True print 'deadlock detected:', ret return ret m, c = _tdb_threads.get( t, ( None, None)) ret.append( ( m, c)) i += 1 def _tdb_acquire( mutex): ''' Acquires , asserting if there would be a deadlock. ''' _tdb_lock.acquire() global _tdb_rlock_type if _tdb_rlock_type is None: temp_recursivelock = type( threading.RLock()) temp_lock = type( threading.Lock()) if temp_recursivelock != temp_lock: _tdb_rlock_type = temp_recursivelock else: _tdb_rlock_type = type(None) try: if type( mutex) != _tdb_rlock_type: assert _tdb_mutexes.get( mutex, None) != threading.currentThread(),\ 'this thread already owns the lock:\n' + callers() assert not _tdb_threads.has_key( threading.currentThread()) assert not _tdb_willdeadlock( mutex) _tdb_threads[ threading.currentThread()] = mutex, callers finally: _tdb_lock.release() mutex.acquire() _tdb_lock.acquire() try: del _tdb_threads[ threading.currentThread()] _tdb_mutexes[ mutex] = threading.currentThread() finally: _tdb_lock.release() def _tdb_release( mutex): ''' Releases . ''' _tdb_lock.acquire() try: assert not _tdb_threads.has_key( threading.currentThread()) assert _tdb_mutexes[ mutex] == threading.currentThread() del _tdb_mutexes[ mutex] finally: _tdb_lock.release() mutex.release() def _tdb_wait( mutex, condition, timeout_time=0): ''' Does a wait on ,. ''' _tdb_lock.acquire() try: assert _tdb_mutexes.get( mutex, None) == threading.currentThread() assert not _tdb_threads.has_key( threading.currentThread()) del _tdb_mutexes[ mutex] _tdb_threads[ threading.currentThread()] = condition, callers() finally: _tdb_lock.release() if timeout_time > 0: timeout = timeout_time - time.time() #print place(), 'timeout_time=', timeout_time, 'timeout=', timeout if timeout > 0: condition.wait( timeout) else: condition.wait() _tdb_lock.acquire() try: del _tdb_threads[ threading.currentThread()] _tdb_mutexes[ mutex] = threading.currentThread() finally: _tdb_lock.release() if 0: # test deadlock-detection: l = threading.Lock() _tdb_acquire( l) assert _tdb_willdeadlock( l) l = threading.RLock() _tdb_acquire( l) assert not _tdb_willdeadlock( l) if 0: l1 = threading.RLock() _tdb_acquire( l1) _tdb_acquire( l1) if 0: l1 = threading.Lock() l2 = threading.Lock() l3 = threading.Lock() def a(): _tdb_acquire( l1) time.sleep(1) _tdb_acquire( l2) def b(): _tdb_acquire( l2) time.sleep(1) _tdb_acquire( l3) def c(): _tdb_acquire( l3) time.sleep(1) _tdb_acquire( l1) print 'running deliberate deadlock' t1 = threading.Thread( target=a) t2 = threading.Thread( target=b) t3 = threading.Thread( target=c) t1.start() t2.start() t3.start() t1.join() t2.join() t3.join() class _MtimeCache: ''' Caches modification times for files. Also maintains statistics about the number of times we have called os.stat(). ''' def __init__( self): self.cache = dict() self.oldprefixes = [] self.num_nonstat = 0 self.num_stat = 0 class _NotifyingDict( dict): ''' A dictionary that calls a specified fn when it is modified. ''' def __init__( self, fn): ''' Modifications will cause () to be called. ''' self.fn = fn def __setitem__( self, i, v): dict.__setitem__( self, i, v) self.fn() class State: ''' Container for all state used by yabs functions. self.premake_fn is called before each target is run, passing and . ''' def __init__( self): self.rules = list() self.targetcache = _NotifyingDict( lambda self=self: self._notifyAll()) self.mtimecache = _MtimeCache() self.rulecache = dict() self.defaultbuild = '' # echo defaults: self.echo_target = 3 self.echo_rule = 0 self.echo_command = 3 self.echo_output = 3 self.echo_exception = 2 self.echo_timing = 0 self.periodic_debug_last = 0 self.periodic_debug_interval = None self.dryrun = False self.debug = 0 self.targets = list() self.pending_targets= [] self.autocmds_rules = dict() self.show_cwd_paths = False self.echo_prefix = '' # prefix for output from commands self.subprocessfn = _subprocess_popen self.subprocessfn = _subprocess_popen4 self.premake_fn = None self.keepgoing = False self.statefile = None self.summary = [] self.prefix = '' # prefix for yabs diagnostics. self.xtermtitle_target = None self.received_sighup = False # concurrency: self.mt = 0 # default to no concurrency. self._mt_exited = False self.mt_targets = [] # all additions to this must be followed by state.notifyAll(). Unlike # self.targetcache(), self.mt_targets doesn't do this # automatically. Really need a NotifyingList class. def mt_init( self, mt, max_load_average=None): ''' If is not zero, activates concurrency. Also sets the default resource so that we use up to separate threads for default targets. ''' if self.mt: raise Exception( 'mt_init() called twice') assert mt>=0 if mt>0: self.mt = mt self._mt_mutex = threading.Lock() self._mt_condition = threading.Condition( self._mt_mutex) self._mt_wakeup_time = 0 self._mt_targets = [] self._mt_threads = { threading.currentThread(): ( '', 0)} self._mt_default_resource = mt_Resource( mt, max_load_average) self._mt_spawnthread = threading.Thread( target=_mt_spawntasks, args=( self,), name='_mt_spawntasks') self._mt_spawnthread.setDaemon(1) # so that it quits when all other threads exit. self._mt_spawnthread.start() def _notifyAll( self): if self.mt: #print place(), 'notifying...' self._mt_condition.notifyAll() def _lock( self): if self.mt: _tdb_acquire( self._mt_mutex) def _unlock( self): if self.mt: _tdb_release( self._mt_mutex) def _wait( self): if self.mt: _tdb_wait( self._mt_mutex, self._mt_condition, self._mt_wakeup_time) # wakeup time will always be set if required before we are called # again. we need to reset it here to default to not timeout in # future. self._mt_wakeup_time = 0 default_state = State() ''' A global state object for Yabs. Many of the Yabs functions default to using this global if the user doesn't specify the state. There is usually little need to use a different state object. ''' class changed: pass class pending: pass class unchanged: pass # for speed, we assign an int to each category, so that start_make() can # have a fast lookup of whether categories matched a target. _category2int = {} _int2category = [] class _Rule: ''' Represents a rule. Used only by add_rule(). We store a complete traceback so that if a rule fails, we can display who created the rule. We require our caller to pass us this backtrace. ''' def __init__( self, rule_fn, phony, root, autocmds, autodeps, internal, always, backtrace, targetinfo, categories, lockfilename): ''' Only called from yabs.add_rule(). must be either None or a string ending with os.sep. should be from traceback.extract_stack(), and should exclude the yabs.add_rule() fn. we use it to calculate if is an integer, and it is also used in diagnostics if a rule fails. thus the design only requires one call to the relatively slow traceback.extract_stack() function. ''' assert callable( rule_fn) or type( rule_fn)==str assert phony is False or phony is True assert root is None or type( root)==str if type(root)==str: assert root.endswith( os.sep) # have disabled this because it makes things difficult to # debug. we prefer out caller to use yabs.fn() explicitly. #if type( rule_fn)==str: # #print 'calling fn, rule_fn=', rule_fn # rule_fn = fn( rule_fn, up=1) assert callable( rule_fn) self.rule_fn = rule_fn self.phony = phony self.root = root self.autocmds = autocmds self.autodeps = autodeps self.backtrace = backtrace self.internal = internal self.always = always self.targetinfo = targetinfo if lockfilename is None: self.lockfilename = None self.lockfile_timeout = None elif isinstance( lockfilename, str): self.lockfilename = lockfilename else: assert isinstance( lockfilename, tuple) or isinstance( lockfilename, list) assert len( lockfilename) == 2 assert isinstance( lockfilename[0], str) assert isinstance( lockfilename[1], int) or isinstance( lockfilename[1], long) self.lockfilename, self.lockfile_timeout = lockfilename self.categories_int = [] for category in categories: category_int = _category2int.get( category) if category_int is None: category_int = len( _category2int) _category2int[ category] = category_int _int2category.append( category) assert len( _int2category) == len( _category2int) self.categories_int.append( category_int) self.num_non_matches = 0 self.num_category_non_matches = 0 self.num_matches = 0 def __repr__( self): return 'fn=%s targetinfo=%s categories=%s' % ( str( self.rule_fn.func_code), repr(self.targetinfo), repr(self.categories_int), ) caller = 1 ''' A constant for use with get_caller_directory() and the parameter in add_rule(). ''' def get_caller_directory( n=caller): ''' Returns absolute path of the directory containing the python module corresponding to the function levels up the call stack. Directory's trailing os.sep is retained. E.g. yabs.get_caller_directory(0) returns the directory containing the yabs.py module (which contains the get_caller_directory() function itself). get_caller_directory(1) returns the directory containing the caller's module. get_caller_directory(2) returns the directory containing the caller's caller's module. Usually get_caller_directory(1) is the desired call. yabs.caller is a constant that gives the same effect, e.g.: yabs.get_caller_directory( yabs.caller). ''' assert n>=0 tb = traceback.extract_stack( None, 1+n) # traceback.extract_stack() puts leaf fns at end of the array. we've # deliberately asked for the n+1 items most recent fns, so the first item # is the one we want. filename, line, function, text = tb[0] root = os.path.split( os.path.abspath( filename))[0] + os.sep # very occasionally, we seem to end up with an incorrect directory. # touching yabs.py stops this error, so this is just to enable a # non-invasive way of turning checking on. if os.environ.has_key( 'YABS_SANITY') and 'home/home' in root: print '***' print 'possible problem in yabs.get_caller_directory' print 'tb=', tb print 'filename=', filename print 'os.path.abspath( filename)=', os.path.abspath( filename) print 'root=', root print '***' return root # The following fns find the globals()/locals() dict's of caller function # levels up the stack. based on code in: # http://groups.google.co.uk/group/comp.lang.python/browse_thread/thread/c8750a0adefe0c30/cb738b32e45a3bd0?lnk=st&q=%22Steven+D.+Majewski%22+uplocals&rnum=1&hl=en#cb738b32e45a3bd0 def _linearize( head, link_attr ): #print 'linearize: head = ', head list = [] obj = head while obj: list.append( obj ) obj = getattr( obj, link_attr ) #print 'linearize: returning ', list return list def _callers(): # traceback doesn't seem to give a way of directly getting complete info # about the current stack. so we use the lower-level `sys.exc_info()' which # gives info about the current stack from within an exception handler. try: raise Exception() except: # The original code used: # tb = sys.exc_traceback.tb_frame # but we try to be thread-safe by using sys.exc_info(). tb = sys.exc_info()[2] try: #print 'tb=', tb traces = _linearize( tb.tb_frame, 'f_back' ) list = [] for frame in traces : list.append( frame ) return list[1:] # return my caller's info finally: # avoid cycles to help the garbage collector del tb def _upglobals( n ): return _callers()[n+1].f_globals def _uplocals( n ): return _callers()[n+1].f_locals def _upfname( n ): return _callers()[n+1].f_code.co_name def fn( text, globals_=None, locals_=None, up=None): ''' Returns an anonymous function created by calling exec on . should be a string containing a Python function definition, ommiting the initial `def ' (actually, leading `def' can be retained, for clarity). Any leading/trailing white space is removed using str.strip(). Leading white space on all lines will be handled automatically by exec, so you can use an indented python triple-quoted string. In addition, newlines and backslashes are re-escaped inside single/double-quoted strings. this enables nested use of fn(). e.g.: yabs.fn( """ def ( target, state): if target != 'foo': return None return [], None, 'touch foo' """) If globals_/locals_ are not specified, fn() will find its caller's globals/locals using yabs._upglobals()/yabs._uplocals(). If is specified, yabs.fn() will look at the stack frame levels up. Unfortunately the use of globals_/locals_ doesn't work in exactly the way one would hope - the resultant function doesn't seem to be able to access locals variables. E.g.: import yabs def a(): x = 42 f = yabs.fn( """ def (): print x # fails def g(): print x # succeeds f() g() a() See make.py:test22(). The created function will have a .func_code attribute like conventional functions. Unfortunately this attribute is read-only, and the only element that we can control is .func_code.co_name, which we set to encode our caller's filename, function and line number (using '_' for non-alphanumberic characters). An example of a .func_code passed through str() is: import yabs f = yabs.fn( """ def(): pass """) print f.func_code - which outputs: ", line 1> ''' if up is None: if globals_ is None: globals_ = _upglobals(1) #print 'yabs.fn(): have set globals_ to:', globals_ if locals_ is None: locals_ = _uplocals(1) #print 'yabs.fn(): have set locals_ to:', locals_ else: assert globals_ is None assert locals_ is None globals_ = _upglobals( 1+up) locals_ = _uplocals( 1+up) #print #print 'yabs.fn: globals_=', globals_ #print 'yabs.fn: locals_=', locals_ text = text.strip() if text.startswith( 'def'): text = text[ 3:].strip() if not text.startswith( '('): raise Exception( 'fn string must start with `(\': ', repr( text)) def _escape_quotes( text): ''' escape newlines and backslashes that are inside single/double-quoted strings. should probably do something about backslashes inside triple-quoted strings, but we don't bother for now. ''' quote = None ret = '' for c in text: if quote: if c=='\n': ret += '\\n' elif c=='\\': ret += '\\\\' else: if c==quote: quote = None ret += c else: if c=='\'' or c=='"': quote = c ret += c return ret text = _escape_quotes( text) #print 'execing:', text # the exec will put the fn in the locals_ dict. we return it after removing # it from this dict. we construct a function name that roughly encodes our # caller's file, function and line. if up is None: tb = traceback.extract_stack( None, 2) else: tb = traceback.extract_stack( None, 2+up) #print 'tb[0]=', tb[0] tb_filename, tb_line, tb_function, tb_text = tb[0] fnname = '_yabs_fn_temp_' for i in tb_filename + '_' + tb_function: if i in 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_': fnname += i else: fnname += '_' fnname += '_' + str( tb_line) # now create the function using exec. exec ( 'def ' + fnname + text) in globals_, locals_ ret = locals_[ fnname] del locals_[ fnname] if 0: # make the new function have some properties of our caller's function. # unfortunately these attributes are read-only, so this doesn't work. tb = traceback.extract_stack( None, 1) filename, line, function, text = tb[0] ret.func_code.co_filename = filename ret.func_code.co_firstlineno = line return ret class RemakeIfZeroLength: ''' Special prerequisite/semi-prerequisite that forces a target to be remade if it exists but has zero length. ''' pass def add_rule( rule_fn, phony=False, root=None, autocmds=None, autodeps=None, internal=0, always=False, state=None, targetinfo=None, categories=[], lockfilename=None ): ''' Adds a new Yabs rule. should be a function that takes a target filename and a context (an object with a .state member that is the Yabs state, and a .out member to which text output should be sent). If the rule cannot create the target, it should return None. Otherwise, it should return a tuple with an initial portion of the following list: A string consisting of individual commands separated by newlines, or an empty string if no command is necessary. A list of prerequisites (files that are required to build the target). : A list of files that don't have to exist, but will cause a remake of if they can't be built or are newer than . A function that will be called after all prerequisites and semi-prerequisites have been created, which should return any additional prereqistes that can only be specified by examining the existing prerequisites. This function takes a single State parameter, and should return a list of additional prerequisites. It is repeatedly called until it returns an empty list. A number that is added to the rule's value. This is useful if a rule applies to different types of targets which require different levels of diagnostics. Resource required by the rule when it runs. Only used if Yabs is concurrent. must provide two methods: tryacquire(): Returns True if resource is acquired. Otherwise returns False or returns the absolute time at which the resource may become available - Yabs will check again after this time. If an exception is raised, Yabs assumes that the resource will never become available, and all targets that require that resource are marked as having failed. release(): Releases resource. *Disabled*: if is a string, it will be compiled into an anonymous function using yabs.fn(). If a command is prefixed with a '-', any error it returns is ignored. Rules are inserted at front of 's rules, so rules added later will be found first by the yabs.make() function. The returned prerequisites and semi-prerequisites can instead be functions, which take a single state parameter, which return a list of filenames when called. Alternatively, they can be a string, which is converted into a single (semi-)prerequisite. Similarly, the returned command can be a function taking a single state parameter that returns a command-string or None, or raises an exception. This function can generate the target itself directly, but it is usually better to return a command-string to allow autocmds and autodeps to work. A prerequisite or semi-prerequisite that is None is treated specially - it ensures that a target is always remade. If a prerequisite or semi-prerequisite is RemakeIfZeroLength, the target will always be remade if it exists but has zero length. If is True, Yabs never looks at 's datestamp, and doesn't require that exists after the rule's commands are run. The target is still treated as a real filename though - it will be converted to an absolute filename using (if specified) or the current directory. If is None, the rule is always passed absolute filenames, and must return absolute filenames for both prerequisites and semi-prerequisites. Otherwise, can be an integer, which identifies a function in the backtrace, whose module's directory is taken as the root for the rule. E.g. root=yabs.caller will use the module of the function that called yabs.add_rule(). root=yabs.caller+1 will use this function's caller's module etc. Otherwise, must be an absolute path, (optionally ending with os.sep) and all targets will be given relative to . Any prerequisite or semi-prerequisite returned from that are not an absolute path will have prepended. The command returned from the rule will be run with the current directory set to . Alternatively, can be yabs.caller, in which case yabs will replace it with the directory containing the file that calls yabs.add_rule(). If not None, is treated as a filename suffix that is appended to a target filename to form a dependency filename. If the dependency file exists, its contents are treated like extra semi-prerequisites. When the rule's commands are run, all files that are opened for reading by these commands are logged to the dependency file. Note that this system has only been implemented for Unix systems, using the $LD_PRELOAD environmental variable with a special shared library, yabs.autodeps_so, that is built automatically. This shared library has only been built for OpenBSD and Linux at the time of writing; other Unix systems may require slightly different build parameters. Also, files that the command attempts, but fails, to open, are also logged. They are treated as a third form of prerequisite, where inability to remake the prerequisite doesn't force a remake of the target; this is the correct behaviour - if such a prerequisites exists, then the command will always be re-run, but if it still doesn't exist, it is safe to not re-run the command. [At the moment, this is done using a hack - the semi-prerequisite is prefixed with a `-'; ultimately, it would make more sense to recognise the three forms of prerequisites that seem to be relevent to Yabs, perhaps using simple `', `-' and `--' prefixes.] This is necessary in situations involving multiple include-paths, such as the following: compiler include path is: /usr/local/include:/usr/include /usr/local/include/foo.h does not exist. /usr/include/foo.h exists. source file main.c contains #include "foo.h" Yabs is asked to build main.o, so compiles main.c /usr/local/include/foo.h is created. Yabs is asked to build main.o again. The creation of the file /usr/local/include/foo.h should force a recompilation of main.c, even though the attempt to open that file will have failed previously, and the filename will not be listed in tradional auto-dependency information such as the output from gcc -M. Note that gcc doesn't seem to attempt to open the header file when the parent directory doesn't exist, so this system isn't bomb-proof. If not None, is treated as a filename suffix that is appended to target filenames, and used to store the command(s) that returns when asked how to create a target. If the commands are changed (e.g. is modified), Yabs will detect this and force a remake of the target, even if it is newer than all of its prerequisites. The system works in the following way: yabs.add_rule() makes a call to yabs._add_autocmds_rule(), so that Yabs will have a rule available that will write the commands for a target to a file . Also, Yabs ensures that is always added to the prerequisites returned from . Note that this mechanism results in an extra file per target; if there are many targets, it might be better to use the technique in yabs3's compilation rules where command files are per-directory. is used to reduce the diagnostics when this rule is used. For example, setting internal=1 will reduce the effective state.debug level by 1 when outputing diagnostics in connection with this rule. [this is currently broken]. If True, this rule's command(s) will be run each time the target is required. Usually, a target's commands are run only once. If is None, yabs.default_state is used. If not None, is yielded by known_targets(). E.g. could be a regex used by the rule when matching targets. A list of categories, e.g. the objects returned by make_rulecategory_suffix() or make_rulecategory_root(). Categories are used purely as a speed optimisation, allowing Yabs to avoid calling rules for targets that the rule will not match. Yabs remembers the results of calling a category for a particular target so if (as is the intention) many rules use the same category objects, Yabs will be able to detect when it doesn't have to call a rule, with very little cost. A category should be callable, taking a single - an absolute target name. It should return True if the category matches (indicating that rules that include the category may match ) or False (indicating that rules that include the category will always return None for this target). If specified, should be either a string or a ( string, timeout) pair. The specified file is locked while the rule runs. This can be used to avoid concurrency problems between different invocations of Yabs programmes. If contains '%s', it is replaced by the name of the target. If is specified, the attempted lock will time out in the specified number of seconds. We return the created _Rule object. This is useful because it contains the rule's root if applicable. Example usage: def myrule( target, state): if target!='myprog.exe': return None prereqs = [ 'foo.o', 'bar.o'] semiprereqs = [ 'foo.h', 'bar.h'] command = 'link -o ' + target + ' ' + string.join( prereqs, ' ') return prereqs, semiprereqs, command yabs.add_rule( myrule) ''' if state is None: state = default_state # get our caller's backtrace, as required by class _Rule. backtrace = traceback.extract_stack( None, None)[:-1] if type( root)==int: # has leaf fns at the end of the array, so we count from # the end. E.g. if root=yabs.caller, this will be backtrace[-1], i.e. # the last item in the array i.e. our immediate caller. r = root filename, line, function, text = backtrace[-root] root = os.path.split( os.path.abspath( filename))[0] + os.sep #assert root == get_caller_directory( r+1) # the above line slows things down a bit. but it should be ok to # uncomment it. if type(root)==str and not root.endswith( os.sep): root = root + os.sep categories2 = categories[:] if root and _use_root_category: categories2.append( make_rulecategory_root( root)) #print place(), 'targetinfo=', targetinfo, 'root=', root, 'category=', categories2[-1] rule = _Rule( rule_fn, phony, root, autocmds, autodeps, internal, always, backtrace, targetinfo, categories2, lockfilename) # insert new rule at head of list of rules: state.rules.insert( 0, rule) if autocmds: _add_autocmds_rule( autocmds, state=state) return rule def known_targets( state=None): ''' Yields (, ) for each rule that as a non-None . E.g. rules registered with yabs2.add_patternrule() and yabs3.add_exe(), yabs3.add_so() etc. Intended for use as a diagnostic, e.g. to show the user what targets are available. ''' if state is None: state = default_state for r in reversed( state.rules): if r.targetinfo: yield r.root, r.targetinfo def _file_rule_internal( filename, state): ''' Default rule, for human-written files. This is automatically registered as the lowest-priority rule. Note that this rule is never used if a previous rule has matched the target, but has failed due to failure to create prerequisites. This avoids most problems caused by old generated files existing when a rule is modified incorrectly. Maybe this rule should not be registered. Instead, a rule such as yabs3._source_rule() that ignores files whose name implies that they are generated files is better. This would avoid problems with Yabs seeming to work because a generated file exists even when there is no rule that will regenerate this file, which can happen when a rule is changed. ''' assert isinstance( state, State) if mtime( filename, state.mtimecache) == 0: if state.debug>4: print 'yabs: file_rule failed for ' + filename return None else: if state.debug>4: print 'yabs: file_rule succeeded for ' + filename return '', _file_rule_ptr = _file_rule_internal def _file_rule( filename, state): return _file_rule_ptr( filename, state) add_rule( _file_rule, internal=1) def set_file_rule( rulefn, state=None): global _file_rule_ptr _file_rule_ptr = rulefn return if state is None: state = default_state add_rule( rulefn, state=state) state.rules[-1] = state.rules[0] del state.rules[0] def updatediff( filename, new_contents, always=False): ''' Writes to if doesn't exist, or 's existing contents differ from . File's parent directory is always made if not already present. Returns True if file has been changed, or False if file has been left unchanged. If is true, always overwrites file, but still returns False if contents are unchanged. ''' ret = True try: f = open( filename, 'r') contents = f.read() f.close() #print 'updatediff: contents=', contents #print 'updatediff: new_contents=', new_contents if contents==new_contents: #print 'contents are unchanged' ret = False if not always: return ret # spot the inefficiency... except IOError: pass #print 'contents are changed' f = open( filename, 'w') f.write( new_contents) f.close() return ret def _add_autocmds_rule( autocmds, state=None): ''' This is used by add_rule() when is specified. Adds a rule for filenames ending with . Given a target foo, this rule looks in Yabs's rule cache for the command for target foo, and writes it to foo using yabs.updatediff(). This function can be called multiple times with the same - it checks for multiple registration. ''' if state is None: state=default_state if type( autocmds)!=str: raise Exception( 'autocmds should be a string, but is type ', type( autocmds), ':', autocmds) if state.autocmds_rules.has_key( autocmds): return #print '_add_autocmds_rule: autocmds=', autocmds def autocmds_rule( target, state): #print 'yabs._add_autocmds_rule.autocmds_rule: target=', target, 'autocmds=', autocmds if target.endswith( autocmds): #print 'yabs._add_autocmds_rule.autocmds_rule: target=', target, 'autocmds=', autocmds target0 = target[:-len(autocmds)] rrp = _findrule_fromcache( target0, state) if rrp is None or len( rrp)!=2: if state.debug>=3: print place(), 'autocmds failure, suffix=', autocmds, ', target', target, ', rrp=', rrp print place(), 'rulecache=' for i in state.rulecache: print place(), ' ', i return None #print 'rrp=', rrp, 'len(rrp)=', len(rrp) rule, ruleresult = rrp if rule is None: return None command = _command_gettext( ruleresult.command, state) def autocmds_rule_command( context): if context.state.debug>3: print 'yabs: updating commandfile ' + target if updatediff( target, command): if context.state.debug>2: print 'yabs: command file changed: ' + target else: if context.state.debug>2: print 'yabs: command file unchanged: ' + target return '' return autocmds_rule_command, [], [ None] add_rule( autocmds_rule, state=state, internal=1) state.autocmds_rules[ autocmds] = autocmds_rule class _PrefixOutput: ''' Behaves like an output stream object. Stores all output in self.buffer. If is not None, also sends all output to , prefixing all lines with . We take care not to output the prefix as the last line when the last input ends with '\n' - the prefix is only output when we know that there are following characters. Also, if prefix is callable, it is expected to take no parameters and return a prefix, and is called each time it is used ''' def __init__( self, prefix='', stream=None): #print place(), 'stream=', stream #print place(), callers() self.buffer = '' self.startofline = True self.stream = stream #print '_PrefixOutput: prefix=', prefix if callable( prefix): self.prefix = prefix else: self.prefix = lambda : prefix def write( self, text): if text=='': return # It is important to write to self.stream in one call to # self.stream.write(), otherwise with concurrent yabs, any # line-buffering of the output from a threads' running of commands # is lost, and the output can be messed up. So we buffer up the # text we want to write into text2, and output it in one go. text2 = '' if self.startofline: text2 += self.prefix() lines = text.split( '\n') text2 += lines[0] self.startofline = False for line in lines[1:]: if line=='': self.startofline = True text2 += '\n' else: self.startofline = False text2 += '\n' + self.prefix() + line if isinstance( self.stream, int): os.write( self.stream, text2) elif self.stream: if 1: self.stream.write( text2) else: # this deliberately does multiple calls, and can be used to # test that test33 works. for l in text2: self.stream.write(l) self.buffer += text2 def flush( self): pass def fileno( self): if self.stream: return self.stream.fileno() else: return -1 def _print_withprefix( whereto, prefix, text): _PrefixOutput( prefix, whereto).write( text) def _command_gettext( command, state, prefix=None): ''' Calls if it is callable, to get the text of the command. Return text command, or raises exception containing any output from the callable command. If .echo is False, (the default is True), the callable command will be given dummy sys.stdout and sys.stderr which write to an internal buffer. This buffer is included in any exception, but is otherwise discarded. See yabs2's -e option. Note that this facility is turned off when yabs is concurrent. ''' #print place() assert isinstance( state, State) if prefix is None: prefix = state.echo_prefix if callable( command): #print '_command_gettext: command is', command.func_code # use single-item list to hold buffer, so that instance of our nested # Buffer class can modify it. if state.echo_output == 3: prefixoutput = _PrefixOutput( prefix, sys.stdout) else: prefixoutput = _PrefixOutput( prefix, None) if not state.mt: old_stdout = sys.stdout old_stderr = sys.stderr sys.stdout = prefixoutput sys.stderr = prefixoutput class Context: pass context = Context() context.state = state context.out = prefixoutput try: try: _trace( state, command.func_code) command2 = command( context) finally: if not state.mt: sys.stdout = old_stdout sys.stderr = old_stderr except: # commandfn_error() calls sys.exc_info() to get info about the # exception. raise yabserrors.commandfn_error( command, prefixoutput.buffer) # maybe we should translate to a yabserrors.command_error? if command2 is None: command2='' if type( command2)!=str: raise Exception( 'rule-command ' + str( command.func_code) + ' has returned type ' + str( type( command2)) + ', instead of a string: ' + str( command2)) command = command2 else: if type( command) != str: raise Exception( 'rule-command ' + str( command) + ' is not a string') return command # Things for autodeps # autodeps doesn't work on Windows - I don't know of any way to detect what # files are opened by child tasks on Windows. if os.name!='nt': uname = os.uname() uname_os = uname[0] uname_osv = uname[2] uname_cpu = uname[4] string.replace( uname_osv, '/', '-') string.replace( uname_osv, '(', '{') string.replace( uname_osv, ')', '}') uname_env = 'os=' + uname_os + ',osv=' + uname_osv + ',cpu=' + uname_cpu yabs_root = get_caller_directory() autodeps_so = os.path.join( yabs_root, 'autodeps-' + uname_env + '.so') #print 'yabs_root=', yabs_root # is a shared library that we use with $LD_PRELOAD when # running sub-commands. We use the suffix so that different # systems can share a Yabs directory. Naturally, we use a Yabs rule to # build . def _autodeps_rule( target, state): if target!=autodeps_so: return None #print place(), 'target=', target src = os.path.join( yabs_root, 'autodeps.c') obj = src + '.' + uname_env + '.o' if uname_os=='OpenBSD': return ( 'gcc -W -Wall -c -o ' + obj + ' ' + src + '\n' + 'gcc -shared -fpic -o ' + target + ' ' + obj, [ src], ) # could probably avoid the intermediate file, like # the linux version below. elif uname_os=='Linux': r = ( 'gcc -W -Wall -fPIC -shared -ldl -o ' + target + ' ' + src, [ src], ) #print place(), 'returning:', r return r else: raise Exception( 'Don\'t know how to build autodeps shared object for ' + uname_os) return None add_rule( _autodeps_rule, autocmds='.cmds', internal=1) if os.uname()[0] == 'OpenBSD': # OpenBSD doesn't appear to support flock in multiple threads - e.g. # fcntl.flock() raises an exception. So we use a per-filename (flock, # mutex) pair to mimic per-thread flock support. To lock, we first lock # the mutex, then we lock the file. To unlock, we unlock the file, then # unlock the mutex. def _flock_mutex_acquire( mutex, endtime=None): ''' Acquire a mutex with optional endtime timeout. Returns True if we have acquired else False. ''' if endtime is None: mutex.acquire() return True else: while 1: if mutex.acquire( blocking=False): return True t = time.time() if t >= endtime: return False time.sleep( min( 0.5, endtime - t)) def _flock_acquire_nonblocking( f): try: fcntl.flock( f, fcntl.LOCK_EX | fcntl.LOCK_NB) return True except IOError, e: if e.errno == errno.EACCES or e.errno == errno.EAGAIN: return False raise def _flock_acquire( f, endtime=None): ''' Acquire flock with optional endtime timeout. Returns True if we have acquired flock for file , else False. ''' if endtime is None: fcntl.flock( f, fcntl.LOCK_EX) return True else: while 1: if _flock_acquire_nonblocking( f): return True t = time.time() if t >= endtime: return False time.sleep( min( 0.5, endtime - t)) # We use a global dict mapping from flock filename to _flock_state object # containing a mutex/file pair, protected by a single global mutex. # # We keep a count of the number of threads that are blocked on each mutex; # if this is zero, we delete the mutex/file pair, which avoids leaking file # object for each filename that has been locked. _flock_mutex = threading.Lock() _flock_filename2state = {} class _flock_state: def __init__( self, filename): self.filename = filename self.mutex = threading.Lock() self.file = file( filename, 'a') self.num_blocked_threads = 0 def _flock( filename, timeout=None): ''' Acquires a file lock. ''' endtime = None if timeout is not None: endtime = time.time() + timeout _flock_mutex.acquire() if not _flock_filename2state.has_key( filename): _flock_filename2state[ filename] = _flock_state( filename) state = _flock_filename2state[ filename] state.num_blocked_threads += 1 _flock_mutex.release() have_acquired_mutex = _flock_mutex_acquire( state.mutex, endtime) _flock_mutex.acquire() state.num_blocked_threads -= 1 _flock_mutex.release() if not have_acquired_mutex: return # we've acquired mutex. now attempt to acquire flock. if not _flock_acquire( state.file, endtime): # We have failed to acquire the flock. state.mutex.release() return # We've acquired state.mutex and the flock. return , which can # be passed to _funlock(). state.mutex will only ever be released by # _funlock(), after the flock has been released. return state def _funlock( state): ''' Unlocks a file lock; should be as returned by _flock(). ''' fcntl.flock( state.file, fcntl.LOCK_UN) _flock_mutex.acquire() if state.num_blocked_threads == 0: # No other thread is waiting for this flock, so we can remote from # _flock_filename2state. Otherwise we will leave state.file open. del _flock_filename2state[ state.filename] state.mutex.release() _flock_mutex.release() else: def _flock( filename, timeout=None, mode=0666): ''' Opens and locks . Returns the file object, which can be passed to _funlock, or None if lock was not acquired. If is None (the default) we block until the file lock is available. Otherwise, we only wait until timeout has expired. If is not None, it should be an integer suitable for os.chmod(), e.g. 0644. The default is 0666 as locks are often to prevent concurrent access from different users, and we usually don't want to get a "permission denied" error if the lock happens to have been created by someone else.''' endtime = None if timeout is not None: endtime = time.time() + timeout f = file( filename, 'a') if mode is not None: try: os.chmod( filename, mode) except OSError, e: if e.errno == errno.EPERM: # chmod failed due to permissions; try to use sudo. subprocess( 'sudo -n chmod %i "%s"' % ( mode, filename), endtime = time.time() + 30) if endtime is None: fcntl.flock( f, fcntl.LOCK_EX) return f else: while 1: try: fcntl.flock( f, fcntl.LOCK_EX | fcntl.LOCK_NB) if 0: print place(), 'have acquired flock for filename=%s callers=\n%s' % ( filename, callers()) return f except IOError, e: if e.errno != errno.EACCES and e.errno != errno.EAGAIN: raise if 0: print place(), 'failed to acquire flock for filename=%s callers=\n%s' % ( filename, callers()) t = time.time() if t >= endtime: return False time.sleep( min( 0.5, endtime - t)) def _funlock( f): ''' Unlocks file object . ''' if f: fcntl.flock( f, fcntl.LOCK_UN) if 0: print place(), 'have released flock. callers=\n%s' % ( callers()) def test_flock(): ''' Test flock works with multiple processes and threads. ''' def threadfn( i): prefix = 'pid=%i thread=%i' % ( os.getpid(), i) filename = '/tmp/yabs-flocktest' print prefix, 'calling _flock()' f = _flock( filename) print prefix, 'has acquired lock. sleeping...' time.sleep(5) print prefix, 'calling _funlock()' _funlock( f) print prefix, 'returning' # Create a second process, so that we test that _flock() and _funlock() # across processes: p = os.fork() t1 = threading.Thread( None, lambda: threadfn(1)) t1.setDaemon(1) t1.start() time.sleep(0) t2 = threading.Thread( None, lambda: threadfn(2)) t2.setDaemon(1) t2.start() t3 = threading.Thread( None, lambda: threadfn(3)) t3.setDaemon(1) t3.start() t1.join() t2.join() if p: sys.exit() if 0: test_flock(); def _flock_wrapper( ruleresult, prefixes, nesting): ''' Convenience wrapper for _flock(). If the file lock is not immediately available, we output diagnostics before doing a blocking wait. ''' if ruleresult.rule.lockfilename: filename = ruleresult.rule.lockfilename.replace( '%s', ruleresult.target) f = _flock( filename, timeout=0) if f is None: print prefixes.prefix( nesting), 'waiting for flock: %s' % filename f = _flock( filename, timeout=ruleresult.rule.lockfile_timeout) return f def _run_ruleresult( ruleresult, state, prefixes, nesting, remake): ''' Runs the command/fn returned from a rule, by calling command_run(). Unlocks Yabs's lock while doing so. ''' target_text = _str_cwd( ruleresult.target, state) if state.echo_target==3: if ruleresult.rule.internal + ruleresult.internal > 0 and state.debug<=1: #print prefixes.prefix( nesting), 'remaking (internal), debug=', state.debug, ',' + target_text pass else: print prefixes.prefix( nesting), 'remaking: ' + target_text if state.debug>=1: print prefixes.prefix( nesting), '- because ' + remake if state.echo_rule==3: print prefixes.prefix( nesting), 'rule: ' + str( ruleresult.rule.rule_fn.func_code) if state.premake_fn: state.premake_fn( ruleresult.target, state) if ruleresult.rule.autodeps: autodeps_filename = ruleresult.target + ruleresult.rule.autodeps try: os.remove( autodeps_filename) except OSError: pass else: autodeps_filename = None old_mtime = mtime( ruleresult.target, state.mtimecache) if not ruleresult.rule.phony: _ensure_directory( os.path.dirname( ruleresult.target), state) # run command with state unlocked, so that concurrency works. note that # command_run() never raises exception, so we don't really need the # try..finally. if state.echo_timing: t = time.time() state._unlock() lockf = None try: if ruleresult.rule.lockfilename: lockf = _flock_wrapper( ruleresult, prefixes, nesting) e, text = command_run( ruleresult.command, state, ruleresult.rule.root, autodeps_filename, prefixes=prefixes) finally: if state.echo_timing: t = time.time() - t if lockf: _funlock( lockf) state._lock() if state.echo_timing: if ruleresult.rule.internal + ruleresult.internal > 0 and state.debug<=1: pass else: if e: print prefixes.prefix( nesting), '*** target failed: %8.1fs: %s' % ( t, target_text) else: print prefixes.prefix( nesting), 'remade: %8.1fs: %s' % ( t, target_text) mtime_flush( ruleresult.target, state.mtimecache) new_mtime = mtime( ruleresult.target, state.mtimecache) if e==0 and (not ruleresult.rule.phony) and new_mtime==0: #print place(), 'command failed to create the target.' e = yabserrors.file_command_error( ruleresult.target, ruleresult.command) if e: pass # error reporting is done inside start_make() else: if ruleresult.rule.phony: e = changed elif new_mtime <= old_mtime: e = unchanged else: e = changed state.targetcache[ ruleresult.target] = ruleresult.rule, e def command_run_text( command, state=None, cwd=None, autodeps_filename=None, fn=None, endtime=None): ''' As command_run, but returns text, or raises exception. ''' e, text = command_run( command, state, cwd, autodeps_filename, fn, endtime) if e: raise e return text def command_run( command, state=None, cwd=None, autodeps_filename=None, fn=None, endtime=None, prefixes=None): ''' Runs newline-separated commands in , prefixing each with `cd && ' if is specified. Also prefixes with settings for $LD_PRELOAD and $YABS_AUTODEPS_FILENAME if is set, so that auto dependencies are written to . If .silent is False, commands are written to stdout before being run. If .dryrun is True, command are not actually run. If .echo is True (the default), output from commands is written to stdout as the command executes; see yabs2's -e option. Returns ( e, outputtext). e is 0 if no error occurred, else a yabserrors.command_error, which will also contain the output text from the last sub-command. If state.use_os_system is True, always calls commands using os.system. This is to overcome problems with child tasks not responding properly to stdin etc. When python 2.4 is widespred, will hopefully be able to rely on the subprocess module. Absorbs exceptions. ''' if state is None: state = default_state assert isinstance( state, State) if prefixes is None: prefixes = _Prefixes( '', '', state) try: commands = _command_gettext( command, state) except Exception, e: return e, '' out_all = '' for i in string.split( commands, '\n'): command = string.strip( i) ignore_error = False if len(command)>0 and command[0]=='-': ignore_error = True command = command[1:] if len(command)==0: continue if cwd: command = 'cd ' + cwd + ' && ' + command if autodeps_filename: if os.name=='nt': raise Exception( 'rules with auto-dependency files are ' 'not supported under win32') # trying to use multiple chain LD_PRELOAD libraries sometimes # hangs, so we don't attempt to do it. if 0: ld_preload = os.getenv( 'LD_PRELOAD') else: ld_preload = None if ld_preload: ld_preload = autodeps_so + ':' + ld_preload else: ld_preload = autodeps_so command = 'export LD_PRELOAD=' + ld_preload\ + ' YABS_AUTODEPS_FILENAME=' + autodeps_filename + ' && '\ + command if state.echo_command==3: #if state.dryrun: print 'yabs: would run:' if 0 and state.mt: print '[' + str( threading.currentThread()) + '] ' + command else: print prefixes.prefix( 0), command if not state.dryrun: #print place(), 'state.echo_output=', state.echo_output #print place(), 'state.prefix=', state.prefix #print place(), 'prefix=', prefixes #print place(), 'echo_prefix=', echo_prefix e, out = subprocess( command, state.echo_output==3, state=state, fn=fn, endtime=endtime, prefix=prefixes.echo_prefix( 0)) out_all += out #print place(), 'yabs: e=', e, ', out=', out if e: if ignore_error: if state.debug >= 2: print 'yabs: ignored error from last command' else: return yabserrors.command_error( command, e, out), out_all return 0, out_all def _check_rule_command( rule, command): ''' Checks that command is either a string or is callable. Commands are eventually run with _command_gettext()/command_run(), which will check that if command is callable, it returns a string. ''' if not callable( command) and type( command)!=type(''): raise Exception( 'command returned from rule [' + str( rule.rule_fn.func_code) + '] is type ' + str( type( command)) + '; should be string or function.') def mtime_raw( filename, mtimecache=None): ''' Returns modification time of , or 0 if it doesn't exist. ''' assert type( filename)==str if mtimecache==None: mtimecache=default_state.mtimecache for prefix in mtimecache.oldprefixes: if filename.startswith( prefix): #print 'mtime_raw: not stat-ing: ' + filename mtimecache.num_nonstat += 1 return 1 try: mtimecache.num_stat += 1 t = os.path.getmtime( filename) except os.error, e: t = 0 if e.errno==errno.ESTALE: # try again, after opening file. this might fix some NFS issues. print place(), 'received ESTALE for:', filename try: file(filename) except OSError: pass else: try: mtimecache.num_stat += 1 t = os.path.getmtime( filename) except os.error: pass return t def mtime( filename, mtimecache=None): ''' Returns modification time of , or 0 if doesn't exist, using caching. ''' if mtimecache==None: mtimecache=default_state.mtimecache assert type( filename)==str, str( filename) filename = os.path.abspath( filename) #print 'yabs: mtime: filename0=' + filename0 + ', filename=' + filename if not mtimecache.cache.has_key( filename): mtimecache.cache[ filename] = mtime_raw( filename, mtimecache) ret = mtimecache.cache[ filename] #print 'mtime, returning ' + str( ret) + ', real=' + str( mtime_raw( filename))\ # + ': ' + filename return ret def mtime_flush( filename, mtimecache=None): ''' Removes any cached modification time for , including entries from calls to mtime_markold() and mtime_marknew(). For example, this is called after a command is run that remakes a target. ''' if mtimecache==None: mtimecache=default_state.mtimecache assert type( filename)==str filename = os.path.abspath( filename) if mtimecache.cache.has_key( filename): del mtimecache.cache[ filename] def mtime_markold( filename, mtimecache=None): ''' Future calls to mtime( , ) will return 1. ''' if mtimecache==None: mtimecache= default_state.mtimecache assert type( filename)==str filename = os.path.abspath( filename) mtimecache.cache[ filename] = 1 def mtime_mark_nonexistent( filename, mtimecache=None): ''' Future calls to mtime( , ) will return 0. ''' if mtimecache==None: mtimecache= default_state.mtimecache assert type( filename)==str filename = os.path.abspath( filename) mtimecache.cache[ filename] = 0 def mtime_add_oldprefix( tree, mtimecache=None): if mtimecache==None: mtimecache=default_state.mtimecache mtimecache.oldprefixes.append( tree) _mtime_new = 3600*24*365*10*1000 def mtime_marknew( filename, mtimecache=None): ''' Future calls to mtime( , ) will return fixed value that is a long time in the future. ''' if mtimecache==None: mtimecache=default_state.mtimecache assert type( filename)==str filename = os.path.abspath( filename) #print 'yabs: mtime_marknew: filename=' + filename mtimecache.cache[ filename] = _mtime_new # yuk - simply set to 10,000 years after epoc. class Prerequisite: ''' A normal prerequisite. ''' pass class SemiPrerequisite: ''' A prerequisite, such as a header file, that is allowed to fail. ''' pass class FailedSemiPrerequisite: ''' prerequisite that was tried last time but didn't exist last time. ''' pass class Ruleresult: ''' An easy-to-use representation of the tuples returned from rules. ''' def __init__( self, ruleresult, rule, target, state): ''' is the value returned from a rule, i.e. initial portion of [ , , , ...]. ''' #print place(), ruleresult, rule, target self.extraprereq_fn = None self.internal = 0 ruleresult_is_bad = False if isinstance( ruleresult, str): raise Exception( 'The rule function' + ' [' + str( rule.rule_fn.func_code) + ']' + ' should return a tuple/array of at least one item, but it returned a string.') try: assert len( ruleresult) >= 1 except Exception: raise Exception( 'The rule function' + '[' + str( rule.rule_fn.func_code) + ']' + ' should return a tuple/array of at least one item.') def set( i, default): if len( ruleresult) >= i+1: return ruleresult[i] else: return default self.rule = rule self.target = target self.command = set( 0, '') self.raw_prereqs = set( 1, []) self.raw_semiprereqs = set( 2, []) self.extraprereq_fn = set( 3, None) self.internal = set( 4, 0) if state.mt: self.resource = set( 5, state._mt_default_resource) else: self.resource = None if self.extraprereq_fn is None or callable( self.extraprereq_fn): pass else: raise Exception( 'rule result from [' + str( rule.rule_fn.func_code) + '] has invalid extraprereq_fn, should be None or callable, but is: ' + repr( self.extraprereq_fn)) def prerequisites( self, state): ''' Generator, returning ( ptype, prerequisite), where is Prerequisite, SemiPrerequisite or FailedSemiPrerequisite. ''' def recurse( p, state): if type(p)==str or p==None or p is RemakeIfZeroLength: yield p elif callable( p): for pp in recurse( p( state), state): yield pp else: # should assert that x is iterable. for pp in p: for ppp in recurse( pp, state): yield ppp for p in recurse( self.raw_prereqs, state): yield Prerequisite, p for p in recurse( self.raw_semiprereqs, state): yield SemiPrerequisite, p if self.rule.autodeps: for ptype, p in _autodeps_get( self.target + self.rule.autodeps, state): if ptype == FailedSemiPrerequisite\ and mtime( p, state.mtimecache) == 0: # didn't exist before, doesn't exist now, so no # reason to rebuild, so don't bother to list as a # prereq. pass else: yield ptype, p yield SemiPrerequisite, autodeps_so return def __str__( self): return ( 'target=' + str( self.target) + ', command=' + str( self.command) + ', prerequisites=' + str( self.prerequisites) + ', resource=' + str( self.resource) + ', internal=' + str( self.internal) ) def _output_ruleinfo( prefix, rule, ruleresult, state): ''' Outputs debug information about a rule and its result (if not None). ''' assert isinstance( state, State) if 1 or state.debug>3: if rule.phony: print prefix + ' rule is (phony) ' + str( rule.rule_fn.func_code) else: print prefix + ' rule is ' + str( rule.rule_fn.func_code) print prefix + ' ruleroot is: ' + str( rule.root) if state.debug>3: print prefix + ' rule registration: ' + str( rule.backtrace) print prefix + ' rule autocmds: ' + str( rule.autocmds) print prefix + ' rule autodeps: ' + str( rule.autodeps) if ruleresult is not None: print prefix + ' prerequisites are: ' + str( ruleresult.prerequisites) print prefix + ' command is ' + str( ruleresult.command) def _findrule_fromcache( target, state=None): ''' Finds matching rule for target, from 's rule cache. This assumes that the rule has already been found by make(). Returns a tuple: ( rule, ruleresult, phony), where is True/False, and is the tuple returned from the rule function. If rule not found, raises exception. ''' if state is None: state=default_state if not state.rulecache.has_key( target): if state.debug>2: print 'No previously found rule for target ' + target return None return state.rulecache[ target] def _ensure_directory( directory, state): ''' Ensure exists. Uses mtime cache, so can be called repeatedly without being slow. ''' if mtime( directory, state.mtimecache)==0: os.makedirs( directory) mtime_flush( directory) def _str_cwd( filename, state): ''' Returns filename relative to current directory, if .show_cwd_paths is True. Otherwise returns filename. ''' if state.show_cwd_paths: cwd = os.getcwd() + os.sep if filename.startswith( cwd): return '.' + os.sep + filename[ len( cwd):] return filename def print_failures( whereto, prefixes, target, rule, e, depth, state, format, recurse): ''' Outputs human-readable information about a failed call to yabs.make(). should be an error returned from make() - see yabs.make() for a description of the format. ''' show_target = False show_rule = False show_command = False show_output = False show_exception = False if 0: print place(), 'state.echo_target=', state.echo_target print place(), 'state.echo_rule=', state.echo_rule print place(), 'state.echo_command=', state.echo_command print place(), 'state.echo_output=', state.echo_output print place(), 'state.echo_exception=', state.echo_exception if format=='auto': show_target = 1 # == state.echo_target show_rule = 1 == state.echo_rule show_command = 1 == state.echo_command show_output = 1 == state.echo_output show_exception = 1 == state.echo_exception else: assert type( format) == str for f in format.split( ','): if f=='' or f=='.': pass elif f=='target': show_target = True elif f=='rule': show_rule = True elif f=='command': show_command = True elif f=='output': show_output = True elif f=='except': show_exception = True else: whereto.write( prefix() + 'ignoring unrecognised summary item: ' + f + '\n') if 0 and depth==0: print ( place() + 'show rule=', show_rule, 'command=', show_command, 'output=', show_output, 'exception=', show_exception) #print callers() _print_failure( whereto, prefixes, target, rule, e, depth, state, show_target, show_rule, show_command, show_output, show_exception, recurse) def _print_failure( whereto, prefixes, target, rule, e, depth, state, show_target, show_rule, show_command, show_output, show_exception, recurse): #print place(), 'called from:', place(2) #print callers() #print 'show_target=', show_target #print 'show_rule=', show_rule #print 'show_command=', show_command #print 'show_output=', show_output #print 'show_exception=', show_exception if isinstance( prefixes, str): prefixes = _Prefixes( prefixes, '', state) prefix2 = prefixes.prefix(depth) if show_target and target: whereto.write( prefix2 + '*** target failed: ' + _str_cwd( target, state) + '\n') if show_rule and rule: whereto.write( prefix2 + '*** rule:\n') whereto.write( prefix2 + ' ' + str( rule.rule_fn.func_code) + '\n') if type(e)==list: for rule, prereq, ee in e: if show_rule and rule: whereto.write( prefix2 + '*** rule:\n') whereto.write( prefix2 + ' ' + str( rule.rule_fn.func_code) + '\n') if recurse: _print_failure( whereto, prefixes, prereq, None, ee, depth+1, state, show_target, show_rule, show_command, show_output, show_exception, recurse) elif isinstance( e, yabserrors.command_error): if show_command and not e.command_has_been_printed: whereto.write( prefix2 + '*** command was:\n') if callable( e.command): whereto.write( prefix2 + ' ' + str( e.command.func_code) + '\n') else: whereto.write( prefix2 + ' ' + str( e.command).strip() + '\n') e.command_has_been_printed = True if show_output and not e.text_has_been_printed: whereto.write( prefix2 + '*** output was:\n') _print_withprefix( whereto, prefix2 + ' ' , str( e.text)) e.text_has_been_printed = True if show_exception and not e.ret_has_been_printed: if os.name=='nt': whereto.write( prefix2 + '*** return value was: 0x%x:\n' % e.ret) else: whereto.write( prefix2 + '*** return value was: 0x%x' % e.ret\ + ': '\ + yabserrors.exit_description( e.ret) + ':\n') e.ret_has_been_printed = True elif isinstance( e, yabserrors.commandfn_error): if show_command: whereto.write( prefix2 + '*** command-fn failed:\n') whereto.write( prefix2 + ' ' + str( e.commandfn.func_code) + '\n') if show_output and not e.text_has_been_printed: whereto.write( prefix2 + '*** output was:\n') _print_withprefix( whereto, prefix2 + ' ', str( e.text)) e.text_has_been_printed = True if show_exception: whereto.write( prefix2 + '*** error was:\n%s' % exception_as_string( e.exc_info[0], e.exc_info[1], e.exc_info[2], prefix = prefix2 + ' ' )) elif isinstance( e, yabserrors.rulefn_error): if show_exception: whereto.write( prefix2 + '*** rule failed:\n%s' % exception_as_string( e.exc_info[0], e.exc_info[1], e.exc_info[2], prefix = prefix2 + ' ' )) elif isinstance( e, Exception): if show_exception: whereto.write( prefix2 + '*** exception was:\n') _print_withprefix( whereto, prefix2 + ' ', str(e).strip() + '\n') elif e==changed: pass elif e==unchanged: pass elif e==pending: pass else: print e.__class__, e def relativepath_simple( from0, to_, sep=os.sep, up='..'+os.sep): ''' Constructs relative path, from directory to path . This has be optimised for speed - see alternative implementations below. ''' if len( from0)==0: return to_ elif from0[-1]==sep: from1 = from0 else: from1 = from0 + sep last_sep = 0 for i in xrange( min( len( from1), len( to_))): if from1[i] != to_[i]: break if from1[i]==sep: last_sep = i num_up = from1.count( sep, last_sep) - 1 #print 'from=', from0, 'to=', to_, ', last_sep=', last_sep, ', num_up=', num_up return num_up * up + to_[last_sep+1:] relativepath_cache = {} def relativepath( from0, to_, sep=os.sep, up='..'+os.sep): ''' relativepath() is rather heavily used, often with identical parameters, so we use a cache - significantly improves speed. ''' key = from0, to_, sep, up ret = relativepath_cache.get( key, None) if ret is None: ret = relativepath_simple( *key) relativepath_cache[ key] = ret return ret if 0: # tests for relativepath(): relative_path_tests = [ ( '/home/foo/bar', '/home/qwerty/abcd/pqr', '../../qwerty/abcd/pqr'), ( '/home/qwerty/abcd/pqr', '/home/foo/bar', '../../../foo/bar'), ( '/home/foo/abcd/pqr/', '/home/foo/bar', '../../bar'), ( '/foo', '/home/qwerty/abcd/pqr/', '../home/qwerty/abcd/pqr/'), ( '/foo/bar/qw', '/foo/qw', '../../qw'), ( '/foo/bar/qw', '/foo/qw/', '../../qw/'), ( '/foo', '/foo/qw/', 'qw/'), ( '', '/foo/qw/', '/foo/qw/'), ] # These tuples contain: from, to, correct-result. for f, t, r in relative_path_tests: assert relativepath( f, t) == r if 1: # alternative implementations of relativepath. are up to 2x slower. def relativepath0( from_, to_, sep=os.sep, up='..'+os.sep): if from_=='' or from_ is None: return to_ # would be nice to not special case, but string.join is not exactly right. while from_.endswith( sep): from_=from_[:-1] #while to_.endswith( sep): to_=to_[:-1] f = from_.split( sep) t = to_.split( sep) i = 0 while 1: if i>=len( t) or i>=len( f) or t[i] != f[i]: break i += 1 #print 'i=', i # first i directories are the same relative_path = (len(f)-i) * up relative_path += string.join( t[ i:], sep) return relative_path def relativepath1( from_0, to_, sep=os.sep, up='..'+os.sep): if len(from_0)==0: return to_ elif from_0[-1]==sep: from_ = from_0 else: from_ = from_0 + sep p = os.path.commonprefix( ( from_, to_)) s = p.rfind( sep) + 1 num_up = from_.count( sep, len(p)) if from_[-1]!=sep: num_up += 1 #print 'from=', from_0, 'to=', to_, ', p=', p, ', s=', s, ', num_up=', num_up return num_up * up + to_[s:] for k in relativepath1, relativepath0, relativepath_simple: t = time.time() for i in range( 10*1000): for j in relative_path_tests: s = k( j[0], j[1]) assert s==j[2], ( j[2], s) t = time.time() - t print k, t def relativepath_user( user, to, sep=os.sep, up='..'+os.sep): ''' If is within ~, returns representation of starting with `~'. Otherwise returns absolute path of . ''' user_path = os.path.expanduser( '~' + user) to_abs = os.path.abspath( to) if to_abs.startswith( user_path): return '~' + user + to_abs[ len( user_path):] else: return to_abs def xtermtitle_usertarget( target, state): ''' writes @: into titlebar of xterm. intended for use as .premake_fn. ''' import socket if os.isatty(0): sys.stdout.write( '\033]2;' + os.environ['USER'] + '@' + socket.gethostname() + ':' + relativepath_user( os.environ['USER'], target) + '\007') _use_root_category = True _use_root_category = False # setting _use_root_category to true makes yabs add a category that rejects # targets that don't start with a rule's . this breaks how is # supposed to behave though - yabs will still ask rules to make things outside # of their , by passing relative paths that start with '../' etc. def _call_rule( rule, target, state, prefixes): ''' Call rule function, and return a RuleResult. Doesn't actually build a target. ''' #print 'target=', target if rule.root: if _use_root_category: # if the rule category code has done its stuff properly, this # assert should be fine: if 0: assert target.startswith( rule.root) target2 = target[ len(rule.root):] if 0: assert target2 == relativepath( rule.root, target) else: if 1: target2 = relativepath( rule.root, target) else: # this is a hack that excludes targets that are outside a # rule's . don't think there's much speed advantage. if not target.startswith( rule.root): return target2 = target[ len(rule.root):] #target2 = relativepath( rule.root, target) if state.debug>=4: print 'converted target: ' + target print ' to relative path: ' + target2 print ' ruleroot=: ' + rule.root else: target2 = target try: state.debug -= rule.internal try: #print place(), 'calling rule.rule_fn, target2=', target2 ruleresult_raw = rule.rule_fn( target2, state) finally: state.debug += rule.internal #print place() except Exception, e: #print place(), 'e=', e e = yabserrors.rulefn_error( e) return e if ruleresult_raw is None: if state.debug>=5: print place() + 'rule doesn\'t match' rule.num_non_matches += 1 return rule.num_matches += 1 return Ruleresult( ruleresult_raw, rule, target, state) def cancel_targets( targets, state): if not state.mt: return for target in targets: i = 0 while 1: if i>=len( state._mt_targets): break if state._mt_targets[i][0]==target: t, rr, prefixes, nesting, remake = state._mt_targets[i] if state.debug>=2: print prefixes.prefix(0), 'cancelling:', t del state._mt_targets[i] else: i += 1 def _try_get_result( targets, state, prefixes, nesting): ''' Looks in .targetcache for result for any target in . if found, removes from and returns ( , result), else returns None. ''' # don't use enumerate - not available on python-2.2. for i in range( len( targets)): target = targets[i] rule, result = state.targetcache[ target] if result != pending: del targets[i] #show_result( target, rule, result, prefixes, state, nesting) return target, result def _wait_result( targets, state, prefixes, nesting): ''' Waits for any item in to be made. Removes the item from , and returns ( , ,). Relies on state._mt_condition.notifyAll() being called whenever .targetcache is changed. This is done automatically - .targetcache is a _NotifyingDict. ''' while 1: debug( 'targets=', targets) i = 0 while 1: if i==len( targets): break target = targets[i] assert state.targetcache.has_key( target), target rule, result = state.targetcache[ target] if result!=pending: del targets[i] #show_result( target, rule, result, prefixes, state, nesting) debug( 'returning:', target, result) return target, result else: i += 1 if len( targets)==0: break debug( 'calling state._wait') state._wait() def _autodeps_get( autodeps_name, state): ''' A generator that yields prerequisites from an autodeps file. ''' try: f = file( autodeps_name, 'r') except IOError: return while True: line = f.readline() if line=='': break c = line[0] filename = line[1:-1] # remove initial +/- and trailing newline. if c=='+': yield SemiPrerequisite, filename elif c=='-': # this file failed to open last time. so if it # now exists, we should force a rebuild. we # cheat a little by also pretending the file # is very new, in case someone has created # it as an old file. if mtime( filename, state.mtimecache): if state.debug>=1: print 'file failed to open previously, but now exists:'\ + filename mtime_marknew( filename) yield FailedSemiPrerequisite, filename f.close() class _Prefixes: def __init__( self, prefix, echo_prefix, state): def get( *items): for p in items: if p: if callable( p): return p else: return lambda: p return lambda: '' self._prefix = get( prefix, state.prefix) self._echo_prefix = get( echo_prefix, state.echo_prefix) def prefix( self, nesting): return self._prefix() + 'yabs ' + str(nesting) + ': ' + nesting*' ' def echo_prefix( self, nesting): p = self._echo_prefix() if p is None: p = self.prefix( nesting) #print place(), 'p=', p return p def periodic_debug( text, state=None): if state.periodic_debug_interval is None: return t = time.time() if t - state.periodic_debug_last > state.periodic_debug_interval: state.periodic_debug_last = t print 'yabs: [' + text + ']' debug_num_targets = 0 class Tstats: def __init__( self, active=True): self.active = active self.stats = dict() self.current_pos = '' self.current_t = time.time() def mark( self): if not self.active: return t = time.time() caller = place(2) if self.current_pos not in self.stats: self.stats[ self.current_pos] = [0,0] self.stats[ self.current_pos][0] += 1 self.stats[ self.current_pos][1] += t - self.current_t self.current_pos = caller self.current_t = time.time() def __str__( self): ret = '' for pos, ( n, t) in self.stats.iteritems(): ret += '%5i %8f %s\n' % ( n, t, pos) return ret tstats = Tstats( False) def tstats_print(): if tstats.active: print tstats import atexit atexit.register( tstats_print) _rulecategory_suffix_store = {} def make_rulecategory_suffix( suffix): ret = _rulecategory_suffix_store.get( suffix) if ret: return ret class Category: def __init__( self, suffix): self.suffix = suffix def __call__( self, target): return target.endswith( self.suffix) def __repr__( self): return 'suffix==' + self.suffix ret = Category( suffix) _rulecategory_suffix_store[ suffix] = ret return ret _rulecategory_root_store = {} def make_rulecategory_root( root): ret = _rulecategory_root_store.get( root) if ret: # We have already created the required category object. return ret class Category: def __init__( self, root): self.root = root def __call__( self, target): #print place(), 'target=', target, 'root=', self.root return target.startswith( self.root) def __repr__( self): return 'root==' + self.root ret = Category( root) _rulecategory_root_store[ root] = ret return ret def start_make( target, state, prefix=None, echo_prefix=None, prefixes=None, nesting=0, nesting_targets=[]): ''' This is the heart of Yabs. We are a Python generator. We start a builds of , then repeatedly yield a dummy value until the build has completed, leaving the result in state.targetcache[ target]. We use yield in this way so that concurrent builds can schedule nested prerequisites without blocking. We never block; the caller is expected to block with state._wait() during iteration. We call ourselves recursively to build prerequisites. ''' tstats.mark() #print place(), 'target=', target debug( 'target=', target) global debug_num_targets debug_num_targets += 1 periodic_debug( 'checking target: ' + str( debug_num_targets) + ' ' + target, state) if state.received_sighup: # output current status - what we're trying to build, which is a # list of targets - top level target then prerequisites. state.received_sighup = False print place(), 'current targets:' for i in nesting_targets + [target]: print place(), ' ', i if prefixes is None: prefixes = _Prefixes( prefix, echo_prefix, state) try: rule, result = state.targetcache[ target] except KeyError: tstats.mark() pass else: tstats.mark() if rule and rule.always: pass else: if state.debug>=3: print prefixes.prefix( nesting) + 'have already dealt with ' + _str_cwd( target, state) + ': ' + repr( state.targetcache[ target]) tstats.mark() return tstats.mark() # while we are attempting to build , mark it as pending, so # that recursive/concurrent builds work. state.targetcache[ target] = None, pending failed_prereqs = [] # info about prereqs that have failed to make. newer_prereqs = [] # prereqs that are phony or newer than . ruleresult = None def append_failure( failed_prereqs, e, target, rule, nesting): ''' We output info about errors as we come across them, so that we can use recurse=False to avoid duplicate info, but still output full info. ''' failed_prereqs.append( e) #print 'e[2]=', e[2].__class__ #if not isinstance( e[2], list): if 1: _print_failure( sys.stdout, prefixes, target, rule, e[2], nesting, state, state.echo_target>=2, state.echo_rule==2, state.echo_command==2, state.echo_output==2, state.echo_exception==2, recurse=False) if state.debug >= 2: print prefixes.prefix( nesting), 'target: ' + _str_cwd( target, state) rule_ok = None # set to True if rule's prerequistes have built succesfully. Set to # False if one or more of the rule's prerequisites have failed. category_matches = [None] * len( _int2category) # Each time a rule is considered, we want to avoid calling the rule if any # of its categories return false when passed . To avoid calling # categories more than once for , we store category return value # for in category_matches[i] where is the category's integer # id (see _category2int). We use an integer id here for speed - the more # obvious way using a dict mapping from the category itself to the return # value is slow enough that using categories essentially doesn't seem to # speed things up at all. tstats.mark() # We try each rule in turn. If a rule matches, we attempt to build # all of its prerequisites. If any of the prerequisites fail to # build, we carry on looking at other rules. for rule in state.rules: if 1: # see whether matches all of 's categories. If any of # 's categories doesn't match, we don't need to call the rule. exclude_rule = False for category_int in rule.categories_int: assert category_int >= 0 and category_int < len( category_matches) match = category_matches[ category_int] if match is None: # we haven't looked at with before. category = _int2category[ category_int] match = category( target) category_matches[category_int] = match if not match: exclude_rule = True break if exclude_rule: # one of 's categories does not match , so can # be safely skipped. rule.num_category_non_matches += 1 continue if rule.rule_fn==_file_rule and len( failed_prereqs)>0: if state.debug>=3: print prefixes.prefix( nesting) + 'ignoring file_rule, because previous rules have '\ 'failed due to failed prerequisites' continue ruleresult = _call_rule( rule, target, state, prefixes) if state.debug>=4: print prefixes.prefix( nesting), 'ruleresult=', ruleresult if ruleresult is None: continue if isinstance( ruleresult, Exception): #print place(), 'rule exception:', ruleresult.__class__, ruleresult append_failure( failed_prereqs, ( rule, target, ruleresult), target, rule, nesting) rule_ok = None ruleresult = None if 0: _print_failure( sys.stdout, prefixes, target, rule, e[2], nesting, state, state.echo_target>=2, state.echo_rule==2, state.echo_command==2, state.echo_output==2, state.echo_exception==2, recurse=False) break continue rule_ok = None remake_if_zero_length = False if state.debug>=2: print place(), prefixes.prefix( nesting), 'target:', target print place(), prefixes.prefix( nesting), 'rule matches:', rule print place(), prefixes.prefix( nesting), 'ruleresult=', ruleresult #print prefixes.prefix( nesting), ' prereqs:', ruleresult.prerequisites # We provisionally set rulecache[ target] to this rule, for use by # yabs2.command_rule(). state.rulecache[ target] = rule, ruleresult tstats.mark() if rule.autocmds: # add autocmds file as a semi-prerequisites. ruleresult.raw_semiprereqs.append( target + rule.autocmds) pending_prereqs = [] # prereqs that we have started to make. prereqs = [] # absolute prerequisite filenames. prereqs2prereqtype = dict() for ptype, p in ruleresult.prerequisites( state): if p is None: newer_prereqs.append( p) elif p is RemakeIfZeroLength: remake_if_zero_length = True else: if not os.path.isabs( p): if rule.root: if p.startswith( './'): pp = os.path.join( rule.root, p[2:]) else: pp = os.path.join( rule.root, p) else: print prefixes.prefix( nesting) + 'warning: rule returned a relative prerequisite, but rule has no root-dir.' print prefixes.prefix( nesting) + 'rule is:' _output_ruleinfo( prefixes.prefix( nesting) + ' ', rule, None, state) print prefixes.prefix( nesting) + 'target is: ' + target print prefixes.prefix( nesting) + 'prerequiste is: ' + p print prefixes.prefix( nesting) + 'ignoring rule' preq_make_result = [] rule_ok = False break else: pp = p pp = os.path.abspath(pp) if pp==target: if ptype==Prerequisite: print prefixes.prefix( nesting) + 'recursion detected for target ' + _str_cwd( target, state) print prefixes.prefix( nesting) + 'rule is:' _output_ruleinfo( prefixes.prefix( nesting) + ' ', rule, None, state) print prefixes.prefix( nesting) + 'target is: ' + target print prefixes.prefix( nesting) + 'prerequiste is: ' + p rule_ok = False break else: # ignore recursion for semi-prerequistes, because they # often include the target. pass else: existing_ptype = prereqs2prereqtype.get( pp, None) if existing_ptype==SemiPrerequisite or existing_ptype is None: prereqs2prereqtype[ pp] = ptype prereqs.append( ( ptype, pp)) elif existing_ptype==Prerequisite: # never change type from full to # semi-prerequisite - if rule lists prereq as # both normal and semi, we need to treat it as a # normal prereq. pass else: assert 0 tstats.mark() if rule_ok==False: continue # Loop continually, starting to make prerequisites and checking for # prerequisites that have been made. This technique works whether Yabs # is concurrent or not. debug( 'starting prereq loop for target=', target) generators = [] i = 0 while 1: tstats.mark() if i < len( prereqs): tstats.mark() # start making another prerequiste: prereq_type, prereq = prereqs[i] if prereq is None: # forces us to rebuild the target. newer_prereqs.append( prereq) else: generator = start_make( prereq, prefixes=prefixes, state=state, nesting=nesting+1, nesting_targets=nesting_targets+[target]) pending_prereqs.append( prereq) if state.mt: # We are concurrent. Need to call # generator.next() once here, so that the # prereq build is started - e.g. this will set # state.targetcache[ prereq] to yabs.pending. try: generator.next() tstats.mark() except StopIteration: # has built immediately, so no need to use # again. #print place(), 'StopIteration, target=', target, 'prereq=', prereq # next two lines fix test38. tstats.mark() i += 1 continue else: # not yet built, so add # to list of generators to call: generators.append( generator) assert state.targetcache.has_key( prereq), 'prereq=' + prereq + ', state.targetcache=' + str( state.targetcache) else: # We are not concurrent, so don't proceed until # has been made (or fails). for g in generator: tstats.mark() pass tstats.mark() tstats.mark() # check for a result from any of our remaining prerequisites. #print place(), 'pending_prereqs=', pending_prereqs targetresult = _try_get_result( pending_prereqs, state, prefixes, nesting+1) tstats.mark() #print place(), 'target=', target #print place(), 'targetresult=', targetresult #print place(), 'prereqs=', prereqs #print place(), 'pending_prereqs=', pending_prereqs #print place(), 'state.targetcache=', state.targetcache if targetresult: #print place(), 'targetresult=', targetresult prereq, result = targetresult # start_make() has no knowledge of whether a target is a # prerequisite or semi-prerequisite. But we need to know, so # recover the prerequisite-type from our original list of # prerequisites: if 0: prereq_type = None debug( 'num prereqs=', len(prereqs)) found = False for p in prereqs: if p[1]==prereq: prereq_type = p[0] debug(p) assert not found found = True break assert prereq_type != None debug( 'prereq_type=', prereq_type) debug( 'prereqs2prereqtype[prereq]=', prereqs2prereqtype[prereq]) assert prereq_type==prereqs2prereqtype[prereq] prereq_type = prereqs2prereqtype[prereq] tstats.mark() if result is changed: # Note that all phony prereqs will be marked as changed. newer_prereqs.append( ( prereq, prereq_type)) elif result is unchanged: #if mtime( prereq, state.mtimecache) > mtime( target): # newer_prereqs.append( ( prereq, prereq_type)) pass else: # failed to build . if prereq_type is Prerequisite: append_failure( failed_prereqs, ( rule, prereq, result), prereq, rule, nesting) #failed_prereqs.append( ( rule, prereq, result)) rule_ok = False if state.keepgoing: pass else: # Stop building all the other prerequisites. cancel_targets( pending_prereqs, state) pending_prereqs = [] break else: # failed to build semi-prerequisite. if state.debug >= 2: print prefixes.prefix( nesting), 'ignoring failure to build semi-prerequisite:', prereq # force rebuild: newer_prereqs.append( ( prereq, prereq_type)) tstats.mark() else: # no result available, so we do one iteration for each # of our prerequisites (these will not block), and yield # if there is no change, so that our caller can schedule # new targets if required. print place(), 'generators=', # generators tstats.mark() g = 0 g_change = False while g < len( generators): try: gg = generators[g].next() tstats.mark() except StopIteration: tstats.mark() del generators[g] g_change = True else: g += 1 if not g_change: debug( 'yielding:', target, nesting) tstats.mark() yield place() + target tstats.mark() #print place(), 'pending_prereqs=', pending_prereqs, 'prereqs=', prereqs, 'i=', i if len( pending_prereqs)==0 and i>=len( prereqs): #print place(), 'All prereqs have been made and we have handled their results', rule_ok if rule_ok is None: rule_ok = True break i += 1 tstats.mark() if rule_ok is True: break tstats.mark() # We've processed all the prerequisites. Now decide whether to run the # rule's command. debug( 'rule_ok=', rule_ok) if rule_ok is True: # Prerequisites were created successfully. Decide whether to build # ; if so, set to text describing why. remake = None if ruleresult.rule.phony: remake = 'target is phony' else: target_mtime = mtime( target, state.mtimecache) if target_mtime==0: remake = 'target doesn\'t exist' else: if newer_prereqs: if newer_prereqs[0]==None: remake = 'prerequisite is None' else: remake = 'prerequisite changed: ' + newer_prereqs[0][0] remake = 'prerequisite changed: ' + str( newer_prereqs) else: for ptype, p in prereqs: if mtime( p, state.mtimecache) > target_mtime: remake = 'prerequisite newer than target: ' + p break if remake_if_zero_length and not remake: try: if os.path.getsize( target) == 0: remake = 'target has zero length' #print place(), 'remaking because target has zero length:', target except os.error: pass def target_text(): # lazy calling of _str_cwd() to save time. ret = _str_cwd( target, state) if ruleresult.rule.phony: ret = 'phony ' + ret return ret if remake: debug( 'remaking', target_text(), 'state.mt=', state.mt) if state.mt: debug( 'appending', target, 'to state._mt_targets') state._mt_targets.append( ( target, ruleresult, prefixes, nesting, remake)) state._notifyAll() else: #print place(), 'prefixes=', prefixes _run_ruleresult( ruleresult, state, prefixes, nesting, remake) else: debug( 'not remaking', target_text) if state.debug>=3: print prefixes.prefix( nesting), 'not remaking: ' + target_text() rule, result = state.targetcache[ target] state.targetcache[ target] = rule, unchanged else: #print place(), 'rule_ok=', rule_ok, failed_prereqs assert state.targetcache.has_key( target), 'target=' + target + ', targetcache keys=' + str( state.targetcache.keys()) state.targetcache[ target] = rule, failed_prereqs rule, ret = state.targetcache[ target] if nesting==0 and ret!=changed and ret!=unchanged and ret!=pending: # this is a hack. we should really get make() to do this, but it # doesn't have easy access to all the params needed by _print_failure. _print_failure( sys.stdout, prefixes, target, rule, ret, nesting, state, state.echo_target>=2, state.echo_rule==2, state.echo_command==2, state.echo_output==2, state.echo_exception==2, recurse=False) #print place(), ret return def make( target, prefix=None, echo_prefix=None, state=None): ''' If is a relative path, is converted to an absolute path using os.path.abspath(). Uses the registered rules in to try to remake 's prerequisites, semi-prerequisites and itself, returning one of the following: yabs.unchanged - 's rule's command was not run because was found to be up-to-date, or the command was run but the timestamp of was unchanged. yabs.changed - 's rule's command was run and it updated , or it was run and was a phony rule. A list containing information about why could not be made. Each item in the list is a 3-tuple ( , , ), where is an instance of the yabs._Rule class, describing a rule that claimed to be able to remake , but which required a prerequisite that could not be made. The item is the error returned from make() when it was recursively called to remake , so the list returned from make() is actually a tree-like structure. Note that does not appear in the list. The function yabs.print_failures() can be used to display the list in a human readable format. All diagnostics are prefixed with . can be a function taking no params, in which case it should return a prefix string. Note that make() does not first check that rules exist for all of a rule's prerequisites before attempting to create the first prerequisite. This has the disadvantage that it means that rules could be run even when they ultimately don't work because some other prerequisites cannot be made. It has the advantage that the rules can depend on files created by earlier rules; for example a rule could untar a .tar file, and subsequent rules will be able to respond to the extracted files. It might even be possible to import extra rules from these files into the build programme itself. See http://tinyurl.com/yqk6l for a usenet posting that mentions this issue. ''' tstats.mark() if state is None: state = default_state if not os.path.isabs( target): target = os.path.abspath( target) prefixes = _Prefixes( prefix, echo_prefix, state) state._lock() try: tstats.mark() for dummy in start_make( target, prefixes=prefixes, state=state): tstats.mark() debug( 'dummy=', dummy) debug( 'target=', target) tstats.mark() state._wait() tstats.mark() tstats.mark() target, r = _wait_result( [target], state, prefixes, 0) #print place(), 'target=', target, ', r=', r tstats.mark() if 0: print place(), 'number of rules:', len( state.rules) for rule in state.rules: print place(), 'rule matches,non-matches,category-non-matches=%i,%i,%i: %s' % ( rule.num_matches, rule.num_non_matches, rule.num_category_non_matches, repr(rule) ) return r finally: state._unlock() def _tdb_status( state): while 1: time.sleep(5) _tdb_lock.acquire() try: print '_tdb_status:', _tdb_str().replace( '\n', '\n\r') print '_tdb_status:', 'all threads:', str( threading.enumerate()).replace( '\n', '\n\r') print 'threading.enumerate():', threading.enumerate() print 'state.m=', state.mt print 'state._mt_exited=', state._mt_exited finally: _tdb_lock.release() if state._mt_exited: print place(), 'state._mt_exited is set' break if 0: # set up a background thread that periodically prints status of other # threads. useful when debugging concurrent useage. _tdb_dummy = threading.Thread( target=_tdb_status, name="tdb dummy", args=(default_state,)) _tdb_dummy.start() #thread.start_new_thread( _tdb_status, (None,)) def handle_sighup( state=None): ''' adds handler for SIGHUP that sets .received_sighup. start_make() polls this when it is called, and outputs a diagnostic about the current state. ''' if state is None: state = default_state def sighandler( signum, frame): state.received_sighup = True signal.signal( signal.SIGHUP, sighandler)