
Source Code for Module SCons.Taskmaster

#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

__doc__ = """
Generic Taskmaster module for the SCons build engine.

This module contains the primary interface(s) between a wrapping user
interface and the SCons build engine.  There are two key classes here:

    Taskmaster
        This is the main engine for walking the dependency graph and
        calling things to decide what does or doesn't need to be built.

    Task
        This is the base class for allowing a wrapping interface to
        decide what does or doesn't actually need to be done.  The
        intention is for a wrapping interface to subclass this as
        appropriate for different types of behavior it may need.

        The canonical example is the SCons native Python interface,
        which has Task subclasses that handle its specific behavior,
        like printing "`foo' is up to date" when a top-level target
        doesn't need to be built, and handling the -c option by removing
        targets as its "build" action.  There is also a separate subclass
        for suppressing this output when the -q option is used.

        The Taskmaster instantiates a Task object for each (set of)
        target(s) that it decides need to be evaluated and/or built.
"""

__revision__ = "src/engine/SCons/Taskmaster.py  2014/09/27 12:51:43 garyo"

from itertools import chain
import operator
import sys
import traceback

import SCons.Errors
import SCons.Node
import SCons.Warnings

StateString = SCons.Node.StateString
NODE_NO_STATE = SCons.Node.no_state
NODE_PENDING = SCons.Node.pending
NODE_EXECUTING = SCons.Node.executing
NODE_UP_TO_DATE = SCons.Node.up_to_date
NODE_EXECUTED = SCons.Node.executed
NODE_FAILED = SCons.Node.failed

print_prepare = 0               # set by option --debug=prepare

# A subsystem for recording stats about how different Nodes are handled by
# the main Taskmaster loop.  There's no external control here (no need for
# a --debug= option); enable it by changing the value of CollectStats.

CollectStats = None

class Stats(object):
    """
    A simple class for holding statistics about the disposition of a
    Node by the Taskmaster.  If we're collecting statistics, each Node
    processed by the Taskmaster gets one of these attached, in which case
    the Taskmaster records its decision each time it processes the Node.
    (Ideally, that's just once per Node.)
    """
    def __init__(self):
        """
        Instantiates a Taskmaster.Stats object, initializing all
        appropriate counters to zero.
        """
        self.considered = 0
        self.already_handled = 0
        self.problem = 0
        self.child_failed = 0
        self.not_built = 0
        self.side_effects = 0
        self.build = 0

StatsNodes = []

fmt = "%(considered)3d " \
      "%(already_handled)3d " \
      "%(problem)3d " \
      "%(child_failed)3d " \
      "%(not_built)3d " \
      "%(side_effects)3d " \
      "%(build)3d "

def dump_stats():
    for n in sorted(StatsNodes, key=lambda a: str(a)):
        print (fmt % n.stats.__dict__) + str(n)

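# A minimal usage sketch for the stats subsystem above; the driver lines are
# illustrative assumptions (not part of SCons itself) showing how a wrapper
# would flip CollectStats before a build and dump the counters afterwards:
#
#     import SCons.Taskmaster
#     SCons.Taskmaster.CollectStats = True   # there is no --debug= option for this
#     # ... create a Taskmaster and run the normal task loop ...
#     SCons.Taskmaster.dump_stats()          # prints one line of counters per Node
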
class Task(object):
    """
    Default SCons build engine task.

    This controls the interaction of the actual building of a node
    and the rest of the engine.

    This is expected to handle all of the normally-customizable
    aspects of controlling a build, so any given application
    *should* be able to do what it wants by sub-classing this
    class and overriding methods as appropriate.  If an application
    needs to customize something by sub-classing Taskmaster (or
    some other build engine class), we should first try to migrate
    that functionality into this class.

    Note that it's generally a good idea for sub-classes to call
    these methods explicitly to update state, etc., rather than
    roll their own interaction with Taskmaster from scratch.
    """
    def __init__(self, tm, targets, top, node):
        self.tm = tm
        self.targets = targets
        self.top = top
        self.node = node
        self.exc_clear()

    def trace_message(self, method, node, description='node'):
        fmt = '%-20s %s %s\n'
        return fmt % (method + ':', description, self.tm.trace_node(node))

    def display(self, message):
        """
        Hook to allow the calling interface to display a message.

        This hook gets called as part of preparing a task for execution
        (that is, a Node to be built).  As part of figuring out what Node
        should be built next, the actual target list may be altered,
        along with a message describing the alteration.  The calling
        interface can subclass Task and provide a concrete implementation
        of this method to see those messages.
        """
        pass

    def prepare(self):
        """
        Called just before the task is executed.

        This is mainly intended to give the target Nodes a chance to
        unlink underlying files and make all necessary directories before
        the Action is actually called to build the targets.
        """
        global print_prepare
        T = self.tm.trace
        if T: T.write(self.trace_message(u'Task.prepare()', self.node))

        # Now that it's the appropriate time, give the TaskMaster a
        # chance to raise any exceptions it encountered while preparing
        # this task.
        self.exception_raise()

        if self.tm.message:
            self.display(self.tm.message)
            self.tm.message = None

        # Let the targets take care of any necessary preparations.
        # This includes verifying that all of the necessary sources
        # and dependencies exist, removing the target file(s), etc.
        #
        # As of April 2008, the get_executor().prepare() method makes
        # sure that all of the aggregate sources necessary to build this
        # Task's target(s) exist in one up-front check.  The individual
        # target t.prepare() methods check that each target's explicit
        # or implicit dependencies exist, and also initialize the
        # .sconsign info.
        executor = self.targets[0].get_executor()
        if executor is None:
            return
        executor.prepare()
        for t in executor.get_action_targets():
            if print_prepare:
                print "Preparing target %s..."%t
                for s in t.side_effects:
                    print "...with side-effect %s..."%s
            t.prepare()
            for s in t.side_effects:
                if print_prepare:
                    print "...Preparing side-effect %s..."%s
                s.prepare()

    def get_target(self):
        """Fetch the target being built or updated by this task.
        """
        return self.node

    def needs_execute(self):
        # TODO(deprecate):  "return True" is the old default behavior;
        # change it to NotImplementedError (after running through the
        # Deprecation Cycle) so the desired behavior is explicitly
        # determined by which concrete subclass is used.
        #raise NotImplementedError
        msg = ('Taskmaster.Task is an abstract base class; instead of\n'
               '\tusing it directly, '
               'derive from it and override the abstract methods.')
        SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
        return True

    def execute(self):
        """
        Called to execute the task.

        This method is called from multiple threads in a parallel build,
        so only do thread safe stuff here.  Do thread unsafe stuff in
        prepare(), executed() or failed().
        """
        T = self.tm.trace
        if T: T.write(self.trace_message(u'Task.execute()', self.node))

        try:
            cached_targets = []
            for t in self.targets:
                if not t.retrieve_from_cache():
                    break
                cached_targets.append(t)
            if len(cached_targets) < len(self.targets):
                # Remove targets before building. It's possible that we
                # partially retrieved targets from the cache, leaving
                # them in read-only mode. That might cause the command
                # to fail.
                #
                for t in cached_targets:
                    try:
                        t.fs.unlink(t.path)
                    except (IOError, OSError):
                        pass
                self.targets[0].build()
            else:
                for t in cached_targets:
                    t.cached = 1
        except SystemExit:
            exc_value = sys.exc_info()[1]
            raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
        except SCons.Errors.UserError:
            raise
        except SCons.Errors.BuildError:
            raise
        except Exception, e:
            buildError = SCons.Errors.convert_to_BuildError(e)
            buildError.node = self.targets[0]
            buildError.exc_info = sys.exc_info()
            raise buildError

    def executed_without_callbacks(self):
        """
        Called when the task has been successfully executed
        and the Taskmaster instance doesn't want to call
        the Node's callback methods.
        """
        T = self.tm.trace
        if T: T.write(self.trace_message('Task.executed_without_callbacks()',
                                         self.node))

        for t in self.targets:
            if t.get_state() == NODE_EXECUTING:
                for side_effect in t.side_effects:
                    side_effect.set_state(NODE_NO_STATE)
                t.set_state(NODE_EXECUTED)

    def executed_with_callbacks(self):
        """
        Called when the task has been successfully executed and
        the Taskmaster instance wants to call the Node's callback
        methods.

        This may have been a do-nothing operation (to preserve build
        order), so we must check the node's state before deciding whether
        it was "built", in which case we call the appropriate Node method.
        In any event, we always call "visited()", which will handle any
        post-visit actions that must take place regardless of whether
        or not the target was an actual built target or a source Node.
        """
        global print_prepare
        T = self.tm.trace
        if T: T.write(self.trace_message('Task.executed_with_callbacks()',
                                         self.node))

        for t in self.targets:
            if t.get_state() == NODE_EXECUTING:
                for side_effect in t.side_effects:
                    side_effect.set_state(NODE_NO_STATE)
                t.set_state(NODE_EXECUTED)
                if not t.cached:
                    t.push_to_cache()
                t.built()
                t.visited()
                if (not print_prepare and
                    (not hasattr(self, 'options') or not self.options.debug_includes)):
                    t.release_target_info()
            else:
                t.visited()

    executed = executed_with_callbacks

    def failed(self):
        """
        Default action when a task fails:  stop the build.

        Note: Although this function is normally invoked on nodes in
        the executing state, it might also be invoked on up-to-date
        nodes when using Configure().
        """
        self.fail_stop()

    def fail_stop(self):
        """
        Explicit stop-the-build failure.

        This sets failure status on the target nodes and all of
        their dependent parent nodes.

        Note: Although this function is normally invoked on nodes in
        the executing state, it might also be invoked on up-to-date
        nodes when using Configure().
        """
        T = self.tm.trace
        if T: T.write(self.trace_message('Task.failed_stop()', self.node))

        # Invoke will_not_build() to clean-up the pending children
        # list.
        self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))

        # Tell the taskmaster to not start any new tasks
        self.tm.stop()

        # We're stopping because of a build failure, but give the
        # calling Task class a chance to postprocess() the top-level
        # target under which the build failure occurred.
        self.targets = [self.tm.current_top]
        self.top = 1

    def fail_continue(self):
        """
        Explicit continue-the-build failure.

        This sets failure status on the target nodes and all of
        their dependent parent nodes.

        Note: Although this function is normally invoked on nodes in
        the executing state, it might also be invoked on up-to-date
        nodes when using Configure().
        """
        T = self.tm.trace
        if T: T.write(self.trace_message('Task.failed_continue()', self.node))

        self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))

    def make_ready_all(self):
        """
        Marks all targets in a task ready for execution.

        This is used when the interface needs every target Node to be
        visited--the canonical example being the "scons -c" option.
        """
        T = self.tm.trace
        if T: T.write(self.trace_message('Task.make_ready_all()', self.node))

        self.out_of_date = self.targets[:]
        for t in self.targets:
            t.disambiguate().set_state(NODE_EXECUTING)
            for s in t.side_effects:
                # add disambiguate here to mirror the call on targets above
                s.disambiguate().set_state(NODE_EXECUTING)

    def make_ready_current(self):
        """
        Marks all targets in a task ready for execution if any target
        is not current.

        This is the default behavior for building only what's necessary.
        """
        global print_prepare
        T = self.tm.trace
        if T: T.write(self.trace_message(u'Task.make_ready_current()',
                                         self.node))

        self.out_of_date = []
        needs_executing = False
        for t in self.targets:
            try:
                t.disambiguate().make_ready()
                is_up_to_date = not t.has_builder() or \
                                (not t.always_build and t.is_up_to_date())
            except EnvironmentError, e:
                raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)

            if not is_up_to_date:
                self.out_of_date.append(t)
                needs_executing = True

        if needs_executing:
            for t in self.targets:
                t.set_state(NODE_EXECUTING)
                for s in t.side_effects:
                    # add disambiguate here to mirror the call on targets in first loop above
                    s.disambiguate().set_state(NODE_EXECUTING)
        else:
            for t in self.targets:
                # We must invoke visited() to ensure that the node
                # information has been computed before allowing the
                # parent nodes to execute. (That could occur in a
                # parallel build...)
                t.visited()
                t.set_state(NODE_UP_TO_DATE)
                if (not print_prepare and
                    (not hasattr(self, 'options') or not self.options.debug_includes)):
                    t.release_target_info()

    make_ready = make_ready_current

    def postprocess(self):
        """
        Post-processes a task after it's been executed.

        This examines all the targets just built (or not, we don't care
        if the build was successful, or even if there was no build
        because everything was up-to-date) to see if they have any
        waiting parent Nodes, or Nodes waiting on a common side effect,
        that can be put back on the candidates list.
        """
        T = self.tm.trace
        if T: T.write(self.trace_message(u'Task.postprocess()', self.node))

        # We may have built multiple targets, some of which may have
        # common parents waiting for this build.  Count up how many
        # targets each parent was waiting for so we can subtract the
        # values later, and so we *don't* put waiting side-effect Nodes
        # back on the candidates list if the Node is also a waiting
        # parent.

        targets = set(self.targets)

        pending_children = self.tm.pending_children
        parents = {}
        for t in targets:
            # A node can only be in the pending_children set if it has
            # some waiting_parents.
            if t.waiting_parents:
                if T: T.write(self.trace_message(u'Task.postprocess()',
                                                 t,
                                                 'removing'))
                pending_children.discard(t)
                for p in t.waiting_parents:
                    parents[p] = parents.get(p, 0) + 1

        for t in targets:
            if t.side_effects is not None:
                for s in t.side_effects:
                    if s.get_state() == NODE_EXECUTING:
                        s.set_state(NODE_NO_STATE)
                        for p in s.waiting_parents:
                            parents[p] = parents.get(p, 0) + 1
                    for p in s.waiting_s_e:
                        if p.ref_count == 0:
                            self.tm.candidates.append(p)

        for p, subtract in parents.items():
            p.ref_count = p.ref_count - subtract
            if T: T.write(self.trace_message(u'Task.postprocess()',
                                             p,
                                             'adjusted parent ref count'))
            if p.ref_count == 0:
                self.tm.candidates.append(p)

        for t in targets:
            t.postprocess()

    # Exception handling subsystem.
    #
    # Exceptions that occur while walking the DAG or examining Nodes
    # must be raised, but must be raised at an appropriate time and in
    # a controlled manner so we can, if necessary, recover gracefully,
    # possibly write out signature information for Nodes we've updated,
    # etc.  This is done by having the Taskmaster tell us about the
    # exception, and letting the Task hold on to it until prepare()
    # re-raises it via exception_raise() at the appropriate point.

    def exc_info(self):
        """
        Returns info about a recorded exception.
        """
        return self.exception

    def exc_clear(self):
        """
        Clears any recorded exception.

        This also changes the "exception_raise" attribute to point
        to the appropriate do-nothing method.
        """
        self.exception = (None, None, None)
        self.exception_raise = self._no_exception_to_raise

    def exception_set(self, exception=None):
        """
        Records an exception to be raised at the appropriate time.

        This also changes the "exception_raise" attribute to point
        to the method that will, in fact, raise the recorded exception.
        """
        if not exception:
            exception = sys.exc_info()
        self.exception = exception
        self.exception_raise = self._exception_raise

    def _no_exception_to_raise(self):
        pass

    def _exception_raise(self):
        """
        Raises a pending exception that was recorded while getting a
        Task ready for execution.
        """
        exc = self.exc_info()[:]
        try:
            exc_type, exc_value, exc_traceback = exc
        except ValueError:
            exc_type, exc_value = exc
            exc_traceback = None
        raise exc_type, exc_value, exc_traceback

class AlwaysTask(Task):
    def needs_execute(self):
        """
        Always returns True (indicating this Task should always
        be executed).

        Subclasses that need this behavior (as opposed to the default
        of only executing Nodes that are out of date w.r.t. their
        dependencies) can use this as follows:

        class MyTaskSubclass(SCons.Taskmaster.Task):
            needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute
        """
        return True

class OutOfDateTask(Task):
    def needs_execute(self):
        """
        Returns True (indicating this Task should be executed) if this
        Task's target state indicates it needs executing, which has
        already been determined by an earlier up-to-date check.
        """
        return self.targets[0].get_state() == SCons.Node.executing

def find_cycle(stack, visited):
    if stack[-1] in visited:
        return None
    visited.add(stack[-1])
    for n in stack[-1].waiting_parents:
        stack.append(n)
        if stack[0] == stack[-1]:
            return stack
        if find_cycle(stack, visited):
            return stack
        stack.pop()
    return None

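# A small hypothetical illustration of find_cycle(); FakeNode below is an
# assumption standing in for real SCons.Node objects.  With waiting_parents
# links forming A -> B -> C -> A, the walk returns the stack once it reaches
# the starting node again, and returns None for an acyclic chain:
#
#     class FakeNode(object):
#         def __init__(self, name):
#             self.name = name
#             self.waiting_parents = set()
#         def __str__(self):
#             return self.name
#
#     a, b, c = FakeNode('A'), FakeNode('B'), FakeNode('C')
#     a.waiting_parents.add(b)
#     b.waiting_parents.add(c)
#     c.waiting_parents.add(a)
#     print " -> ".join(map(str, find_cycle([a], set())))    # A -> B -> C -> A
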
class Taskmaster(object):
    """
    The Taskmaster for walking the dependency DAG.
    """

    def __init__(self, targets=[], tasker=None, order=None, trace=None):
        self.original_top = targets
        self.top_targets_left = targets[:]
        self.top_targets_left.reverse()
        self.candidates = []
        if tasker is None:
            tasker = OutOfDateTask
        self.tasker = tasker
        if not order:
            order = lambda l: l
        self.order = order
        self.message = None
        self.trace = trace
        self.next_candidate = self.find_next_candidate
        self.pending_children = set()

    def find_next_candidate(self):
        """
        Returns the next candidate Node for (potential) evaluation.

        The candidate list (really a stack) initially consists of all of
        the top-level (command line) targets provided when the Taskmaster
        was initialized.  While we walk the DAG, visiting Nodes, all the
        children that haven't finished processing get pushed on to the
        candidate list.  Each child can then be popped and examined in
        turn for whether *their* children are all up-to-date, in which
        case a Task will be created for their actual evaluation and
        potential building.

        Here is where we also allow candidate Nodes to alter the list of
        Nodes that should be examined.  This is used, for example, when
        invoking SCons in a source directory.  A source directory Node can
        return its corresponding build directory Node, essentially saying,
        "Hey, you really need to build this thing over here instead."
        """
        try:
            return self.candidates.pop()
        except IndexError:
            pass
        try:
            node = self.top_targets_left.pop()
        except IndexError:
            return None
        self.current_top = node
        alt, message = node.alter_targets()
        if alt:
            self.message = message
            self.candidates.append(node)
            self.candidates.extend(self.order(alt))
            node = self.candidates.pop()
        return node

    def no_next_candidate(self):
        """
        Stops Taskmaster processing by not returning a next candidate.

        Note that we have to clean-up the Taskmaster candidate list
        because the cycle detection depends on the fact that all nodes
        have been processed somehow.
        """
        while self.candidates:
            candidates = self.candidates
            self.candidates = []
            self.will_not_build(candidates)
        return None

    def _validate_pending_children(self):
        """
        Validate the content of the pending_children set. Assert if an
        internal error is found.

        This function is used strictly for debugging the taskmaster by
        checking that no invariants are violated. It is not used in
        normal operation.

        The pending_children set is used to detect cycles in the
        dependency graph. We call a "pending child" a child that is
        found in the "pending" state when checking the dependencies of
        its parent node.

        A pending child can occur when the Taskmaster completes a loop
        through a cycle. For example, let's imagine a graph made of
        three nodes (A, B and C) forming a cycle. The evaluation starts
        at node A. The taskmaster first considers whether node A's
        child B is up-to-date. Then, recursively, node B needs to
        check whether node C is up-to-date. This leaves us with a
        dependency graph looking like:

            Next candidate \
                             \
            Node A (Pending) --> Node B (Pending) --> Node C (NoState)
                    ^                                        |
                    |                                        |
                    +----------------------------------------+

        Now, when the Taskmaster examines the Node C's child Node A,
        it finds that Node A is in the "pending" state. Therefore,
        Node A is a pending child of node C.

        Pending children indicate that the Taskmaster has potentially
        looped back through a cycle. We say potentially because it
        could also occur when a DAG is evaluated in parallel. For
        example, consider the following graph:

            Node A (Pending) --> Node B (Pending) --> Node C (Pending) --> ...
                    |                                        ^
                    |                                        |
                    +----------> Node D (NoState) -----------+
                                      /
                      Next candidate /

        The Taskmaster first evaluates the nodes A, B, and C and
        starts building some children of node C. Assuming that the
        maximum parallel level has not been reached, the Taskmaster
        will examine Node D. It will find that Node C is a pending
        child of Node D.

        In summary, evaluating a graph with a cycle will always
        involve a pending child at one point. A pending child might
        indicate either a cycle or a diamond-shaped DAG. Only a
        fraction of the nodes end up being a "pending child" of
        another node. This keeps the pending_children set small in
        practice.

        We can differentiate between the two cases if we wait until
        the end of the build. At this point, all the pending children
        nodes due to a diamond-shaped DAG will have been properly
        built (or will have failed to build). But the pending
        children involved in a cycle will still be in the pending
        state.

        The taskmaster removes nodes from the pending_children set as
        soon as a pending child node moves out of the pending state.
        This also helps to keep the pending_children set small.
        """

        for n in self.pending_children:
            assert n.state in (NODE_PENDING, NODE_EXECUTING), \
                (str(n), StateString[n.state])
            assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
            for p in n.waiting_parents:
                assert p.ref_count > 0, (str(n), str(p), p.ref_count)

    def trace_message(self, message):
        return 'Taskmaster: %s\n' % message

    def trace_node(self, node):
        return '<%-10s %-3s %s>' % (StateString[node.get_state()],
                                    node.ref_count,
                                    repr(str(node)))

    def _find_next_ready_node(self):
        """
        Finds the next node that is ready to be built.

        This is *the* main guts of the DAG walk.  We loop through the
        list of candidates, looking for something that has no un-built
        children (i.e., that is a leaf Node or has dependencies that are
        all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
        (both the target Node itself and its sources, which are always
        scanned in the context of a given target) to discover implicit
        dependencies.  A Node that must wait for some children to be
        built will be put back on the candidates list after the children
        have finished building.  A Node that has been put back on the
        candidates list in this way may have itself (or its sources)
        re-scanned, in order to handle generated header files (e.g.) and
        the implicit dependencies therein.

        Note that this method does not do any signature calculation or
        up-to-date check itself.  All of that is handled by the Task
        class.  This is purely concerned with the dependency graph walk.
        """

        self.ready_exc = None

        T = self.trace
        if T: T.write(u'\n' + self.trace_message('Looking for a node to evaluate'))

        while True:
            node = self.next_candidate()
            if node is None:
                if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
                return None

            node = node.disambiguate()
            state = node.get_state()

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            if CollectStats:
                if not hasattr(node, 'stats'):
                    node.stats = Stats()
                    StatsNodes.append(node)
                S = node.stats
                S.considered = S.considered + 1
            else:
                S = None

            if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))

            if state == NODE_NO_STATE:
                # Mark this node as being on the execution stack:
                node.set_state(NODE_PENDING)
            elif state > NODE_PENDING:
                # Skip this node if it has already been evaluated:
                if S: S.already_handled = S.already_handled + 1
                if T: T.write(self.trace_message(u'       already handled (executed)'))
                continue

            executor = node.get_executor()

            try:
                children = executor.get_all_children()
            except SystemExit:
                exc_value = sys.exc_info()[1]
                e = SCons.Errors.ExplicitExit(node, exc_value.code)
                self.ready_exc = (SCons.Errors.ExplicitExit, e)
                if T: T.write(self.trace_message('       SystemExit'))
                return node
            except Exception, e:
                # We had a problem just trying to figure out the
                # children (like a child couldn't be linked in to a
                # VariantDir, or a Scanner threw something).  Arrange to
                # raise the exception when the Task is "executed."
                self.ready_exc = sys.exc_info()
                if S: S.problem = S.problem + 1
                if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
                return node

            children_not_visited = []
            children_pending = set()
            children_not_ready = []
            children_failed = False

            for child in chain(executor.get_all_prerequisites(), children):
                childstate = child.get_state()

                if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))

                if childstate == NODE_NO_STATE:
                    children_not_visited.append(child)
                elif childstate == NODE_PENDING:
                    children_pending.add(child)
                elif childstate == NODE_FAILED:
                    children_failed = True

                if childstate <= NODE_EXECUTING:
                    children_not_ready.append(child)

            # These nodes have not even been visited yet.  Add
            # them to the list so that on some next pass we can
            # take a stab at evaluating them (or their children).
            children_not_visited.reverse()
            self.candidates.extend(self.order(children_not_visited))
            #if T and children_not_visited:
            #    T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
            #    T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))

            # Skip this node if any of its children have failed.
            #
            # This catches the case where we're descending a top-level
            # target and one of our children failed while trying to be
            # built by a *previous* descent of an earlier top-level
            # target.
            #
            # It can also occur if a node is reused in multiple
            # targets.  One first descends through one of the targets;
            # the next time, the descent occurs through the other target.
            #
            # Note that we can only have failed_children if the
            # --keep-going flag was used, because without it the build
            # will stop before diving into the other branch.
            #
            # Note that even if one of the children fails, we still
            # add the other children to the list of candidate nodes
            # to keep on building (--keep-going).
            if children_failed:
                for n in executor.get_action_targets():
                    n.set_state(NODE_FAILED)

                if S: S.child_failed = S.child_failed + 1
                if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
                continue

            if children_not_ready:
                for child in children_not_ready:
                    # We're waiting on one or more derived targets
                    # that have not yet finished building.
                    if S: S.not_built = S.not_built + 1

                    # Add this node to the waiting parents lists of
                    # anything we're waiting on, with a reference
                    # count so we can be put back on the list for
                    # re-evaluation when they've all finished.
                    node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
                    if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
                                  (self.trace_node(node), repr(str(child)))))

                if T:
                    for pc in children_pending:
                        T.write(self.trace_message('       adding %s to the pending children set\n' %
                                self.trace_node(pc)))
                self.pending_children = self.pending_children | children_pending

                continue

            # Skip this node if it has side-effects that are
            # currently being built:
            wait_side_effects = False
            for se in executor.get_action_side_effects():
                if se.get_state() == NODE_EXECUTING:
                    se.add_to_waiting_s_e(node)
                    wait_side_effects = True

            if wait_side_effects:
                if S: S.side_effects = S.side_effects + 1
                continue

            # The default when we've gotten through all of the checks above:
            # this node is ready to be built.
            if S: S.build = S.build + 1
            if T: T.write(self.trace_message(u'Evaluating %s\n' %
                                             self.trace_node(node)))

            # For debugging only:
            #
            # try:
            #     self._validate_pending_children()
            # except:
            #     self.ready_exc = sys.exc_info()
            #     return node

            return node

        return None

    def next_task(self):
        """
        Returns the next task to be executed.

        This simply asks for the next Node to be evaluated, and then wraps
        it in the specific Task subclass with which we were initialized.
        """
        node = self._find_next_ready_node()

        if node is None:
            return None

        executor = node.get_executor()
        if executor is None:
            return None

        tlist = executor.get_all_targets()

        task = self.tasker(self, tlist, node in self.original_top, node)
        try:
            task.make_ready()
        except:
            # We had a problem just trying to get this task ready (like
            # a child couldn't be linked in to a VariantDir when deciding
            # whether this node is current).  Arrange to raise the
            # exception when the Task is "executed."
            self.ready_exc = sys.exc_info()

        if self.ready_exc:
            task.exception_set(self.ready_exc)

        self.ready_exc = None

        return task

    def will_not_build(self, nodes, node_func=lambda n: None):
        """
        Perform clean-up for nodes that will never be built.  Invokes
        a user defined function on all of these nodes (including all
        of their parents).
        """

        T = self.trace

        pending_children = self.pending_children

        to_visit = set(nodes)
        pending_children = pending_children - to_visit

        if T:
            for n in nodes:
                T.write(self.trace_message('       removing node %s from the pending children set\n' %
                                           self.trace_node(n)))
        try:
            while len(to_visit):
                node = to_visit.pop()
                node_func(node)

                # Prune recursion by flushing the waiting children
                # list immediately.
                parents = node.waiting_parents
                node.waiting_parents = set()

                to_visit = to_visit | parents
                pending_children = pending_children - parents

                for p in parents:
                    p.ref_count = p.ref_count - 1
                    if T: T.write(self.trace_message('       removing parent %s from the pending children set\n' %
                                                     self.trace_node(p)))
        except KeyError:
            # The container to_visit has been emptied.
            pass

        # We have to stick the pending_children set back into the
        # taskmaster because Python 1.5.2 compatibility does not
        # allow us to use in-place updates.
        self.pending_children = pending_children

    def stop(self):
        """
        Stops the current build completely.
        """
        self.next_candidate = self.no_next_candidate

    def cleanup(self):
        """
        Check for dependency cycles.
        """
        if not self.pending_children:
            return

        nclist = [(n, find_cycle([n], set())) for n in self.pending_children]

        genuine_cycles = [
            node for node, cycle in nclist
            if cycle or node.get_state() != NODE_EXECUTED
        ]
        if not genuine_cycles:
            # All of the "cycles" found were single nodes in EXECUTED state,
            # which is to say, they really weren't cycles.  Just return.
            return

        desc = 'Found dependency cycle(s):\n'
        for node, cycle in nclist:
            if cycle:
                desc = desc + "  " + " -> ".join(map(str, cycle)) + "\n"
            else:
                desc = desc + \
                    "  Internal Error: no cycle found for node %s (%s) in state %s\n" % \
                    (node, repr(node), StateString[node.get_state()])

        raise SCons.Errors.UserError(desc)

# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
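The module docstring describes Taskmaster and Task as the interface that a
wrapping user interface drives.  The sketch below is not taken from SCons
itself: the MyTask subclass and the serial run_build() loop are illustrative
assumptions showing the intended next_task() / prepare() / needs_execute() /
execute() / executed() / failed() / postprocess() cycle, with cleanup()
reporting any dependency cycles at the end.

import SCons.Taskmaster

class MyTask(SCons.Taskmaster.OutOfDateTask):
    # Hypothetical subclass: report each target as it is built.
    def execute(self):
        print "building %s" % self.targets[0]
        SCons.Taskmaster.OutOfDateTask.execute(self)

def run_build(top_level_nodes):
    # top_level_nodes: already-constructed SCons.Node objects (assumed here).
    tm = SCons.Taskmaster.Taskmaster(top_level_nodes, tasker=MyTask)
    while True:
        task = tm.next_task()
        if task is None:
            break
        try:
            task.prepare()
            if task.needs_execute():
                task.execute()
        except Exception:
            task.exception_set()    # record the error for this task...
            task.failed()           # ...and stop handing out new work
        else:
            task.executed()
        task.postprocess()
    tm.cleanup()                    # raises UserError if a dependency cycle remains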