
Source Code for Module SCons.Taskmaster

   1  # 
   2  # Copyright (c) 2001 - 2019 The SCons Foundation 
   3  # 
   4  # Permission is hereby granted, free of charge, to any person obtaining 
   5  # a copy of this software and associated documentation files (the 
   6  # "Software"), to deal in the Software without restriction, including 
   7  # without limitation the rights to use, copy, modify, merge, publish, 
   8  # distribute, sublicense, and/or sell copies of the Software, and to 
   9  # permit persons to whom the Software is furnished to do so, subject to 
  10  # the following conditions: 
  11  # 
  12  # The above copyright notice and this permission notice shall be included 
  13  # in all copies or substantial portions of the Software. 
  14  # 
  15  # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 
  16  # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
  17  # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
  18  # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 
  19  # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
  20  # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
  21  # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
  22   
  23  from __future__ import print_function 
  24   
  25  import sys 
  26   
  27  __doc__ = """ 
  28      Generic Taskmaster module for the SCons build engine. 
  29      ===================================================== 
  30       
  31      This module contains the primary interface(s) between a wrapping user 
  32      interface and the SCons build engine.  There are two key classes here: 
  33       
  34      Taskmaster 
  35      ---------- 
  36          This is the main engine for walking the dependency graph and 
  37          calling things to decide what does or doesn't need to be built. 
  38   
  39      Task 
  40      ---- 
  41          This is the base class for allowing a wrapping interface to 
  42          decide what does or doesn't actually need to be done.  The 
  43          intention is for a wrapping interface to subclass this as 
  44          appropriate for different types of behavior it may need. 
  45   
  46          The canonical example is the SCons native Python interface, 
  47          which has Task subclasses that handle its specific behavior, 
  48          like printing "'foo' is up to date" when a top-level target 
  49          doesn't need to be built, and handling the -c option by removing 
  50          targets as its "build" action.  There is also a separate subclass 
  51          for suppressing this output when the -q option is used. 
  52   
  53          The Taskmaster instantiates a Task object for each (set of) 
  54          target(s) that it decides need to be evaluated and/or built. 
  55  """ 
  56   
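# An illustrative sketch (not part of this module) of how a wrapping
# interface typically drives the two classes described in the docstring
# above.  The loop is loosely modeled on SCons's serial job runner and
# simplifies its error handling; `top_level_nodes` and `taskmaster` are
# placeholder names:
#
#     taskmaster = Taskmaster(targets=top_level_nodes, tasker=OutOfDateTask)
#     while True:
#         task = taskmaster.next_task()
#         if task is None:
#             break
#         try:
#             task.prepare()
#             if task.needs_execute():
#                 task.execute()
#         except Exception:
#             task.exception_set()
#             task.failed()
#         else:
#             task.executed()
#         task.postprocess()
#     taskmaster.cleanup()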
  57  __revision__ = "src/engine/SCons/Taskmaster.py 3a41ed6b288cee8d085373ad7fa02894e1903864 2019-01-23 17:30:35 bdeegan" 
  58   
  59  from itertools import chain 
  60  import operator 
  61  import sys 
  62  import traceback 
  63   
  64  import SCons.Errors 
  65  import SCons.Node 
  66  import SCons.Warnings 
  67   
  68  StateString = SCons.Node.StateString 
  69  NODE_NO_STATE = SCons.Node.no_state 
  70  NODE_PENDING = SCons.Node.pending 
  71  NODE_EXECUTING = SCons.Node.executing 
  72  NODE_UP_TO_DATE = SCons.Node.up_to_date 
  73  NODE_EXECUTED = SCons.Node.executed 
  74  NODE_FAILED = SCons.Node.failed 
  75   
  76  print_prepare = 0               # set by option --debug=prepare 
  77   
  78  # A subsystem for recording stats about how different Nodes are handled by 
  79  # the main Taskmaster loop.  There's no external control here (no need for 
  80  # a --debug= option); enable it by changing the value of CollectStats. 
  81   
  82  CollectStats = None 
  83   
   84  class Stats(object):
   85      """
   86      A simple class for holding statistics about the disposition of a
   87      Node by the Taskmaster.  If we're collecting statistics, each Node
   88      processed by the Taskmaster gets one of these attached, in which case
   89      the Taskmaster records its decision each time it processes the Node.
   90      (Ideally, that's just once per Node.)
   91      """
   92      def __init__(self):
   93          """
   94          Instantiates a Taskmaster.Stats object, initializing all
   95          appropriate counters to zero.
   96          """
   97          self.considered = 0
   98          self.already_handled = 0
   99          self.problem = 0
  100          self.child_failed = 0
  101          self.not_built = 0
  102          self.side_effects = 0
  103          self.build = 0
  104  
  105  StatsNodes = []
  106  
  107  fmt = "%(considered)3d "\
  108        "%(already_handled)3d " \
  109        "%(problem)3d " \
  110        "%(child_failed)3d " \
  111        "%(not_built)3d " \
  112        "%(side_effects)3d " \
  113        "%(build)3d "
  114  
  115  def dump_stats():
  116      for n in sorted(StatsNodes, key=lambda a: str(a)):
  117          print((fmt % n.attributes.stats.__dict__) + str(n))
  118  
  119  
  120  
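# An illustrative sketch (not part of the original source) of how the stats
# subsystem above is meant to be used: there is no command-line option, so a
# developer edits this module (or pokes it from wrapper code) to make
# CollectStats true before the build, then calls dump_stats() once the
# Taskmaster has finished:
#
#     import SCons.Taskmaster
#     SCons.Taskmaster.CollectStats = 1     # attach a Stats object to each Node
#     ... run the build (Taskmaster/Job machinery) ...
#     SCons.Taskmaster.dump_stats()         # prints one line per Node, using fmt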
  121  class Task(object):
  122      """
  123      Default SCons build engine task.
  124  
  125      This controls the interaction of the actual building of a node
  126      and the rest of the engine.
  127  
  128      This is expected to handle all of the normally-customizable
  129      aspects of controlling a build, so any given application
  130      *should* be able to do what it wants by sub-classing this
  131      class and overriding methods as appropriate.  If an application
  132      needs to customize something by sub-classing Taskmaster (or
  133      some other build engine class), we should first try to migrate
  134      that functionality into this class.
  135  
  136      Note that it's generally a good idea for sub-classes to call
  137      these methods explicitly to update state, etc., rather than
  138      roll their own interaction with Taskmaster from scratch.
  139      """
  140      def __init__(self, tm, targets, top, node):
  141          self.tm = tm
  142          self.targets = targets
  143          self.top = top
  144          self.node = node
  145          self.exc_clear()
  146  
  147      def trace_message(self, method, node, description='node'):
  148          fmt = '%-20s %s %s\n'
  149          return fmt % (method + ':', description, self.tm.trace_node(node))
  150  
  151      def display(self, message):
  152          """
  153          Hook to allow the calling interface to display a message.
  154  
  155          This hook gets called as part of preparing a task for execution
  156          (that is, a Node to be built).  As part of figuring out what Node
  157          should be built next, the actual target list may be altered,
  158          along with a message describing the alteration.  The calling
  159          interface can subclass Task and provide a concrete implementation
  160          of this method to see those messages.
  161          """
  162          pass
  163  
  164      def prepare(self):
  165          """
  166          Called just before the task is executed.
  167  
  168          This is mainly intended to give the target Nodes a chance to
  169          unlink underlying files and make all necessary directories before
  170          the Action is actually called to build the targets.
  171          """
  172          global print_prepare
  173          T = self.tm.trace
  174          if T: T.write(self.trace_message(u'Task.prepare()', self.node))
  175  
  176          # Now that it's the appropriate time, give the TaskMaster a
  177          # chance to raise any exceptions it encountered while preparing
  178          # this task.
  179          self.exception_raise()
  180  
  181          if self.tm.message:
  182              self.display(self.tm.message)
  183              self.tm.message = None
  184  
  185          # Let the targets take care of any necessary preparations.
  186          # This includes verifying that all of the necessary sources
  187          # and dependencies exist, removing the target file(s), etc.
  188          #
  189          # As of April 2008, the get_executor().prepare() method makes
  190          # sure that all of the aggregate sources necessary to build this
  191          # Task's target(s) exist in one up-front check.  The individual
  192          # target t.prepare() methods check that each target's explicit
  193          # or implicit dependencies exist, and also initialize the
  194          # .sconsign info.
  195          executor = self.targets[0].get_executor()
  196          if executor is None:
  197              return
  198          executor.prepare()
  199          for t in executor.get_action_targets():
  200              if print_prepare:
  201                  print("Preparing target %s..."%t)
  202                  for s in t.side_effects:
  203                      print("...with side-effect %s..."%s)
  204              t.prepare()
  205              for s in t.side_effects:
  206                  if print_prepare:
  207                      print("...Preparing side-effect %s..."%s)
  208                  s.prepare()
  209  
  210      def get_target(self):
  211          """Fetch the target being built or updated by this task.
  212          """
  213          return self.node
  214  
  215      def needs_execute(self):
  216          # TODO(deprecate): "return True" is the old default behavior;
  217          # change it to NotImplementedError (after running through the
  218          # Deprecation Cycle) so the desired behavior is explicitly
  219          # determined by which concrete subclass is used.
  220          #raise NotImplementedError
  221          msg = ('Taskmaster.Task is an abstract base class; instead of\n'
  222                 '\tusing it directly, '
  223                 'derive from it and override the abstract methods.')
  224          SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
  225          return True
  226  
  227      def execute(self):
  228          """
  229          Called to execute the task.
  230  
  231          This method is called from multiple threads in a parallel build,
  232          so only do thread safe stuff here.  Do thread unsafe stuff in
  233          prepare(), executed() or failed().
  234          """
  235          T = self.tm.trace
  236          if T: T.write(self.trace_message(u'Task.execute()', self.node))
  237  
  238          try:
  239              cached_targets = []
  240              for t in self.targets:
  241                  if not t.retrieve_from_cache():
  242                      break
  243                  cached_targets.append(t)
  244              if len(cached_targets) < len(self.targets):
  245                  # Remove targets before building. It's possible that we
  246                  # partially retrieved targets from the cache, leaving
  247                  # them in read-only mode. That might cause the command
  248                  # to fail.
  249                  #
  250                  for t in cached_targets:
  251                      try:
  252                          t.fs.unlink(t.get_internal_path())
  253                      except (IOError, OSError):
  254                          pass
  255                  self.targets[0].build()
  256              else:
  257                  for t in cached_targets:
  258                      t.cached = 1
  259          except SystemExit:
  260              exc_value = sys.exc_info()[1]
  261              raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
  262          except SCons.Errors.UserError:
  263              raise
  264          except SCons.Errors.BuildError:
  265              raise
  266          except Exception as e:
  267              buildError = SCons.Errors.convert_to_BuildError(e)
  268              buildError.node = self.targets[0]
  269              buildError.exc_info = sys.exc_info()
  270              raise buildError
  271  
  272      def executed_without_callbacks(self):
  273          """
  274          Called when the task has been successfully executed
  275          and the Taskmaster instance doesn't want to call
  276          the Node's callback methods.
  277          """
  278          T = self.tm.trace
  279          if T: T.write(self.trace_message('Task.executed_without_callbacks()',
  280                                           self.node))
  281  
  282          for t in self.targets:
  283              if t.get_state() == NODE_EXECUTING:
  284                  for side_effect in t.side_effects:
  285                      side_effect.set_state(NODE_NO_STATE)
  286                  t.set_state(NODE_EXECUTED)
  287  
  288      def executed_with_callbacks(self):
  289          """
  290          Called when the task has been successfully executed and
  291          the Taskmaster instance wants to call the Node's callback
  292          methods.
  293  
  294          This may have been a do-nothing operation (to preserve build
  295          order), so we must check the node's state before deciding whether
  296          it was "built", in which case we call the appropriate Node method.
  297          In any event, we always call "visited()", which will handle any
  298          post-visit actions that must take place regardless of whether
  299          or not the target was an actual built target or a source Node.
  300          """
  301          global print_prepare
  302          T = self.tm.trace
  303          if T: T.write(self.trace_message('Task.executed_with_callbacks()',
  304                                           self.node))
  305  
  306          for t in self.targets:
  307              if t.get_state() == NODE_EXECUTING:
  308                  for side_effect in t.side_effects:
  309                      side_effect.set_state(NODE_NO_STATE)
  310                  t.set_state(NODE_EXECUTED)
  311                  if not t.cached:
  312                      t.push_to_cache()
  313                  t.built()
  314                  t.visited()
  315                  if (not print_prepare and
  316                      (not hasattr(self, 'options') or not self.options.debug_includes)):
  317                      t.release_target_info()
  318              else:
  319                  t.visited()
  320  
  321      executed = executed_with_callbacks
  322  
  323      def failed(self):
  324          """
  325          Default action when a task fails:  stop the build.
  326  
  327          Note: Although this function is normally invoked on nodes in
  328          the executing state, it might also be invoked on up-to-date
  329          nodes when using Configure().
  330          """
  331          self.fail_stop()
  332  
  333      def fail_stop(self):
  334          """
  335          Explicit stop-the-build failure.
  336  
  337          This sets failure status on the target nodes and all of
  338          their dependent parent nodes.
  339  
  340          Note: Although this function is normally invoked on nodes in
  341          the executing state, it might also be invoked on up-to-date
  342          nodes when using Configure().
  343          """
  344          T = self.tm.trace
  345          if T: T.write(self.trace_message('Task.failed_stop()', self.node))
  346  
  347          # Invoke will_not_build() to clean-up the pending children
  348          # list.
  349          self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
  350  
  351          # Tell the taskmaster to not start any new tasks
  352          self.tm.stop()
  353  
  354          # We're stopping because of a build failure, but give the
  355          # calling Task class a chance to postprocess() the top-level
  356          # target under which the build failure occurred.
  357          self.targets = [self.tm.current_top]
  358          self.top = 1
  359  
  360      def fail_continue(self):
  361          """
  362          Explicit continue-the-build failure.
  363  
  364          This sets failure status on the target nodes and all of
  365          their dependent parent nodes.
  366  
  367          Note: Although this function is normally invoked on nodes in
  368          the executing state, it might also be invoked on up-to-date
  369          nodes when using Configure().
  370          """
  371          T = self.tm.trace
  372          if T: T.write(self.trace_message('Task.failed_continue()', self.node))
  373  
  374          self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
  375  
  376      def make_ready_all(self):
  377          """
  378          Marks all targets in a task ready for execution.
  379  
  380          This is used when the interface needs every target Node to be
  381          visited--the canonical example being the "scons -c" option.
  382          """
  383          T = self.tm.trace
  384          if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
  385  
  386          self.out_of_date = self.targets[:]
  387          for t in self.targets:
  388              t.disambiguate().set_state(NODE_EXECUTING)
  389              for s in t.side_effects:
  390                  # add disambiguate here to mirror the call on targets above
  391                  s.disambiguate().set_state(NODE_EXECUTING)
  392  
  393      def make_ready_current(self):
  394          """
  395          Marks all targets in a task ready for execution if any target
  396          is not current.
  397  
  398          This is the default behavior for building only what's necessary.
  399          """
  400          global print_prepare
  401          T = self.tm.trace
  402          if T: T.write(self.trace_message(u'Task.make_ready_current()',
  403                                           self.node))
  404  
  405          self.out_of_date = []
  406          needs_executing = False
  407          for t in self.targets:
  408              try:
  409                  t.disambiguate().make_ready()
  410                  is_up_to_date = not t.has_builder() or \
  411                                  (not t.always_build and t.is_up_to_date())
  412              except EnvironmentError as e:
  413                  raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
  414  
  415              if not is_up_to_date:
  416                  self.out_of_date.append(t)
  417                  needs_executing = True
  418  
  419          if needs_executing:
  420              for t in self.targets:
  421                  t.set_state(NODE_EXECUTING)
  422                  for s in t.side_effects:
  423                      # add disambiguate here to mirror the call on targets in first loop above
  424                      s.disambiguate().set_state(NODE_EXECUTING)
  425          else:
  426              for t in self.targets:
  427                  # We must invoke visited() to ensure that the node
  428                  # information has been computed before allowing the
  429                  # parent nodes to execute. (That could occur in a
  430                  # parallel build...)
  431                  t.visited()
  432                  t.set_state(NODE_UP_TO_DATE)
  433                  if (not print_prepare and
  434                      (not hasattr(self, 'options') or not self.options.debug_includes)):
  435                      t.release_target_info()
  436  
  437      make_ready = make_ready_current
  438  
  439      def postprocess(self):
  440          """
  441          Post-processes a task after it's been executed.
  442  
  443          This examines all the targets just built (or not, we don't care
  444          if the build was successful, or even if there was no build
  445          because everything was up-to-date) to see if they have any
  446          waiting parent Nodes, or Nodes waiting on a common side effect,
  447          that can be put back on the candidates list.
  448          """
  449          T = self.tm.trace
  450          if T: T.write(self.trace_message(u'Task.postprocess()', self.node))
  451  
  452          # We may have built multiple targets, some of which may have
  453          # common parents waiting for this build.  Count up how many
  454          # targets each parent was waiting for so we can subtract the
  455          # values later, and so we *don't* put waiting side-effect Nodes
  456          # back on the candidates list if the Node is also a waiting
  457          # parent.
  458  
  459          targets = set(self.targets)
  460  
  461          pending_children = self.tm.pending_children
  462          parents = {}
  463          for t in targets:
  464              # A node can only be in the pending_children set if it has
  465              # some waiting_parents.
  466              if t.waiting_parents:
  467                  if T: T.write(self.trace_message(u'Task.postprocess()',
  468                                                   t,
  469                                                   'removing'))
  470                  pending_children.discard(t)
  471                  for p in t.waiting_parents:
  472                      parents[p] = parents.get(p, 0) + 1
  473                  t.waiting_parents = set()
  474  
  475          for t in targets:
  476              if t.side_effects is not None:
  477                  for s in t.side_effects:
  478                      if s.get_state() == NODE_EXECUTING:
  479                          s.set_state(NODE_NO_STATE)
  480  
  481                      # The side-effects may have been transferred to
  482                      # NODE_NO_STATE by executed_with{,out}_callbacks, but
  483                      # were not taken out of the waiting parents/pending
  484                      # children data structures.  Check for that now.
  485                      if s.get_state() == NODE_NO_STATE and s.waiting_parents:
  486                          pending_children.discard(s)
  487                          for p in s.waiting_parents:
  488                              parents[p] = parents.get(p, 0) + 1
  489                          s.waiting_parents = set()
  490                      for p in s.waiting_s_e:
  491                          if p.ref_count == 0:
  492                              self.tm.candidates.append(p)
  493  
  494          for p, subtract in parents.items():
  495              p.ref_count = p.ref_count - subtract
  496              if T: T.write(self.trace_message(u'Task.postprocess()',
  497                                               p,
  498                                               'adjusted parent ref count'))
  499              if p.ref_count == 0:
  500                  self.tm.candidates.append(p)
  501  
  502          for t in targets:
  503              t.postprocess()
  504  
  505      # Exception handling subsystem.
  506      #
  507      # Exceptions that occur while walking the DAG or examining Nodes
  508      # must be raised, but must be raised at an appropriate time and in
  509      # a controlled manner so we can, if necessary, recover gracefully,
  510      # possibly write out signature information for Nodes we've updated,
  511      # etc.  This is done by having the Taskmaster tell us about the
  512      # exception (via exception_set()), and letting the Task re-raise it
  513      # at a controlled point, when exception_raise() is called from prepare().
  514      def exc_info(self):
  515          """
  516          Returns info about a recorded exception.
  517          """
  518          return self.exception
  519  
  520      def exc_clear(self):
  521          """
  522          Clears any recorded exception.
  523  
  524          This also changes the "exception_raise" attribute to point
  525          to the appropriate do-nothing method.
  526          """
  527          self.exception = (None, None, None)
  528          self.exception_raise = self._no_exception_to_raise
  529  
  530      def exception_set(self, exception=None):
  531          """
  532          Records an exception to be raised at the appropriate time.
  533  
  534          This also changes the "exception_raise" attribute to point
  535          to the method that will, in fact, raise the recorded exception.
  536          """
  537          if not exception:
  538              exception = sys.exc_info()
  539          self.exception = exception
  540          self.exception_raise = self._exception_raise
  541  
  542      def _no_exception_to_raise(self):
  543          pass
  544  
  545      def _exception_raise(self):
  546          """
  547          Raises a pending exception that was recorded while getting a
  548          Task ready for execution.
  549          """
  550          exc = self.exc_info()[:]
  551          try:
  552              exc_type, exc_value, exc_traceback = exc
  553          except ValueError:
  554              exc_type, exc_value = exc
  555              exc_traceback = None
  556  
  557          # raise exc_type(exc_value).with_traceback(exc_traceback)
  558          if sys.version_info[0] == 2:
  559              exec("raise exc_type, exc_value, exc_traceback")
  560          else: # sys.version_info[0] == 3:
  561              if isinstance(exc_value, Exception): #hasattr(exc_value, 'with_traceback'):
  562                  # If exc_value is an exception, then just reraise
  563                  exec("raise exc_value.with_traceback(exc_traceback)")
  564              else:
  565                  # else we'll create an exception using the value and raise that
  566                  exec("raise exc_type(exc_value).with_traceback(exc_traceback)")
  567  
  568  
  569          # raise e.__class__, e.__class__(e), sys.exc_info()[2]
  570          # exec("raise exc_type(exc_value).with_traceback(exc_traceback)")
  571  
  572  
  573  
  574  class AlwaysTask(Task):
  575      def needs_execute(self):
  576          """
  577          Always returns True (indicating this Task should always
  578          be executed).
  579  
  580          Subclasses that need this behavior (as opposed to the default
  581          of only executing Nodes that are out of date w.r.t. their
  582          dependencies) can use this as follows:
  583  
  584          class MyTaskSubclass(SCons.Taskmaster.Task):
  585              needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute
  586          """
  587          return True
  588  
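# An illustrative sketch (not part of this module) of the kind of Task
# subclass a wrapping interface might define, as described in the module
# docstring; the class name and method bodies here are hypothetical:
#
#     class QuietVisitAllTask(SCons.Taskmaster.AlwaysTask):
#         """Visit every target (as "scons -c" needs) and stay quiet (-q style)."""
#         def display(self, message):
#             pass                        # swallow alter-target messages
#         def make_ready(self):
#             self.make_ready_all()       # mark every target, current or not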
  589  class OutOfDateTask(Task):
  590      def needs_execute(self):
  591          """
  592          Returns True (indicating this Task should be executed) if this
  593          Task's target state indicates it needs executing, which has
  594          already been determined by an earlier up-to-date check.
  595          """
  596          return self.targets[0].get_state() == SCons.Node.executing
  597  
  598  
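# A small illustrative note (not from the original source): the choice between
# OutOfDateTask (the default) and a subclass such as AlwaysTask is made through
# the `tasker` argument of Taskmaster.__init__() below, e.g.:
#
#     tm = Taskmaster(targets=nodes, tasker=AlwaysTask)   # evaluate everything
#     tm = Taskmaster(targets=nodes)                      # default: OutOfDateTask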
  599  def find_cycle(stack, visited):
  600      if stack[-1] in visited:
  601          return None
  602      visited.add(stack[-1])
  603      for n in stack[-1].waiting_parents:
  604          stack.append(n)
  605          if stack[0] == stack[-1]:
  606              return stack
  607          if find_cycle(stack, visited):
  608              return stack
  609          stack.pop()
  610      return None
  611  
  612  
  613  class Taskmaster(object):
  614      """
  615      The Taskmaster for walking the dependency DAG.
  616      """
  617  
  618      def __init__(self, targets=[], tasker=None, order=None, trace=None):
  619          self.original_top = targets
  620          self.top_targets_left = targets[:]
  621          self.top_targets_left.reverse()
  622          self.candidates = []
  623          if tasker is None:
  624              tasker = OutOfDateTask
  625          self.tasker = tasker
  626          if not order:
  627              order = lambda l: l
  628          self.order = order
  629          self.message = None
  630          self.trace = trace
  631          self.next_candidate = self.find_next_candidate
  632          self.pending_children = set()
  633  
  634      def find_next_candidate(self):
  635          """
  636          Returns the next candidate Node for (potential) evaluation.
  637  
  638          The candidate list (really a stack) initially consists of all of
  639          the top-level (command line) targets provided when the Taskmaster
  640          was initialized.  While we walk the DAG, visiting Nodes, all the
  641          children that haven't finished processing get pushed on to the
  642          candidate list.  Each child can then be popped and examined in
  643          turn for whether *their* children are all up-to-date, in which
  644          case a Task will be created for their actual evaluation and
  645          potential building.
  646  
  647          Here is where we also allow candidate Nodes to alter the list of
  648          Nodes that should be examined.  This is used, for example, when
  649          invoking SCons in a source directory.  A source directory Node can
  650          return its corresponding build directory Node, essentially saying,
  651          "Hey, you really need to build this thing over here instead."
  652          """
  653          try:
  654              return self.candidates.pop()
  655          except IndexError:
  656              pass
  657          try:
  658              node = self.top_targets_left.pop()
  659          except IndexError:
  660              return None
  661          self.current_top = node
  662          alt, message = node.alter_targets()
  663          if alt:
  664              self.message = message
  665              self.candidates.append(node)
  666              self.candidates.extend(self.order(alt))
  667              node = self.candidates.pop()
  668          return node
669
  670      def no_next_candidate(self):
  671          """
  672          Stops Taskmaster processing by not returning a next candidate.
  673  
  674          Note that we have to clean up the Taskmaster candidate list
  675          because the cycle detection depends on the fact that all nodes
  676          have been processed somehow.
  677          """
  678          while self.candidates:
  679              candidates = self.candidates
  680              self.candidates = []
  681              self.will_not_build(candidates)
  682          return None
  683  
  684      def _validate_pending_children(self):
  685          """
  686          Validate the content of the pending_children set.  Assert if an
  687          internal error is found.
  688  
  689          This function is used strictly for debugging the taskmaster by
  690          checking that no invariants are violated.  It is not used in
  691          normal operation.
  692  
  693          The pending_children set is used to detect cycles in the
  694          dependency graph.  We call a "pending child" a child that is
  695          found in the "pending" state when checking the dependencies of
  696          its parent node.
  697  
  698          A pending child can occur when the Taskmaster completes a loop
  699          through a cycle.  For example, let's imagine a graph made of
  700          three nodes (A, B and C) making a cycle.  The evaluation starts
  701          at node A.  The Taskmaster first considers whether node A's
  702          child B is up-to-date.  Then, recursively, node B needs to
  703          check whether node C is up-to-date.  This leaves us with a
  704          dependency graph looking like::
  705  
  706                Next candidate \
  707                                \
  708                Node A (Pending) --> Node B (Pending) --> Node C (NoState)
  709                        ^                                         |
  710                        |                                         |
  711                        +-----------------------------------------+
  712  
  713          Now, when the Taskmaster examines the Node C's child Node A,
  714          it finds that Node A is in the "pending" state.  Therefore,
  715          Node A is a pending child of node C.
  716  
  717          Pending children indicate that the Taskmaster has potentially
  718          looped back through a cycle.  We say potentially because it could
  719          also occur when a DAG is evaluated in parallel.  For example,
  720          consider the following graph::
  721  
  722                Node A (Pending) --> Node B (Pending) --> Node C (Pending) --> ...
  723                        |                                         ^
  724                        |                                         |
  725                        +----------> Node D (NoState) ------------+
  726                                       /
  727                        Next candidate /
  728  
  729          The Taskmaster first evaluates the nodes A, B, and C and
  730          starts building some children of node C.  Assuming that the
  731          maximum parallel level has not been reached, the Taskmaster
  732          will examine Node D.  It will find that Node C is a pending
  733          child of Node D.
  734  
  735          In summary, evaluating a graph with a cycle will always
  736          involve a pending child at one point.  A pending child might
  737          indicate either a cycle or a diamond-shaped DAG.  Only a
  738          fraction of the nodes end up being a "pending child" of
  739          another node.  This keeps the pending_children set small in
  740          practice.
  741  
  742          We can differentiate between the two cases if we wait until
  743          the end of the build.  At this point, all the pending children
  744          nodes due to a diamond-shaped DAG will have been properly
  745          built (or will have failed to build).  But the pending
  746          children involved in a cycle will still be in the pending
  747          state.
  748  
  749          The taskmaster removes nodes from the pending_children set as
  750          soon as a pending_children node moves out of the pending
  751          state.  This also helps to keep the pending_children set small.
  752          """
  753  
  754          for n in self.pending_children:
  755              assert n.state in (NODE_PENDING, NODE_EXECUTING), \
  756                  (str(n), StateString[n.state])
  757              assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
  758              for p in n.waiting_parents:
  759                  assert p.ref_count > 0, (str(n), str(p), p.ref_count)
  760  
  761  
  762      def trace_message(self, message):
  763          return 'Taskmaster: %s\n' % message
  764  
  765      def trace_node(self, node):
  766          return '<%-10s %-3s %s>' % (StateString[node.get_state()],
  767                                      node.ref_count,
  768                                      repr(str(node)))
  769  
  770      def _find_next_ready_node(self):
  771          """
  772          Finds the next node that is ready to be built.
  773  
  774          This is *the* main guts of the DAG walk.  We loop through the
  775          list of candidates, looking for something that has no un-built
  776          children (i.e., that is a leaf Node or has dependencies that are
  777          all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
  778          (both the target Node itself and its sources, which are always
  779          scanned in the context of a given target) to discover implicit
  780          dependencies.  A Node that must wait for some children to be
  781          built will be put back on the candidates list after the children
  782          have finished building.  A Node that has been put back on the
  783          candidates list in this way may have itself (or its sources)
  784          re-scanned, in order to handle generated header files (e.g.) and
  785          the implicit dependencies therein.
  786  
  787          Note that this method does not do any signature calculation or
  788          up-to-date check itself.  All of that is handled by the Task
  789          class.  This is purely concerned with the dependency graph walk.
  790          """
  791  
  792          self.ready_exc = None
  793  
  794          T = self.trace
  795          if T: T.write(SCons.Util.UnicodeType('\n') + self.trace_message('Looking for a node to evaluate'))
  796  
  797          while True:
  798              node = self.next_candidate()
  799              if node is None:
  800                  if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
  801                  return None
  802  
  803              node = node.disambiguate()
  804              state = node.get_state()
  805  
  806              # For debugging only:
  807              #
  808              # try:
  809              #     self._validate_pending_children()
  810              # except:
  811              #     self.ready_exc = sys.exc_info()
  812              #     return node
  813  
  814              if CollectStats:
  815                  if not hasattr(node.attributes, 'stats'):
  816                      node.attributes.stats = Stats()
  817                      StatsNodes.append(node)
  818                  S = node.attributes.stats
  819                  S.considered = S.considered + 1
  820              else:
  821                  S = None
  822  
  823              if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))
  824  
  825              if state == NODE_NO_STATE:
  826                  # Mark this node as being on the execution stack:
  827                  node.set_state(NODE_PENDING)
  828              elif state > NODE_PENDING:
  829                  # Skip this node if it has already been evaluated:
  830                  if S: S.already_handled = S.already_handled + 1
  831                  if T: T.write(self.trace_message(u'       already handled (executed)'))
  832                  continue
  833  
  834              executor = node.get_executor()
  835  
  836              try:
  837                  children = executor.get_all_children()
  838              except SystemExit:
  839                  exc_value = sys.exc_info()[1]
  840                  e = SCons.Errors.ExplicitExit(node, exc_value.code)
  841                  self.ready_exc = (SCons.Errors.ExplicitExit, e)
  842                  if T: T.write(self.trace_message('       SystemExit'))
  843                  return node
  844              except Exception as e:
  845                  # We had a problem just trying to figure out the
  846                  # children (like a child couldn't be linked into a
  847                  # VariantDir, or a Scanner threw something).  Arrange to
  848                  # raise the exception when the Task is "executed."
  849                  self.ready_exc = sys.exc_info()
  850                  if S: S.problem = S.problem + 1
  851                  if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
  852                  return node
  853  
  854              children_not_visited = []
  855              children_pending = set()
  856              children_not_ready = []
  857              children_failed = False
  858  
  859              for child in chain(executor.get_all_prerequisites(), children):
  860                  childstate = child.get_state()
  861  
  862                  if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))
  863  
  864                  if childstate == NODE_NO_STATE:
  865                      children_not_visited.append(child)
  866                  elif childstate == NODE_PENDING:
  867                      children_pending.add(child)
  868                  elif childstate == NODE_FAILED:
  869                      children_failed = True
  870  
  871                  if childstate <= NODE_EXECUTING:
  872                      children_not_ready.append(child)
  873  
  874              # These nodes have not even been visited yet.  Add
  875              # them to the list so that on some next pass we can
  876              # take a stab at evaluating them (or their children).
  877              children_not_visited.reverse()
  878              self.candidates.extend(self.order(children_not_visited))
  879  
  880              # if T and children_not_visited:
  881              #     T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
  882              #     T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))
  883  
  884              # Skip this node if any of its children have failed.
  885              #
  886              # This catches the case where we're descending a top-level
  887              # target and one of our children failed while trying to be
  888              # built by a *previous* descent of an earlier top-level
  889              # target.
  890              #
  891              # It can also occur if a node is reused in multiple
  892              # targets.  The first time, one descends through one of the
  893              # targets; the next time, through the other target.
  894              #
  895              # Note that we can only have failed_children if the
  896              # --keep-going flag was used, because without it the build
  897              # will stop before diving into the other branch.
  898              #
  899              # Note that even if one of the children fails, we still
  900              # add the other children to the list of candidate nodes
  901              # to keep on building (--keep-going).
  902              if children_failed:
  903                  for n in executor.get_action_targets():
  904                      n.set_state(NODE_FAILED)
  905  
  906                  if S: S.child_failed = S.child_failed + 1
  907                  if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
  908                  continue
  909  
  910              if children_not_ready:
  911                  for child in children_not_ready:
  912                      # We're waiting on one or more derived targets
  913                      # that have not yet finished building.
  914                      if S: S.not_built = S.not_built + 1
  915  
  916                      # Add this node to the waiting parents lists of
  917                      # anything we're waiting on, with a reference
  918                      # count so we can be put back on the list for
  919                      # re-evaluation when they've all finished.
  920                      node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
  921                      if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
  922                                    (self.trace_node(node), repr(str(child)))))
  923  
  924                  if T:
  925                      for pc in children_pending:
  926                          T.write(self.trace_message('       adding %s to the pending children set\n' %
  927                                  self.trace_node(pc)))
  928                  self.pending_children = self.pending_children | children_pending
  929  
  930                  continue
  931  
  932              # Skip this node if it has side-effects that are
  933              # currently being built:
  934              wait_side_effects = False
  935              for se in executor.get_action_side_effects():
  936                  if se.get_state() == NODE_EXECUTING:
  937                      se.add_to_waiting_s_e(node)
  938                      wait_side_effects = True
  939  
  940              if wait_side_effects:
  941                  if S: S.side_effects = S.side_effects + 1
  942                  continue
  943  
  944              # The default when we've gotten through all of the checks above:
  945              # this node is ready to be built.
  946              if S: S.build = S.build + 1
  947              if T: T.write(self.trace_message(u'Evaluating %s\n' %
  948                            self.trace_node(node)))
  949  
  950              # For debugging only:
  951              #
  952              # try:
  953              #     self._validate_pending_children()
  954              # except:
  955              #     self.ready_exc = sys.exc_info()
  956              #     return node
  957  
  958              return node
  959  
  960          return None
961
  962      def next_task(self):
  963          """
  964          Returns the next task to be executed.
  965  
  966          This simply asks for the next Node to be evaluated, and then wraps
  967          it in the specific Task subclass with which we were initialized.
  968          """
  969          node = self._find_next_ready_node()
  970  
  971          if node is None:
  972              return None
  973  
  974          executor = node.get_executor()
  975          if executor is None:
  976              return None
  977  
  978          tlist = executor.get_all_targets()
  979  
  980          task = self.tasker(self, tlist, node in self.original_top, node)
  981          try:
  982              task.make_ready()
  983          except Exception as e:
  984              # We had a problem just trying to get this task ready (like
  985              # a child couldn't be linked to a VariantDir when deciding
  986              # whether this node is current).  Arrange to raise the
  987              # exception when the Task is "executed."
  988              self.ready_exc = sys.exc_info()
  989  
  990          if self.ready_exc:
  991              task.exception_set(self.ready_exc)
  992  
  993          self.ready_exc = None
  994  
  995          return task
996
  997      def will_not_build(self, nodes, node_func=lambda n: None):
  998          """
  999          Perform clean-up for nodes that will never be built.  Invokes
 1000          a user defined function on all of these nodes (including all
 1001          of their parents).
 1002          """
 1003  
 1004          T = self.trace
 1005  
 1006          pending_children = self.pending_children
 1007  
 1008          to_visit = set(nodes)
 1009          pending_children = pending_children - to_visit
 1010  
 1011          if T:
 1012              for n in nodes:
 1013                  T.write(self.trace_message('       removing node %s from the pending children set\n' %
 1014                          self.trace_node(n)))
 1015          try:
 1016              while len(to_visit):
 1017                  node = to_visit.pop()
 1018                  node_func(node)
 1019  
 1020                  # Prune recursion by flushing the waiting children
 1021                  # list immediately.
 1022                  parents = node.waiting_parents
 1023                  node.waiting_parents = set()
 1024  
 1025                  to_visit = to_visit | parents
 1026                  pending_children = pending_children - parents
 1027  
 1028                  for p in parents:
 1029                      p.ref_count = p.ref_count - 1
 1030                      if T: T.write(self.trace_message('       removing parent %s from the pending children set\n' %
 1031                                    self.trace_node(p)))
 1032          except KeyError:
 1033              # The container to_visit has been emptied.
 1034              pass
 1035  
 1036          # We have to stick the pending_children set back into the
 1037          # taskmaster, because Python 1.5.2 compatibility did not
 1038          # allow us to use in-place updates.
 1039          self.pending_children = pending_children
1040
 1041      def stop(self):
 1042          """
 1043          Stops the current build completely.
 1044          """
 1045          self.next_candidate = self.no_next_candidate
1046
 1047      def cleanup(self):
 1048          """
 1049          Check for dependency cycles.
 1050          """
 1051          if not self.pending_children:
 1052              return
 1053  
 1054          nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
 1055  
 1056          genuine_cycles = [
 1057              node for node, cycle in nclist
 1058              if cycle or node.get_state() != NODE_EXECUTED
 1059          ]
 1060          if not genuine_cycles:
 1061              # All of the "cycles" found were single nodes in EXECUTED state,
 1062              # which is to say, they really weren't cycles.  Just return.
 1063              return
 1064  
 1065          desc = 'Found dependency cycle(s):\n'
 1066          for node, cycle in nclist:
 1067              if cycle:
 1068                  desc = desc + "  " + " -> ".join(map(str, cycle)) + "\n"
 1069              else:
 1070                  desc = desc + \
 1071                      "  Internal Error: no cycle found for node %s (%s) in state %s\n" % \
 1072                      (node, repr(node), StateString[node.get_state()])
 1073  
 1074          raise SCons.Errors.UserError(desc)
 1075  
 1076  # Local Variables:
 1077  # tab-width:4
 1078  # indent-tabs-mode:nil
 1079  # End:
 1080  # vim: set expandtab tabstop=4 shiftwidth=4:
 1081  