Package SCons :: Module Taskmaster

Source Code for Module SCons.Taskmaster

   1  # 
   2  # Copyright (c) 2001 - 2017 The SCons Foundation 
   3  # 
   4  # Permission is hereby granted, free of charge, to any person obtaining 
   5  # a copy of this software and associated documentation files (the 
   6  # "Software"), to deal in the Software without restriction, including 
   7  # without limitation the rights to use, copy, modify, merge, publish, 
   8  # distribute, sublicense, and/or sell copies of the Software, and to 
   9  # permit persons to whom the Software is furnished to do so, subject to 
  10  # the following conditions: 
  11  # 
  12  # The above copyright notice and this permission notice shall be included 
  13  # in all copies or substantial portions of the Software. 
  14  # 
  15  # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY 
  16  # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE 
  17  # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 
  18  # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE 
  19  # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION 
  20  # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION 
  21  # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
  22   
  23  from __future__ import print_function 
  24   
  25  import sys 
  26   
  27  __doc__ = """ 
  28      Generic Taskmaster module for the SCons build engine. 
  29      ===================================================== 
  30       
  31      This module contains the primary interface(s) between a wrapping user 
  32      interface and the SCons build engine.  There are two key classes here: 
  33       
  34      Taskmaster 
  35      ---------- 
  36          This is the main engine for walking the dependency graph and 
  37          calling things to decide what does or doesn't need to be built. 
  38   
  39      Task 
  40      ---- 
  41          This is the base class for allowing a wrapping interface to 
  42          decide what does or doesn't actually need to be done.  The 
  43          intention is for a wrapping interface to subclass this as 
  44          appropriate for different types of behavior it may need. 
  45   
  46          The canonical example is the SCons native Python interface, 
  47          which has Task subclasses that handle its specific behavior, 
  48          like printing "'foo' is up to date" when a top-level target 
  49          doesn't need to be built, and handling the -c option by removing 
  50          targets as its "build" action.  There is also a separate subclass 
  51          for suppressing this output when the -q option is used. 
  52   
  53          The Taskmaster instantiates a Task object for each (set of) 
  54          target(s) that it decides need to be evaluated and/or built. 
  55  """ 
  56   
  57  __revision__ = "src/engine/SCons/Taskmaster.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog" 
  58   
  59  from itertools import chain 
  60  import operator 
  61  import sys 
  62  import traceback 
  63   
  64  import SCons.Errors 
  65  import SCons.Node 
  66  import SCons.Warnings 
  67   
  68  StateString = SCons.Node.StateString 
  69  NODE_NO_STATE = SCons.Node.no_state 
  70  NODE_PENDING = SCons.Node.pending 
  71  NODE_EXECUTING = SCons.Node.executing 
  72  NODE_UP_TO_DATE = SCons.Node.up_to_date 
  73  NODE_EXECUTED = SCons.Node.executed 
  74  NODE_FAILED = SCons.Node.failed 
  75   
  76  print_prepare = 0               # set by option --debug=prepare 
  77   
  78  # A subsystem for recording stats about how different Nodes are handled by 
  79  # the main Taskmaster loop.  There's no external control here (no need for 
  80  # a --debug= option); enable it by changing the value of CollectStats. 
  81   
  82  CollectStats = None 
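      # For example (an illustrative sketch, not part of this module), a
      # debugging session could turn collection on before the build runs and
      # print the per-Node counters afterwards:
      #
      #     import SCons.Taskmaster
      #     SCons.Taskmaster.CollectStats = True
      #     ...                            # run the build
      #     SCons.Taskmaster.dump_stats()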
  83   
  84  class Stats(object):
  85      """
  86      A simple class for holding statistics about the disposition of a
  87      Node by the Taskmaster.  If we're collecting statistics, each Node
  88      processed by the Taskmaster gets one of these attached, in which case
  89      the Taskmaster records its decision each time it processes the Node.
  90      (Ideally, that's just once per Node.)
  91      """
  92      def __init__(self):
  93          """
  94          Instantiates a Taskmaster.Stats object, initializing all
  95          appropriate counters to zero.
  96          """
  97          self.considered = 0
  98          self.already_handled = 0
  99          self.problem = 0
 100          self.child_failed = 0
 101          self.not_built = 0
 102          self.side_effects = 0
 103          self.build = 0
 104  
 105  StatsNodes = []
 106  
 107  fmt = "%(considered)3d "\
 108        "%(already_handled)3d " \
 109        "%(problem)3d " \
 110        "%(child_failed)3d " \
 111        "%(not_built)3d " \
 112        "%(side_effects)3d " \
 113        "%(build)3d "
 114  
 115  def dump_stats():
 116      for n in sorted(StatsNodes, key=lambda a: str(a)):
 117          print((fmt % n.attributes.stats.__dict__) + str(n))
 118  
 119  
 120  
 121  class Task(object):
 122      """
 123      Default SCons build engine task.
 124  
 125      This controls the interaction of the actual building of a node
 126      and the rest of the engine.
 127  
 128      This is expected to handle all of the normally-customizable
 129      aspects of controlling a build, so any given application
 130      *should* be able to do what it wants by sub-classing this
 131      class and overriding methods as appropriate.  If an application
 132      needs to customize something by sub-classing Taskmaster (or
 133      some other build engine class), we should first try to migrate
 134      that functionality into this class.
 135  
 136      Note that it's generally a good idea for sub-classes to call
 137      these methods explicitly to update state, etc., rather than
 138      roll their own interaction with Taskmaster from scratch.
 139      """
 140      def __init__(self, tm, targets, top, node):
 141          self.tm = tm
 142          self.targets = targets
 143          self.top = top
 144          self.node = node
 145          self.exc_clear()
 146  
 147      def trace_message(self, method, node, description='node'):
 148          fmt = '%-20s %s %s\n'
 149          return fmt % (method + ':', description, self.tm.trace_node(node))
 150  
 151      def display(self, message):
 152          """
 153          Hook to allow the calling interface to display a message.
 154  
 155          This hook gets called as part of preparing a task for execution
 156          (that is, a Node to be built).  As part of figuring out what Node
 157          should be built next, the actual target list may be altered,
 158          along with a message describing the alteration.  The calling
 159          interface can subclass Task and provide a concrete implementation
 160          of this method to see those messages.
 161          """
 162          pass
 163  
 164      def prepare(self):
 165          """
 166          Called just before the task is executed.
 167  
 168          This is mainly intended to give the target Nodes a chance to
 169          unlink underlying files and make all necessary directories before
 170          the Action is actually called to build the targets.
 171          """
 172          global print_prepare
 173          T = self.tm.trace
 174          if T: T.write(self.trace_message(u'Task.prepare()', self.node))
 175  
 176          # Now that it's the appropriate time, give the TaskMaster a
 177          # chance to raise any exceptions it encountered while preparing
 178          # this task.
 179          self.exception_raise()
 180  
 181          if self.tm.message:
 182              self.display(self.tm.message)
 183              self.tm.message = None
 184  
 185          # Let the targets take care of any necessary preparations.
 186          # This includes verifying that all of the necessary sources
 187          # and dependencies exist, removing the target file(s), etc.
 188          #
 189          # As of April 2008, the get_executor().prepare() method makes
 190          # sure that all of the aggregate sources necessary to build this
 191          # Task's target(s) exist in one up-front check.  The individual
 192          # target t.prepare() methods check that each target's explicit
 193          # or implicit dependencies exist, and also initialize the
 194          # .sconsign info.
 195          executor = self.targets[0].get_executor()
 196          if executor is None:
 197              return
 198          executor.prepare()
 199          for t in executor.get_action_targets():
 200              if print_prepare:
 201                  print("Preparing target %s..." % t)
 202                  for s in t.side_effects:
 203                      print("...with side-effect %s..." % s)
 204              t.prepare()
 205              for s in t.side_effects:
 206                  if print_prepare:
 207                      print("...Preparing side-effect %s..." % s)
 208                  s.prepare()
 209  
 210      def get_target(self):
 211          """Fetch the target being built or updated by this task.
 212          """
 213          return self.node
 214  
 215      def needs_execute(self):
 216          # TODO(deprecate): "return True" is the old default behavior;
 217          # change it to NotImplementedError (after running through the
 218          # Deprecation Cycle) so the desired behavior is explicitly
 219          # determined by which concrete subclass is used.
 220          #raise NotImplementedError
 221          msg = ('Taskmaster.Task is an abstract base class; instead of\n'
 222                 '\tusing it directly, '
 223                 'derive from it and override the abstract methods.')
 224          SCons.Warnings.warn(SCons.Warnings.TaskmasterNeedsExecuteWarning, msg)
 225          return True
 226  
 227      def execute(self):
 228          """
 229          Called to execute the task.
 230  
 231          This method is called from multiple threads in a parallel build,
 232          so only do thread safe stuff here.  Do thread unsafe stuff in
 233          prepare(), executed() or failed().
 234          """
 235          T = self.tm.trace
 236          if T: T.write(self.trace_message(u'Task.execute()', self.node))
 237  
 238          try:
 239              cached_targets = []
 240              for t in self.targets:
 241                  if not t.retrieve_from_cache():
 242                      break
 243                  cached_targets.append(t)
 244              if len(cached_targets) < len(self.targets):
 245                  # Remove targets before building. It's possible that we
 246                  # partially retrieved targets from the cache, leaving
 247                  # them in read-only mode. That might cause the command
 248                  # to fail.
 249                  #
 250                  for t in cached_targets:
 251                      try:
 252                          t.fs.unlink(t.get_internal_path())
 253                      except (IOError, OSError):
 254                          pass
 255                  self.targets[0].build()
 256              else:
 257                  for t in cached_targets:
 258                      t.cached = 1
 259          except SystemExit:
 260              exc_value = sys.exc_info()[1]
 261              raise SCons.Errors.ExplicitExit(self.targets[0], exc_value.code)
 262          except SCons.Errors.UserError:
 263              raise
 264          except SCons.Errors.BuildError:
 265              raise
 266          except Exception as e:
 267              buildError = SCons.Errors.convert_to_BuildError(e)
 268              buildError.node = self.targets[0]
 269              buildError.exc_info = sys.exc_info()
 270              raise buildError
 271  
 272      def executed_without_callbacks(self):
 273          """
 274          Called when the task has been successfully executed
 275          and the Taskmaster instance doesn't want to call
 276          the Node's callback methods.
 277          """
 278          T = self.tm.trace
 279          if T: T.write(self.trace_message('Task.executed_without_callbacks()',
 280                                           self.node))
 281  
 282          for t in self.targets:
 283              if t.get_state() == NODE_EXECUTING:
 284                  for side_effect in t.side_effects:
 285                      side_effect.set_state(NODE_NO_STATE)
 286                  t.set_state(NODE_EXECUTED)
 287  
 288      def executed_with_callbacks(self):
 289          """
 290          Called when the task has been successfully executed and
 291          the Taskmaster instance wants to call the Node's callback
 292          methods.
 293  
 294          This may have been a do-nothing operation (to preserve build
 295          order), so we must check the node's state before deciding whether
 296          it was "built", in which case we call the appropriate Node method.
 297          In any event, we always call "visited()", which will handle any
 298          post-visit actions that must take place regardless of whether
 299          or not the target was an actual built target or a source Node.
 300          """
 301          global print_prepare
 302          T = self.tm.trace
 303          if T: T.write(self.trace_message('Task.executed_with_callbacks()',
 304                                           self.node))
 305  
 306          for t in self.targets:
 307              if t.get_state() == NODE_EXECUTING:
 308                  for side_effect in t.side_effects:
 309                      side_effect.set_state(NODE_NO_STATE)
 310                  t.set_state(NODE_EXECUTED)
 311                  if not t.cached:
 312                      t.push_to_cache()
 313                  t.built()
 314                  t.visited()
 315                  if (not print_prepare and
 316                      (not hasattr(self, 'options') or not self.options.debug_includes)):
 317                      t.release_target_info()
 318              else:
 319                  t.visited()
 320  
 321      executed = executed_with_callbacks
 322  
 323      def failed(self):
 324          """
 325          Default action when a task fails:  stop the build.
 326  
 327          Note: Although this function is normally invoked on nodes in
 328          the executing state, it might also be invoked on up-to-date
 329          nodes when using Configure().
 330          """
 331          self.fail_stop()
 332  
 333      def fail_stop(self):
 334          """
 335          Explicit stop-the-build failure.
 336  
 337          This sets failure status on the target nodes and all of
 338          their dependent parent nodes.
 339  
 340          Note: Although this function is normally invoked on nodes in
 341          the executing state, it might also be invoked on up-to-date
 342          nodes when using Configure().
 343          """
 344          T = self.tm.trace
 345          if T: T.write(self.trace_message('Task.fail_stop()', self.node))
 346  
 347          # Invoke will_not_build() to clean up the pending children
 348          # list.
 349          self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
 350  
 351          # Tell the taskmaster to not start any new tasks
 352          self.tm.stop()
 353  
 354          # We're stopping because of a build failure, but give the
 355          # calling Task class a chance to postprocess() the top-level
 356          # target under which the build failure occurred.
 357          self.targets = [self.tm.current_top]
 358          self.top = 1
 359  
 360      def fail_continue(self):
 361          """
 362          Explicit continue-the-build failure.
 363  
 364          This sets failure status on the target nodes and all of
 365          their dependent parent nodes.
 366  
 367          Note: Although this function is normally invoked on nodes in
 368          the executing state, it might also be invoked on up-to-date
 369          nodes when using Configure().
 370          """
 371          T = self.tm.trace
 372          if T: T.write(self.trace_message('Task.fail_continue()', self.node))
 373  
 374          self.tm.will_not_build(self.targets, lambda n: n.set_state(NODE_FAILED))
 375  
 376      def make_ready_all(self):
 377          """
 378          Marks all targets in a task ready for execution.
 379  
 380          This is used when the interface needs every target Node to be
 381          visited--the canonical example being the "scons -c" option.
 382          """
 383          T = self.tm.trace
 384          if T: T.write(self.trace_message('Task.make_ready_all()', self.node))
 385  
 386          self.out_of_date = self.targets[:]
 387          for t in self.targets:
 388              t.disambiguate().set_state(NODE_EXECUTING)
 389              for s in t.side_effects:
 390                  # add disambiguate here to mirror the call on targets above
 391                  s.disambiguate().set_state(NODE_EXECUTING)
 392  
 393      def make_ready_current(self):
 394          """
 395          Marks all targets in a task ready for execution if any target
 396          is not current.
 397  
 398          This is the default behavior for building only what's necessary.
 399          """
 400          global print_prepare
 401          T = self.tm.trace
 402          if T: T.write(self.trace_message(u'Task.make_ready_current()',
 403                                           self.node))
 404  
 405          self.out_of_date = []
 406          needs_executing = False
 407          for t in self.targets:
 408              try:
 409                  t.disambiguate().make_ready()
 410                  is_up_to_date = not t.has_builder() or \
 411                                  (not t.always_build and t.is_up_to_date())
 412              except EnvironmentError as e:
 413                  raise SCons.Errors.BuildError(node=t, errstr=e.strerror, filename=e.filename)
 414  
 415              if not is_up_to_date:
 416                  self.out_of_date.append(t)
 417                  needs_executing = True
 418  
 419          if needs_executing:
 420              for t in self.targets:
 421                  t.set_state(NODE_EXECUTING)
 422                  for s in t.side_effects:
 423                      # add disambiguate here to mirror the call on targets in first loop above
 424                      s.disambiguate().set_state(NODE_EXECUTING)
 425          else:
 426              for t in self.targets:
 427                  # We must invoke visited() to ensure that the node
 428                  # information has been computed before allowing the
 429                  # parent nodes to execute. (That could occur in a
 430                  # parallel build...)
 431                  t.visited()
 432                  t.set_state(NODE_UP_TO_DATE)
 433                  if (not print_prepare and
 434                      (not hasattr(self, 'options') or not self.options.debug_includes)):
 435                      t.release_target_info()
 436  
 437      make_ready = make_ready_current
 438  
 439      def postprocess(self):
 440          """
 441          Post-processes a task after it's been executed.
 442  
 443          This examines all the targets just built (or not, we don't care
 444          if the build was successful, or even if there was no build
 445          because everything was up-to-date) to see if they have any
 446          waiting parent Nodes, or Nodes waiting on a common side effect,
 447          that can be put back on the candidates list.
 448          """
 449          T = self.tm.trace
 450          if T: T.write(self.trace_message(u'Task.postprocess()', self.node))
 451  
 452          # We may have built multiple targets, some of which may have
 453          # common parents waiting for this build.  Count up how many
 454          # targets each parent was waiting for so we can subtract the
 455          # values later, and so we *don't* put waiting side-effect Nodes
 456          # back on the candidates list if the Node is also a waiting
 457          # parent.
 458  
 459          targets = set(self.targets)
 460  
 461          pending_children = self.tm.pending_children
 462          parents = {}
 463          for t in targets:
 464              # A node can only be in the pending_children set if it has
 465              # some waiting_parents.
 466              if t.waiting_parents:
 467                  if T: T.write(self.trace_message(u'Task.postprocess()',
 468                                                   t,
 469                                                   'removing'))
 470                  pending_children.discard(t)
 471                  for p in t.waiting_parents:
 472                      parents[p] = parents.get(p, 0) + 1
 473  
 474          for t in targets:
 475              if t.side_effects is not None:
 476                  for s in t.side_effects:
 477                      if s.get_state() == NODE_EXECUTING:
 478                          s.set_state(NODE_NO_STATE)
 479                          for p in s.waiting_parents:
 480                              parents[p] = parents.get(p, 0) + 1
 481                      for p in s.waiting_s_e:
 482                          if p.ref_count == 0:
 483                              self.tm.candidates.append(p)
 484  
 485          for p, subtract in parents.items():
 486              p.ref_count = p.ref_count - subtract
 487              if T: T.write(self.trace_message(u'Task.postprocess()',
 488                                               p,
 489                                               'adjusted parent ref count'))
 490              if p.ref_count == 0:
 491                  self.tm.candidates.append(p)
 492  
 493          for t in targets:
 494              t.postprocess()
 495  
 496      # Exception handling subsystem.
 497      #
 498      # Exceptions that occur while walking the DAG or examining Nodes
 499      # must be raised, but must be raised at an appropriate time and in
 500      # a controlled manner so we can, if necessary, recover gracefully,
 501      # possibly write out signature information for Nodes we've updated,
 502      # etc.  This is done by having the Taskmaster tell us about the
 503      # exception, and letting the Task raise it later, from prepare().
 504  
 505      def exc_info(self):
 506          """
 507          Returns info about a recorded exception.
 508          """
 509          return self.exception
 510  
 511      def exc_clear(self):
 512          """
 513          Clears any recorded exception.
 514  
 515          This also changes the "exception_raise" attribute to point
 516          to the appropriate do-nothing method.
 517          """
 518          self.exception = (None, None, None)
 519          self.exception_raise = self._no_exception_to_raise
 520  
 521      def exception_set(self, exception=None):
 522          """
 523          Records an exception to be raised at the appropriate time.
 524  
 525          This also changes the "exception_raise" attribute to point
 526          to the method that will, in fact, raise the recorded exception.
 527          """
 528          if not exception:
 529              exception = sys.exc_info()
 530          self.exception = exception
 531          self.exception_raise = self._exception_raise
 532  
 533      def _no_exception_to_raise(self):
 534          pass
 535  
 536      def _exception_raise(self):
 537          """
 538          Raises a pending exception that was recorded while getting a
 539          Task ready for execution.
 540          """
 541          exc = self.exc_info()[:]
 542          try:
 543              exc_type, exc_value, exc_traceback = exc
 544          except ValueError:
 545              exc_type, exc_value = exc
 546              exc_traceback = None
 547  
 548          # raise exc_type(exc_value).with_traceback(exc_traceback)
 549          if sys.version_info[0] == 2:
 550              exec("raise exc_type, exc_value, exc_traceback")
 551          else: # sys.version_info[0] == 3:
 552              if isinstance(exc_value, Exception): #hasattr(exc_value, 'with_traceback'):
 553                  # If exc_value is an exception, then just reraise
 554                  exec("raise exc_value.with_traceback(exc_traceback)")
 555              else:
 556                  # else we'll create an exception using the value and raise that
 557                  exec("raise exc_type(exc_value).with_traceback(exc_traceback)")
 558  
 559  
 560          # raise e.__class__, e.__class__(e), sys.exc_info()[2]
 561          # exec("raise exc_type(exc_value).with_traceback(exc_traceback)")
 562  
 563  
 564  
 565  class AlwaysTask(Task):
 566      def needs_execute(self):
 567          """
 568          Always returns True (indicating this Task should always
 569          be executed).
 570  
 571          Subclasses that need this behavior (as opposed to the default
 572          of only executing Nodes that are out of date w.r.t. their
 573          dependencies) can use this as follows:
 574  
 575          class MyTaskSubclass(SCons.Taskmaster.Task):
 576              needs_execute = SCons.Taskmaster.AlwaysTask.needs_execute
 577          """
 578          return True
 579  
 580  class OutOfDateTask(Task):
 581      def needs_execute(self):
 582          """
 583          Returns True (indicating this Task should be executed) if this
 584          Task's target state indicates it needs executing, which has
 585          already been determined by an earlier up-to-date check.
 586          """
 587          return self.targets[0].get_state() == SCons.Node.executing
 588  
 589  
 590  def find_cycle(stack, visited):
 591      if stack[-1] in visited:
 592          return None
 593      visited.add(stack[-1])
 594      for n in stack[-1].waiting_parents:
 595          stack.append(n)
 596          if stack[0] == stack[-1]:
 597              return stack
 598          if find_cycle(stack, visited):
 599              return stack
 600          stack.pop()
 601      return None
 602  
 603  
 604  class Taskmaster(object):
 605      """
 606      The Taskmaster for walking the dependency DAG.
 607      """
 608  
 609      def __init__(self, targets=[], tasker=None, order=None, trace=None):
 610          self.original_top = targets
 611          self.top_targets_left = targets[:]
 612          self.top_targets_left.reverse()
 613          self.candidates = []
 614          if tasker is None:
 615              tasker = OutOfDateTask
 616          self.tasker = tasker
 617          if not order:
 618              order = lambda l: l
 619          self.order = order
 620          self.message = None
 621          self.trace = trace
 622          self.next_candidate = self.find_next_candidate
 623          self.pending_children = set()
 624  
 625      def find_next_candidate(self):
 626          """
 627          Returns the next candidate Node for (potential) evaluation.
 628  
 629          The candidate list (really a stack) initially consists of all of
 630          the top-level (command line) targets provided when the Taskmaster
 631          was initialized.  While we walk the DAG, visiting Nodes, all the
 632          children that haven't finished processing get pushed on to the
 633          candidate list.  Each child can then be popped and examined in
 634          turn for whether *their* children are all up-to-date, in which
 635          case a Task will be created for their actual evaluation and
 636          potential building.
 637  
 638          Here is where we also allow candidate Nodes to alter the list of
 639          Nodes that should be examined.  This is used, for example, when
 640          invoking SCons in a source directory.  A source directory Node can
 641          return its corresponding build directory Node, essentially saying,
 642          "Hey, you really need to build this thing over here instead."
 643          """
 644          try:
 645              return self.candidates.pop()
 646          except IndexError:
 647              pass
 648          try:
 649              node = self.top_targets_left.pop()
 650          except IndexError:
 651              return None
 652          self.current_top = node
 653          alt, message = node.alter_targets()
 654          if alt:
 655              self.message = message
 656              self.candidates.append(node)
 657              self.candidates.extend(self.order(alt))
 658              node = self.candidates.pop()
 659          return node
 660  
 661      def no_next_candidate(self):
 662          """
 663          Stops Taskmaster processing by not returning a next candidate.
 664  
 665          Note that we have to clean up the Taskmaster candidate list
 666          because the cycle detection depends on the fact that all nodes
 667          have been processed somehow.
 668          """
 669          while self.candidates:
 670              candidates = self.candidates
 671              self.candidates = []
 672              self.will_not_build(candidates)
 673          return None
 674  
 675      def _validate_pending_children(self):
 676          """
 677          Validate the content of the pending_children set. Assert if an
 678          internal error is found.
 679  
 680          This function is used strictly for debugging the taskmaster by
 681          checking that no invariants are violated. It is not used in
 682          normal operation.
 683  
 684          The pending_children set is used to detect cycles in the
 685          dependency graph. We call a "pending child" a child that is
 686          found in the "pending" state when checking the dependencies of
 687          its parent node.
 688  
 689          A pending child can occur when the Taskmaster completes a loop
 690          through a cycle. For example, let's imagine a graph made of
 691          three nodes (A, B and C) making a cycle. The evaluation starts
 692          at node A. The Taskmaster first considers whether node A's
 693          child B is up-to-date. Then, recursively, node B needs to
 694          check whether node C is up-to-date. This leaves us with a
 695          dependency graph looking like::
 696  
 697              Next candidate \
 698                              \
 699              Node A (Pending) --> Node B (Pending) --> Node C (NoState)
 700                      ^                                        |
 701                      |                                        |
 702                      +----------------------------------------+
 703  
 704          Now, when the Taskmaster examines the Node C's child Node A,
 705          it finds that Node A is in the "pending" state. Therefore,
 706          Node A is a pending child of node C.
 707  
 708          Pending children indicate that the Taskmaster has potentially
 709          looped back through a cycle. We say potentially because it could
 710          also occur when a DAG is evaluated in parallel. For example,
 711          consider the following graph::
 712  
 713              Node A (Pending) --> Node B (Pending) --> Node C (Pending) --> ...
 714                     |                                         ^
 715                     |                                         |
 716                     +----------> Node D (NoState) ------------+
 717                                       /
 718                        Next candidate /
 719  
 720          The Taskmaster first evaluates the nodes A, B, and C and
 721          starts building some children of node C. Assuming that the
 722          maximum parallel level has not been reached, the Taskmaster
 723          will examine Node D. It will find that Node C is a pending
 724          child of Node D.
 725  
 726          In summary, evaluating a graph with a cycle will always
 727          involve a pending child at one point. A pending child might
 728          indicate either a cycle or a diamond-shaped DAG. Only a
 729          fraction of the nodes end up being a "pending child" of
 730          another node. This keeps the pending_children set small in
 731          practice.
 732  
 733          We can differentiate between the two cases if we wait until
 734          the end of the build. At this point, all the pending children
 735          nodes due to a diamond-shaped DAG will have been properly
 736          built (or will have failed to build). But, the pending
 737          children involved in a cycle will still be in the pending
 738          state.
 739  
 740          The taskmaster removes nodes from the pending_children set as
 741          soon as a pending_children node moves out of the pending
 742          state. This also helps to keep the pending_children set small.
 743          """
 744  
 745          for n in self.pending_children:
 746              assert n.state in (NODE_PENDING, NODE_EXECUTING), \
 747                  (str(n), StateString[n.state])
 748              assert len(n.waiting_parents) != 0, (str(n), len(n.waiting_parents))
 749              for p in n.waiting_parents:
 750                  assert p.ref_count > 0, (str(n), str(p), p.ref_count)
 751  
 752  
 753      def trace_message(self, message):
 754          return 'Taskmaster: %s\n' % message
 755  
 756      def trace_node(self, node):
 757          return '<%-10s %-3s %s>' % (StateString[node.get_state()],
 758                                      node.ref_count,
 759                                      repr(str(node)))
 760  
 761      def _find_next_ready_node(self):
 762          """
 763          Finds the next node that is ready to be built.
 764  
 765          This is *the* main guts of the DAG walk.  We loop through the
 766          list of candidates, looking for something that has no un-built
 767          children (i.e., that is a leaf Node or has dependencies that are
 768          all leaf Nodes or up-to-date).  Candidate Nodes are re-scanned
 769          (both the target Node itself and its sources, which are always
 770          scanned in the context of a given target) to discover implicit
 771          dependencies.  A Node that must wait for some children to be
 772          built will be put back on the candidates list after the children
 773          have finished building.  A Node that has been put back on the
 774          candidates list in this way may have itself (or its sources)
 775          re-scanned, in order to handle generated header files (e.g.) and
 776          the implicit dependencies therein.
 777  
 778          Note that this method does not do any signature calculation or
 779          up-to-date check itself.  All of that is handled by the Task
 780          class.  This is purely concerned with the dependency graph walk.
 781          """
 782  
 783          self.ready_exc = None
 784  
 785          T = self.trace
 786          if T: T.write(SCons.Util.UnicodeType('\n') + self.trace_message('Looking for a node to evaluate'))
 787  
 788          while True:
 789              node = self.next_candidate()
 790              if node is None:
 791                  if T: T.write(self.trace_message('No candidate anymore.') + u'\n')
 792                  return None
 793  
 794              node = node.disambiguate()
 795              state = node.get_state()
 796  
 797              # For debugging only:
 798              #
 799              # try:
 800              #     self._validate_pending_children()
 801              # except:
 802              #     self.ready_exc = sys.exc_info()
 803              #     return node
 804  
 805              if CollectStats:
 806                  if not hasattr(node.attributes, 'stats'):
 807                      node.attributes.stats = Stats()
 808                      StatsNodes.append(node)
 809                  S = node.attributes.stats
 810                  S.considered = S.considered + 1
 811              else:
 812                  S = None
 813  
 814              if T: T.write(self.trace_message(u'    Considering node %s and its children:' % self.trace_node(node)))
 815  
 816              if state == NODE_NO_STATE:
 817                  # Mark this node as being on the execution stack:
 818                  node.set_state(NODE_PENDING)
 819              elif state > NODE_PENDING:
 820                  # Skip this node if it has already been evaluated:
 821                  if S: S.already_handled = S.already_handled + 1
 822                  if T: T.write(self.trace_message(u'       already handled (executed)'))
 823                  continue
 824  
 825              executor = node.get_executor()
 826  
 827              try:
 828                  children = executor.get_all_children()
 829              except SystemExit:
 830                  exc_value = sys.exc_info()[1]
 831                  e = SCons.Errors.ExplicitExit(node, exc_value.code)
 832                  self.ready_exc = (SCons.Errors.ExplicitExit, e)
 833                  if T: T.write(self.trace_message('       SystemExit'))
 834                  return node
 835              except Exception as e:
 836                  # We had a problem just trying to figure out the
 837                  # children (like a child couldn't be linked in to a
 838                  # VariantDir, or a Scanner threw something).  Arrange to
 839                  # raise the exception when the Task is "executed."
 840                  self.ready_exc = sys.exc_info()
 841                  if S: S.problem = S.problem + 1
 842                  if T: T.write(self.trace_message('       exception %s while scanning children.\n' % e))
 843                  return node
 844  
 845              children_not_visited = []
 846              children_pending = set()
 847              children_not_ready = []
 848              children_failed = False
 849  
 850              for child in chain(executor.get_all_prerequisites(), children):
 851                  childstate = child.get_state()
 852  
 853                  if T: T.write(self.trace_message(u'       ' + self.trace_node(child)))
 854  
 855                  if childstate == NODE_NO_STATE:
 856                      children_not_visited.append(child)
 857                  elif childstate == NODE_PENDING:
 858                      children_pending.add(child)
 859                  elif childstate == NODE_FAILED:
 860                      children_failed = True
 861  
 862                  if childstate <= NODE_EXECUTING:
 863                      children_not_ready.append(child)
 864  
 865              # These nodes have not even been visited yet.  Add
 866              # them to the list so that on some next pass we can
 867              # take a stab at evaluating them (or their children).
 868              children_not_visited.reverse()
 869              self.candidates.extend(self.order(children_not_visited))
 870  
 871              # if T and children_not_visited:
 872              #     T.write(self.trace_message('     adding to candidates: %s' % map(str, children_not_visited)))
 873              #     T.write(self.trace_message('     candidates now: %s\n' % map(str, self.candidates)))
 874  
 875              # Skip this node if any of its children have failed.
 876              #
 877              # This catches the case where we're descending a top-level
 878              # target and one of our children failed while trying to be
 879              # built by a *previous* descent of an earlier top-level
 880              # target.
 881              #
 882              # It can also occur if a node is reused in multiple
 883              # targets.  One first descends through one of the targets;
 884              # the next time, the node is reached through the other target.
 885              #
 886              # Note that we can only have failed_children if the
 887              # --keep-going flag was used, because without it the build
 888              # will stop before diving into the other branch.
 889              #
 890              # Note that even if one of the children fails, we still
 891              # added the other children to the list of candidate nodes
 892              # to keep on building (--keep-going).
 893              if children_failed:
 894                  for n in executor.get_action_targets():
 895                      n.set_state(NODE_FAILED)
 896  
 897                  if S: S.child_failed = S.child_failed + 1
 898                  if T: T.write(self.trace_message('****** %s\n' % self.trace_node(node)))
 899                  continue
 900  
 901              if children_not_ready:
 902                  for child in children_not_ready:
 903                      # We're waiting on one or more derived targets
 904                      # that have not yet finished building.
 905                      if S: S.not_built = S.not_built + 1
 906  
 907                      # Add this node to the waiting parents lists of
 908                      # anything we're waiting on, with a reference
 909                      # count so we can be put back on the list for
 910                      # re-evaluation when they've all finished.
 911                      node.ref_count = node.ref_count + child.add_to_waiting_parents(node)
 912                      if T: T.write(self.trace_message(u'     adjusted ref count: %s, child %s' %
 913                                    (self.trace_node(node), repr(str(child)))))
 914  
 915                  if T:
 916                      for pc in children_pending:
 917                          T.write(self.trace_message('       adding %s to the pending children set\n' %
 918                                  self.trace_node(pc)))
 919                  self.pending_children = self.pending_children | children_pending
 920  
 921                  continue
 922  
 923              # Skip this node if it has side-effects that are
 924              # currently being built:
 925              wait_side_effects = False
 926              for se in executor.get_action_side_effects():
 927                  if se.get_state() == NODE_EXECUTING:
 928                      se.add_to_waiting_s_e(node)
 929                      wait_side_effects = True
 930  
 931              if wait_side_effects:
 932                  if S: S.side_effects = S.side_effects + 1
 933                  continue
 934  
 935              # The default when we've gotten through all of the checks above:
 936              # this node is ready to be built.
 937              if S: S.build = S.build + 1
 938              if T: T.write(self.trace_message(u'Evaluating %s\n' %
 939                            self.trace_node(node)))
 940  
 941              # For debugging only:
 942              #
 943              # try:
 944              #     self._validate_pending_children()
 945              # except:
 946              #     self.ready_exc = sys.exc_info()
 947              #     return node
 948  
 949              return node
 950  
 951          return None
 952  
 953      def next_task(self):
 954          """
 955          Returns the next task to be executed.
 956  
 957          This simply asks for the next Node to be evaluated, and then wraps
 958          it in the specific Task subclass with which we were initialized.
 959          """
 960          node = self._find_next_ready_node()
 961  
 962          if node is None:
 963              return None
 964  
 965          executor = node.get_executor()
 966          if executor is None:
 967              return None
 968  
 969          tlist = executor.get_all_targets()
 970  
 971          task = self.tasker(self, tlist, node in self.original_top, node)
 972          try:
 973              task.make_ready()
 974          except Exception as e:
 975              # We had a problem just trying to get this task ready (like
 976              # a child couldn't be linked to a VariantDir when deciding
 977              # whether this node is current).  Arrange to raise the
 978              # exception when the Task is "executed."
 979              self.ready_exc = sys.exc_info()
 980  
 981          if self.ready_exc:
 982              task.exception_set(self.ready_exc)
 983  
 984          self.ready_exc = None
 985  
 986          return task
 987  
 988      def will_not_build(self, nodes, node_func=lambda n: None):
 989          """
 990          Perform clean-up for nodes that will never be built.  Invokes
 991          a user defined function on all of these nodes (including all
 992          of their parents).
 993          """
 994  
 995          T = self.trace
 996  
 997          pending_children = self.pending_children
 998  
 999          to_visit = set(nodes)
1000          pending_children = pending_children - to_visit
1001  
1002          if T:
1003              for n in nodes:
1004                  T.write(self.trace_message('       removing node %s from the pending children set\n' %
1005                          self.trace_node(n)))
1006          try:
1007              while len(to_visit):
1008                  node = to_visit.pop()
1009                  node_func(node)
1010  
1011                  # Prune recursion by flushing the waiting children
1012                  # list immediately.
1013                  parents = node.waiting_parents
1014                  node.waiting_parents = set()
1015  
1016                  to_visit = to_visit | parents
1017                  pending_children = pending_children - parents
1018  
1019                  for p in parents:
1020                      p.ref_count = p.ref_count - 1
1021                      if T: T.write(self.trace_message('       removing parent %s from the pending children set\n' %
1022                                    self.trace_node(p)))
1023          except KeyError:
1024              # The container to_visit has been emptied.
1025              pass
1026  
1027          # We have to stick the pending_children set back into the
1028          # taskmaster because the Python 1.5.2 compatibility does not
1029          # allow us to use in-place updates
1030          self.pending_children = pending_children
1031  
1032      def stop(self):
1033          """
1034          Stops the current build completely.
1035          """
1036          self.next_candidate = self.no_next_candidate
1037  
1038      def cleanup(self):
1039          """
1040          Check for dependency cycles.
1041          """
1042          if not self.pending_children:
1043              return
1044  
1045          nclist = [(n, find_cycle([n], set())) for n in self.pending_children]
1046  
1047          genuine_cycles = [
1048              node for node, cycle in nclist
1049              if cycle or node.get_state() != NODE_EXECUTED
1050          ]
1051          if not genuine_cycles:
1052              # All of the "cycles" found were single nodes in EXECUTED state,
1053              # which is to say, they really weren't cycles.  Just return.
1054              return
1055  
1056          desc = 'Found dependency cycle(s):\n'
1057          for node, cycle in nclist:
1058              if cycle:
1059                  desc = desc + "  " + " -> ".join(map(str, cycle)) + "\n"
1060              else:
1061                  desc = desc + \
1062                      "  Internal Error: no cycle found for node %s (%s) in state %s\n" % \
1063                      (node, repr(node), StateString[node.get_state()])
1064  
1065          raise SCons.Errors.UserError(desc)
1066  
1067  # Local Variables:
1068  # tab-width:4
1069  # indent-tabs-mode:nil
1070  # End:
1071  # vim: set expandtab tabstop=4 shiftwidth=4:
1072  
1066 1067 # Local Variables: 1068 # tab-width:4 1069 # indent-tabs-mode:nil 1070 # End: 1071 # vim: set expandtab tabstop=4 shiftwidth=4: 1072