forked from pranjalbajaj/java7-sourcecode
ForkJoinPool.java
2177 lines (2055 loc) · 84.9 KB
/*
* ORACLE PROPRIETARY/CONFIDENTIAL. Use is subject to license terms.
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*/
/*
*
*
*
*
*
* Written by Doug Lea with assistance from members of JCP JSR-166
* Expert Group and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
*/
package java.util.concurrent;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.AbstractExecutorService;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.RunnableFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.LockSupport;
import java.util.concurrent.locks.ReentrantLock;
import java.util.concurrent.locks.Condition;
/**
* An {@link ExecutorService} for running {@link ForkJoinTask}s.
* A {@code ForkJoinPool} provides the entry point for submissions
* from non-{@code ForkJoinTask} clients, as well as management and
* monitoring operations.
*
* <p>A {@code ForkJoinPool} differs from other kinds of {@link
* ExecutorService} mainly by virtue of employing
* <em>work-stealing</em>: all threads in the pool attempt to find and
* execute subtasks created by other active tasks (eventually blocking
* waiting for work if none exist). This enables efficient processing
* when most tasks spawn other subtasks (as do most {@code
* ForkJoinTask}s). When setting <em>asyncMode</em> to true in
* constructors, {@code ForkJoinPool}s may also be appropriate for use
* with event-style tasks that are never joined.
*
* <p>A {@code ForkJoinPool} is constructed with a given target
* parallelism level; by default, equal to the number of available
* processors. The pool attempts to maintain enough active (or
* available) threads by dynamically adding, suspending, or resuming
* internal worker threads, even if some tasks are stalled waiting to
* join others. However, no such adjustments are guaranteed in the
* face of blocked IO or other unmanaged synchronization. The nested
* {@link ManagedBlocker} interface enables extension of the kinds of
* synchronization accommodated.
*
* <p>In addition to execution and lifecycle control methods, this
* class provides status check methods (for example
* {@link #getStealCount}) that are intended to aid in developing,
* tuning, and monitoring fork/join applications. Also, method
* {@link #toString} returns indications of pool state in a
* convenient form for informal monitoring.
*
* <p> As is the case with other ExecutorServices, there are three
* main task execution methods summarized in the following
* table. These are designed to be used by clients not already engaged
* in fork/join computations in the current pool. The main forms of
* these methods accept instances of {@code ForkJoinTask}, but
* overloaded forms also allow mixed execution of plain {@code
* Runnable}- or {@code Callable}- based activities as well. However,
* tasks that are already executing in a pool should normally
* <em>NOT</em> use these pool execution methods, but instead use the
* within-computation forms listed in the table.
*
* <table BORDER CELLPADDING=3 CELLSPACING=1>
* <tr>
* <td></td>
* <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
* <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
* </tr>
* <tr>
* <td> <b>Arrange async execution</b></td>
* <td> {@link #execute(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#fork}</td>
* </tr>
* <tr>
* <td> <b>Await and obtain result</b></td>
* <td> {@link #invoke(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#invoke}</td>
* </tr>
* <tr>
* <td> <b>Arrange exec and obtain Future</b></td>
* <td> {@link #submit(ForkJoinTask)}</td>
* <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
* </tr>
* </table>
*
* <p><b>Sample Usage.</b> Normally a single {@code ForkJoinPool} is
* used for all parallel task execution in a program or subsystem.
* Otherwise, use would not usually outweigh the construction and
* bookkeeping overhead of creating a large set of threads. For
* example, a common pool could be used for the {@code SortTasks}
* illustrated in {@link RecursiveAction}. Because {@code
* ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon
* daemon} mode, there is typically no need to explicitly {@link
* #shutdown} such a pool upon program exit.
*
* <pre>
* static final ForkJoinPool mainPool = new ForkJoinPool();
* ...
* public void sort(long[] array) {
* mainPool.invoke(new SortTask(array, 0, array.length));
* }
* </pre>
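*
* <p>For illustration only (the {@code FibTask} class below is a
* hypothetical {@link RecursiveTask} subclass, not one defined in the
* JDK), a client thread outside the pool can also submit a task and
* later claim its result through the returned {@code ForkJoinTask},
* which is itself a {@code Future}:
*
* <pre>
* ForkJoinTask&lt;Integer&gt; job = mainPool.submit(new FibTask(30));
* // ... do other work ...
* Integer result = job.join();
* </pre>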
*
* <p><b>Implementation notes</b>: This implementation restricts the
* maximum number of running threads to 32767. Attempts to create
* pools with greater than the maximum number result in
* {@code IllegalArgumentException}.
*
* <p>This implementation rejects submitted tasks (that is, by throwing
* {@link RejectedExecutionException}) only when the pool is shut down
* or internal resources have been exhausted.
*
* @since 1.7
* @author Doug Lea
*/
public class ForkJoinPool extends AbstractExecutorService {
/*
* Implementation Overview
*
* This class provides the central bookkeeping and control for a
* set of worker threads: Submissions from non-FJ threads enter
* into a submission queue. Workers take these tasks and typically
* split them into subtasks that may be stolen by other workers.
* Preference rules give first priority to processing tasks from
* their own queues (LIFO or FIFO, depending on mode), then to
* randomized FIFO steals of tasks in other worker queues, and
* lastly to new submissions.
*
* The main throughput advantages of work-stealing stem from
* decentralized control -- workers mostly take tasks from
* themselves or each other. We cannot negate this in the
* implementation of other management responsibilities. The main
* tactic for avoiding bottlenecks is packing nearly all
* essentially atomic control state into a single 64bit volatile
* variable ("ctl"). This variable is read on the order of 10-100
* times as often as it is modified (always via CAS). (There is
* some additional control state, for example variable "shutdown"
* for which we can cope with uncoordinated updates.) This
* streamlines synchronization and control at the expense of messy
* constructions needed to repack status bits upon updates.
* Updates tend not to contend with each other except during
* bursts while submitted tasks begin or end. In some cases when
* they do contend, threads can instead do something else
* (usually, scan for tasks) until contention subsides.
*
* To enable packing, we restrict maximum parallelism to (1<<15)-1
* (which is far in excess of normal operating range) to allow
* ids, counts, and their negations (used for thresholding) to fit
* into 16bit fields.
*
* Recording Workers. Workers are recorded in the "workers" array
* that is created upon pool construction and expanded if (rarely)
* necessary. This is an array as opposed to some other data
* structure to support index-based random steals by workers.
* Updates to the array recording new workers and unrecording
* terminated ones are protected from each other by a seqLock
* (scanGuard) but the array is otherwise concurrently readable,
* and accessed directly by workers. To simplify index-based
* operations, the array size is always a power of two, and all
* readers must tolerate null slots. To avoid flailing during
* start-up, the array is presized to hold twice #parallelism
* workers (which is unlikely to need further resizing during
* execution). But to avoid dealing with so many null slots,
* variable scanGuard includes a mask for the nearest power of two
* that contains all current workers. All worker thread creation
* is on-demand, triggered by task submissions, replacement of
* terminated workers, and/or compensation for blocked
* workers. However, all other support code is set up to work with
* other policies. To ensure that we do not hold on to worker
* references that would prevent GC, ALL accesses to workers are
* via indices into the workers array (which is one source of some
* of the messy code constructions here). In essence, the workers
* array serves as a weak reference mechanism. Thus for example
* the wait queue field of ctl stores worker indices, not worker
* references. Access to the workers in associated methods (for
* example signalWork) must both index-check and null-check the
* IDs. All such accesses ignore bad IDs by returning out early
* from what they are doing, since this can only be associated
* with termination, in which case it is OK to give up.
*
* All uses of the workers array, as well as queue arrays, check
* that the array is non-null (even if previously non-null). This
* allows nulling during termination, which is currently not
* necessary, but remains an option for resource-revocation-based
* shutdown schemes.
*
* Wait Queuing. Unlike HPC work-stealing frameworks, we cannot
* let workers spin indefinitely scanning for tasks when none can
* be found immediately, and we cannot start/resume workers unless
* there appear to be tasks available. On the other hand, we must
* quickly prod them into action when new tasks are submitted or
* generated. We park/unpark workers after placing in an event
* wait queue when they cannot find work. This "queue" is actually
* a simple Treiber stack, headed by the "id" field of ctl, plus a
* 15bit counter value to both wake up waiters (by advancing their
* count) and avoid ABA effects. Successors are held in worker
* field "nextWait". Queuing deals with several intrinsic races,
* mainly that a task-producing thread can miss seeing (and
* signalling) another thread that gave up looking for work but
* has not yet entered the wait queue. We solve this by requiring
* a full sweep of all workers both before (in scan()) and after
* (in tryAwaitWork()) a newly waiting worker is added to the wait
* queue. During a rescan, the worker might release some other
* queued worker rather than itself, which has the same net
* effect. Because enqueued workers may actually be rescanning
* rather than waiting, we set and clear the "parked" field of
* ForkJoinWorkerThread to reduce unnecessary calls to unpark.
* (Use of the parked field requires a secondary recheck to avoid
* missed signals.)
*
* Signalling. We create or wake up workers only when there
* appears to be at least one task they might be able to find and
* execute. When a submission is added or another worker adds a
* task to a queue that previously had two or fewer tasks, they
* signal waiting workers (or trigger creation of new ones if
* fewer than the given parallelism level -- see signalWork).
* These primary signals are buttressed by signals during rescans
* as well as those performed when a worker steals a task and
* notices that there are more tasks too; together these cover the
* signals needed in cases when more than two tasks are pushed
* but untaken.
*
* Trimming workers. To release resources after periods of lack of
* use, a worker starting to wait when the pool is quiescent will
* time out and terminate if the pool has remained quiescent for
* SHRINK_RATE nanosecs. This will slowly propagate, eventually
* terminating all workers after long periods of non-use.
*
* Submissions. External submissions are maintained in an
* array-based queue that is structured identically to
* ForkJoinWorkerThread queues except for the use of
* submissionLock in method addSubmission. Unlike the case for
* worker queues, multiple external threads can add new
* submissions, so adding requires a lock.
*
* Compensation. Beyond work-stealing support and lifecycle
* control, the main responsibility of this framework is to take
* actions when one worker is waiting to join a task stolen (or
* always held by) another. Because we are multiplexing many
* tasks on to a pool of workers, we can't just let them block (as
* in Thread.join). We also cannot just reassign the joiner's
* run-time stack with another and replace it later, which would
* be a form of "continuation", that even if possible is not
* necessarily a good idea since we sometimes need both an
* unblocked task and its continuation to progress. Instead we
* combine two tactics:
*
* Helping: Arranging for the joiner to execute some task that it
* would be running if the steal had not occurred. Method
* ForkJoinWorkerThread.joinTask tracks joining->stealing
* links to try to find such a task.
*
* Compensating: Unless there are already enough live threads,
* method tryPreBlock() may create or re-activate a spare
* thread to compensate for blocked joiners until they
* unblock.
*
* The ManagedBlocker extension API can't use helping so relies
* only on compensation in method awaitBlocker.
*
* It is impossible to keep exactly the target parallelism number
* of threads running at any given time. Determining the
* existence of conservatively safe helping targets, the
* availability of already-created spares, and the apparent need
* to create new spares are all racy and require heuristic
* guidance, so we rely on multiple retries of each. Currently,
* in keeping with on-demand signalling policy, we compensate only
* if blocking would leave less than one active (non-waiting,
* non-blocked) worker. Additionally, to avoid some false alarms
* due to GC, lagging counters, system activity, etc, compensated
* blocking for joins is only attempted after rechecks stabilize
* (retries are interspersed with Thread.yield, for good
* citizenship). The variable blockedCount, incremented before
* blocking and decremented after, is sometimes needed to
* distinguish cases of waiting for work vs blocking on joins or
* other managed sync. Both cases are equivalent for most pool
* control, so we can update non-atomically. (Additionally,
* contention on blockedCount alleviates some contention on ctl).
*
* Shutdown and Termination. A call to shutdownNow atomically sets
* the ctl stop bit and then (non-atomically) sets each worker's
* "terminate" status, cancels all unprocessed tasks, and wakes up
* all waiting workers. Detecting whether termination should
* commence after a non-abrupt shutdown() call requires more work
* and bookkeeping. We need consensus about quiescence (i.e., that
* there is no more work) which is reflected in active counts so
* long as there are no current blockers, as well as possible
* re-evaluations during independent changes in blocking or
* quiescing workers.
*
* Style notes: There is a lot of representation-level coupling
* among classes ForkJoinPool, ForkJoinWorkerThread, and
* ForkJoinTask. Most fields of ForkJoinWorkerThread maintain
* data structures managed by ForkJoinPool, so are directly
* accessed. Conversely we allow access to "workers" array by
* workers, and direct access to ForkJoinTask.status by both
* ForkJoinPool and ForkJoinWorkerThread. There is little point
* trying to reduce this, since any associated future changes in
* representations will need to be accompanied by algorithmic
* changes anyway. All together, these low-level implementation
* choices produce as much as a factor of 4 performance
* improvement compared to naive implementations, and enable the
* processing of billions of tasks per second, at the expense of
* some ugliness.
*
* Methods signalWork() and scan() are the main bottlenecks so are
* especially heavily micro-optimized/mangled. There are lots of
* inline assignments (of form "while ((local = field) != 0)")
* which are usually the simplest way to ensure the required read
* orderings (which are sometimes critical). This leads to a
* "C"-like style of listing declarations of these locals at the
* heads of methods or blocks. There are several occurrences of
* the unusual "do {} while (!cas...)" which is the simplest way
* to force an update of a CAS'ed variable. There are also other
* coding oddities that help some methods perform reasonably even
* when interpreted (not compiled).
*
* The order of declarations in this file is: (1) declarations of
* statics (2) fields (along with constants used when unpacking
* some of them), listed in an order that tends to reduce
* contention among them a bit under most JVMs. (3) internal
* control methods (4) callbacks and other support for
* ForkJoinTask and ForkJoinWorkerThread classes, (5) exported
* methods (plus a few little helpers). (6) static block
* initializing all statics in a minimally dependent order.
*/
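/*
* Illustrative sketch of the "do {} while (!cas...)" idiom mentioned
* above, written against a hypothetical volatile long field "state"
* with Unsafe offset "stateOffset" (neither is a member of this class):
*
*   long s;
*   do {} while (!UNSAFE.compareAndSwapLong(this, stateOffset,
*                                           s = state, s + 1));
*
* The empty loop body simply re-reads the field and retries the CAS
* until it succeeds, forcing an unconditional atomic update without
* any extra control flow.
*/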
/**
* Factory for creating new {@link ForkJoinWorkerThread}s.
* A {@code ForkJoinWorkerThreadFactory} must be defined and used
* for {@code ForkJoinWorkerThread} subclasses that extend base
* functionality or initialize threads with different contexts.
*/
public static interface ForkJoinWorkerThreadFactory {
/**
* Returns a new worker thread operating in the given pool.
*
* @param pool the pool this thread works in
* @throws NullPointerException if the pool is null
*/
public ForkJoinWorkerThread newThread(ForkJoinPool pool);
}
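/*
* Illustrative sketch of a custom factory (NamedWorkerThread is a
* hypothetical ForkJoinWorkerThread subclass, not defined in the JDK),
* passed to the (parallelism, factory, handler, asyncMode) constructor
* declared later in this file:
*
*   ForkJoinPool pool = new ForkJoinPool(
*       Runtime.getRuntime().availableProcessors(),
*       new ForkJoinWorkerThreadFactory() {
*           public ForkJoinWorkerThread newThread(ForkJoinPool p) {
*               return new NamedWorkerThread(p);
*           }
*       },
*       null,    // no uncaught exception handler
*       false);  // LIFO (non-async) mode
*/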
/**
* Default ForkJoinWorkerThreadFactory implementation; creates a
* new ForkJoinWorkerThread.
*/
static class DefaultForkJoinWorkerThreadFactory
implements ForkJoinWorkerThreadFactory {
public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
return new ForkJoinWorkerThread(pool);
}
}
/**
* Creates a new ForkJoinWorkerThread. This factory is used unless
* overridden in ForkJoinPool constructors.
*/
public static final ForkJoinWorkerThreadFactory
defaultForkJoinWorkerThreadFactory;
/**
* Permission required for callers of methods that may start or
* kill threads.
*/
private static final RuntimePermission modifyThreadPermission;
/**
* If there is a security manager, makes sure caller has
* permission to modify threads.
*/
private static void checkPermission() {
SecurityManager security = System.getSecurityManager();
if (security != null)
security.checkPermission(modifyThreadPermission);
}
/**
* Generator for assigning sequence numbers as pool names.
*/
private static final AtomicInteger poolNumberGenerator;
/**
* Generator for initial random seeds for worker victim
* selection. This is used only to create initial seeds. Random
* steals use a cheaper xorshift generator per steal attempt. We
* don't expect much contention on seedGenerator, so just use a
* plain Random.
*/
static final Random workerSeedGenerator;
/**
* Array holding all worker threads in the pool. Initialized upon
* construction. Array size must be a power of two. Updates and
* replacements are protected by scanGuard, but the array is
* always kept in a consistent enough state to be randomly
* accessed without locking by workers performing work-stealing,
* as well as other traversal-based methods in this class, so long
* as reads memory-acquire by first reading ctl. All readers must
* tolerate that some array slots may be null.
*/
ForkJoinWorkerThread[] workers;
/**
* Initial size for submission queue array. Must be a power of
* two. In many applications, these always stay small so we use a
* small initial cap.
*/
private static final int INITIAL_QUEUE_CAPACITY = 8;
/**
* Maximum size for submission queue array. Must be a power of two
* less than or equal to 1 << (31 - width of array entry) to
* ensure lack of index wraparound, but is capped at a lower
* value to help users trap runaway computations.
*/
private static final int MAXIMUM_QUEUE_CAPACITY = 1 << 24; // 16M
/**
* Array serving as submission queue. Initialized upon construction.
*/
private ForkJoinTask<?>[] submissionQueue;
/**
* Lock protecting submissions array for addSubmission
*/
private final ReentrantLock submissionLock;
/**
* Condition for awaitTermination, using submissionLock for
* convenience.
*/
private final Condition termination;
/**
* Creation factory for worker threads.
*/
private final ForkJoinWorkerThreadFactory factory;
/**
* The uncaught exception handler used when any worker abruptly
* terminates.
*/
final Thread.UncaughtExceptionHandler ueh;
/**
* Prefix for assigning names to worker threads
*/
private final String workerNamePrefix;
/**
* Sum of per-thread steal counts, updated only when threads are
* idle or terminating.
*/
private volatile long stealCount;
/**
* Main pool control -- a long packed with:
* AC: Number of active running workers minus target parallelism (16 bits)
* TC: Number of total workers minus target parallelism (16 bits)
* ST: true if pool is terminating (1 bit)
* EC: the wait count of top waiting thread (15 bits)
* ID: ~poolIndex of top of Treiber stack of waiting threads (16 bits)
*
* When convenient, we can extract the upper 32 bits of counts and
* the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e =
* (int)ctl. The ec field is never accessed alone, but always
* together with id and st. The offsets of counts by the target
* parallelism and the positionings of fields make it possible to
* perform the most common checks via sign tests of fields: When
* ac is negative, there are not enough active workers, when tc is
* negative, there are not enough total workers, when id is
* negative, there is at least one waiting worker, and when e is
* negative, the pool is terminating. To deal with these possibly
* negative fields, we use casts in and out of "short" and/or
* signed shifts to maintain signedness.
*/
volatile long ctl;
// bit positions/shifts for fields
private static final int AC_SHIFT = 48;
private static final int TC_SHIFT = 32;
private static final int ST_SHIFT = 31;
private static final int EC_SHIFT = 16;
// bounds
private static final int MAX_ID = 0x7fff; // max poolIndex
private static final int SMASK = 0xffff; // mask short bits
private static final int SHORT_SIGN = 1 << 15;
private static final int INT_SIGN = 1 << 31;
// masks
private static final long STOP_BIT = 0x0001L << ST_SHIFT;
private static final long AC_MASK = ((long)SMASK) << AC_SHIFT;
private static final long TC_MASK = ((long)SMASK) << TC_SHIFT;
// units for incrementing and decrementing
private static final long TC_UNIT = 1L << TC_SHIFT;
private static final long AC_UNIT = 1L << AC_SHIFT;
// masks and units for dealing with u = (int)(ctl >>> 32)
private static final int UAC_SHIFT = AC_SHIFT - 32;
private static final int UTC_SHIFT = TC_SHIFT - 32;
private static final int UAC_MASK = SMASK << UAC_SHIFT;
private static final int UTC_MASK = SMASK << UTC_SHIFT;
private static final int UAC_UNIT = 1 << UAC_SHIFT;
private static final int UTC_UNIT = 1 << UTC_SHIFT;
// masks and units for dealing with e = (int)ctl
private static final int E_MASK = 0x7fffffff; // no STOP_BIT
private static final int EC_UNIT = 1 << EC_SHIFT;
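/*
* Illustrative decoding sketch (not code used by the implementation):
* given the packing described above, the fields of a snapshot
* "long c = ctl" can be recovered as
*
*   int u  = (int)(c >>> 32);         // upper word: AC and TC
*   int e  = (int)c;                  // lower word: ST, EC, ID
*   int ac = (int)(c >> AC_SHIFT);    // active workers minus parallelism
*   int tc = (short)(c >>> TC_SHIFT); // total workers minus parallelism
*
* so ac < 0 means too few active workers, tc < 0 means too few total
* workers, e < 0 means the pool is terminating, and e > 0 means at
* least one worker is waiting on the Treiber stack.
*/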
/**
* The target parallelism level.
*/
final int parallelism;
/**
* Index (mod submission queue length) of next element to take
* from submission queue. Usage is identical to that for
* per-worker queues -- see ForkJoinWorkerThread internal
* documentation.
*/
volatile int queueBase;
/**
* Index (mod submission queue length) of next element to add
* in submission queue. Usage is identical to that for
* per-worker queues -- see ForkJoinWorkerThread internal
* documentation.
*/
int queueTop;
/**
* True when shutdown() has been called.
*/
volatile boolean shutdown;
/**
* True if using local FIFO, rather than the default LIFO, for local polling.
* Read by, and replicated by, ForkJoinWorkerThreads.
*/
final boolean locallyFifo;
/**
* The number of threads in ForkJoinWorkerThreads.helpQuiescePool.
* When non-zero, suppresses automatic shutdown when active
* counts become zero.
*/
volatile int quiescerCount;
/**
* The number of threads blocked in join.
*/
volatile int blockedCount;
/**
* Counter for worker Thread names (unrelated to their poolIndex)
*/
private volatile int nextWorkerNumber;
/**
* The index for the next created worker. Accessed under scanGuard.
*/
private int nextWorkerIndex;
/**
* SeqLock and index masking for updates to workers array. Locked
* when SG_UNIT is set. Unlocking clears bit by adding
* SG_UNIT. Staleness of read-only operations can be checked by
* comparing scanGuard to its value before the reads. The low 16 bits
* (i.e., ANDing with SMASK) hold the smallest power of two
* covering all worker indices, minus one, and are used to avoid
* dealing with large numbers of null slots when the workers array
* is overallocated.
*/
volatile int scanGuard;
private static final int SG_UNIT = 1 << 16;
/**
* The wakeup interval (in nanoseconds) for a worker waiting for a
* task when the pool is quiescent to instead try to shrink the
* number of workers. The exact value does not matter too
* much. It must be short enough to release resources during
* sustained periods of idleness, but not so short that threads
* are continually re-created.
*/
private static final long SHRINK_RATE =
4L * 1000L * 1000L * 1000L; // 4 seconds
/**
* Top-level loop for worker threads: On each step: if the
* previous step swept through all queues and found no tasks, or
* there are excess threads, then possibly blocks. Otherwise,
* scans for and, if found, executes a task. Returns when pool
* and/or worker terminate.
*
* @param w the worker
*/
final void work(ForkJoinWorkerThread w) {
boolean swept = false; // true on empty scans
long c;
while (!w.terminate && (int)(c = ctl) >= 0) {
int a; // active count
if (!swept && (a = (int)(c >> AC_SHIFT)) <= 0)
swept = scan(w, a);
else if (tryAwaitWork(w, c))
swept = false;
}
}
// Signalling
/**
* Wakes up or creates a worker.
*/
final void signalWork() {
/*
* The while condition is true if: (there are too few total
* workers OR there is at least one waiter) AND (there are too
* few active workers OR the pool is terminating). The value
* of e distinguishes the remaining cases: zero (no waiters)
* for create, negative if terminating (in which case do
* nothing), else release a waiter. The secondary checks for
* release (non-null array etc) can fail if the pool begins
* terminating after the test, and don't impose any added cost
* because JVMs must perform null and bounds checks anyway.
*/
long c; int e, u;
while ((((e = (int)(c = ctl)) | (u = (int)(c >>> 32))) &
(INT_SIGN|SHORT_SIGN)) == (INT_SIGN|SHORT_SIGN) && e >= 0) {
if (e > 0) { // release a waiting worker
int i; ForkJoinWorkerThread w; ForkJoinWorkerThread[] ws;
if ((ws = workers) == null ||
(i = ~e & SMASK) >= ws.length ||
(w = ws[i]) == null)
break;
long nc = (((long)(w.nextWait & E_MASK)) |
((long)(u + UAC_UNIT) << 32));
if (w.eventCount == e &&
UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
w.eventCount = (e + EC_UNIT) & E_MASK;
if (w.parked)
UNSAFE.unpark(w);
break;
}
}
else if (UNSAFE.compareAndSwapLong
(this, ctlOffset, c,
(long)(((u + UTC_UNIT) & UTC_MASK) |
((u + UAC_UNIT) & UAC_MASK)) << 32)) {
addWorker();
break;
}
}
}
/**
* Variant of signalWork to help release waiters on rescans.
* Tries once to release a waiter if active count < 0.
*
* @return false if failed due to contention, else true
*/
private boolean tryReleaseWaiter() {
long c; int e, i; ForkJoinWorkerThread w; ForkJoinWorkerThread[] ws;
if ((e = (int)(c = ctl)) > 0 &&
(int)(c >> AC_SHIFT) < 0 &&
(ws = workers) != null &&
(i = ~e & SMASK) < ws.length &&
(w = ws[i]) != null) {
long nc = ((long)(w.nextWait & E_MASK) |
((c + AC_UNIT) & (AC_MASK|TC_MASK)));
if (w.eventCount != e ||
!UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc))
return false;
w.eventCount = (e + EC_UNIT) & E_MASK;
if (w.parked)
UNSAFE.unpark(w);
}
return true;
}
// Scanning for tasks
/**
* Scans for and, if found, executes one task. Scans start at a
* random index of workers array, and randomly select the first
* (2*#workers)-1 probes, and then, if all empty, resort to 2
* circular sweeps (which are necessary to check quiescence),
* taking a submission only if no stealable tasks were found. The
* steal code inside the loop is a specialized form of
* ForkJoinWorkerThread.deqTask, followed by bookkeeping to support
* helpJoinTask and signal propagation. The code for submission
* queues is almost identical. On each steal, the worker completes
* not only the task, but also all local tasks that this task may
* have generated. On detecting staleness or contention when
* trying to take a task, this method returns without finishing
* sweep, which allows global state rechecks before retry.
*
* @param w the worker
* @param a the number of active workers
* @return true if swept all queues without finding a task
*/
private boolean scan(ForkJoinWorkerThread w, int a) {
int g = scanGuard; // mask 0 avoids useless scans if only one active
int m = (parallelism == 1 - a && blockedCount == 0) ? 0 : g & SMASK;
ForkJoinWorkerThread[] ws = workers;
if (ws == null || ws.length <= m) // staleness check
return false;
for (int r = w.seed, k = r, j = -(m + m); j <= m + m; ++j) {
ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i;
ForkJoinWorkerThread v = ws[k & m];
if (v != null && (b = v.queueBase) != v.queueTop &&
(q = v.queue) != null && (i = (q.length - 1) & b) >= 0) {
long u = (i << ASHIFT) + ABASE;
if ((t = q[i]) != null && v.queueBase == b &&
UNSAFE.compareAndSwapObject(q, u, t, null)) {
int d = (v.queueBase = b + 1) - v.queueTop;
v.stealHint = w.poolIndex;
if (d != 0)
signalWork(); // propagate if nonempty
w.execTask(t);
}
r ^= r << 13; r ^= r >>> 17; w.seed = r ^ (r << 5);
return false; // store next seed
}
else if (j < 0) { // xorshift
r ^= r << 13; r ^= r >>> 17; k = r ^= r << 5;
}
else
++k;
}
if (scanGuard != g) // staleness check
return false;
else { // try to take submission
ForkJoinTask<?> t; ForkJoinTask<?>[] q; int b, i;
if ((b = queueBase) != queueTop &&
(q = submissionQueue) != null &&
(i = (q.length - 1) & b) >= 0) {
long u = (i << ASHIFT) + ABASE;
if ((t = q[i]) != null && queueBase == b &&
UNSAFE.compareAndSwapObject(q, u, t, null)) {
queueBase = b + 1;
w.execTask(t);
}
return false;
}
return true; // all queues empty
}
}
/**
* Tries to enqueue worker w in wait queue and await change in
* worker's eventCount. If the pool is quiescent and there is
* more than one worker, possibly terminates worker upon exit.
* Otherwise, before blocking, rescans queues to avoid missed
* signals. Upon finding work, releases at least one worker
* (which may be the current worker). Rescans restart upon
* detected staleness or failure to release due to
* contention. Note the unusual conventions about Thread.interrupt
* here and elsewhere: Because interrupts are used solely to alert
* threads to check termination, which is checked here anyway, we
* clear status (using Thread.interrupted) before any call to
* park, so that park does not immediately return due to status
* being set via some other unrelated call to interrupt in user
* code.
*
* @param w the calling worker
* @param c the ctl value on entry
* @return true if waited or another thread was released upon enq
*/
private boolean tryAwaitWork(ForkJoinWorkerThread w, long c) {
int v = w.eventCount;
w.nextWait = (int)c; // w's successor record
long nc = (long)(v & E_MASK) | ((c - AC_UNIT) & (AC_MASK|TC_MASK));
if (ctl != c || !UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
long d = ctl; // return true if lost to a deq, to force scan
return (int)d != (int)c && ((d - c) & AC_MASK) >= 0L;
}
for (int sc = w.stealCount; sc != 0;) { // accumulate stealCount
long s = stealCount;
if (UNSAFE.compareAndSwapLong(this, stealCountOffset, s, s + sc))
sc = w.stealCount = 0;
else if (w.eventCount != v)
return true; // update next time
}
if ((!shutdown || !tryTerminate(false)) &&
(int)c != 0 && parallelism + (int)(nc >> AC_SHIFT) == 0 &&
blockedCount == 0 && quiescerCount == 0)
idleAwaitWork(w, nc, c, v); // quiescent
for (boolean rescanned = false;;) {
if (w.eventCount != v)
return true;
if (!rescanned) {
int g = scanGuard, m = g & SMASK;
ForkJoinWorkerThread[] ws = workers;
if (ws != null && m < ws.length) {
rescanned = true;
for (int i = 0; i <= m; ++i) {
ForkJoinWorkerThread u = ws[i];
if (u != null) {
if (u.queueBase != u.queueTop &&
!tryReleaseWaiter())
rescanned = false; // contended
if (w.eventCount != v)
return true;
}
}
}
if (scanGuard != g || // stale
(queueBase != queueTop && !tryReleaseWaiter()))
rescanned = false;
if (!rescanned)
Thread.yield(); // reduce contention
else
Thread.interrupted(); // clear before park
}
else {
w.parked = true; // must recheck
if (w.eventCount != v) {
w.parked = false;
return true;
}
LockSupport.park(this);
rescanned = w.parked = false;
}
}
}
/**
* If inactivating worker w has caused pool to become
* quiescent, check for pool termination, and wait for event
* for up to SHRINK_RATE nanosecs (rescans are unnecessary in
* this case because quiescence reflects consensus about lack
* of work). On timeout, if ctl has not changed, terminate the
* worker. Upon its termination (see deregisterWorker), it may
* wake up another worker to possibly repeat this process.
*
* @param w the calling worker
* @param currentCtl the ctl value after enqueuing w
* @param prevCtl the ctl value if w terminated
* @param v the eventCount w awaits change
*/
private void idleAwaitWork(ForkJoinWorkerThread w, long currentCtl,
long prevCtl, int v) {
if (w.eventCount == v) {
if (shutdown)
tryTerminate(false);
ForkJoinTask.helpExpungeStaleExceptions(); // help clean weak refs
while (ctl == currentCtl) {
long startTime = System.nanoTime();
w.parked = true;
if (w.eventCount == v) // must recheck
LockSupport.parkNanos(this, SHRINK_RATE);
w.parked = false;
if (w.eventCount != v)
break;
else if (System.nanoTime() - startTime <
SHRINK_RATE - (SHRINK_RATE / 10)) // timing slop
Thread.interrupted(); // spurious wakeup
else if (UNSAFE.compareAndSwapLong(this, ctlOffset,
currentCtl, prevCtl)) {
w.terminate = true; // restore previous
w.eventCount = ((int)currentCtl + EC_UNIT) & E_MASK;
break;
}
}
}
}
// Submissions
/**
* Enqueues the given task in the submissionQueue. Same idea as
* ForkJoinWorkerThread.pushTask except for use of submissionLock.
*
* @param t the task
*/
private void addSubmission(ForkJoinTask<?> t) {
final ReentrantLock lock = this.submissionLock;
lock.lock();
try {
ForkJoinTask<?>[] q; int s, m;
if ((q = submissionQueue) != null) { // ignore if queue removed
long u = (((s = queueTop) & (m = q.length-1)) << ASHIFT)+ABASE;
UNSAFE.putOrderedObject(q, u, t);
queueTop = s + 1;
if (s - queueBase == m)
growSubmissionQueue();
}
} finally {
lock.unlock();
}
signalWork();
}
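/*
* For context, a simplified sketch of how an external submission
* reaches this method (the exported submission methods appear later
* in this file): a call such as
*
*   pool.submit(task);   // or execute(task) / invoke(task)
*
* from a thread that is not a worker of this pool enqueues the task
* via addSubmission(task); signalWork() then wakes an idle worker or
* creates a new one, and the task is eventually taken from
* submissionQueue inside scan().
*/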
// (pollSubmission is defined below with exported methods)
/**
* Creates or doubles submissionQueue array.
* Basically identical to ForkJoinWorkerThread version.
*/
private void growSubmissionQueue() {
ForkJoinTask<?>[] oldQ = submissionQueue;
int size = oldQ != null ? oldQ.length << 1 : INITIAL_QUEUE_CAPACITY;
if (size > MAXIMUM_QUEUE_CAPACITY)
throw new RejectedExecutionException("Queue capacity exceeded");
if (size < INITIAL_QUEUE_CAPACITY)
size = INITIAL_QUEUE_CAPACITY;
ForkJoinTask<?>[] q = submissionQueue = new ForkJoinTask<?>[size];
int mask = size - 1;
int top = queueTop;
int oldMask;
if (oldQ != null && (oldMask = oldQ.length - 1) >= 0) {
for (int b = queueBase; b != top; ++b) {
long u = ((b & oldMask) << ASHIFT) + ABASE;
Object x = UNSAFE.getObjectVolatile(oldQ, u);
if (x != null && UNSAFE.compareAndSwapObject(oldQ, u, x, null))
UNSAFE.putObjectVolatile
(q, ((b & mask) << ASHIFT) + ABASE, x);
}
}
}
// Blocking support
/**
* Tries to increment blockedCount, decrement active count
* (sometimes implicitly) and possibly release or create a
* compensating worker in preparation for blocking. Fails
* on contention or termination.
*
* @return true if the caller can block, else should recheck and retry
*/
private boolean tryPreBlock() {
int b = blockedCount;
if (UNSAFE.compareAndSwapInt(this, blockedCountOffset, b, b + 1)) {
int pc = parallelism;
do {
ForkJoinWorkerThread[] ws; ForkJoinWorkerThread w;
int e, ac, tc, rc, i;
long c = ctl;
int u = (int)(c >>> 32);
if ((e = (int)c) < 0) {
// skip -- terminating
}
else if ((ac = (u >> UAC_SHIFT)) <= 0 && e != 0 &&
(ws = workers) != null &&
(i = ~e & SMASK) < ws.length &&
(w = ws[i]) != null) {
long nc = ((long)(w.nextWait & E_MASK) |
(c & (AC_MASK|TC_MASK)));
if (w.eventCount == e &&
UNSAFE.compareAndSwapLong(this, ctlOffset, c, nc)) {
w.eventCount = (e + EC_UNIT) & E_MASK;
if (w.parked)
UNSAFE.unpark(w);
return true; // release an idle worker
}
}