Source file: src/runtime/proc.go
5 package runtime
6
7 import (
8 "internal/abi"
9 "internal/cpu"
10 "internal/goarch"
11 "internal/goos"
12 "internal/runtime/atomic"
13 "internal/runtime/exithook"
14 "internal/runtime/strconv"
15 "internal/runtime/sys"
16 "internal/stringslite"
17 "unsafe"
18 )
19
20
21 var modinfo string
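
// Goroutine scheduler.
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource required to execute Go code. An M must have an
//     associated P to execute Go code, though it can be blocked or in a
//     syscall without one.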
117 var (
118 m0 m
119 g0 g
120 mcache0 *mcache
121 raceprocctx0 uintptr
122 raceFiniLock mutex
123 )
124
125
126
127 var runtime_inittasks []*initTask
128
129
130
131
132
133 var main_init_done chan bool
134
135
136 func main_main()
137
138
139 var mainStarted bool
140
141
142 var runtimeInitTime int64
143
144
145 var initSigmask sigset
146
147
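// The main goroutine.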
148 func main() {
149 mp := getg().m
150
151
152
153 mp.g0.racectx = 0
154
155
156
157
158 if goarch.PtrSize == 8 {
159 maxstacksize = 1000000000
160 } else {
161 maxstacksize = 250000000
162 }
163
164
165
166
167 maxstackceiling = 2 * maxstacksize
168
169
170 mainStarted = true
171
172 if haveSysmon {
173 systemstack(func() {
174 newm(sysmon, nil, -1)
175 })
176 }
177
178
179
180
181
182
183
184 lockOSThread()
185
186 if mp != &m0 {
187 throw("runtime.main not on m0")
188 }
189
190
191
192 runtimeInitTime = nanotime()
193 if runtimeInitTime == 0 {
194 throw("nanotime returning zero")
195 }
196
197 if debug.inittrace != 0 {
198 inittrace.id = getg().goid
199 inittrace.active = true
200 }
201
202 doInit(runtime_inittasks)
203
204
205 needUnlock := true
206 defer func() {
207 if needUnlock {
208 unlockOSThread()
209 }
210 }()
211
212 gcenable()
213 defaultGOMAXPROCSUpdateEnable()
214
215 main_init_done = make(chan bool)
216 if iscgo {
217 if _cgo_pthread_key_created == nil {
218 throw("_cgo_pthread_key_created missing")
219 }
220
221 if _cgo_thread_start == nil {
222 throw("_cgo_thread_start missing")
223 }
224 if GOOS != "windows" {
225 if _cgo_setenv == nil {
226 throw("_cgo_setenv missing")
227 }
228 if _cgo_unsetenv == nil {
229 throw("_cgo_unsetenv missing")
230 }
231 }
232 if _cgo_notify_runtime_init_done == nil {
233 throw("_cgo_notify_runtime_init_done missing")
234 }
235
236
237 if set_crosscall2 == nil {
238 throw("set_crosscall2 missing")
239 }
240 set_crosscall2()
241
242
243
244 startTemplateThread()
245 cgocall(_cgo_notify_runtime_init_done, nil)
246 }
247
248
249
250
251
252
253
254
255 for m := &firstmoduledata; m != nil; m = m.next {
256 doInit(m.inittasks)
257 }
258
259
260
261 inittrace.active = false
262
263 close(main_init_done)
264
265 needUnlock = false
266 unlockOSThread()
267
268 if isarchive || islibrary {
269
270
271 if GOARCH == "wasm" {
272
273
274
275
276
277
278
279 pause(sys.GetCallerSP() - 16)
280 panic("unreachable")
281 }
282 return
283 }
284 fn := main_main
285 fn()
286
287 exitHooksRun := false
288 if raceenabled {
289 runExitHooks(0)
290 exitHooksRun = true
291 racefini()
292 }
293
294
295
296
297
298
299
300
301 if asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
302 runExitHooks(0)
303 exitHooksRun = true
304 lsandoleakcheck()
305 }
306
307
308
309
310
311 if runningPanicDefers.Load() != 0 {
312
313 for c := 0; c < 1000; c++ {
314 if runningPanicDefers.Load() == 0 {
315 break
316 }
317 Gosched()
318 }
319 }
320 if panicking.Load() != 0 {
321 gopark(nil, nil, waitReasonPanicWait, traceBlockForever, 1)
322 }
323 if !exitHooksRun {
324 runExitHooks(0)
325 }
326
327 exit(0)
328 for {
329 var x *int32
330 *x = 0
331 }
332 }
333
334
335
336
337 func os_beforeExit(exitCode int) {
338 runExitHooks(exitCode)
339 if exitCode == 0 && raceenabled {
340 racefini()
341 }
342
343
344 if exitCode == 0 && asanenabled && (isarchive || islibrary || NumCgoCall() > 1) {
345 lsandoleakcheck()
346 }
347 }
348
349 func init() {
350 exithook.Gosched = Gosched
351 exithook.Goid = func() uint64 { return getg().goid }
352 exithook.Throw = throw
353 }
354
355 func runExitHooks(code int) {
356 exithook.Run(code)
357 }
358
359
360 func init() {
361 go forcegchelper()
362 }
363
364 func forcegchelper() {
365 forcegc.g = getg()
366 lockInit(&forcegc.lock, lockRankForcegc)
367 for {
368 lock(&forcegc.lock)
369 if forcegc.idle.Load() {
370 throw("forcegc: phase error")
371 }
372 forcegc.idle.Store(true)
373 goparkunlock(&forcegc.lock, waitReasonForceGCIdle, traceBlockSystemGoroutine, 1)
374
375 if debug.gctrace > 0 {
376 println("GC forced")
377 }
378
379 gcStart(gcTrigger{kind: gcTriggerTime, now: nanotime()})
380 }
381 }
382
383
384
385
386
387 func Gosched() {
388 checkTimeouts()
389 mcall(gosched_m)
390 }
391
392
393
394
395
396 func goschedguarded() {
397 mcall(goschedguarded_m)
398 }
399
400
401
402
403
404
405 func goschedIfBusy() {
406 gp := getg()
407
408
409 if !gp.preempt && sched.npidle.Load() > 0 {
410 return
411 }
412 mcall(gosched_m)
413 }
414
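// gopark puts the current goroutine into a waiting state and calls unlockf on
// the system stack. If unlockf returns false, the goroutine is resumed.
// unlockf must not access this G's stack, as it may be moved between the call
// to gopark and the call to unlockf. reason explains why the goroutine has
// been parked and is shown in stack traces and heap dumps.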
443 func gopark(unlockf func(*g, unsafe.Pointer) bool, lock unsafe.Pointer, reason waitReason, traceReason traceBlockReason, traceskip int) {
444 if reason != waitReasonSleep {
445 checkTimeouts()
446 }
447 mp := acquirem()
448 gp := mp.curg
449 status := readgstatus(gp)
450 if status != _Grunning && status != _Gscanrunning {
451 throw("gopark: bad g status")
452 }
453 mp.waitlock = lock
454 mp.waitunlockf = unlockf
455 gp.waitreason = reason
456 mp.waitTraceBlockReason = traceReason
457 mp.waitTraceSkip = traceskip
458 releasem(mp)
459
460 mcall(park_m)
461 }
462
463
464
465 func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) {
466 gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip)
467 }
468
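// goready marks gp runnable; it is the counterpart to gopark. The work is done
// on the system stack.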
479 func goready(gp *g, traceskip int) {
480 systemstack(func() {
481 ready(gp, traceskip, true)
482 })
483 }
484
485
486 func acquireSudog() *sudog {
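// Delicate dance: the semaphore implementation calls acquireSudog,
// acquireSudog calls new(sudog), new calls malloc, malloc can call the
// garbage collector, and the garbage collector calls the semaphore
// implementation in stopTheWorld. Break the cycle by doing acquirem/releasem
// around new(sudog), which keeps the garbage collector from being invoked.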
495 mp := acquirem()
496 pp := mp.p.ptr()
497 if len(pp.sudogcache) == 0 {
498 lock(&sched.sudoglock)
499
500 for len(pp.sudogcache) < cap(pp.sudogcache)/2 && sched.sudogcache != nil {
501 s := sched.sudogcache
502 sched.sudogcache = s.next
503 s.next = nil
504 pp.sudogcache = append(pp.sudogcache, s)
505 }
506 unlock(&sched.sudoglock)
507
508 if len(pp.sudogcache) == 0 {
509 pp.sudogcache = append(pp.sudogcache, new(sudog))
510 }
511 }
512 n := len(pp.sudogcache)
513 s := pp.sudogcache[n-1]
514 pp.sudogcache[n-1] = nil
515 pp.sudogcache = pp.sudogcache[:n-1]
516 if s.elem != nil {
517 throw("acquireSudog: found s.elem != nil in cache")
518 }
519 releasem(mp)
520 return s
521 }
522
523
524 func releaseSudog(s *sudog) {
525 if s.elem != nil {
526 throw("runtime: sudog with non-nil elem")
527 }
528 if s.isSelect {
529 throw("runtime: sudog with non-false isSelect")
530 }
531 if s.next != nil {
532 throw("runtime: sudog with non-nil next")
533 }
534 if s.prev != nil {
535 throw("runtime: sudog with non-nil prev")
536 }
537 if s.waitlink != nil {
538 throw("runtime: sudog with non-nil waitlink")
539 }
540 if s.c != nil {
541 throw("runtime: sudog with non-nil c")
542 }
543 gp := getg()
544 if gp.param != nil {
545 throw("runtime: releaseSudog with non-nil gp.param")
546 }
547 mp := acquirem()
548 pp := mp.p.ptr()
549 if len(pp.sudogcache) == cap(pp.sudogcache) {
550
551 var first, last *sudog
552 for len(pp.sudogcache) > cap(pp.sudogcache)/2 {
553 n := len(pp.sudogcache)
554 p := pp.sudogcache[n-1]
555 pp.sudogcache[n-1] = nil
556 pp.sudogcache = pp.sudogcache[:n-1]
557 if first == nil {
558 first = p
559 } else {
560 last.next = p
561 }
562 last = p
563 }
564 lock(&sched.sudoglock)
565 last.next = sched.sudogcache
566 sched.sudogcache = first
567 unlock(&sched.sudoglock)
568 }
569 pp.sudogcache = append(pp.sudogcache, s)
570 releasem(mp)
571 }
572
573
574 func badmcall(fn func(*g)) {
575 throw("runtime: mcall called on m->g0 stack")
576 }
577
578 func badmcall2(fn func(*g)) {
579 throw("runtime: mcall function returned")
580 }
581
582 func badreflectcall() {
583 panic(plainError("arg size to reflect.call more than 1GB"))
584 }
585
586
587
588 func badmorestackg0() {
589 if !crashStackImplemented {
590 writeErrStr("fatal: morestack on g0\n")
591 return
592 }
593
594 g := getg()
595 switchToCrashStack(func() {
596 print("runtime: morestack on g0, stack [", hex(g.stack.lo), " ", hex(g.stack.hi), "], sp=", hex(g.sched.sp), ", called from\n")
597 g.m.traceback = 2
598 traceback1(g.sched.pc, g.sched.sp, g.sched.lr, g, 0)
599 print("\n")
600
601 throw("morestack on g0")
602 })
603 }
604
605
606
607 func badmorestackgsignal() {
608 writeErrStr("fatal: morestack on gsignal\n")
609 }
610
611
612 func badctxt() {
613 throw("ctxt != 0")
614 }
615
616
617
618 var gcrash g
619
620 var crashingG atomic.Pointer[g]
621
622
623
624
625
626
627
628
629
630 func switchToCrashStack(fn func()) {
631 me := getg()
632 if crashingG.CompareAndSwapNoWB(nil, me) {
633 switchToCrashStack0(fn)
634 abort()
635 }
636 if crashingG.Load() == me {
637
638 writeErrStr("fatal: recursive switchToCrashStack\n")
639 abort()
640 }
641
642 usleep_no_g(100)
643 writeErrStr("fatal: concurrent switchToCrashStack\n")
644 abort()
645 }
646
647
648
649
650 const crashStackImplemented = GOOS != "windows"
651
652
653 func switchToCrashStack0(fn func())
654
655 func lockedOSThread() bool {
656 gp := getg()
657 return gp.lockedm != 0 && gp.m.lockedg != 0
658 }
659
660 var (
661
662
663
664
665
666
667 allglock mutex
668 allgs []*g
669
670
671
672
673
674
675
676
677
678
679
680
681
682 allglen uintptr
683 allgptr **g
684 )
685
686 func allgadd(gp *g) {
687 if readgstatus(gp) == _Gidle {
688 throw("allgadd: bad status Gidle")
689 }
690
691 lock(&allglock)
692 allgs = append(allgs, gp)
693 if &allgs[0] != allgptr {
694 atomicstorep(unsafe.Pointer(&allgptr), unsafe.Pointer(&allgs[0]))
695 }
696 atomic.Storeuintptr(&allglen, uintptr(len(allgs)))
697 unlock(&allglock)
698 }
699
700
701
702
703 func allGsSnapshot() []*g {
704 assertWorldStoppedOrLockHeld(&allglock)
705
706
707
708
709
710
711 return allgs[:len(allgs):len(allgs)]
712 }
713
714
715 func atomicAllG() (**g, uintptr) {
716 length := atomic.Loaduintptr(&allglen)
717 ptr := (**g)(atomic.Loadp(unsafe.Pointer(&allgptr)))
718 return ptr, length
719 }
720
721
722 func atomicAllGIndex(ptr **g, i uintptr) *g {
723 return *(**g)(add(unsafe.Pointer(ptr), i*goarch.PtrSize))
724 }
725
726
727
728
729 func forEachG(fn func(gp *g)) {
730 lock(&allglock)
731 for _, gp := range allgs {
732 fn(gp)
733 }
734 unlock(&allglock)
735 }
736
737
738
739
740
741 func forEachGRace(fn func(gp *g)) {
742 ptr, length := atomicAllG()
743 for i := uintptr(0); i < length; i++ {
744 gp := atomicAllGIndex(ptr, i)
745 fn(gp)
746 }
747 return
748 }
749
750 const (
751
752
753 _GoidCacheBatch = 16
754 )
755
756
757
758 func cpuinit(env string) {
759 switch GOOS {
760 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
761 cpu.DebugOptions = true
762 }
763 cpu.Initialize(env)
764
765
766
767 switch GOARCH {
768 case "386", "amd64":
769 x86HasPOPCNT = cpu.X86.HasPOPCNT
770 x86HasSSE41 = cpu.X86.HasSSE41
771 x86HasFMA = cpu.X86.HasFMA
772
773 case "arm":
774 armHasVFPv4 = cpu.ARM.HasVFPv4
775
776 case "arm64":
777 arm64HasATOMICS = cpu.ARM64.HasATOMICS
778
779 case "loong64":
780 loong64HasLAMCAS = cpu.Loong64.HasLAMCAS
781 loong64HasLAM_BH = cpu.Loong64.HasLAM_BH
782 loong64HasLSX = cpu.Loong64.HasLSX
783
784 case "riscv64":
785 riscv64HasZbb = cpu.RISCV64.HasZbb
786 }
787 }
788
789
790
791
792 func getGodebugEarly() string {
793 const prefix = "GODEBUG="
794 var env string
795 switch GOOS {
796 case "aix", "darwin", "ios", "dragonfly", "freebsd", "netbsd", "openbsd", "illumos", "solaris", "linux":
797
798
799
800 n := int32(0)
801 for argv_index(argv, argc+1+n) != nil {
802 n++
803 }
804
805 for i := int32(0); i < n; i++ {
806 p := argv_index(argv, argc+1+i)
807 s := unsafe.String(p, findnull(p))
808
809 if stringslite.HasPrefix(s, prefix) {
810 env = gostring(p)[len(prefix):]
811 break
812 }
813 }
814 }
815 return env
816 }
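
// The bootstrap sequence is:
//
//	call osinit
//	call schedinit
//	make & queue new G
//	call runtime·mstart
//
// The new G calls runtime·main.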
826 func schedinit() {
827 lockInit(&sched.lock, lockRankSched)
828 lockInit(&sched.sysmonlock, lockRankSysmon)
829 lockInit(&sched.deferlock, lockRankDefer)
830 lockInit(&sched.sudoglock, lockRankSudog)
831 lockInit(&deadlock, lockRankDeadlock)
832 lockInit(&paniclk, lockRankPanic)
833 lockInit(&allglock, lockRankAllg)
834 lockInit(&allpLock, lockRankAllp)
835 lockInit(&reflectOffs.lock, lockRankReflectOffs)
836 lockInit(&finlock, lockRankFin)
837 lockInit(&cpuprof.lock, lockRankCpuprof)
838 lockInit(&computeMaxProcsLock, lockRankComputeMaxProcs)
839 allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW)
840 execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW)
841 traceLockInit()
842
843
844
845 lockInit(&memstats.heapStats.noPLock, lockRankLeafRank)
846
847 lockVerifyMSize()
848
849
850
851 gp := getg()
852 if raceenabled {
853 gp.racectx, raceprocctx0 = raceinit()
854 }
855
856 sched.maxmcount = 10000
857 crashFD.Store(^uintptr(0))
858
859
860 worldStopped()
861
862 ticks.init()
863 moduledataverify()
864 stackinit()
865 randinit()
866 mallocinit()
867 godebug := getGodebugEarly()
868 cpuinit(godebug)
869 alginit()
870 mcommoninit(gp.m, -1)
871 modulesinit()
872 typelinksinit()
873 itabsinit()
874 stkobjinit()
875
876 sigsave(&gp.m.sigmask)
877 initSigmask = gp.m.sigmask
878
879 goargs()
880 goenvs()
881 secure()
882 checkfds()
883 parsedebugvars()
884 gcinit()
885
886
887
888 gcrash.stack = stackalloc(16384)
889 gcrash.stackguard0 = gcrash.stack.lo + 1000
890 gcrash.stackguard1 = gcrash.stack.lo + 1000
891
892
893
894
895
896 if disableMemoryProfiling {
897 MemProfileRate = 0
898 }
899
900
901 mProfStackInit(gp.m)
902 defaultGOMAXPROCSInit()
903
904 lock(&sched.lock)
905 sched.lastpoll.Store(nanotime())
906 var procs int32
907 if n, ok := strconv.Atoi32(gogetenv("GOMAXPROCS")); ok && n > 0 {
908 procs = n
909 sched.customGOMAXPROCS = true
910 } else {
911
912
913
914
915
916
917
918
919 procs = defaultGOMAXPROCS(numCPUStartup)
920 }
921 if procresize(procs) != nil {
922 throw("unknown runnable goroutine during bootstrap")
923 }
924 unlock(&sched.lock)
925
926
927 worldStarted()
928
929 if buildVersion == "" {
930
931
932 buildVersion = "unknown"
933 }
934 if len(modinfo) == 1 {
935
936
937 modinfo = ""
938 }
939 }
940
941 func dumpgstatus(gp *g) {
942 thisg := getg()
943 print("runtime: gp: gp=", gp, ", goid=", gp.goid, ", gp->atomicstatus=", readgstatus(gp), "\n")
944 print("runtime: getg: g=", thisg, ", goid=", thisg.goid, ", g->atomicstatus=", readgstatus(thisg), "\n")
945 }
946
947
948 func checkmcount() {
949 assertLockHeld(&sched.lock)
950
951
952
953
954
955
956
957
958
959 count := mcount() - int32(extraMInUse.Load()) - int32(extraMLength.Load())
960 if count > sched.maxmcount {
961 print("runtime: program exceeds ", sched.maxmcount, "-thread limit\n")
962 throw("thread exhaustion")
963 }
964 }
965
966
967
968
969
970 func mReserveID() int64 {
971 assertLockHeld(&sched.lock)
972
973 if sched.mnext+1 < sched.mnext {
974 throw("runtime: thread ID overflow")
975 }
976 id := sched.mnext
977 sched.mnext++
978 checkmcount()
979 return id
980 }
981
982
983 func mcommoninit(mp *m, id int64) {
984 gp := getg()
985
986
987 if gp != gp.m.g0 {
988 callers(1, mp.createstack[:])
989 }
990
991 lock(&sched.lock)
992
993 if id >= 0 {
994 mp.id = id
995 } else {
996 mp.id = mReserveID()
997 }
998
999 mrandinit(mp)
1000
1001 mpreinit(mp)
1002 if mp.gsignal != nil {
1003 mp.gsignal.stackguard1 = mp.gsignal.stack.lo + stackGuard
1004 }
1005
1006
1007
1008 mp.alllink = allm
1009
1010
1011
1012 atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp))
1013 unlock(&sched.lock)
1014
1015
1016 if iscgo || GOOS == "solaris" || GOOS == "illumos" || GOOS == "windows" {
1017 mp.cgoCallers = new(cgoCallers)
1018 }
1019 mProfStackInit(mp)
1020 }
1021
1022
1023
1024
1025
1026 func mProfStackInit(mp *m) {
1027 if debug.profstackdepth == 0 {
1028
1029
1030 return
1031 }
1032 mp.profStack = makeProfStackFP()
1033 mp.mLockProfile.stack = makeProfStackFP()
1034 }
1035
1036
1037
1038
1039 func makeProfStackFP() []uintptr {
1040
1041
1042
1043
1044
1045
1046 return make([]uintptr, 1+maxSkip+debug.profstackdepth)
1047 }
1048
1049
1050
1051 func makeProfStack() []uintptr { return make([]uintptr, debug.profstackdepth) }
1052
1053
1054 func pprof_makeProfStack() []uintptr { return makeProfStack() }
1055
1056 func (mp *m) becomeSpinning() {
1057 mp.spinning = true
1058 sched.nmspinning.Add(1)
1059 sched.needspinning.Store(0)
1060 }
1061
1062
1063
1064
1065
1066
1067
1068
1069 func (mp *m) snapshotAllp() []*p {
1070 mp.allpSnapshot = allp
1071 return mp.allpSnapshot
1072 }
1073
1074
1075
1076
1077
1078
1079
1080 func (mp *m) clearAllpSnapshot() {
1081 mp.allpSnapshot = nil
1082 }
1083
1084 func (mp *m) hasCgoOnStack() bool {
1085 return mp.ncgo > 0 || mp.isextra
1086 }
1087
1088 const (
1089
1090
1091 osHasLowResTimer = GOOS == "windows" || GOOS == "openbsd" || GOOS == "netbsd"
1092
1093
1094
1095 osHasLowResClockInt = goos.IsWindows
1096
1097
1098
1099 osHasLowResClock = osHasLowResClockInt > 0
1100 )
1101
1102
1103 func ready(gp *g, traceskip int, next bool) {
1104 status := readgstatus(gp)
1105
1106
1107 mp := acquirem()
1108 if status&^_Gscan != _Gwaiting {
1109 dumpgstatus(gp)
1110 throw("bad g->status in ready")
1111 }
1112
1113
1114 trace := traceAcquire()
1115 casgstatus(gp, _Gwaiting, _Grunnable)
1116 if trace.ok() {
1117 trace.GoUnpark(gp, traceskip)
1118 traceRelease(trace)
1119 }
1120 runqput(mp.p.ptr(), gp, next)
1121 wakep()
1122 releasem(mp)
1123 }
1124
1125
1126
1127 const freezeStopWait = 0x7fffffff
1128
1129
1130
1131 var freezing atomic.Bool
1132
1133
1134
1135
1136 func freezetheworld() {
1137 freezing.Store(true)
1138 if debug.dontfreezetheworld > 0 {
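// GODEBUG=dontfreezetheworld=1: skip the freeze. Sleep briefly and return
// without preempting anything, which keeps more goroutine state intact for
// debuggers at the cost of a consistent stop.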
1163 usleep(1000)
1164 return
1165 }
1166
1167
1168
1169
1170 for i := 0; i < 5; i++ {
1171
1172 sched.stopwait = freezeStopWait
1173 sched.gcwaiting.Store(true)
1174
1175 if !preemptall() {
1176 break
1177 }
1178 usleep(1000)
1179 }
1180
1181 usleep(1000)
1182 preemptall()
1183 usleep(1000)
1184 }
1185
1186
1187
1188
1189
1190 func readgstatus(gp *g) uint32 {
1191 return gp.atomicstatus.Load()
1192 }
1193
1194
1195
1196
1197
1198 func casfrom_Gscanstatus(gp *g, oldval, newval uint32) {
1199 success := false
1200
1201
1202 switch oldval {
1203 default:
1204 print("runtime: casfrom_Gscanstatus bad oldval gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1205 dumpgstatus(gp)
1206 throw("casfrom_Gscanstatus:top gp->status is not in scan state")
1207 case _Gscanrunnable,
1208 _Gscanwaiting,
1209 _Gscanrunning,
1210 _Gscansyscall,
1211 _Gscanpreempted:
1212 if newval == oldval&^_Gscan {
1213 success = gp.atomicstatus.CompareAndSwap(oldval, newval)
1214 }
1215 }
1216 if !success {
1217 print("runtime: casfrom_Gscanstatus failed gp=", gp, ", oldval=", hex(oldval), ", newval=", hex(newval), "\n")
1218 dumpgstatus(gp)
1219 throw("casfrom_Gscanstatus: gp->status is not in scan state")
1220 }
1221 releaseLockRankAndM(lockRankGscan)
1222 }
1223
1224
1225
1226 func castogscanstatus(gp *g, oldval, newval uint32) bool {
1227 switch oldval {
1228 case _Grunnable,
1229 _Grunning,
1230 _Gwaiting,
1231 _Gsyscall:
1232 if newval == oldval|_Gscan {
1233 r := gp.atomicstatus.CompareAndSwap(oldval, newval)
1234 if r {
1235 acquireLockRankAndM(lockRankGscan)
1236 }
1237 return r
1238
1239 }
1240 }
1241 print("runtime: castogscanstatus oldval=", hex(oldval), " newval=", hex(newval), "\n")
1242 throw("castogscanstatus")
1243 panic("not reached")
1244 }
1245
1246
1247
1248 var casgstatusAlwaysTrack = false
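
// casgstatus atomically changes gp's status from oldval to newval. If asked to
// move to or from a Gscan status it throws; use castogscanstatus and
// casfrom_Gscanstatus for that. casgstatus loops while gp's status is held in
// a Gscan state until the transition can be performed.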
1256 func casgstatus(gp *g, oldval, newval uint32) {
1257 if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval {
1258 systemstack(func() {
1259
1260
1261 print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n")
1262 throw("casgstatus: bad incoming values")
1263 })
1264 }
1265
1266 lockWithRankMayAcquire(nil, lockRankGscan)
1267
1268
1269 const yieldDelay = 5 * 1000
1270 var nextYield int64
1271
1272
1273
1274 for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {
1275 if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable {
1276 systemstack(func() {
1277
1278
1279 throw("casgstatus: waiting for Gwaiting but is Grunnable")
1280 })
1281 }
1282 if i == 0 {
1283 nextYield = nanotime() + yieldDelay
1284 }
1285 if nanotime() < nextYield {
1286 for x := 0; x < 10 && gp.atomicstatus.Load() != oldval; x++ {
1287 procyield(1)
1288 }
1289 } else {
1290 osyield()
1291 nextYield = nanotime() + yieldDelay/2
1292 }
1293 }
1294
1295 if gp.bubble != nil {
1296 systemstack(func() {
1297 gp.bubble.changegstatus(gp, oldval, newval)
1298 })
1299 }
1300
1301 if oldval == _Grunning {
1302
1303 if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 {
1304 gp.tracking = true
1305 }
1306 gp.trackingSeq++
1307 }
1308 if !gp.tracking {
1309 return
1310 }
1311
1312
1313
1314
1315
1316
1317 switch oldval {
1318 case _Grunnable:
1319
1320
1321
1322 now := nanotime()
1323 gp.runnableTime += now - gp.trackingStamp
1324 gp.trackingStamp = 0
1325 case _Gwaiting:
1326 if !gp.waitreason.isMutexWait() {
1327
1328 break
1329 }
1330
1331
1332
1333
1334
1335 now := nanotime()
1336 sched.totalMutexWaitTime.Add((now - gp.trackingStamp) * gTrackingPeriod)
1337 gp.trackingStamp = 0
1338 }
1339 switch newval {
1340 case _Gwaiting:
1341 if !gp.waitreason.isMutexWait() {
1342
1343 break
1344 }
1345
1346 now := nanotime()
1347 gp.trackingStamp = now
1348 case _Grunnable:
1349
1350
1351 now := nanotime()
1352 gp.trackingStamp = now
1353 case _Grunning:
1354
1355
1356
1357 gp.tracking = false
1358 sched.timeToRun.record(gp.runnableTime)
1359 gp.runnableTime = 0
1360 }
1361 }
1362
1363
1364
1365
1366 func casGToWaiting(gp *g, old uint32, reason waitReason) {
1367
1368 gp.waitreason = reason
1369 casgstatus(gp, old, _Gwaiting)
1370 }
1371
1372
1373
1374
1375
1376
1377
1378
1379 func casGToWaitingForSuspendG(gp *g, old uint32, reason waitReason) {
1380 if !reason.isWaitingForSuspendG() {
1381 throw("casGToWaitingForSuspendG with non-isWaitingForSuspendG wait reason")
1382 }
1383 casGToWaiting(gp, old, reason)
1384 }
1385
1386
1387
1388
1389
1390 func casGToPreemptScan(gp *g, old, new uint32) {
1391 if old != _Grunning || new != _Gscan|_Gpreempted {
1392 throw("bad g transition")
1393 }
1394 acquireLockRankAndM(lockRankGscan)
1395 for !gp.atomicstatus.CompareAndSwap(_Grunning, _Gscan|_Gpreempted) {
1396 }
1397
1398
1399
1400
1401
1402
1403 }
1404
1405
1406
1407
1408 func casGFromPreempted(gp *g, old, new uint32) bool {
1409 if old != _Gpreempted || new != _Gwaiting {
1410 throw("bad g transition")
1411 }
1412 gp.waitreason = waitReasonPreempted
1413 if !gp.atomicstatus.CompareAndSwap(_Gpreempted, _Gwaiting) {
1414 return false
1415 }
1416 if bubble := gp.bubble; bubble != nil {
1417 bubble.changegstatus(gp, _Gpreempted, _Gwaiting)
1418 }
1419 return true
1420 }
1421
1422
1423 type stwReason uint8
1424
1425
1426
1427
1428 const (
1429 stwUnknown stwReason = iota
1430 stwGCMarkTerm
1431 stwGCSweepTerm
1432 stwWriteHeapDump
1433 stwGoroutineProfile
1434 stwGoroutineProfileCleanup
1435 stwAllGoroutinesStack
1436 stwReadMemStats
1437 stwAllThreadsSyscall
1438 stwGOMAXPROCS
1439 stwStartTrace
1440 stwStopTrace
1441 stwForTestCountPagesInUse
1442 stwForTestReadMetricsSlow
1443 stwForTestReadMemStatsSlow
1444 stwForTestPageCachePagesLeaked
1445 stwForTestResetDebugLog
1446 )
1447
1448 func (r stwReason) String() string {
1449 return stwReasonStrings[r]
1450 }
1451
1452 func (r stwReason) isGC() bool {
1453 return r == stwGCMarkTerm || r == stwGCSweepTerm
1454 }
1455
1456
1457
1458
1459 var stwReasonStrings = [...]string{
1460 stwUnknown: "unknown",
1461 stwGCMarkTerm: "GC mark termination",
1462 stwGCSweepTerm: "GC sweep termination",
1463 stwWriteHeapDump: "write heap dump",
1464 stwGoroutineProfile: "goroutine profile",
1465 stwGoroutineProfileCleanup: "goroutine profile cleanup",
1466 stwAllGoroutinesStack: "all goroutines stack trace",
1467 stwReadMemStats: "read mem stats",
1468 stwAllThreadsSyscall: "AllThreadsSyscall",
1469 stwGOMAXPROCS: "GOMAXPROCS",
1470 stwStartTrace: "start trace",
1471 stwStopTrace: "stop trace",
1472 stwForTestCountPagesInUse: "CountPagesInUse (test)",
1473 stwForTestReadMetricsSlow: "ReadMetricsSlow (test)",
1474 stwForTestReadMemStatsSlow: "ReadMemStatsSlow (test)",
1475 stwForTestPageCachePagesLeaked: "PageCachePagesLeaked (test)",
1476 stwForTestResetDebugLog: "ResetDebugLog (test)",
1477 }
1478
1479
1480
1481 type worldStop struct {
1482 reason stwReason
1483 startedStopping int64
1484 finishedStopping int64
1485 stoppingCPUTime int64
1486 }
1487
1488
1489
1490
1491 var stopTheWorldContext worldStop
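
// stopTheWorld stops all P's from executing goroutines, interrupting all
// goroutines at GC safe points, and records reason as the reason for the stop.
// On return, only the current goroutine's user code is running. The caller
// must pass the returned worldStop to startTheWorld to let goroutines run again.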
1510 func stopTheWorld(reason stwReason) worldStop {
1511 semacquire(&worldsema)
1512 gp := getg()
1513 gp.m.preemptoff = reason.String()
1514 systemstack(func() {
1515 stopTheWorldContext = stopTheWorldWithSema(reason)
1516 })
1517 return stopTheWorldContext
1518 }
1519
1520
1521
1522
1523 func startTheWorld(w worldStop) {
1524 systemstack(func() { startTheWorldWithSema(0, w) })
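
// Release worldsema with direct handoff to the next waiter, but acquirem
// first so that semrelease1 does not try to yield this M's time slice while
// it still holds worldsema.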
1541 mp := acquirem()
1542 mp.preemptoff = ""
1543 semrelease1(&worldsema, true, 0)
1544 releasem(mp)
1545 }
1546
1547
1548
1549
1550 func stopTheWorldGC(reason stwReason) worldStop {
1551 semacquire(&gcsema)
1552 return stopTheWorld(reason)
1553 }
1554
1555
1556
1557
1558 func startTheWorldGC(w worldStop) {
1559 startTheWorld(w)
1560 semrelease(&gcsema)
1561 }
1562
1563
1564 var worldsema uint32 = 1
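
// gcsema is like worldsema: stopTheWorldGC acquires it in addition to
// worldsema so that a GC cycle and other stop-the-world users cannot interleave.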
1572 var gcsema uint32 = 1
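
// stopTheWorldWithSema is the core implementation of stopTheWorld. The caller
// is responsible for acquiring worldsema and disabling preemption first, runs
// this on the system stack, and must call startTheWorldWithSema afterwards
// before releasing worldsema.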
1606 func stopTheWorldWithSema(reason stwReason) worldStop {
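// Mark the calling goroutine as waiting so that a concurrent suspendG or stack
// scan can handle it safely while it stops the world.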
1619 casGToWaitingForSuspendG(getg().m.curg, _Grunning, waitReasonStoppingTheWorld)
1620
1621 trace := traceAcquire()
1622 if trace.ok() {
1623 trace.STWStart(reason)
1624 traceRelease(trace)
1625 }
1626 gp := getg()
1627
1628
1629
1630 if gp.m.locks > 0 {
1631 throw("stopTheWorld: holding locks")
1632 }
1633
1634 lock(&sched.lock)
1635 start := nanotime()
1636 sched.stopwait = gomaxprocs
1637 sched.gcwaiting.Store(true)
1638 preemptall()
1639
1640 gp.m.p.ptr().status = _Pgcstop
1641 gp.m.p.ptr().gcStopTime = start
1642 sched.stopwait--
1643
1644 trace = traceAcquire()
1645 for _, pp := range allp {
1646 s := pp.status
1647 if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
1648 if trace.ok() {
1649 trace.ProcSteal(pp, false)
1650 }
1651 sched.nGsyscallNoP.Add(1)
1652 pp.syscalltick++
1653 pp.gcStopTime = nanotime()
1654 sched.stopwait--
1655 }
1656 }
1657 if trace.ok() {
1658 traceRelease(trace)
1659 }
1660
1661
1662 now := nanotime()
1663 for {
1664 pp, _ := pidleget(now)
1665 if pp == nil {
1666 break
1667 }
1668 pp.status = _Pgcstop
1669 pp.gcStopTime = nanotime()
1670 sched.stopwait--
1671 }
1672 wait := sched.stopwait > 0
1673 unlock(&sched.lock)
1674
1675
1676 if wait {
1677 for {
1678
1679 if notetsleep(&sched.stopnote, 100*1000) {
1680 noteclear(&sched.stopnote)
1681 break
1682 }
1683 preemptall()
1684 }
1685 }
1686
1687 finish := nanotime()
1688 startTime := finish - start
1689 if reason.isGC() {
1690 sched.stwStoppingTimeGC.record(startTime)
1691 } else {
1692 sched.stwStoppingTimeOther.record(startTime)
1693 }
1694
1695
1696
1697
1698
1699 stoppingCPUTime := int64(0)
1700 bad := ""
1701 if sched.stopwait != 0 {
1702 bad = "stopTheWorld: not stopped (stopwait != 0)"
1703 } else {
1704 for _, pp := range allp {
1705 if pp.status != _Pgcstop {
1706 bad = "stopTheWorld: not stopped (status != _Pgcstop)"
1707 }
1708 if pp.gcStopTime == 0 && bad == "" {
1709 bad = "stopTheWorld: broken CPU time accounting"
1710 }
1711 stoppingCPUTime += finish - pp.gcStopTime
1712 pp.gcStopTime = 0
1713 }
1714 }
1715 if freezing.Load() {
1716
1717
1718
1719
1720 lock(&deadlock)
1721 lock(&deadlock)
1722 }
1723 if bad != "" {
1724 throw(bad)
1725 }
1726
1727 worldStopped()
1728
1729
1730 casgstatus(getg().m.curg, _Gwaiting, _Grunning)
1731
1732 return worldStop{
1733 reason: reason,
1734 startedStopping: start,
1735 finishedStopping: finish,
1736 stoppingCPUTime: stoppingCPUTime,
1737 }
1738 }
1739
1740
1741
1742
1743
1744
1745
1746 func startTheWorldWithSema(now int64, w worldStop) int64 {
1747 assertWorldStopped()
1748
1749 mp := acquirem()
1750 if netpollinited() {
1751 list, delta := netpoll(0)
1752 injectglist(&list)
1753 netpollAdjustWaiters(delta)
1754 }
1755 lock(&sched.lock)
1756
1757 procs := gomaxprocs
1758 if newprocs != 0 {
1759 procs = newprocs
1760 newprocs = 0
1761 }
1762 p1 := procresize(procs)
1763 sched.gcwaiting.Store(false)
1764 if sched.sysmonwait.Load() {
1765 sched.sysmonwait.Store(false)
1766 notewakeup(&sched.sysmonnote)
1767 }
1768 unlock(&sched.lock)
1769
1770 worldStarted()
1771
1772 for p1 != nil {
1773 p := p1
1774 p1 = p1.link.ptr()
1775 if p.m != 0 {
1776 mp := p.m.ptr()
1777 p.m = 0
1778 if mp.nextp != 0 {
1779 throw("startTheWorld: inconsistent mp->nextp")
1780 }
1781 mp.nextp.set(p)
1782 notewakeup(&mp.park)
1783 } else {
1784
1785 newm(nil, p, -1)
1786 }
1787 }
1788
1789
1790 if now == 0 {
1791 now = nanotime()
1792 }
1793 totalTime := now - w.startedStopping
1794 if w.reason.isGC() {
1795 sched.stwTotalTimeGC.record(totalTime)
1796 } else {
1797 sched.stwTotalTimeOther.record(totalTime)
1798 }
1799 trace := traceAcquire()
1800 if trace.ok() {
1801 trace.STWDone()
1802 traceRelease(trace)
1803 }
1804
1805
1806
1807
1808 wakep()
1809
1810 releasem(mp)
1811
1812 return now
1813 }
1814
1815
1816
1817 func usesLibcall() bool {
1818 switch GOOS {
1819 case "aix", "darwin", "illumos", "ios", "solaris", "windows":
1820 return true
1821 case "openbsd":
1822 return GOARCH != "mips64"
1823 }
1824 return false
1825 }
1826
1827
1828
1829 func mStackIsSystemAllocated() bool {
1830 switch GOOS {
1831 case "aix", "darwin", "plan9", "illumos", "ios", "solaris", "windows":
1832 return true
1833 case "openbsd":
1834 return GOARCH != "mips64"
1835 }
1836 return false
1837 }
1838
1839
1840
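// mstart is the entry-point for new Ms. It is written in assembly, marked
// TOPFRAME, and calls mstart0.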
1841 func mstart()
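
// mstart0 is the Go entry-point for new Ms. It sets up the g0 stack bounds
// (using the OS-allocated stack if there is one), calls mstart1, and then, if
// mstart1 returns, tears the M down via mexit.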
1852 func mstart0() {
1853 gp := getg()
1854
1855 osStack := gp.stack.lo == 0
1856 if osStack {
1857
1858
1859
1860
1861
1862
1863
1864
1865 size := gp.stack.hi
1866 if size == 0 {
1867 size = 16384 * sys.StackGuardMultiplier
1868 }
1869 gp.stack.hi = uintptr(noescape(unsafe.Pointer(&size)))
1870 gp.stack.lo = gp.stack.hi - size + 1024
1871 }
1872
1873
1874 gp.stackguard0 = gp.stack.lo + stackGuard
1875
1876
1877 gp.stackguard1 = gp.stackguard0
1878 mstart1()
1879
1880
1881 if mStackIsSystemAllocated() {
1882
1883
1884
1885 osStack = true
1886 }
1887 mexit(osStack)
1888 }
1889
1890
1891
1892
1893
1894 func mstart1() {
1895 gp := getg()
1896
1897 if gp != gp.m.g0 {
1898 throw("bad runtime·mstart")
1899 }
1900
1901
1902
1903
1904
1905
1906
1907 gp.sched.g = guintptr(unsafe.Pointer(gp))
1908 gp.sched.pc = sys.GetCallerPC()
1909 gp.sched.sp = sys.GetCallerSP()
1910
1911 asminit()
1912 minit()
1913
1914
1915
1916 if gp.m == &m0 {
1917 mstartm0()
1918 }
1919
1920 if debug.dataindependenttiming == 1 {
1921 sys.EnableDIT()
1922 }
1923
1924 if fn := gp.m.mstartfn; fn != nil {
1925 fn()
1926 }
1927
1928 if gp.m != &m0 {
1929 acquirep(gp.m.nextp.ptr())
1930 gp.m.nextp = 0
1931 }
1932 schedule()
1933 }
1934
1935
1936
1937
1938
1939
1940
1941 func mstartm0() {
1942
1943
1944
1945 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
1946 cgoHasExtraM = true
1947 newextram()
1948 }
1949 initsig(false)
1950 }
1951
1952
1953
1954
1955 func mPark() {
1956 gp := getg()
1957 notesleep(&gp.m.park)
1958 noteclear(&gp.m.park)
1959 }
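
// mexit tears down and exits the current thread. osStack reports whether the
// thread's stack was allocated by the OS (and so is not freed here). Don't
// call this directly to exit the thread; it must run at the top of the thread
// stack, reached when mstart1 returns.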
1971 func mexit(osStack bool) {
1972 mp := getg().m
1973
1974 if mp == &m0 {
1975
1976
1977
1978
1979
1980
1981
1982
1983
1984
1985
1986 handoffp(releasep())
1987 lock(&sched.lock)
1988 sched.nmfreed++
1989 checkdead()
1990 unlock(&sched.lock)
1991 mPark()
1992 throw("locked m0 woke up")
1993 }
1994
1995 sigblock(true)
1996 unminit()
1997
1998
1999 if mp.gsignal != nil {
2000 stackfree(mp.gsignal.stack)
2001 if valgrindenabled {
2002 valgrindDeregisterStack(mp.gsignal.valgrindStackID)
2003 mp.gsignal.valgrindStackID = 0
2004 }
2005
2006
2007
2008
2009 mp.gsignal = nil
2010 }
2011
2012
2013 vgetrandomDestroy(mp)
2014
2015
2016 lock(&sched.lock)
2017 for pprev := &allm; *pprev != nil; pprev = &(*pprev).alllink {
2018 if *pprev == mp {
2019 *pprev = mp.alllink
2020 goto found
2021 }
2022 }
2023 throw("m not found in allm")
2024 found:
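// Delay reaping of this M: we are still running on its g0 stack, so it cannot
// be freed yet. Put it on sched.freem with freeWait set to freeMWait; allocm
// skips such entries and only frees the g0 stack after freeWait is updated
// (by exitThread, or explicitly below for OS-allocated stacks).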
2039 mp.freeWait.Store(freeMWait)
2040 mp.freelink = sched.freem
2041 sched.freem = mp
2042 unlock(&sched.lock)
2043
2044 atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
2045 sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load())
2046
2047
2048 handoffp(releasep())
2049
2050
2051
2052
2053
2054 lock(&sched.lock)
2055 sched.nmfreed++
2056 checkdead()
2057 unlock(&sched.lock)
2058
2059 if GOOS == "darwin" || GOOS == "ios" {
2060
2061
2062 if mp.signalPending.Load() != 0 {
2063 pendingPreemptSignals.Add(-1)
2064 }
2065 }
2066
2067
2068
2069 mdestroy(mp)
2070
2071 if osStack {
2072
2073 mp.freeWait.Store(freeMRef)
2074
2075
2076
2077 return
2078 }
2079
2080
2081
2082
2083
2084 exitThread(&mp.freeWait)
2085 }
2086
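// forEachP calls fn(p) for every P at a GC safe point. If a P is currently
// executing code, it is brought to a safe point and fn runs there; fn runs on
// idle Ps directly. reason is the wait reason recorded for the calling
// goroutine while it waits.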
2097 func forEachP(reason waitReason, fn func(*p)) {
2098 systemstack(func() {
2099 gp := getg().m.curg
2100
2101
2102
2103
2104
2105
2106
2107
2108
2109
2110
2111 casGToWaitingForSuspendG(gp, _Grunning, reason)
2112 forEachPInternal(fn)
2113 casgstatus(gp, _Gwaiting, _Grunning)
2114 })
2115 }
2116
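// forEachPInternal does the real work of forEachP. It must be called on the
// system stack, with the caller's goroutine already in a waiting state.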
2126 func forEachPInternal(fn func(*p)) {
2127 mp := acquirem()
2128 pp := getg().m.p.ptr()
2129
2130 lock(&sched.lock)
2131 if sched.safePointWait != 0 {
2132 throw("forEachP: sched.safePointWait != 0")
2133 }
2134 sched.safePointWait = gomaxprocs - 1
2135 sched.safePointFn = fn
2136
2137
2138 for _, p2 := range allp {
2139 if p2 != pp {
2140 atomic.Store(&p2.runSafePointFn, 1)
2141 }
2142 }
2143 preemptall()
2144
2145
2146
2147
2148
2149
2150
2151 for p := sched.pidle.ptr(); p != nil; p = p.link.ptr() {
2152 if atomic.Cas(&p.runSafePointFn, 1, 0) {
2153 fn(p)
2154 sched.safePointWait--
2155 }
2156 }
2157
2158 wait := sched.safePointWait > 0
2159 unlock(&sched.lock)
2160
2161
2162 fn(pp)
2163
2164
2165
2166 for _, p2 := range allp {
2167 s := p2.status
2168
2169
2170
2171 trace := traceAcquire()
2172 if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
2173 if trace.ok() {
2174
2175 trace.ProcSteal(p2, false)
2176 traceRelease(trace)
2177 }
2178 sched.nGsyscallNoP.Add(1)
2179 p2.syscalltick++
2180 handoffp(p2)
2181 } else if trace.ok() {
2182 traceRelease(trace)
2183 }
2184 }
2185
2186
2187 if wait {
2188 for {
2189
2190
2191
2192
2193 if notetsleep(&sched.safePointNote, 100*1000) {
2194 noteclear(&sched.safePointNote)
2195 break
2196 }
2197 preemptall()
2198 }
2199 }
2200 if sched.safePointWait != 0 {
2201 throw("forEachP: not done")
2202 }
2203 for _, p2 := range allp {
2204 if p2.runSafePointFn != 0 {
2205 throw("forEachP: P did not run fn")
2206 }
2207 }
2208
2209 lock(&sched.lock)
2210 sched.safePointFn = nil
2211 unlock(&sched.lock)
2212 releasem(mp)
2213 }
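
// runSafePointFn runs the safe point function for this P, if one is pending.
// It must be checked on any transition to _Pidle or _Psyscall so that forEachP
// is not left waiting for a P that will never run the function.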
2226 func runSafePointFn() {
2227 p := getg().m.p.ptr()
2228
2229
2230
2231 if !atomic.Cas(&p.runSafePointFn, 1, 0) {
2232 return
2233 }
2234 sched.safePointFn(p)
2235 lock(&sched.lock)
2236 sched.safePointWait--
2237 if sched.safePointWait == 0 {
2238 notewakeup(&sched.safePointNote)
2239 }
2240 unlock(&sched.lock)
2241 }
2242
2243
2244
2245
2246 var cgoThreadStart unsafe.Pointer
2247
2248 type cgothreadstart struct {
2249 g guintptr
2250 tls *uint64
2251 fn unsafe.Pointer
2252 }
2253
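// allocm allocates a new m unassociated with any thread. It can use pp for the
// duration of the call if memory allocation is needed. fn is recorded as the
// new m's mstartfn; id is an optional pre-reserved m ID (pass -1 to allocate one).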
2263 func allocm(pp *p, fn func(), id int64) *m {
2264 allocmLock.rlock()
2265
2266
2267
2268
2269 acquirem()
2270
2271 gp := getg()
2272 if gp.m.p == 0 {
2273 acquirep(pp)
2274 }
2275
2276
2277
2278 if sched.freem != nil {
2279 lock(&sched.lock)
2280 var newList *m
2281 for freem := sched.freem; freem != nil; {
2282
2283 wait := freem.freeWait.Load()
2284 if wait == freeMWait {
2285 next := freem.freelink
2286 freem.freelink = newList
2287 newList = freem
2288 freem = next
2289 continue
2290 }
2291
2292
2293
2294 if traceEnabled() || traceShuttingDown() {
2295 traceThreadDestroy(freem)
2296 }
2297
2298
2299
2300 if wait == freeMStack {
2301
2302
2303
2304 systemstack(func() {
2305 stackfree(freem.g0.stack)
2306 if valgrindenabled {
2307 valgrindDeregisterStack(freem.g0.valgrindStackID)
2308 freem.g0.valgrindStackID = 0
2309 }
2310 })
2311 }
2312 freem = freem.freelink
2313 }
2314 sched.freem = newList
2315 unlock(&sched.lock)
2316 }
2317
2318 mp := &new(mPadded).m
2319 mp.mstartfn = fn
2320 mcommoninit(mp, id)
2321
2322
2323
2324 if iscgo || mStackIsSystemAllocated() {
2325 mp.g0 = malg(-1)
2326 } else {
2327 mp.g0 = malg(16384 * sys.StackGuardMultiplier)
2328 }
2329 mp.g0.m = mp
2330
2331 if pp == gp.m.p.ptr() {
2332 releasep()
2333 }
2334
2335 releasem(gp.m)
2336 allocmLock.runlock()
2337 return mp
2338 }
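
// needm is called when a cgo callback happens on a thread that has no m (a
// thread not created by Go). It grabs an m from the extra M list, initializes
// it for the current thread, and leaves the callback's goroutine looking as if
// it is in a syscall. dropm undoes this when the callback returns.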
2379 func needm(signal bool) {
2380 if (iscgo || GOOS == "windows") && !cgoHasExtraM {
2381
2382
2383
2384
2385
2386
2387 writeErrStr("fatal error: cgo callback before cgo call\n")
2388 exit(1)
2389 }
2390
2391
2392
2393
2394
2395
2396
2397
2398
2399 var sigmask sigset
2400 sigsave(&sigmask)
2401 sigblock(false)
2402
2403
2404
2405
2406 mp, last := getExtraM()
2407
2408
2409
2410
2411
2412
2413
2414
2415 mp.needextram = last
2416
2417
2418 mp.sigmask = sigmask
2419
2420
2421
2422 osSetupTLS(mp)
2423
2424
2425
2426 setg(mp.g0)
2427 sp := sys.GetCallerSP()
2428 callbackUpdateSystemStack(mp, sp, signal)
2429
2430
2431
2432
2433 mp.isExtraInC = false
2434
2435
2436 asminit()
2437 minit()
2438
2439
2440
2441
2442
2443
2444 var trace traceLocker
2445 if !signal {
2446 trace = traceAcquire()
2447 }
2448
2449
2450 casgstatus(mp.curg, _Gdead, _Gsyscall)
2451 sched.ngsys.Add(-1)
2452 sched.nGsyscallNoP.Add(1)
2453
2454 if !signal {
2455 if trace.ok() {
2456 trace.GoCreateSyscall(mp.curg)
2457 traceRelease(trace)
2458 }
2459 }
2460 mp.isExtraInSig = signal
2461 }
2462
2463
2464
2465
2466 func needAndBindM() {
2467 needm(false)
2468
2469 if _cgo_pthread_key_created != nil && *(*uintptr)(_cgo_pthread_key_created) != 0 {
2470 cgoBindM()
2471 }
2472 }
2473
2474
2475
2476
2477 func newextram() {
2478 c := extraMWaiters.Swap(0)
2479 if c > 0 {
2480 for i := uint32(0); i < c; i++ {
2481 oneNewExtraM()
2482 }
2483 } else if extraMLength.Load() == 0 {
2484
2485 oneNewExtraM()
2486 }
2487 }
2488
2489
2490 func oneNewExtraM() {
2491
2492
2493
2494
2495
2496 mp := allocm(nil, nil, -1)
2497 gp := malg(4096)
2498 gp.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
2499 gp.sched.sp = gp.stack.hi
2500 gp.sched.sp -= 4 * goarch.PtrSize
2501 gp.sched.lr = 0
2502 gp.sched.g = guintptr(unsafe.Pointer(gp))
2503 gp.syscallpc = gp.sched.pc
2504 gp.syscallsp = gp.sched.sp
2505 gp.stktopsp = gp.sched.sp
2506
2507
2508
2509
2510 casgstatus(gp, _Gidle, _Gdead)
2511 gp.m = mp
2512 mp.curg = gp
2513 mp.isextra = true
2514
2515 mp.isExtraInC = true
2516 mp.lockedInt++
2517 mp.lockedg.set(gp)
2518 gp.lockedm.set(mp)
2519 gp.goid = sched.goidgen.Add(1)
2520 if raceenabled {
2521 gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
2522 }
2523
2524 allgadd(gp)
2525
2526
2527
2528
2529
2530 sched.ngsys.Add(1)
2531
2532
2533 addExtraM(mp)
2534 }
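
// dropm undoes the effect of needm: it puts the current m back onto the extra
// M list and clears this thread's association with it. It is called when a cgo
// callback that required needm is done executing Go code.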
2569 func dropm() {
2570
2571
2572
2573 mp := getg().m
2574
2575
2576
2577
2578
2579 var trace traceLocker
2580 if !mp.isExtraInSig {
2581 trace = traceAcquire()
2582 }
2583
2584
2585 casgstatus(mp.curg, _Gsyscall, _Gdead)
2586 mp.curg.preemptStop = false
2587 sched.ngsys.Add(1)
2588 sched.nGsyscallNoP.Add(-1)
2589
2590 if !mp.isExtraInSig {
2591 if trace.ok() {
2592 trace.GoDestroySyscall()
2593 traceRelease(trace)
2594 }
2595 }
2596
2610 mp.syscalltick--
2611
2612
2613
2614 mp.curg.trace.reset()
2615
2616
2617
2618
2619 if traceEnabled() || traceShuttingDown() {
2620
2621
2622
2623
2624
2625
2626
2627 lock(&sched.lock)
2628 traceThreadDestroy(mp)
2629 unlock(&sched.lock)
2630 }
2631 mp.isExtraInSig = false
2632
2633
2634
2635
2636
2637 sigmask := mp.sigmask
2638 sigblock(false)
2639 unminit()
2640
2641 setg(nil)
2642
2643
2644
2645 g0 := mp.g0
2646 g0.stack.hi = 0
2647 g0.stack.lo = 0
2648 g0.stackguard0 = 0
2649 g0.stackguard1 = 0
2650 mp.g0StackAccurate = false
2651
2652 putExtraM(mp)
2653
2654 msigrestore(sigmask)
2655 }
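
// cgoBindM binds the current m to the current C thread by calling the
// cgo-provided _cgo_bindm, so that later callbacks on this thread find the
// same m instead of taking a fresh one from the extra M list. It must run on g0.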
2677 func cgoBindM() {
2678 if GOOS == "windows" || GOOS == "plan9" {
2679 fatal("bindm in unexpected GOOS")
2680 }
2681 g := getg()
2682 if g.m.g0 != g {
2683 fatal("the current g is not g0")
2684 }
2685 if _cgo_bindm != nil {
2686 asmcgocall(_cgo_bindm, unsafe.Pointer(g))
2687 }
2688 }
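
// getm returns the current m as a uintptr; it is accessed by external packages
// via linkname.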
2701 func getm() uintptr {
2702 return uintptr(unsafe.Pointer(getg().m))
2703 }
2704
2705 var (
2706
2707
2708
2709
2710
2711
2712 extraM atomic.Uintptr
2713
2714 extraMLength atomic.Uint32
2715
2716 extraMWaiters atomic.Uint32
2717
2718
2719 extraMInUse atomic.Uint32
2720 )
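
// lockextra locks the extra M list and returns its head. The list head lives
// in extraM, with the value 1 meaning "locked". If nilokay is false, lockextra
// spins until the list is non-empty, recording itself in extraMWaiters so that
// newextram knows to create more Ms.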
2729 func lockextra(nilokay bool) *m {
2730 const locked = 1
2731
2732 incr := false
2733 for {
2734 old := extraM.Load()
2735 if old == locked {
2736 osyield_no_g()
2737 continue
2738 }
2739 if old == 0 && !nilokay {
2740 if !incr {
2741
2742
2743
2744 extraMWaiters.Add(1)
2745 incr = true
2746 }
2747 usleep_no_g(1)
2748 continue
2749 }
2750 if extraM.CompareAndSwap(old, locked) {
2751 return (*m)(unsafe.Pointer(old))
2752 }
2753 osyield_no_g()
2754 continue
2755 }
2756 }
2757
2758
2759 func unlockextra(mp *m, delta int32) {
2760 extraMLength.Add(delta)
2761 extraM.Store(uintptr(unsafe.Pointer(mp)))
2762 }
2763
2764
2765
2766
2767
2768
2769
2770
2771 func getExtraM() (mp *m, last bool) {
2772 mp = lockextra(false)
2773 extraMInUse.Add(1)
2774 unlockextra(mp.schedlink.ptr(), -1)
2775 return mp, mp.schedlink.ptr() == nil
2776 }
2777
2778
2779
2780
2781
2782 func putExtraM(mp *m) {
2783 extraMInUse.Add(-1)
2784 addExtraM(mp)
2785 }
2786
2787
2788
2789
2790 func addExtraM(mp *m) {
2791 mnext := lockextra(true)
2792 mp.schedlink.set(mnext)
2793 unlockextra(mp, 1)
2794 }
2795
2796 var (
2797
2798
2799
2800 allocmLock rwmutex
2801
2802
2803
2804
2805 execLock rwmutex
2806 )
2807
2808
2809
2810 const (
2811 failthreadcreate = "runtime: failed to create new OS thread\n"
2812 failallocatestack = "runtime: failed to allocate stack for the new OS thread\n"
2813 )
2814
2815
2816
2817
2818 var newmHandoff struct {
2819 lock mutex
2820
2821
2822
2823 newm muintptr
2824
2825
2826
2827 waiting bool
2828 wake note
2829
2830
2831
2832
2833 haveTemplateThread uint32
2834 }
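
// newm creates a new m and OS thread. The new m starts by running fn, or the
// scheduler if fn is nil. pp, if non-nil, is handed to the new m as its first
// P. id is an optional pre-reserved m ID; pass -1 to allocate one.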
2843 func newm(fn func(), pp *p, id int64) {
2854 acquirem()
2855
2856 mp := allocm(pp, fn, id)
2857 mp.nextp.set(pp)
2858 mp.sigmask = initSigmask
2859 if gp := getg(); gp != nil && gp.m != nil && (gp.m.lockedExt != 0 || gp.m.incgo) && GOOS != "plan9" {
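// We're on a locked M or a thread that may have been started by C. The kernel
// state of this thread may be strange (the user may have locked it for that
// purpose). Hand the new-thread request off to the template thread, which is
// known to be in a sane state, instead of cloning from here.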
2871 lock(&newmHandoff.lock)
2872 if newmHandoff.haveTemplateThread == 0 {
2873 throw("on a locked thread with no template thread")
2874 }
2875 mp.schedlink = newmHandoff.newm
2876 newmHandoff.newm.set(mp)
2877 if newmHandoff.waiting {
2878 newmHandoff.waiting = false
2879 notewakeup(&newmHandoff.wake)
2880 }
2881 unlock(&newmHandoff.lock)
2882
2883
2884
2885 releasem(getg().m)
2886 return
2887 }
2888 newm1(mp)
2889 releasem(getg().m)
2890 }
2891
2892 func newm1(mp *m) {
2893 if iscgo {
2894 var ts cgothreadstart
2895 if _cgo_thread_start == nil {
2896 throw("_cgo_thread_start missing")
2897 }
2898 ts.g.set(mp.g0)
2899 ts.tls = (*uint64)(unsafe.Pointer(&mp.tls[0]))
2900 ts.fn = unsafe.Pointer(abi.FuncPCABI0(mstart))
2901 if msanenabled {
2902 msanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2903 }
2904 if asanenabled {
2905 asanwrite(unsafe.Pointer(&ts), unsafe.Sizeof(ts))
2906 }
2907 execLock.rlock()
2908 asmcgocall(_cgo_thread_start, unsafe.Pointer(&ts))
2909 execLock.runlock()
2910 return
2911 }
2912 execLock.rlock()
2913 newosproc(mp)
2914 execLock.runlock()
2915 }
2916
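// startTemplateThread starts the template thread if it is not already running.
// The calling thread must itself be in a known-good state.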
2921 func startTemplateThread() {
2922 if GOARCH == "wasm" {
2923 return
2924 }
2925
2926
2927
2928 mp := acquirem()
2929 if !atomic.Cas(&newmHandoff.haveTemplateThread, 0, 1) {
2930 releasem(mp)
2931 return
2932 }
2933 newm(templateThread, nil, -1)
2934 releasem(mp)
2935 }
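
// templateThread is a thread that exists solely to create other threads from a
// known-good state. It is used when the thread asking for a new M (e.g. one
// locked with LockOSThread, or one created by C) may itself be in a state that
// is unsafe to clone; see newm.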
2949 func templateThread() {
2950 lock(&sched.lock)
2951 sched.nmsys++
2952 checkdead()
2953 unlock(&sched.lock)
2954
2955 for {
2956 lock(&newmHandoff.lock)
2957 for newmHandoff.newm != 0 {
2958 newm := newmHandoff.newm.ptr()
2959 newmHandoff.newm = 0
2960 unlock(&newmHandoff.lock)
2961 for newm != nil {
2962 next := newm.schedlink.ptr()
2963 newm.schedlink = 0
2964 newm1(newm)
2965 newm = next
2966 }
2967 lock(&newmHandoff.lock)
2968 }
2969 newmHandoff.waiting = true
2970 noteclear(&newmHandoff.wake)
2971 unlock(&newmHandoff.lock)
2972 notesleep(&newmHandoff.wake)
2973 }
2974 }
2975
2976
2977
2978 func stopm() {
2979 gp := getg()
2980
2981 if gp.m.locks != 0 {
2982 throw("stopm holding locks")
2983 }
2984 if gp.m.p != 0 {
2985 throw("stopm holding p")
2986 }
2987 if gp.m.spinning {
2988 throw("stopm spinning")
2989 }
2990
2991 lock(&sched.lock)
2992 mput(gp.m)
2993 unlock(&sched.lock)
2994 mPark()
2995 acquirep(gp.m.nextp.ptr())
2996 gp.m.nextp = 0
2997 }
2998
2999 func mspinning() {
3000
3001 getg().m.spinning = true
3002 }
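
// startm schedules some M to run pp (creating an M if necessary). If pp is
// nil, startm tries to get an idle P and does nothing if there is none. If
// spinning is true, the caller has incremented nmspinning and must pass a
// non-nil pp; the started M is marked spinning. lockheld reports whether the
// caller already holds sched.lock.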
3021 func startm(pp *p, spinning, lockheld bool) {
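// Disable preemption. startm transiently owns pp (either the argument or an
// idle P it acquires below) until it hands it to the started M. The owner of a
// P must be able to respond to a stop-the-world; a preempted goroutine cannot,
// so keep this M non-preemptible for the duration.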
3038 mp := acquirem()
3039 if !lockheld {
3040 lock(&sched.lock)
3041 }
3042 if pp == nil {
3043 if spinning {
3044
3045
3046
3047 throw("startm: P required for spinning=true")
3048 }
3049 pp, _ = pidleget(0)
3050 if pp == nil {
3051 if !lockheld {
3052 unlock(&sched.lock)
3053 }
3054 releasem(mp)
3055 return
3056 }
3057 }
3058 nmp := mget()
3059 if nmp == nil {
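// No M is available. We must drop sched.lock before calling newm (it may
// allocate or block), but first reserve the new M's ID so that thread
// accounting already includes it; otherwise checkdead could see no running Ms
// and report a spurious deadlock.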
3074 id := mReserveID()
3075 unlock(&sched.lock)
3076
3077 var fn func()
3078 if spinning {
3079
3080 fn = mspinning
3081 }
3082 newm(fn, pp, id)
3083
3084 if lockheld {
3085 lock(&sched.lock)
3086 }
3087
3088
3089 releasem(mp)
3090 return
3091 }
3092 if !lockheld {
3093 unlock(&sched.lock)
3094 }
3095 if nmp.spinning {
3096 throw("startm: m is spinning")
3097 }
3098 if nmp.nextp != 0 {
3099 throw("startm: m has p")
3100 }
3101 if spinning && !runqempty(pp) {
3102 throw("startm: p has runnable gs")
3103 }
3104
3105 nmp.spinning = spinning
3106 nmp.nextp.set(pp)
3107 notewakeup(&nmp.park)
3108
3109
3110 releasem(mp)
3111 }
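
// handoffp hands off pp from a syscall or locked M. It always runs without a
// P, so write barriers are not allowed. It finds or starts an M for pp if pp
// has work to do, and otherwise puts pp on the idle list.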
3117 func handoffp(pp *p) {
3118
3119
3120
3121
3122 if !runqempty(pp) || !sched.runq.empty() {
3123 startm(pp, false, false)
3124 return
3125 }
3126
3127 if (traceEnabled() || traceShuttingDown()) && traceReaderAvailable() != nil {
3128 startm(pp, false, false)
3129 return
3130 }
3131
3132 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) {
3133 startm(pp, false, false)
3134 return
3135 }
3136
3137
3138 if sched.nmspinning.Load()+sched.npidle.Load() == 0 && sched.nmspinning.CompareAndSwap(0, 1) {
3139 sched.needspinning.Store(0)
3140 startm(pp, true, false)
3141 return
3142 }
3143 lock(&sched.lock)
3144 if sched.gcwaiting.Load() {
3145 pp.status = _Pgcstop
3146 pp.gcStopTime = nanotime()
3147 sched.stopwait--
3148 if sched.stopwait == 0 {
3149 notewakeup(&sched.stopnote)
3150 }
3151 unlock(&sched.lock)
3152 return
3153 }
3154 if pp.runSafePointFn != 0 && atomic.Cas(&pp.runSafePointFn, 1, 0) {
3155 sched.safePointFn(pp)
3156 sched.safePointWait--
3157 if sched.safePointWait == 0 {
3158 notewakeup(&sched.safePointNote)
3159 }
3160 }
3161 if !sched.runq.empty() {
3162 unlock(&sched.lock)
3163 startm(pp, false, false)
3164 return
3165 }
3166
3167
3168 if sched.npidle.Load() == gomaxprocs-1 && sched.lastpoll.Load() != 0 {
3169 unlock(&sched.lock)
3170 startm(pp, false, false)
3171 return
3172 }
3173
3174
3175
3176 when := pp.timers.wakeTime()
3177 pidleput(pp, 0)
3178 unlock(&sched.lock)
3179
3180 if when != 0 {
3181 wakeNetPoller(when)
3182 }
3183 }
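
// wakep tries to add one more P to execute Gs. It is called when a goroutine
// is made runnable (newproc, ready) and there may be an idle P that could run it.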
3198 func wakep() {
3199
3200
3201 if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) {
3202 return
3203 }
3204
3205
3206
3207
3208
3209
3210 mp := acquirem()
3211
3212 var pp *p
3213 lock(&sched.lock)
3214 pp, _ = pidlegetSpinning(0)
3215 if pp == nil {
3216 if sched.nmspinning.Add(-1) < 0 {
3217 throw("wakep: negative nmspinning")
3218 }
3219 unlock(&sched.lock)
3220 releasem(mp)
3221 return
3222 }
3223
3224
3225
3226
3227 unlock(&sched.lock)
3228
3229 startm(pp, true, false)
3230
3231 releasem(mp)
3232 }
3233
3234
3235
3236 func stoplockedm() {
3237 gp := getg()
3238
3239 if gp.m.lockedg == 0 || gp.m.lockedg.ptr().lockedm.ptr() != gp.m {
3240 throw("stoplockedm: inconsistent locking")
3241 }
3242 if gp.m.p != 0 {
3243
3244 pp := releasep()
3245 handoffp(pp)
3246 }
3247 incidlelocked(1)
3248
3249 mPark()
3250 status := readgstatus(gp.m.lockedg.ptr())
3251 if status&^_Gscan != _Grunnable {
3252 print("runtime:stoplockedm: lockedg (atomicstatus=", status, ") is not Grunnable or Gscanrunnable\n")
3253 dumpgstatus(gp.m.lockedg.ptr())
3254 throw("stoplockedm: not runnable")
3255 }
3256 acquirep(gp.m.nextp.ptr())
3257 gp.m.nextp = 0
3258 }
3259
3260
3261
3262
3263
3264 func startlockedm(gp *g) {
3265 mp := gp.lockedm.ptr()
3266 if mp == getg().m {
3267 throw("startlockedm: locked to me")
3268 }
3269 if mp.nextp != 0 {
3270 throw("startlockedm: m has p")
3271 }
3272
3273 incidlelocked(-1)
3274 pp := releasep()
3275 mp.nextp.set(pp)
3276 notewakeup(&mp.park)
3277 stopm()
3278 }
3279
3280
3281
3282 func gcstopm() {
3283 gp := getg()
3284
3285 if !sched.gcwaiting.Load() {
3286 throw("gcstopm: not waiting for gc")
3287 }
3288 if gp.m.spinning {
3289 gp.m.spinning = false
3290
3291
3292 if sched.nmspinning.Add(-1) < 0 {
3293 throw("gcstopm: negative nmspinning")
3294 }
3295 }
3296 pp := releasep()
3297 lock(&sched.lock)
3298 pp.status = _Pgcstop
3299 pp.gcStopTime = nanotime()
3300 sched.stopwait--
3301 if sched.stopwait == 0 {
3302 notewakeup(&sched.stopnote)
3303 }
3304 unlock(&sched.lock)
3305 stopm()
3306 }
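
// execute schedules gp to run on the current M. If inheritTime is true, gp
// inherits the remaining time in the current time slice; otherwise it starts a
// new slice. execute never returns: it ends by gogo-ing into gp.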
3317 func execute(gp *g, inheritTime bool) {
3318 mp := getg().m
3319
3320 if goroutineProfile.active {
3321
3322
3323
3324 tryRecordGoroutineProfile(gp, nil, osyield)
3325 }
3326
3327
3328 mp.curg = gp
3329 gp.m = mp
3330 gp.syncSafePoint = false
3331 casgstatus(gp, _Grunnable, _Grunning)
3332 gp.waitsince = 0
3333 gp.preempt = false
3334 gp.stackguard0 = gp.stack.lo + stackGuard
3335 if !inheritTime {
3336 mp.p.ptr().schedtick++
3337 }
3338
3339
3340 hz := sched.profilehz
3341 if mp.profilehz != hz {
3342 setThreadCPUProfiler(hz)
3343 }
3344
3345 trace := traceAcquire()
3346 if trace.ok() {
3347 trace.GoStart()
3348 traceRelease(trace)
3349 }
3350
3351 gogo(&gp.sched)
3352 }
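
// findRunnable finds a runnable goroutine to execute, checking the local and
// global run queues, timers, the network poller, and finally stealing from
// other Ps. tryWakeP reports that the returned goroutine is not a normal one
// (a GC worker or the trace reader), so the caller should consider waking a P.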
3358 func findRunnable() (gp *g, inheritTime, tryWakeP bool) {
3359 mp := getg().m
3360
3361
3362
3363
3364
3365 top:
3366
3367
3368
3369 mp.clearAllpSnapshot()
3370
3371 pp := mp.p.ptr()
3372 if sched.gcwaiting.Load() {
3373 gcstopm()
3374 goto top
3375 }
3376 if pp.runSafePointFn != 0 {
3377 runSafePointFn()
3378 }
3379
3380
3381
3382
3383
3384 now, pollUntil, _ := pp.timers.check(0, nil)
3385
3386
3387 if traceEnabled() || traceShuttingDown() {
3388 gp := traceReader()
3389 if gp != nil {
3390 trace := traceAcquire()
3391 casgstatus(gp, _Gwaiting, _Grunnable)
3392 if trace.ok() {
3393 trace.GoUnpark(gp, 0)
3394 traceRelease(trace)
3395 }
3396 return gp, false, true
3397 }
3398 }
3399
3400
3401 if gcBlackenEnabled != 0 {
3402 gp, tnow := gcController.findRunnableGCWorker(pp, now)
3403 if gp != nil {
3404 return gp, false, true
3405 }
3406 now = tnow
3407 }
3408
3409
3410
3411
3412 if pp.schedtick%61 == 0 && !sched.runq.empty() {
3413 lock(&sched.lock)
3414 gp := globrunqget()
3415 unlock(&sched.lock)
3416 if gp != nil {
3417 return gp, false, false
3418 }
3419 }
3420
3421
3422 if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake {
3423 if gp := wakefing(); gp != nil {
3424 ready(gp, 0, true)
3425 }
3426 }
3427
3428
3429 if gcCleanups.needsWake() {
3430 gcCleanups.wake()
3431 }
3432
3433 if *cgo_yield != nil {
3434 asmcgocall(*cgo_yield, nil)
3435 }
3436
3437
3438 if gp, inheritTime := runqget(pp); gp != nil {
3439 return gp, inheritTime, false
3440 }
3441
3442
3443 if !sched.runq.empty() {
3444 lock(&sched.lock)
3445 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3446 unlock(&sched.lock)
3447 if gp != nil {
3448 if runqputbatch(pp, &q); !q.empty() {
3449 throw("Couldn't put Gs into empty local runq")
3450 }
3451 return gp, false, false
3452 }
3453 }
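
// Poll network, as an optimization before we resort to stealing. We can safely
// skip it if there are no waiters or a thread is already blocked in netpoll;
// the blocking netpoll further below covers any race with that thread.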
3464 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 && sched.pollingNet.Swap(1) == 0 {
3465 list, delta := netpoll(0)
3466 sched.pollingNet.Store(0)
3467 if !list.empty() {
3468 gp := list.pop()
3469 injectglist(&list)
3470 netpollAdjustWaiters(delta)
3471 trace := traceAcquire()
3472 casgstatus(gp, _Gwaiting, _Grunnable)
3473 if trace.ok() {
3474 trace.GoUnpark(gp, 0)
3475 traceRelease(trace)
3476 }
3477 return gp, false, false
3478 }
3479 }
3480
3481
3482
3483
3484
3485
3486 if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() {
3487 if !mp.spinning {
3488 mp.becomeSpinning()
3489 }
3490
3491 gp, inheritTime, tnow, w, newWork := stealWork(now)
3492 if gp != nil {
3493
3494 return gp, inheritTime, false
3495 }
3496 if newWork {
3497
3498
3499 goto top
3500 }
3501
3502 now = tnow
3503 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3504
3505 pollUntil = w
3506 }
3507 }
3508
3509
3510
3511
3512
3513 if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() {
3514 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3515 if node != nil {
3516 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3517 gp := node.gp.ptr()
3518
3519 trace := traceAcquire()
3520 casgstatus(gp, _Gwaiting, _Grunnable)
3521 if trace.ok() {
3522 trace.GoUnpark(gp, 0)
3523 traceRelease(trace)
3524 }
3525 return gp, false, false
3526 }
3527 gcController.removeIdleMarkWorker()
3528 }
3529
3530
3531
3532
3533
3534 gp, otherReady := beforeIdle(now, pollUntil)
3535 if gp != nil {
3536 trace := traceAcquire()
3537 casgstatus(gp, _Gwaiting, _Grunnable)
3538 if trace.ok() {
3539 trace.GoUnpark(gp, 0)
3540 traceRelease(trace)
3541 }
3542 return gp, false, false
3543 }
3544 if otherReady {
3545 goto top
3546 }
3547
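// Before dropping our P, take a snapshot of allp: the slice can change once we
// no longer block safe points, but the entries visible through the snapshot's
// length are immutable, so reading them afterwards is safe.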
3556 allpSnapshot := mp.snapshotAllp()
3557
3558
3559 idlepMaskSnapshot := idlepMask
3560 timerpMaskSnapshot := timerpMask
3561
3562
3563 lock(&sched.lock)
3564 if sched.gcwaiting.Load() || pp.runSafePointFn != 0 {
3565 unlock(&sched.lock)
3566 goto top
3567 }
3568 if !sched.runq.empty() {
3569 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3570 unlock(&sched.lock)
3571 if gp == nil {
3572 throw("global runq empty with non-zero runqsize")
3573 }
3574 if runqputbatch(pp, &q); !q.empty() {
3575 throw("Couldn't put Gs into empty local runq")
3576 }
3577 return gp, false, false
3578 }
3579 if !mp.spinning && sched.needspinning.Load() == 1 {
3580
3581 mp.becomeSpinning()
3582 unlock(&sched.lock)
3583 goto top
3584 }
3585 if releasep() != pp {
3586 throw("findrunnable: wrong p")
3587 }
3588 now = pidleput(pp, now)
3589 unlock(&sched.lock)
3590 // Delicate dance: the thread transitions from spinning to non-spinning
3591 // state, potentially concurrently with submission of new work. We must
3592 // drop nmspinning first and then check all sources of work again (with
3593 // a store/load barrier in between). If we did it the other way around,
3594 // another thread could submit work after we have checked all sources
3595 // but before we drop nmspinning, and then nobody would unpark a thread
3596 // to run that work.
3597 //
3598 // This applies to goroutines added to the global or a per-P run queue,
3599 // new or earlier timers on a per-P timer heap, and idle-priority GC work.
3600 //
3601 // If we discover new work below we need to restore m.spinning as a signal
3602 // for resetspinning to unpark a new worker thread, because there can be
3603 // more than one starving goroutine. sched.needspinning is used to
3604 // synchronize with non-spinning Ms that are about to drop their P.
3627 wasSpinning := mp.spinning
3628 if mp.spinning {
3629 mp.spinning = false
3630 if sched.nmspinning.Add(-1) < 0 {
3631 throw("findrunnable: negative nmspinning")
3632 }
3633
3634
3635
3636
3637
3638
3639
3640
3641
3642 // Even though we are no longer counted as spinning, re-check all
3643 // sources of work once more before parking, since new work may have
3644 // been submitted while nmspinning was being decremented.
3645 lock(&sched.lock)
3646 if !sched.runq.empty() {
3647 pp, _ := pidlegetSpinning(0)
3648 if pp != nil {
3649 gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2)
3650 unlock(&sched.lock)
3651 if gp == nil {
3652 throw("global runq empty with non-zero runqsize")
3653 }
3654 if runqputbatch(pp, &q); !q.empty() {
3655 throw("Couldn't put Gs into empty local runq")
3656 }
3657 acquirep(pp)
3658 mp.becomeSpinning()
3659 return gp, false, false
3660 }
3661 }
3662 unlock(&sched.lock)
3663
3664 pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot)
3665 if pp != nil {
3666 acquirep(pp)
3667 mp.becomeSpinning()
3668 goto top
3669 }
3670
3671
3672 pp, gp := checkIdleGCNoP()
3673 if pp != nil {
3674 acquirep(pp)
3675 mp.becomeSpinning()
3676
3677
3678 pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
3679 trace := traceAcquire()
3680 casgstatus(gp, _Gwaiting, _Grunnable)
3681 if trace.ok() {
3682 trace.GoUnpark(gp, 0)
3683 traceRelease(trace)
3684 }
3685 return gp, false, false
3686 }
3687
3688
3689
3690
3691
3692 // Finally, check for timer creation or expiry concurrently with
3693 // transitioning from spinning to non-spinning.
3694 pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil)
3695 }
3696
3697
3698
3699
3700
3701 if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
3702 sched.pollUntil.Store(pollUntil)
3703 if mp.p != 0 {
3704 throw("findrunnable: netpoll with p")
3705 }
3706 if mp.spinning {
3707 throw("findrunnable: netpoll with spinning")
3708 }
3709 delay := int64(-1)
3710 if pollUntil != 0 {
3711 if now == 0 {
3712 now = nanotime()
3713 }
3714 delay = pollUntil - now
3715 if delay < 0 {
3716 delay = 0
3717 }
3718 }
3719 if faketime != 0 {
3720
3721 delay = 0
3722 }
3723 list, delta := netpoll(delay)
3724
3725 now = nanotime()
3726 sched.pollUntil.Store(0)
3727 sched.lastpoll.Store(now)
3728 if faketime != 0 && list.empty() {
3729
3730
3731 stopm()
3732 goto top
3733 }
3734 lock(&sched.lock)
3735 pp, _ := pidleget(now)
3736 unlock(&sched.lock)
3737 if pp == nil {
3738 injectglist(&list)
3739 netpollAdjustWaiters(delta)
3740 } else {
3741 acquirep(pp)
3742 if !list.empty() {
3743 gp := list.pop()
3744 injectglist(&list)
3745 netpollAdjustWaiters(delta)
3746 trace := traceAcquire()
3747 casgstatus(gp, _Gwaiting, _Grunnable)
3748 if trace.ok() {
3749 trace.GoUnpark(gp, 0)
3750 traceRelease(trace)
3751 }
3752 return gp, false, false
3753 }
3754 if wasSpinning {
3755 mp.becomeSpinning()
3756 }
3757 goto top
3758 }
3759 } else if pollUntil != 0 && netpollinited() {
3760 pollerPollUntil := sched.pollUntil.Load()
3761 if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
3762 netpollBreak()
3763 }
3764 }
3765 stopm()
3766 goto top
3767 }
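// Summary of findRunnable above (descriptive only; the code is
// authoritative): it tries, roughly in order, the trace reader, a GC mark
// worker, the global run queue (once every 61 ticks, for fairness),
// finalizer and cleanup wakeups, the local run queue, the global run queue,
// a non-blocking netpoll, work stealing from other Ps, idle-priority GC
// marking, and wasm event handling; only then does it release its P,
// re-check every source once more, optionally block in netpoll until the
// next timer, and finally stop the M.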
3768
3769 // pollWork reports whether there is non-background work this P could
3770 // be doing. This is a fairly lightweight check to be used for
3771 // background work loops, like idle GC. It checks a subset of the
3772 // conditions checked by the actual scheduler.
3773 func pollWork() bool {
3774 if !sched.runq.empty() {
3775 return true
3776 }
3777 p := getg().m.p.ptr()
3778 if !runqempty(p) {
3779 return true
3780 }
3781 if netpollinited() && netpollAnyWaiters() && sched.lastpoll.Load() != 0 {
3782 if list, delta := netpoll(0); !list.empty() {
3783 injectglist(&list)
3784 netpollAdjustWaiters(delta)
3785 return true
3786 }
3787 }
3788 return false
3789 }
3790
3791 // stealWork attempts to steal a runnable goroutine or timer from any P.
3792 //
3793 // If newWork is true, new work may have been readied.
3794 //
3795 // If now is not 0 it is the current time. stealWork returns the passed time
3796 // or the current time if now was passed as 0.
3797 func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWork bool) {
3798 pp := getg().m.p.ptr()
3799
3800 ranTimer := false
3801
3802 const stealTries = 4
3803 for i := 0; i < stealTries; i++ {
3804 stealTimersOrRunNextG := i == stealTries-1
3805
3806 for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() {
3807 if sched.gcwaiting.Load() {
3808
3809 return nil, false, now, pollUntil, true
3810 }
3811 p2 := allp[enum.position()]
3812 if pp == p2 {
3813 continue
3814 }
3815
3816
3817
3818
3819
3820
3821
3822
3823
3824
3825
3826
3827
3828
3829 if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
3830 tnow, w, ran := p2.timers.check(now, nil)
3831 now = tnow
3832 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3833 pollUntil = w
3834 }
3835 if ran {
3836
3837
3838
3839
3840
3841
3842
3843
3844 if gp, inheritTime := runqget(pp); gp != nil {
3845 return gp, inheritTime, now, pollUntil, ranTimer
3846 }
3847 ranTimer = true
3848 }
3849 }
3850
3851
3852 if !idlepMask.read(enum.position()) {
3853 if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil {
3854 return gp, false, now, pollUntil, ranTimer
3855 }
3856 }
3857 }
3858 }
3859
3860
3861
3862
3863 return nil, false, now, pollUntil, ranTimer
3864 }
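// Note on the stealing strategy above (descriptive): stealWork makes up to
// stealTries (4) passes over all Ps in a randomized order. Only on the final
// pass does it also run the victim's timers and consider taking the victim's
// runnext slot; earlier passes look only at the victim's run queue, giving a
// just-readied runnext goroutine a brief chance to run on its own P first.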
3865
3866 // Check all Ps for a runnable G to steal.
3867 //
3868 // On entry we have no P. If a G is available to steal and a P is available,
3869 // the P is returned which the caller should acquire and attempt to steal the
3870 // work to.
3871 func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p {
3872 for id, p2 := range allpSnapshot {
3873 if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
3874 lock(&sched.lock)
3875 pp, _ := pidlegetSpinning(0)
3876 if pp == nil {
3877
3878 unlock(&sched.lock)
3879 return nil
3880 }
3881 unlock(&sched.lock)
3882 return pp
3883 }
3884 }
3885
3886
3887 return nil
3888 }
3889
3890 // Check all Ps for a timer expiring sooner than pollUntil.
3891 //
3892 // Returns updated pollUntil value.
3893 func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 {
3894 for id, p2 := range allpSnapshot {
3895 if timerpMaskSnapshot.read(uint32(id)) {
3896 w := p2.timers.wakeTime()
3897 if w != 0 && (pollUntil == 0 || w < pollUntil) {
3898 pollUntil = w
3899 }
3900 }
3901 }
3902
3903 return pollUntil
3904 }
3905
3906 // Check for idle-priority GC, without a P on entry.
3907 //
3908 // If some GC work, a P, and a worker G are all available, the P and G will be
3909 // returned. The returned P has not been wired yet.
3910 func checkIdleGCNoP() (*p, *g) {
3911
3912
3913
3914
3915
3916
3917 if atomic.Load(&gcBlackenEnabled) == 0 || !gcController.needIdleMarkWorker() {
3918 return nil, nil
3919 }
3920 if !gcMarkWorkAvailable(nil) {
3921 return nil, nil
3922 }
3923
3924
3925
3926
3927
3928
3929
3930
3931
3932
3933
3934
3935
3936
3937
3938
3939
3940
3941 lock(&sched.lock)
3942 pp, now := pidlegetSpinning(0)
3943 if pp == nil {
3944 unlock(&sched.lock)
3945 return nil, nil
3946 }
3947
3948
3949 if gcBlackenEnabled == 0 || !gcController.addIdleMarkWorker() {
3950 pidleput(pp, now)
3951 unlock(&sched.lock)
3952 return nil, nil
3953 }
3954
3955 node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop())
3956 if node == nil {
3957 pidleput(pp, now)
3958 unlock(&sched.lock)
3959 gcController.removeIdleMarkWorker()
3960 return nil, nil
3961 }
3962
3963 unlock(&sched.lock)
3964
3965 return pp, node.gp.ptr()
3966 }
3967
3968 // wakeNetPoller wakes up the thread sleeping in the network poller if it isn't
3969 // going to wake up before the when argument; or it wakes an idle P to service
3970 // timers and the network poller if there isn't one already.
3971 func wakeNetPoller(when int64) {
3972 if sched.lastpoll.Load() == 0 {
3973
3974
3975
3976
3977 pollerPollUntil := sched.pollUntil.Load()
3978 if pollerPollUntil == 0 || pollerPollUntil > when {
3979 netpollBreak()
3980 }
3981 } else {
3982
3983
3984 if GOOS != "plan9" {
3985 wakep()
3986 }
3987 }
3988 }
3989
3990 func resetspinning() {
3991 gp := getg()
3992 if !gp.m.spinning {
3993 throw("resetspinning: not a spinning m")
3994 }
3995 gp.m.spinning = false
3996 nmspinning := sched.nmspinning.Add(-1)
3997 if nmspinning < 0 {
3998 throw("findrunnable: negative nmspinning")
3999 }
4000
4001
4002
4003 wakep()
4004 }
4005
4006 // injectglist adds each runnable G on the list to some run queue,
4007 // and clears glist. If there is no current P, they are added to the
4008 // global queue, and up to npidle M's are started to run them.
4009 // Otherwise, for each idle P, this adds a G to the global queue
4010 // and starts an M. Any remaining G's are added to the current P's
4011 // local run queue.
4012 // This may temporarily acquire sched.lock.
4013 // Can run concurrently with GC.
4014 func injectglist(glist *gList) {
4015 if glist.empty() {
4016 return
4017 }
4018
4019
4020
4021 var tail *g
4022 trace := traceAcquire()
4023 for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
4024 tail = gp
4025 casgstatus(gp, _Gwaiting, _Grunnable)
4026 if trace.ok() {
4027 trace.GoUnpark(gp, 0)
4028 }
4029 }
4030 if trace.ok() {
4031 traceRelease(trace)
4032 }
4033
4034
4035 q := gQueue{glist.head, tail.guintptr(), glist.size}
4036 *glist = gList{}
4037
4038 startIdle := func(n int32) {
4039 for ; n > 0; n-- {
4040 mp := acquirem()
4041 lock(&sched.lock)
4042
4043 pp, _ := pidlegetSpinning(0)
4044 if pp == nil {
4045 unlock(&sched.lock)
4046 releasem(mp)
4047 break
4048 }
4049
4050 startm(pp, false, true)
4051 unlock(&sched.lock)
4052 releasem(mp)
4053 }
4054 }
4055
4056 pp := getg().m.p.ptr()
4057 if pp == nil {
4058 n := q.size
4059 lock(&sched.lock)
4060 globrunqputbatch(&q)
4061 unlock(&sched.lock)
4062 startIdle(n)
4063 return
4064 }
4065
4066 var globq gQueue
4067 npidle := sched.npidle.Load()
4068 for ; npidle > 0 && !q.empty(); npidle-- {
4069 g := q.pop()
4070 globq.pushBack(g)
4071 }
4072 if !globq.empty() {
4073 n := globq.size
4074 lock(&sched.lock)
4075 globrunqputbatch(&globq)
4076 unlock(&sched.lock)
4077 startIdle(n)
4078 }
4079
4080 if runqputbatch(pp, &q); !q.empty() {
4081 lock(&sched.lock)
4082 globrunqputbatch(&q)
4083 unlock(&sched.lock)
4084 }
4085
4086
4087
4088
4089
4090
4091
4092
4093
4094
4095
4096
4097
4098
4099 wakep()
4100 }
4101
4102 // One round of scheduler: find a runnable goroutine and execute it.
4103 // Never returns.
4104 func schedule() {
4105 mp := getg().m
4106
4107 if mp.locks != 0 {
4108 throw("schedule: holding locks")
4109 }
4110
4111 if mp.lockedg != 0 {
4112 stoplockedm()
4113 execute(mp.lockedg.ptr(), false)
4114 }
4115
4116
4117
4118 if mp.incgo {
4119 throw("schedule: in cgo")
4120 }
4121
4122 top:
4123 pp := mp.p.ptr()
4124 pp.preempt = false
4125
4126
4127
4128
4129 if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) {
4130 throw("schedule: spinning with local work")
4131 }
4132
4133 gp, inheritTime, tryWakeP := findRunnable()
4134
4135
4136
4137
4138 mp.clearAllpSnapshot()
4139
4140 if debug.dontfreezetheworld > 0 && freezing.Load() {
4141
4142
4143
4144
4145
4146
4147
4148 lock(&deadlock)
4149 lock(&deadlock)
4150 }
4151
4152
4153
4154
4155 if mp.spinning {
4156 resetspinning()
4157 }
4158
4159 if sched.disable.user && !schedEnabled(gp) {
4160
4161
4162
4163 lock(&sched.lock)
4164 if schedEnabled(gp) {
4165
4166
4167 unlock(&sched.lock)
4168 } else {
4169 sched.disable.runnable.pushBack(gp)
4170 unlock(&sched.lock)
4171 goto top
4172 }
4173 }
4174
4175
4176
4177 if tryWakeP {
4178 wakep()
4179 }
4180 if gp.lockedm != 0 {
4181
4182
4183 startlockedm(gp)
4184 goto top
4185 }
4186
4187 execute(gp, inheritTime)
4188 }
4189
4190 // dropg removes the association between m and the current goroutine m->curg (gp for short).
4191 // Typically a caller sets gp's status away from Grunning and then
4192 // immediately calls dropg to finish the job. The caller is also responsible
4193 // for arranging that gp will be restarted using ready at an
4194 // appropriate time. After calling dropg and arranging for gp to be
4195 // readied later, the caller can do other work but eventually should
4196 // call schedule to restart the scheduling of goroutines on this m.
4197 func dropg() {
4198 gp := getg()
4199
4200 setMNoWB(&gp.m.curg.m, nil)
4201 setGNoWB(&gp.m.curg, nil)
4202 }
4203
4204 func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
4205 unlock((*mutex)(lock))
4206 return true
4207 }
4208
4209
4210 func park_m(gp *g) {
4211 mp := getg().m
4212
4213 trace := traceAcquire()
4214
4215
4216
4217
4218
4219 bubble := gp.bubble
4220 if bubble != nil {
4221 bubble.incActive()
4222 }
4223
4224 if trace.ok() {
4225
4226
4227
4228 trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip)
4229 }
4230
4231
4232 casgstatus(gp, _Grunning, _Gwaiting)
4233 if trace.ok() {
4234 traceRelease(trace)
4235 }
4236
4237 dropg()
4238
4239 if fn := mp.waitunlockf; fn != nil {
4240 ok := fn(gp, mp.waitlock)
4241 mp.waitunlockf = nil
4242 mp.waitlock = nil
4243 if !ok {
4244 trace := traceAcquire()
4245 casgstatus(gp, _Gwaiting, _Grunnable)
4246 if bubble != nil {
4247 bubble.decActive()
4248 }
4249 if trace.ok() {
4250 trace.GoUnpark(gp, 2)
4251 traceRelease(trace)
4252 }
4253 execute(gp, true)
4254 }
4255 }
4256
4257 if bubble != nil {
4258 bubble.decActive()
4259 }
4260
4261 schedule()
4262 }
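// The usual way into park_m is gopark (or goparkunlock, as used by
// updateMaxProcsGoroutine later in this file): the blocking code records a
// wait reason and an unlock callback on the M and then mcalls here on g0.
// Moving gp to _Gwaiting before running waitunlockf is what makes the
// "lock, check condition, park" pattern race-free: a waker that acquires the
// lock after waitunlockf releases it observes the goroutine as parked and
// can safely ready it; if waitunlockf reports failure, the goroutine is made
// runnable again and resumed immediately instead.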
4263
4264 func goschedImpl(gp *g, preempted bool) {
4265 trace := traceAcquire()
4266 status := readgstatus(gp)
4267 if status&^_Gscan != _Grunning {
4268 dumpgstatus(gp)
4269 throw("bad g status")
4270 }
4271 if trace.ok() {
4272
4273
4274
4275 if preempted {
4276 trace.GoPreempt()
4277 } else {
4278 trace.GoSched()
4279 }
4280 }
4281 casgstatus(gp, _Grunning, _Grunnable)
4282 if trace.ok() {
4283 traceRelease(trace)
4284 }
4285
4286 dropg()
4287 lock(&sched.lock)
4288 globrunqput(gp)
4289 unlock(&sched.lock)
4290
4291 if mainStarted {
4292 wakep()
4293 }
4294
4295 schedule()
4296 }
4297
4298
4299 func gosched_m(gp *g) {
4300 goschedImpl(gp, false)
4301 }
4302
4303
4304 func goschedguarded_m(gp *g) {
4305 if !canPreemptM(gp.m) {
4306 gogo(&gp.sched)
4307 }
4308 goschedImpl(gp, false)
4309 }
4310
4311 func gopreempt_m(gp *g) {
4312 goschedImpl(gp, true)
4313 }
4314
4315
4316
4317
4318 func preemptPark(gp *g) {
4319 status := readgstatus(gp)
4320 if status&^_Gscan != _Grunning {
4321 dumpgstatus(gp)
4322 throw("bad g status")
4323 }
4324
4325 if gp.asyncSafePoint {
4326
4327
4328
4329 f := findfunc(gp.sched.pc)
4330 if !f.valid() {
4331 throw("preempt at unknown pc")
4332 }
4333 if f.flag&abi.FuncFlagSPWrite != 0 {
4334 println("runtime: unexpected SPWRITE function", funcname(f), "in async preempt")
4335 throw("preempt SPWRITE")
4336 }
4337 }
4338
4339
4340
4341
4342
4343
4344
4345 casGToPreemptScan(gp, _Grunning, _Gscan|_Gpreempted)
4346 dropg()
4347
4348
4349
4350
4351
4352
4353
4354
4355
4356
4357
4358
4359
4360
4361
4362
4363 trace := traceAcquire()
4364 if trace.ok() {
4365 trace.GoPark(traceBlockPreempted, 0)
4366 }
4367 casfrom_Gscanstatus(gp, _Gscan|_Gpreempted, _Gpreempted)
4368 if trace.ok() {
4369 traceRelease(trace)
4370 }
4371 schedule()
4372 }
4373
4374
4375
4376
4377
4378
4379
4380
4381
4382
4383
4384
4385 // goyield is like Gosched, but it:
4386 //   - emits a GoPreempt trace event instead of a GoSched trace event
4387 //   - puts the current goroutine on the runq of the current P instead of the globrunq
4388 func goyield() {
4389 checkTimeouts()
4390 mcall(goyield_m)
4391 }
4392
4393 func goyield_m(gp *g) {
4394 trace := traceAcquire()
4395 pp := gp.m.p.ptr()
4396 if trace.ok() {
4397
4398
4399
4400 trace.GoPreempt()
4401 }
4402 casgstatus(gp, _Grunning, _Grunnable)
4403 if trace.ok() {
4404 traceRelease(trace)
4405 }
4406 dropg()
4407 runqput(pp, gp, false)
4408 schedule()
4409 }
4410
4411
4412 func goexit1() {
4413 if raceenabled {
4414 if gp := getg(); gp.bubble != nil {
4415 racereleasemergeg(gp, gp.bubble.raceaddr())
4416 }
4417 racegoend()
4418 }
4419 trace := traceAcquire()
4420 if trace.ok() {
4421 trace.GoEnd()
4422 traceRelease(trace)
4423 }
4424 mcall(goexit0)
4425 }
4426
4427
4428 func goexit0(gp *g) {
4429 gdestroy(gp)
4430 schedule()
4431 }
4432
4433 func gdestroy(gp *g) {
4434 mp := getg().m
4435 pp := mp.p.ptr()
4436
4437 casgstatus(gp, _Grunning, _Gdead)
4438 gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
4439 if isSystemGoroutine(gp, false) {
4440 sched.ngsys.Add(-1)
4441 }
4442 gp.m = nil
4443 locked := gp.lockedm != 0
4444 gp.lockedm = 0
4445 mp.lockedg = 0
4446 gp.preemptStop = false
4447 gp.paniconfault = false
4448 gp._defer = nil
4449 gp._panic = nil
4450 gp.writebuf = nil
4451 gp.waitreason = waitReasonZero
4452 gp.param = nil
4453 gp.labels = nil
4454 gp.timer = nil
4455 gp.bubble = nil
4456
4457 if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 {
4458
4459
4460
4461 assistWorkPerByte := gcController.assistWorkPerByte.Load()
4462 scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes))
4463 gcController.bgScanCredit.Add(scanCredit)
4464 gp.gcAssistBytes = 0
4465 }
4466
4467 dropg()
4468
4469 if GOARCH == "wasm" {
4470 gfput(pp, gp)
4471 return
4472 }
4473
4474 if locked && mp.lockedInt != 0 {
4475 print("runtime: mp.lockedInt = ", mp.lockedInt, "\n")
4476 if mp.isextra {
4477 throw("runtime.Goexit called in a thread that was not created by the Go runtime")
4478 }
4479 throw("exited a goroutine internally locked to the OS thread")
4480 }
4481 gfput(pp, gp)
4482 if locked {
4483
4484
4485
4486
4487
4488
4489 if GOOS != "plan9" {
4490 gogo(&mp.g0.sched)
4491 } else {
4492
4493
4494 mp.lockedExt = 0
4495 }
4496 }
4497 }
4498
4499
4500
4501
4502 // save updates getg().sched to refer to pc, sp, and bp so that a following
4503 // gogo will restore pc and sp.
4504 //
4505 // save must not have write barriers because invoking a write barrier
4506 // can clobber getg().sched.
4507 func save(pc, sp, bp uintptr) {
4508 gp := getg()
4509
4510 if gp == gp.m.g0 || gp == gp.m.gsignal {
4511
4512
4513
4514
4515
4516 throw("save on system g not allowed")
4517 }
4518
4519 gp.sched.pc = pc
4520 gp.sched.sp = sp
4521 gp.sched.lr = 0
4522 gp.sched.bp = bp
4523
4524
4525
4526 if gp.sched.ctxt != nil {
4527 badctxt()
4528 }
4529 }
4530
4531
4532
4533
4534
4535
4536
4537
4538
4539
4540
4541
4542
4543 // The goroutine g is about to enter a system call.
4544 // Record that it's not using the cpu anymore.
4545 // This is called only from the go syscall library and cgocall,
4546 // not from the low-level system calls used by the runtime.
4547 //
4548 // Entersyscall cannot split the stack: the save must
4549 // make g->sched refer to the caller's stack segment, because
4550 // entersyscall is going to return immediately after.
4551 // Nothing entersyscall calls can split the stack either.
4552 //
4553 // reentersyscall is the entry point used by cgo callbacks, where the
4554 // explicitly saved SP and PC are restored.
4555 func reentersyscall(pc, sp, bp uintptr) {
4556 trace := traceAcquire()
4557 gp := getg()
4558
4559
4560
4561 gp.m.locks++
4562
4563
4564
4565
4566
4567 gp.stackguard0 = stackPreempt
4568 gp.throwsplit = true
4569
4570
4571 save(pc, sp, bp)
4572 gp.syscallsp = sp
4573 gp.syscallpc = pc
4574 gp.syscallbp = bp
4575 casgstatus(gp, _Grunning, _Gsyscall)
4576 if staticLockRanking {
4577
4578
4579 save(pc, sp, bp)
4580 }
4581 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4582 systemstack(func() {
4583 print("entersyscall inconsistent sp ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4584 throw("entersyscall")
4585 })
4586 }
4587 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4588 systemstack(func() {
4589 print("entersyscall inconsistent bp ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4590 throw("entersyscall")
4591 })
4592 }
4593
4594 if trace.ok() {
4595 systemstack(func() {
4596 trace.GoSysCall()
4597 traceRelease(trace)
4598 })
4599
4600
4601
4602 save(pc, sp, bp)
4603 }
4604
4605 if sched.sysmonwait.Load() {
4606 systemstack(entersyscall_sysmon)
4607 save(pc, sp, bp)
4608 }
4609
4610 if gp.m.p.ptr().runSafePointFn != 0 {
4611
4612 systemstack(runSafePointFn)
4613 save(pc, sp, bp)
4614 }
4615
4616 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4617 pp := gp.m.p.ptr()
4618 pp.m = 0
4619 gp.m.oldp.set(pp)
4620 gp.m.p = 0
4621 atomic.Store(&pp.status, _Psyscall)
4622 if sched.gcwaiting.Load() {
4623 systemstack(entersyscall_gcwait)
4624 save(pc, sp, bp)
4625 }
4626
4627 gp.m.locks--
4628 }
4629
4630
4631
4632
4633
4634
4635
4636
4637
4638
4639
4640
4641
4642
4643 // Standard syscall entry used by the go syscall library and normal cgo calls.
4644 func entersyscall() {
4645
4646
4647
4648
4649 fp := getcallerfp()
4650 reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp)
4651 }
4652
4653 func entersyscall_sysmon() {
4654 lock(&sched.lock)
4655 if sched.sysmonwait.Load() {
4656 sched.sysmonwait.Store(false)
4657 notewakeup(&sched.sysmonnote)
4658 }
4659 unlock(&sched.lock)
4660 }
4661
4662 func entersyscall_gcwait() {
4663 gp := getg()
4664 pp := gp.m.oldp.ptr()
4665
4666 lock(&sched.lock)
4667 trace := traceAcquire()
4668 if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
4669 if trace.ok() {
4670
4671
4672
4673
4674
4675
4676
4677
4678
4679 trace.ProcSteal(pp, true)
4680 traceRelease(trace)
4681 }
4682 sched.nGsyscallNoP.Add(1)
4683 pp.gcStopTime = nanotime()
4684 pp.syscalltick++
4685 if sched.stopwait--; sched.stopwait == 0 {
4686 notewakeup(&sched.stopnote)
4687 }
4688 } else if trace.ok() {
4689 traceRelease(trace)
4690 }
4691 unlock(&sched.lock)
4692 }
4693
4694
4695
4696
4697
4698
4699
4700
4701
4702
4703
4704
4705 // The same as entersyscall(), but with a hint that the syscall is blocking.
4706 func entersyscallblock() {
4707 gp := getg()
4708
4709 gp.m.locks++
4710 gp.throwsplit = true
4711 gp.stackguard0 = stackPreempt
4712 gp.m.syscalltick = gp.m.p.ptr().syscalltick
4713 gp.m.p.ptr().syscalltick++
4714
4715 sched.nGsyscallNoP.Add(1)
4716
4717
4718 pc := sys.GetCallerPC()
4719 sp := sys.GetCallerSP()
4720 bp := getcallerfp()
4721 save(pc, sp, bp)
4722 gp.syscallsp = gp.sched.sp
4723 gp.syscallpc = gp.sched.pc
4724 gp.syscallbp = gp.sched.bp
4725 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4726 sp1 := sp
4727 sp2 := gp.sched.sp
4728 sp3 := gp.syscallsp
4729 systemstack(func() {
4730 print("entersyscallblock inconsistent sp ", hex(sp1), " ", hex(sp2), " ", hex(sp3), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4731 throw("entersyscallblock")
4732 })
4733 }
4734 casgstatus(gp, _Grunning, _Gsyscall)
4735 if gp.syscallsp < gp.stack.lo || gp.stack.hi < gp.syscallsp {
4736 systemstack(func() {
4737 print("entersyscallblock inconsistent sp ", hex(sp), " ", hex(gp.sched.sp), " ", hex(gp.syscallsp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4738 throw("entersyscallblock")
4739 })
4740 }
4741 if gp.syscallbp != 0 && gp.syscallbp < gp.stack.lo || gp.stack.hi < gp.syscallbp {
4742 systemstack(func() {
4743 print("entersyscallblock inconsistent bp ", hex(bp), " ", hex(gp.sched.bp), " ", hex(gp.syscallbp), " [", hex(gp.stack.lo), ",", hex(gp.stack.hi), "]\n")
4744 throw("entersyscallblock")
4745 })
4746 }
4747
4748 systemstack(entersyscallblock_handoff)
4749
4750
4751 save(sys.GetCallerPC(), sys.GetCallerSP(), getcallerfp())
4752
4753 gp.m.locks--
4754 }
4755
4756 func entersyscallblock_handoff() {
4757 trace := traceAcquire()
4758 if trace.ok() {
4759 trace.GoSysCall()
4760 traceRelease(trace)
4761 }
4762 handoffp(releasep())
4763 }
4764
4765
4766
4767
4768
4769
4770
4771
4772
4773
4774
4775
4776
4777
4778
4779 // The goroutine g exited its system call.
4780 // Arrange for it to run on a cpu again.
4781 // This is called only from the go syscall library, not
4782 // from the low-level system calls used by the runtime.
4783 //
4784 // Write barriers are not allowed because our P may have been stolen.
4785 func exitsyscall() {
4786 gp := getg()
4787
4788 gp.m.locks++
4789 if sys.GetCallerSP() > gp.syscallsp {
4790 throw("exitsyscall: syscall frame is no longer valid")
4791 }
4792
4793 gp.waitsince = 0
4794 oldp := gp.m.oldp.ptr()
4795 gp.m.oldp = 0
4796 if exitsyscallfast(oldp) {
4797
4798
4799 if goroutineProfile.active {
4800
4801
4802
4803 systemstack(func() {
4804 tryRecordGoroutineProfileWB(gp)
4805 })
4806 }
4807 trace := traceAcquire()
4808 if trace.ok() {
4809 lostP := oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick
4810 systemstack(func() {
4811
4812
4813
4814
4815 trace.GoSysExit(lostP)
4816 if lostP {
4817
4818
4819
4820
4821 trace.GoStart()
4822 }
4823 })
4824 }
4825
4826 gp.m.p.ptr().syscalltick++
4827
4828 casgstatus(gp, _Gsyscall, _Grunning)
4829 if trace.ok() {
4830 traceRelease(trace)
4831 }
4832
4833
4834
4835 gp.syscallsp = 0
4836 gp.m.locks--
4837 if gp.preempt {
4838
4839 gp.stackguard0 = stackPreempt
4840 } else {
4841
4842 gp.stackguard0 = gp.stack.lo + stackGuard
4843 }
4844 gp.throwsplit = false
4845
4846 if sched.disable.user && !schedEnabled(gp) {
4847
4848 Gosched()
4849 }
4850
4851 return
4852 }
4853
4854 gp.m.locks--
4855
4856
4857 mcall(exitsyscall0)
4858
4859
4860
4861
4862
4863
4864
4865 gp.syscallsp = 0
4866 gp.m.p.ptr().syscalltick++
4867 gp.throwsplit = false
4868 }
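// Descriptive note: exitsyscall first attempts the fast path in
// exitsyscallfast, reacquiring the P recorded in m.oldp if it is still in
// _Psyscall (or grabbing some idle P), in which case the goroutine keeps
// running on this M with no full reschedule. If no P can be acquired, the
// slow path mcalls into exitsyscall0, which marks the goroutine runnable,
// queues it, and parks this M.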
4869
4870
4871 func exitsyscallfast(oldp *p) bool {
4872
4873 if sched.stopwait == freezeStopWait {
4874 return false
4875 }
4876
4877
4878 trace := traceAcquire()
4879 if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) {
4880
4881 wirep(oldp)
4882 exitsyscallfast_reacquired(trace)
4883 if trace.ok() {
4884 traceRelease(trace)
4885 }
4886 return true
4887 }
4888 if trace.ok() {
4889 traceRelease(trace)
4890 }
4891
4892
4893 if sched.pidle != 0 {
4894 var ok bool
4895 systemstack(func() {
4896 ok = exitsyscallfast_pidle()
4897 })
4898 if ok {
4899 return true
4900 }
4901 }
4902 return false
4903 }
4904
4905
4906
4907 // exitsyscallfast_reacquired is the exitsyscall path on which this G
4908 // has successfully reacquired the P it was running on before the
4909 // syscall.
4910 func exitsyscallfast_reacquired(trace traceLocker) {
4911 gp := getg()
4912 if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
4913 if trace.ok() {
4914
4915
4916
4917 systemstack(func() {
4918
4919
4920 trace.ProcSteal(gp.m.p.ptr(), true)
4921 trace.ProcStart()
4922 })
4923 }
4924 gp.m.p.ptr().syscalltick++
4925 }
4926 }
4927
4928 func exitsyscallfast_pidle() bool {
4929 lock(&sched.lock)
4930 pp, _ := pidleget(0)
4931 if pp != nil && sched.sysmonwait.Load() {
4932 sched.sysmonwait.Store(false)
4933 notewakeup(&sched.sysmonnote)
4934 }
4935 unlock(&sched.lock)
4936 if pp != nil {
4937 sched.nGsyscallNoP.Add(-1)
4938 acquirep(pp)
4939 return true
4940 }
4941 return false
4942 }
4943
4944
4945 // exitsyscall slow path on g0.
4946 // Failed to acquire P, enqueue gp as runnable.
4947 //
4948 // Called via mcall, so gp is the calling g from this M.
4949
4950 func exitsyscall0(gp *g) {
4951 var trace traceLocker
4952 traceExitingSyscall()
4953 trace = traceAcquire()
4954 casgstatus(gp, _Gsyscall, _Grunnable)
4955 traceExitedSyscall()
4956 if trace.ok() {
4957
4958
4959
4960
4961 trace.GoSysExit(true)
4962 traceRelease(trace)
4963 }
4964 sched.nGsyscallNoP.Add(-1)
4965 dropg()
4966 lock(&sched.lock)
4967 var pp *p
4968 if schedEnabled(gp) {
4969 pp, _ = pidleget(0)
4970 }
4971 var locked bool
4972 if pp == nil {
4973 globrunqput(gp)
4974
4975
4976
4977
4978
4979
4980 locked = gp.lockedm != 0
4981 } else if sched.sysmonwait.Load() {
4982 sched.sysmonwait.Store(false)
4983 notewakeup(&sched.sysmonnote)
4984 }
4985 unlock(&sched.lock)
4986 if pp != nil {
4987 acquirep(pp)
4988 execute(gp, false)
4989 }
4990 if locked {
4991
4992
4993
4994
4995 stoplockedm()
4996 execute(gp, false)
4997 }
4998 stopm()
4999 schedule()
5000 }
5001
5002
5003
5004
5005
5006
5007
5008
5009
5010
5011
5012
5013
5014 func syscall_runtime_BeforeFork() {
5015 gp := getg().m.curg
5016
5017
5018
5019
5020 gp.m.locks++
5021 sigsave(&gp.m.sigmask)
5022 sigblock(false)
5023
5024
5025
5026
5027
5028 gp.stackguard0 = stackFork
5029 }
5030
5031
5032
5033
5034
5035
5036
5037
5038
5039
5040
5041
5042
5043 func syscall_runtime_AfterFork() {
5044 gp := getg().m.curg
5045
5046
5047 gp.stackguard0 = gp.stack.lo + stackGuard
5048
5049 msigrestore(gp.m.sigmask)
5050
5051 gp.m.locks--
5052 }
5053
5054
5055
5056 var inForkedChild bool
5057
5058
5059
5060
5061
5062
5063
5064
5065
5066
5067
5068
5069
5070
5071
5072
5073
5074
5075
5076
5077 func syscall_runtime_AfterForkInChild() {
5078
5079
5080
5081
5082 inForkedChild = true
5083
5084 clearSignalHandlers()
5085
5086
5087
5088 msigrestore(getg().m.sigmask)
5089
5090 inForkedChild = false
5091 }
5092
5093
5094
5095
5096 var pendingPreemptSignals atomic.Int32
5097
5098
5099
5100
5101 func syscall_runtime_BeforeExec() {
5102
5103 execLock.lock()
5104
5105
5106
5107 if GOOS == "darwin" || GOOS == "ios" {
5108 for pendingPreemptSignals.Load() > 0 {
5109 osyield()
5110 }
5111 }
5112 }
5113
5114
5115
5116
5117 func syscall_runtime_AfterExec() {
5118 execLock.unlock()
5119 }
5120
5121
5122 func malg(stacksize int32) *g {
5123 newg := new(g)
5124 if stacksize >= 0 {
5125 stacksize = round2(stackSystem + stacksize)
5126 systemstack(func() {
5127 newg.stack = stackalloc(uint32(stacksize))
5128 if valgrindenabled {
5129 newg.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(newg.stack.lo), unsafe.Pointer(newg.stack.hi))
5130 }
5131 })
5132 newg.stackguard0 = newg.stack.lo + stackGuard
5133 newg.stackguard1 = ^uintptr(0)
5134
5135
5136 *(*uintptr)(unsafe.Pointer(newg.stack.lo)) = 0
5137 }
5138 return newg
5139 }
5140
5141 // Create a new g running fn.
5142 // Put it on the queue of g's waiting to run.
5143 // The compiler turns a go statement into a call to this.
5144 func newproc(fn *funcval) {
5145 gp := getg()
5146 pc := sys.GetCallerPC()
5147 systemstack(func() {
5148 newg := newproc1(fn, gp, pc, false, waitReasonZero)
5149
5150 pp := getg().m.p.ptr()
5151 runqput(pp, newg, true)
5152
5153 if mainStarted {
5154 wakep()
5155 }
5156 })
5157 }
5158
5159 // Create a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn.
5160 // callerpc is the address of the go statement that created this. The caller is responsible
5161 // for adding the new g to the scheduler. If parked is true, waitreason must be non-zero.
5162 func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g {
5163 if fn == nil {
5164 fatal("go of nil func value")
5165 }
5166
5167 mp := acquirem()
5168 pp := mp.p.ptr()
5169 newg := gfget(pp)
5170 if newg == nil {
5171 newg = malg(stackMin)
5172 casgstatus(newg, _Gidle, _Gdead)
5173 allgadd(newg)
5174 }
5175 if newg.stack.hi == 0 {
5176 throw("newproc1: newg missing stack")
5177 }
5178
5179 if readgstatus(newg) != _Gdead {
5180 throw("newproc1: new g is not Gdead")
5181 }
5182
5183 totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize)
5184 totalSize = alignUp(totalSize, sys.StackAlign)
5185 sp := newg.stack.hi - totalSize
5186 if usesLR {
5187
5188 *(*uintptr)(unsafe.Pointer(sp)) = 0
5189 prepGoExitFrame(sp)
5190 }
5191 if GOARCH == "arm64" {
5192
5193 *(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0
5194 }
5195
5196 memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched))
5197 newg.sched.sp = sp
5198 newg.stktopsp = sp
5199 newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum
5200 newg.sched.g = guintptr(unsafe.Pointer(newg))
5201 gostartcallfn(&newg.sched, fn)
5202 newg.parentGoid = callergp.goid
5203 newg.gopc = callerpc
5204 newg.ancestors = saveAncestors(callergp)
5205 newg.startpc = fn.fn
5206 newg.runningCleanups.Store(false)
5207 if isSystemGoroutine(newg, false) {
5208 sched.ngsys.Add(1)
5209 } else {
5210
5211 newg.bubble = callergp.bubble
5212 if mp.curg != nil {
5213 newg.labels = mp.curg.labels
5214 }
5215 if goroutineProfile.active {
5216
5217
5218
5219
5220
5221 newg.goroutineProfiled.Store(goroutineProfileSatisfied)
5222 }
5223 }
5224
5225 newg.trackingSeq = uint8(cheaprand())
5226 if newg.trackingSeq%gTrackingPeriod == 0 {
5227 newg.tracking = true
5228 }
5229 gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
5230
5231
5232 trace := traceAcquire()
5233 var status uint32 = _Grunnable
5234 if parked {
5235 status = _Gwaiting
5236 newg.waitreason = waitreason
5237 }
5238 if pp.goidcache == pp.goidcacheend {
5239 // Sched.goidgen is the last allocated id,
5240 // this batch must be [sched.goidgen+1, sched.goidgen+_GoidCacheBatch].
5241 // At startup sched.goidgen=0, so the main goroutine receives goid=1.
5242 pp.goidcache = sched.goidgen.Add(_GoidCacheBatch)
5243 pp.goidcache -= _GoidCacheBatch - 1
5244 pp.goidcacheend = pp.goidcache + _GoidCacheBatch
5245 }
5246 newg.goid = pp.goidcache
5247 casgstatus(newg, _Gdead, status)
5248 pp.goidcache++
5249 newg.trace.reset()
5250 if trace.ok() {
5251 trace.GoCreate(newg, newg.startpc, parked)
5252 traceRelease(trace)
5253 }
5254
5255
5256 if raceenabled {
5257 newg.racectx = racegostart(callerpc)
5258 newg.raceignore = 0
5259 if newg.labels != nil {
5260
5261
5262 racereleasemergeg(newg, unsafe.Pointer(&labelSync))
5263 }
5264 }
5265 pp.goroutinesCreated++
5266 releasem(mp)
5267
5268 return newg
5269 }
5270
5271 // saveAncestors copies previous ancestors of the given caller g and
5272 // includes info for the current caller into a new set of tracebacks for
5273 // a g being created.
5274 func saveAncestors(callergp *g) *[]ancestorInfo {
5275
5276 if debug.tracebackancestors <= 0 || callergp.goid == 0 {
5277 return nil
5278 }
5279 var callerAncestors []ancestorInfo
5280 if callergp.ancestors != nil {
5281 callerAncestors = *callergp.ancestors
5282 }
5283 n := int32(len(callerAncestors)) + 1
5284 if n > debug.tracebackancestors {
5285 n = debug.tracebackancestors
5286 }
5287 ancestors := make([]ancestorInfo, n)
5288 copy(ancestors[1:], callerAncestors)
5289
5290 var pcs [tracebackInnerFrames]uintptr
5291 npcs := gcallers(callergp, 0, pcs[:])
5292 ipcs := make([]uintptr, npcs)
5293 copy(ipcs, pcs[:])
5294 ancestors[0] = ancestorInfo{
5295 pcs: ipcs,
5296 goid: callergp.goid,
5297 gopc: callergp.gopc,
5298 }
5299
5300 ancestorsp := new([]ancestorInfo)
5301 *ancestorsp = ancestors
5302 return ancestorsp
5303 }
5304
5305 // Put on gfree list.
5306 // If local list is too long, transfer a batch to the global list.
5307 func gfput(pp *p, gp *g) {
5308 if readgstatus(gp) != _Gdead {
5309 throw("gfput: bad status (not Gdead)")
5310 }
5311
5312 stksize := gp.stack.hi - gp.stack.lo
5313
5314 if stksize != uintptr(startingStackSize) {
5315
5316 stackfree(gp.stack)
5317 gp.stack.lo = 0
5318 gp.stack.hi = 0
5319 gp.stackguard0 = 0
5320 if valgrindenabled {
5321 valgrindDeregisterStack(gp.valgrindStackID)
5322 gp.valgrindStackID = 0
5323 }
5324 }
5325
5326 pp.gFree.push(gp)
5327 if pp.gFree.size >= 64 {
5328 var (
5329 stackQ gQueue
5330 noStackQ gQueue
5331 )
5332 for pp.gFree.size >= 32 {
5333 gp := pp.gFree.pop()
5334 if gp.stack.lo == 0 {
5335 noStackQ.push(gp)
5336 } else {
5337 stackQ.push(gp)
5338 }
5339 }
5340 lock(&sched.gFree.lock)
5341 sched.gFree.noStack.pushAll(noStackQ)
5342 sched.gFree.stack.pushAll(stackQ)
5343 unlock(&sched.gFree.lock)
5344 }
5345 }
5346
5347 // Get from gfree list.
5348 // If local list is empty, grab a batch from global list.
5349 func gfget(pp *p) *g {
5350 retry:
5351 if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) {
5352 lock(&sched.gFree.lock)
5353
5354 for pp.gFree.size < 32 {
5355
5356 gp := sched.gFree.stack.pop()
5357 if gp == nil {
5358 gp = sched.gFree.noStack.pop()
5359 if gp == nil {
5360 break
5361 }
5362 }
5363 pp.gFree.push(gp)
5364 }
5365 unlock(&sched.gFree.lock)
5366 goto retry
5367 }
5368 gp := pp.gFree.pop()
5369 if gp == nil {
5370 return nil
5371 }
5372 if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) {
5373
5374
5375
5376 systemstack(func() {
5377 stackfree(gp.stack)
5378 gp.stack.lo = 0
5379 gp.stack.hi = 0
5380 gp.stackguard0 = 0
5381 if valgrindenabled {
5382 valgrindDeregisterStack(gp.valgrindStackID)
5383 gp.valgrindStackID = 0
5384 }
5385 })
5386 }
5387 if gp.stack.lo == 0 {
5388
5389 systemstack(func() {
5390 gp.stack = stackalloc(startingStackSize)
5391 if valgrindenabled {
5392 gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi))
5393 }
5394 })
5395 gp.stackguard0 = gp.stack.lo + stackGuard
5396 } else {
5397 if raceenabled {
5398 racemalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5399 }
5400 if msanenabled {
5401 msanmalloc(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5402 }
5403 if asanenabled {
5404 asanunpoison(unsafe.Pointer(gp.stack.lo), gp.stack.hi-gp.stack.lo)
5405 }
5406 }
5407 return gp
5408 }
5409
5410
5411 func gfpurge(pp *p) {
5412 var (
5413 stackQ gQueue
5414 noStackQ gQueue
5415 )
5416 for !pp.gFree.empty() {
5417 gp := pp.gFree.pop()
5418 if gp.stack.lo == 0 {
5419 noStackQ.push(gp)
5420 } else {
5421 stackQ.push(gp)
5422 }
5423 }
5424 lock(&sched.gFree.lock)
5425 sched.gFree.noStack.pushAll(noStackQ)
5426 sched.gFree.stack.pushAll(stackQ)
5427 unlock(&sched.gFree.lock)
5428 }
5429
5430
5431 func Breakpoint() {
5432 breakpoint()
5433 }
5434
5435
5436
5437
5438
5439
5440 func dolockOSThread() {
5441 if GOARCH == "wasm" {
5442 return
5443 }
5444 gp := getg()
5445 gp.m.lockedg.set(gp)
5446 gp.lockedm.set(gp.m)
5447 }
5448
5449
5450
5451 // LockOSThread wires the calling goroutine to its current operating system thread.
5452 // The calling goroutine will always execute in that thread,
5453 // and no other goroutine will execute in it,
5454 // until the calling goroutine has made as many calls to
5455 // UnlockOSThread as to LockOSThread.
5456 // If the calling goroutine exits without unlocking the thread,
5457 // the thread will be terminated.
5458 //
5459 // All init functions are run on the startup thread. Calling LockOSThread
5460 // from an init function will cause the main function to be invoked on
5461 // that thread.
5462 //
5463 // A goroutine should call LockOSThread before calling OS services or
5464 // non-Go library functions that depend on per-thread state.
5465 func LockOSThread() {
5466 if atomic.Load(&newmHandoff.haveTemplateThread) == 0 && GOOS != "plan9" {
5467
5468
5469
5470 startTemplateThread()
5471 }
5472 gp := getg()
5473 gp.m.lockedExt++
5474 if gp.m.lockedExt == 0 {
5475 gp.m.lockedExt--
5476 panic("LockOSThread nesting overflow")
5477 }
5478 dolockOSThread()
5479 }
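// A typical use from application code (illustrative sketch, not part of the
// runtime):
//
//	go func() {
//		runtime.LockOSThread()
//		defer runtime.UnlockOSThread()
//		// ... work that relies on thread-local state, e.g. a C library
//		// or OS API with thread affinity ...
//	}()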
5480
5481
5482 func lockOSThread() {
5483 getg().m.lockedInt++
5484 dolockOSThread()
5485 }
5486
5487
5488
5489
5490
5491
5492 func dounlockOSThread() {
5493 if GOARCH == "wasm" {
5494 return
5495 }
5496 gp := getg()
5497 if gp.m.lockedInt != 0 || gp.m.lockedExt != 0 {
5498 return
5499 }
5500 gp.m.lockedg = 0
5501 gp.lockedm = 0
5502 }
5503
5504
5505
5506 // UnlockOSThread undoes an earlier call to LockOSThread.
5507 // If this drops the number of active LockOSThread calls on the
5508 // calling goroutine to zero, it unwires the calling goroutine from
5509 // its fixed operating system thread.
5510 // If there are no active LockOSThread calls, this is a no-op.
5511 //
5512 // Before calling UnlockOSThread, the caller must ensure that the OS
5513 // thread is suitable for running other goroutines. If the caller made
5514 // any permanent changes to the state of the thread that would affect
5515 // other goroutines, it should not call this function and thus leave
5516 // the goroutine locked to the OS thread until the goroutine (and
5517 // hence the thread) exits.
5518 func UnlockOSThread() {
5519 gp := getg()
5520 if gp.m.lockedExt == 0 {
5521 return
5522 }
5523 gp.m.lockedExt--
5524 dounlockOSThread()
5525 }
5526
5527
5528 func unlockOSThread() {
5529 gp := getg()
5530 if gp.m.lockedInt == 0 {
5531 systemstack(badunlockosthread)
5532 }
5533 gp.m.lockedInt--
5534 dounlockOSThread()
5535 }
5536
5537 func badunlockosthread() {
5538 throw("runtime: internal error: misuse of lockOSThread/unlockOSThread")
5539 }
5540
5541 func gcount(includeSys bool) int32 {
5542 n := int32(atomic.Loaduintptr(&allglen)) - sched.gFree.stack.size - sched.gFree.noStack.size
5543 if !includeSys {
5544 n -= sched.ngsys.Load()
5545 }
5546 for _, pp := range allp {
5547 n -= pp.gFree.size
5548 }
5549
5550
5551
5552 if n < 1 {
5553 n = 1
5554 }
5555 return n
5556 }
5557
5558 func mcount() int32 {
5559 return int32(sched.mnext - sched.nmfreed)
5560 }
5561
5562 var prof struct {
5563 signalLock atomic.Uint32
5564
5565
5566
5567 hz atomic.Int32
5568 }
5569
5570 func _System() { _System() }
5571 func _ExternalCode() { _ExternalCode() }
5572 func _LostExternalCode() { _LostExternalCode() }
5573 func _GC() { _GC() }
5574 func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() }
5575 func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() }
5576 func _VDSO() { _VDSO() }
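// These functions are never called; their addresses serve as sentinel PCs so
// that CPU profile samples which cannot be attributed to Go code (system
// time, external/cgo code, GC, VDSO calls, samples lost inside atomics on
// mips/arm, or lost while a runtime lock was contended) still appear under
// stable, descriptive names in the profile.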
5577
5578
5579
5580 // Called if we receive a SIGPROF signal.
5581 // Called by the signal handler, may run during STW.
5582 func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
5583 if prof.hz.Load() == 0 {
5584 return
5585 }
5586
5587
5588
5589
5590 if mp != nil && mp.profilehz == 0 {
5591 return
5592 }
5593
5594
5595
5596
5597
5598
5599
5600 if GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "arm" {
5601 if f := findfunc(pc); f.valid() {
5602 if stringslite.HasPrefix(funcname(f), "internal/runtime/atomic") {
5603 cpuprof.lostAtomic++
5604 return
5605 }
5606 }
5607 if GOARCH == "arm" && goarm < 7 && GOOS == "linux" && pc&0xffff0000 == 0xffff0000 {
5608
5609
5610
5611 cpuprof.lostAtomic++
5612 return
5613 }
5614 }
5615
5616
5617
5618
5619
5620
5621
5622 getg().m.mallocing++
5623
5624 var u unwinder
5625 var stk [maxCPUProfStack]uintptr
5626 n := 0
5627 if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
5628 cgoOff := 0
5629
5630
5631
5632
5633
5634 if mp.cgoCallersUse.Load() == 0 && mp.cgoCallers != nil && mp.cgoCallers[0] != 0 {
5635 for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
5636 cgoOff++
5637 }
5638 n += copy(stk[:], mp.cgoCallers[:cgoOff])
5639 mp.cgoCallers[0] = 0
5640 }
5641
5642
5643 u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
5644 } else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
5645
5646
5647 u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
5648 } else if mp != nil && mp.vdsoSP != 0 {
5649
5650
5651 u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
5652 } else {
5653 u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
5654 }
5655 n += tracebackPCs(&u, 0, stk[n:])
5656
5657 if n <= 0 {
5658
5659
5660 n = 2
5661 if inVDSOPage(pc) {
5662 pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
5663 } else if pc > firstmoduledata.etext {
5664
5665 pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
5666 }
5667 stk[0] = pc
5668 if mp.preemptoff != "" {
5669 stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
5670 } else {
5671 stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
5672 }
5673 }
5674
5675 if prof.hz.Load() != 0 {
5676
5677
5678
5679 var tagPtr *unsafe.Pointer
5680 if gp != nil && gp.m != nil && gp.m.curg != nil {
5681 tagPtr = &gp.m.curg.labels
5682 }
5683 cpuprof.add(tagPtr, stk[:n])
5684
5685 gprof := gp
5686 var mp *m
5687 var pp *p
5688 if gp != nil && gp.m != nil {
5689 if gp.m.curg != nil {
5690 gprof = gp.m.curg
5691 }
5692 mp = gp.m
5693 pp = gp.m.p.ptr()
5694 }
5695 traceCPUSample(gprof, mp, pp, stk[:n])
5696 }
5697 getg().m.mallocing--
5698 }
5699
5700 // setcpuprofilerate sets the CPU profiling rate to hz times per second.
5701 // If hz <= 0, setcpuprofilerate turns off CPU profiling.
5702 func setcpuprofilerate(hz int32) {
5703
5704 if hz < 0 {
5705 hz = 0
5706 }
5707
5708
5709
5710 gp := getg()
5711 gp.m.locks++
5712
5713
5714
5715
5716 setThreadCPUProfiler(0)
5717
5718 for !prof.signalLock.CompareAndSwap(0, 1) {
5719 osyield()
5720 }
5721 if prof.hz.Load() != hz {
5722 setProcessCPUProfiler(hz)
5723 prof.hz.Store(hz)
5724 }
5725 prof.signalLock.Store(0)
5726
5727 lock(&sched.lock)
5728 sched.profilehz = hz
5729 unlock(&sched.lock)
5730
5731 if hz != 0 {
5732 setThreadCPUProfiler(hz)
5733 }
5734
5735 gp.m.locks--
5736 }
5737
5738 // init initializes pp, which may be a freshly allocated p or a
5739 // previously destroyed p, and transitions it to status _Pgcstop.
5740 func (pp *p) init(id int32) {
5741 pp.id = id
5742 pp.status = _Pgcstop
5743 pp.sudogcache = pp.sudogbuf[:0]
5744 pp.deferpool = pp.deferpoolbuf[:0]
5745 pp.wbBuf.reset()
5746 if pp.mcache == nil {
5747 if id == 0 {
5748 if mcache0 == nil {
5749 throw("missing mcache?")
5750 }
5751
5752
5753 pp.mcache = mcache0
5754 } else {
5755 pp.mcache = allocmcache()
5756 }
5757 }
5758 if raceenabled && pp.raceprocctx == 0 {
5759 if id == 0 {
5760 pp.raceprocctx = raceprocctx0
5761 raceprocctx0 = 0
5762 } else {
5763 pp.raceprocctx = raceproccreate()
5764 }
5765 }
5766 lockInit(&pp.timers.mu, lockRankTimers)
5767
5768
5769
5770 timerpMask.set(id)
5771
5772
5773 idlepMask.clear(id)
5774 }
5775
5776 // destroy releases all of the resources associated with pp and
5777 // transitions it to status _Pdead.
5778 //
5779 // sched.lock must be held and the world must be stopped.
5780 func (pp *p) destroy() {
5781 assertLockHeld(&sched.lock)
5782 assertWorldStopped()
5783
5784
5785 for pp.runqhead != pp.runqtail {
5786
5787 pp.runqtail--
5788 gp := pp.runq[pp.runqtail%uint32(len(pp.runq))].ptr()
5789
5790 globrunqputhead(gp)
5791 }
5792 if pp.runnext != 0 {
5793 globrunqputhead(pp.runnext.ptr())
5794 pp.runnext = 0
5795 }
5796
5797
5798 getg().m.p.ptr().timers.take(&pp.timers)
5799
5800
5801 if gcphase != _GCoff {
5802 wbBufFlush1(pp)
5803 pp.gcw.dispose()
5804 }
5805 clear(pp.sudogbuf[:])
5806 pp.sudogcache = pp.sudogbuf[:0]
5807 pp.pinnerCache = nil
5808 clear(pp.deferpoolbuf[:])
5809 pp.deferpool = pp.deferpoolbuf[:0]
5810 systemstack(func() {
5811 for i := 0; i < pp.mspancache.len; i++ {
5812
5813 mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
5814 }
5815 pp.mspancache.len = 0
5816 lock(&mheap_.lock)
5817 pp.pcache.flush(&mheap_.pages)
5818 unlock(&mheap_.lock)
5819 })
5820 freemcache(pp.mcache)
5821 pp.mcache = nil
5822 gfpurge(pp)
5823 if raceenabled {
5824 if pp.timers.raceCtx != 0 {
5825
5826
5827
5828
5829
5830 mp := getg().m
5831 phold := mp.p.ptr()
5832 mp.p.set(pp)
5833
5834 racectxend(pp.timers.raceCtx)
5835 pp.timers.raceCtx = 0
5836
5837 mp.p.set(phold)
5838 }
5839 raceprocdestroy(pp.raceprocctx)
5840 pp.raceprocctx = 0
5841 }
5842 pp.gcAssistTime = 0
5843 gcCleanups.queued += pp.cleanupsQueued
5844 pp.cleanupsQueued = 0
5845 sched.goroutinesCreated.Add(int64(pp.goroutinesCreated))
5846 pp.goroutinesCreated = 0
5847 pp.xRegs.free()
5848 pp.status = _Pdead
5849 }
5850
5851 // Change number of processors.
5852 //
5853 // sched.lock must be held, and the world must be stopped.
5854 //
5855 // gcworkbufs must not be being modified by either the GC or the write barrier
5856 // code, so the GC must not be running if the number of Ps actually changes.
5857 //
5858 // Returns list of Ps with local work, they need to be scheduled by the caller.
5859 func procresize(nprocs int32) *p {
5860 assertLockHeld(&sched.lock)
5861 assertWorldStopped()
5862
5863 old := gomaxprocs
5864 if old < 0 || nprocs <= 0 {
5865 throw("procresize: invalid arg")
5866 }
5867 trace := traceAcquire()
5868 if trace.ok() {
5869 trace.Gomaxprocs(nprocs)
5870 traceRelease(trace)
5871 }
5872
5873
5874 now := nanotime()
5875 if sched.procresizetime != 0 {
5876 sched.totaltime += int64(old) * (now - sched.procresizetime)
5877 }
5878 sched.procresizetime = now
5879
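// Note (descriptive): each pMask word holds 32 P bits, so the masks below
// need ceil(nprocs/32) = (nprocs+31)/32 words; for example, nprocs = 33
// requires two words.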
5880 maskWords := (nprocs + 31) / 32
5881
5882
5883 if nprocs > int32(len(allp)) {
5884
5885
5886 lock(&allpLock)
5887 if nprocs <= int32(cap(allp)) {
5888 allp = allp[:nprocs]
5889 } else {
5890 nallp := make([]*p, nprocs)
5891
5892
5893 copy(nallp, allp[:cap(allp)])
5894 allp = nallp
5895 }
5896
5897 if maskWords <= int32(cap(idlepMask)) {
5898 idlepMask = idlepMask[:maskWords]
5899 timerpMask = timerpMask[:maskWords]
5900 } else {
5901 nidlepMask := make([]uint32, maskWords)
5902
5903 copy(nidlepMask, idlepMask)
5904 idlepMask = nidlepMask
5905
5906 ntimerpMask := make([]uint32, maskWords)
5907 copy(ntimerpMask, timerpMask)
5908 timerpMask = ntimerpMask
5909 }
5910 unlock(&allpLock)
5911 }
5912
5913
5914 for i := old; i < nprocs; i++ {
5915 pp := allp[i]
5916 if pp == nil {
5917 pp = new(p)
5918 }
5919 pp.init(i)
5920 atomicstorep(unsafe.Pointer(&allp[i]), unsafe.Pointer(pp))
5921 }
5922
5923 gp := getg()
5924 if gp.m.p != 0 && gp.m.p.ptr().id < nprocs {
5925
5926 gp.m.p.ptr().status = _Prunning
5927 gp.m.p.ptr().mcache.prepareForSweep()
5928 } else {
5929
5930
5931
5932
5933
5934 if gp.m.p != 0 {
5935 trace := traceAcquire()
5936 if trace.ok() {
5937
5938
5939
5940 trace.GoSched()
5941 trace.ProcStop(gp.m.p.ptr())
5942 traceRelease(trace)
5943 }
5944 gp.m.p.ptr().m = 0
5945 }
5946 gp.m.p = 0
5947 pp := allp[0]
5948 pp.m = 0
5949 pp.status = _Pidle
5950 acquirep(pp)
5951 trace := traceAcquire()
5952 if trace.ok() {
5953 trace.GoStart()
5954 traceRelease(trace)
5955 }
5956 }
5957
5958
5959 mcache0 = nil
5960
5961
5962 for i := nprocs; i < old; i++ {
5963 pp := allp[i]
5964 pp.destroy()
5965
5966 }
5967
5968
5969 if int32(len(allp)) != nprocs {
5970 lock(&allpLock)
5971 allp = allp[:nprocs]
5972 idlepMask = idlepMask[:maskWords]
5973 timerpMask = timerpMask[:maskWords]
5974 unlock(&allpLock)
5975 }
5976
5977 var runnablePs *p
5978 for i := nprocs - 1; i >= 0; i-- {
5979 pp := allp[i]
5980 if gp.m.p.ptr() == pp {
5981 continue
5982 }
5983 pp.status = _Pidle
5984 if runqempty(pp) {
5985 pidleput(pp, now)
5986 } else {
5987 pp.m.set(mget())
5988 pp.link.set(runnablePs)
5989 runnablePs = pp
5990 }
5991 }
5992 stealOrder.reset(uint32(nprocs))
5993 var int32p *int32 = &gomaxprocs
5994 atomic.Store((*uint32)(unsafe.Pointer(int32p)), uint32(nprocs))
5995 if old != nprocs {
5996
5997 gcCPULimiter.resetCapacity(now, nprocs)
5998 }
5999 return runnablePs
6000 }
6001
6002
6003
6004 // Associate p and the current m.
6005 //
6006 // This function is allowed to have write barriers even if the caller
6007 // isn't because it immediately acquires pp.
6008 func acquirep(pp *p) {
6009
6010 wirep(pp)
6011
6012
6013
6014
6015
6016 pp.mcache.prepareForSweep()
6017
6018 trace := traceAcquire()
6019 if trace.ok() {
6020 trace.ProcStart()
6021 traceRelease(trace)
6022 }
6023 }
6024
6025
6026
6027
6028 // wirep is the first step of acquirep, which actually associates the
6029 // current M to pp. This is broken out so we can disallow write
6030 // barriers for this part, since we don't yet have a P.
6031 func wirep(pp *p) {
6032 gp := getg()
6033
6034 if gp.m.p != 0 {
6035
6036
6037 systemstack(func() {
6038 throw("wirep: already in go")
6039 })
6040 }
6041 if pp.m != 0 || pp.status != _Pidle {
6042
6043
6044 systemstack(func() {
6045 id := int64(0)
6046 if pp.m != 0 {
6047 id = pp.m.ptr().id
6048 }
6049 print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n")
6050 throw("wirep: invalid p state")
6051 })
6052 }
6053 gp.m.p.set(pp)
6054 pp.m.set(gp.m)
6055 pp.status = _Prunning
6056 }
6057
6058
6059 func releasep() *p {
6060 trace := traceAcquire()
6061 if trace.ok() {
6062 trace.ProcStop(getg().m.p.ptr())
6063 traceRelease(trace)
6064 }
6065 return releasepNoTrace()
6066 }
6067
6068
6069 func releasepNoTrace() *p {
6070 gp := getg()
6071
6072 if gp.m.p == 0 {
6073 throw("releasep: invalid arg")
6074 }
6075 pp := gp.m.p.ptr()
6076 if pp.m.ptr() != gp.m || pp.status != _Prunning {
6077 print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
6078 throw("releasep: invalid p state")
6079 }
6080 gp.m.p = 0
6081 pp.m = 0
6082 pp.status = _Pidle
6083 return pp
6084 }
6085
6086 func incidlelocked(v int32) {
6087 lock(&sched.lock)
6088 sched.nmidlelocked += v
6089 if v > 0 {
6090 checkdead()
6091 }
6092 unlock(&sched.lock)
6093 }
6094
6095 // Check for deadlock situation.
6096 // The check is based on number of running M's, if 0 -> deadlock.
6097 // sched.lock must be held.
6098 func checkdead() {
6099 assertLockHeld(&sched.lock)
6100
6101
6102
6103
6104
6105
6106 if (islibrary || isarchive) && GOARCH != "wasm" {
6107 return
6108 }
6109
6110
6111
6112
6113
6114 if panicking.Load() > 0 {
6115 return
6116 }
6117
6118
6119
6120
6121
6122 var run0 int32
6123 if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 {
6124 run0 = 1
6125 }
6126
6127 run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys
6128 if run > run0 {
6129 return
6130 }
6131 if run < 0 {
6132 print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n")
6133 unlock(&sched.lock)
6134 throw("checkdead: inconsistent counts")
6135 }
6136
6137 grunning := 0
6138 forEachG(func(gp *g) {
6139 if isSystemGoroutine(gp, false) {
6140 return
6141 }
6142 s := readgstatus(gp)
6143 switch s &^ _Gscan {
6144 case _Gwaiting,
6145 _Gpreempted:
6146 grunning++
6147 case _Grunnable,
6148 _Grunning,
6149 _Gsyscall:
6150 print("runtime: checkdead: find g ", gp.goid, " in status ", s, "\n")
6151 unlock(&sched.lock)
6152 throw("checkdead: runnable g")
6153 }
6154 })
6155 if grunning == 0 {
6156 unlock(&sched.lock)
6157 fatal("no goroutines (main called runtime.Goexit) - deadlock!")
6158 }
6159
6160
6161 if faketime != 0 {
6162 if when := timeSleepUntil(); when < maxWhen {
6163 faketime = when
6164
6165
6166 pp, _ := pidleget(faketime)
6167 if pp == nil {
6168
6169
6170 unlock(&sched.lock)
6171 throw("checkdead: no p for timer")
6172 }
6173 mp := mget()
6174 if mp == nil {
6175
6176
6177 unlock(&sched.lock)
6178 throw("checkdead: no m for timer")
6179 }
6180
6181
6182
6183 sched.nmspinning.Add(1)
6184 mp.spinning = true
6185 mp.nextp.set(pp)
6186 notewakeup(&mp.park)
6187 return
6188 }
6189 }
6190
6191
6192 for _, pp := range allp {
6193 if len(pp.timers.heap) > 0 {
6194 return
6195 }
6196 }
6197
6198 unlock(&sched.lock)
6199 fatal("all goroutines are asleep - deadlock!")
6200 }
6201
6202 // forcegcperiod is the maximum time in nanoseconds between garbage
6203 // collections. If we go this long without a garbage collection, one
6204 // is forced to run.
6205 //
6206 // This is a variable for testing purposes. It normally doesn't change.
6207 var forcegcperiod int64 = 2 * 60 * 1e9
6208
6209 // haveSysmon indicates whether there is sysmon thread support.
6210 //
6211 // No threads on wasm yet, so no sysmon.
6212 const haveSysmon = GOARCH != "wasm"
6213
6214
6215
6216
6217 func sysmon() {
6218 lock(&sched.lock)
6219 sched.nmsys++
6220 checkdead()
6221 unlock(&sched.lock)
6222
6223 lastgomaxprocs := int64(0)
6224 lasttrace := int64(0)
6225 idle := 0
6226 delay := uint32(0)
6227
6228 for {
6229 if idle == 0 {
6230 delay = 20
6231 } else if idle > 50 {
6232 delay *= 2
6233 }
6234 if delay > 10*1000 {
6235 delay = 10 * 1000
6236 }
6237 usleep(delay)
6238
6239
6240
6241
6242
6243
6244
6245
6246
6247
6248
6249
6250
6251
6252
6253
6254 now := nanotime()
6255 if debug.schedtrace <= 0 && (sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs) {
6256 lock(&sched.lock)
6257 if sched.gcwaiting.Load() || sched.npidle.Load() == gomaxprocs {
6258 syscallWake := false
6259 next := timeSleepUntil()
6260 if next > now {
6261 sched.sysmonwait.Store(true)
6262 unlock(&sched.lock)
6263
6264
6265 sleep := forcegcperiod / 2
6266 if next-now < sleep {
6267 sleep = next - now
6268 }
6269 shouldRelax := sleep >= osRelaxMinNS
6270 if shouldRelax {
6271 osRelax(true)
6272 }
6273 syscallWake = notetsleep(&sched.sysmonnote, sleep)
6274 if shouldRelax {
6275 osRelax(false)
6276 }
6277 lock(&sched.lock)
6278 sched.sysmonwait.Store(false)
6279 noteclear(&sched.sysmonnote)
6280 }
6281 if syscallWake {
6282 idle = 0
6283 delay = 20
6284 }
6285 }
6286 unlock(&sched.lock)
6287 }
6288
6289 lock(&sched.sysmonlock)
6290
6291
6292 now = nanotime()
6293
6294
6295 if *cgo_yield != nil {
6296 asmcgocall(*cgo_yield, nil)
6297 }
6298
6299 lastpoll := sched.lastpoll.Load()
6300 if netpollinited() && lastpoll != 0 && lastpoll+10*1000*1000 < now {
6301 sched.lastpoll.CompareAndSwap(lastpoll, now)
6302 list, delta := netpoll(0)
6303 if !list.empty() {
6304
6305
6306
6307
6308
6309
6310
6311 incidlelocked(-1)
6312 injectglist(&list)
6313 incidlelocked(1)
6314 netpollAdjustWaiters(delta)
6315 }
6316 }
6317
6318 if debug.updatemaxprocs != 0 && lastgomaxprocs+1e9 <= now {
6319 sysmonUpdateGOMAXPROCS()
6320 lastgomaxprocs = now
6321 }
6322 if scavenger.sysmonWake.Load() != 0 {
6323
6324 scavenger.wake()
6325 }
6326
6327
6328 if retake(now) != 0 {
6329 idle = 0
6330 } else {
6331 idle++
6332 }
6333
6334 if t := (gcTrigger{kind: gcTriggerTime, now: now}); t.test() && forcegc.idle.Load() {
6335 lock(&forcegc.lock)
6336 forcegc.idle.Store(false)
6337 var list gList
6338 list.push(forcegc.g)
6339 injectglist(&list)
6340 unlock(&forcegc.lock)
6341 }
6342 if debug.schedtrace > 0 && lasttrace+int64(debug.schedtrace)*1000000 <= now {
6343 lasttrace = now
6344 schedtrace(debug.scheddetail > 0)
6345 }
6346 unlock(&sched.sysmonlock)
6347 }
6348 }
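// Descriptive summary of sysmon above: it runs on its own M without a P,
// sleeping between 20µs and 10ms depending on recent activity. Each
// iteration it may poll the network if it has not been polled for over
// 10ms (injecting any ready goroutines), retake Ps blocked in syscalls or
// running a goroutine for too long, wake the scavenger, start a forced GC
// once forcegcperiod has elapsed, re-evaluate the default GOMAXPROCS when
// the updatemaxprocs debug setting is on, and emit schedtrace output.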
6349
6350 type sysmontick struct {
6351 schedtick uint32
6352 syscalltick uint32
6353 schedwhen int64
6354 syscallwhen int64
6355 }
6356
6357
6358
6359 const forcePreemptNS = 10 * 1000 * 1000
6360
6361 func retake(now int64) uint32 {
6362 n := 0
6363
6364
6365 lock(&allpLock)
6366
6367
6368
6369 for i := 0; i < len(allp); i++ {
6370 pp := allp[i]
6371 if pp == nil {
6372
6373
6374 continue
6375 }
6376 pd := &pp.sysmontick
6377 s := pp.status
6378 sysretake := false
6379 if s == _Prunning || s == _Psyscall {
6380
6381
6382
6383
6384 t := int64(pp.schedtick)
6385 if int64(pd.schedtick) != t {
6386 pd.schedtick = uint32(t)
6387 pd.schedwhen = now
6388 } else if pd.schedwhen+forcePreemptNS <= now {
6389 preemptone(pp)
6390
6391
6392 sysretake = true
6393 }
6394 }
6395 if s == _Psyscall {
6396
6397 t := int64(pp.syscalltick)
6398 if !sysretake && int64(pd.syscalltick) != t {
6399 pd.syscalltick = uint32(t)
6400 pd.syscallwhen = now
6401 continue
6402 }
6403
6404
6405
6406 if runqempty(pp) && sched.nmspinning.Load()+sched.npidle.Load() > 0 && pd.syscallwhen+10*1000*1000 > now {
6407 continue
6408 }
6409
6410 unlock(&allpLock)
6411
6412
6413
6414
6415 incidlelocked(-1)
6416 trace := traceAcquire()
6417 if atomic.Cas(&pp.status, s, _Pidle) {
6418 if trace.ok() {
6419 trace.ProcSteal(pp, false)
6420 traceRelease(trace)
6421 }
6422 sched.nGsyscallNoP.Add(1)
6423 n++
6424 pp.syscalltick++
6425 handoffp(pp)
6426 } else if trace.ok() {
6427 traceRelease(trace)
6428 }
6429 incidlelocked(1)
6430 lock(&allpLock)
6431 }
6432 }
6433 unlock(&allpLock)
6434 return uint32(n)
6435 }
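// Descriptive note: retake runs from sysmon without holding any P. It
// preempts a P whose current goroutine has run for more than forcePreemptNS
// (10ms), and it takes back a P sitting in _Psyscall, handing it off via
// handoffp, unless the P has an empty run queue, other Ps are already
// spinning or idle, and the syscall has lasted less than roughly 10ms.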
6436
6437 // Tell all goroutines that they have been preempted and they should stop.
6438 // This function is purely best-effort. It can fail to inform a goroutine if a
6439 // processor just started running it.
6440 // No locks need to be held.
6441 // Returns true if preemption request was issued to at least one goroutine.
6442 func preemptall() bool {
6443 res := false
6444 for _, pp := range allp {
6445 if pp.status != _Prunning {
6446 continue
6447 }
6448 if preemptone(pp) {
6449 res = true
6450 }
6451 }
6452 return res
6453 }
6454
6455 // Tell the goroutine running on processor P to stop.
6456 // This function is purely best-effort. It can incorrectly fail to inform the
6457 // goroutine. It can inform the wrong goroutine. Even if it informs the
6458 // correct goroutine, that goroutine might ignore the request if it is
6459 // simultaneously executing newstack.
6460 // No lock needs to be held.
6461 // Returns true if preemption request was issued.
6462 // The actual preemption will happen at some point in the future
6463 // and will be indicated by gp->status no longer being
6464 // Grunning.
6465 func preemptone(pp *p) bool {
6466 mp := pp.m.ptr()
6467 if mp == nil || mp == getg().m {
6468 return false
6469 }
6470 gp := mp.curg
6471 if gp == nil || gp == mp.g0 {
6472 return false
6473 }
6474
6475 gp.preempt = true
6476
6477
6478
6479
6480
6481 gp.stackguard0 = stackPreempt
6482
6483
6484 if preemptMSupported && debug.asyncpreemptoff == 0 {
6485 pp.preempt = true
6486 preemptM(mp)
6487 }
6488
6489 return true
6490 }
6491
6492 var starttime int64
6493
6494 func schedtrace(detailed bool) {
6495 now := nanotime()
6496 if starttime == 0 {
6497 starttime = now
6498 }
6499
6500 lock(&sched.lock)
6501 print("SCHED ", (now-starttime)/1e6, "ms: gomaxprocs=", gomaxprocs, " idleprocs=", sched.npidle.Load(), " threads=", mcount(), " spinningthreads=", sched.nmspinning.Load(), " needspinning=", sched.needspinning.Load(), " idlethreads=", sched.nmidle, " runqueue=", sched.runq.size)
6502 if detailed {
6503 print(" gcwaiting=", sched.gcwaiting.Load(), " nmidlelocked=", sched.nmidlelocked, " stopwait=", sched.stopwait, " sysmonwait=", sched.sysmonwait.Load(), "\n")
6504 }
6505
6506
6507
6508 for i, pp := range allp {
6509 h := atomic.Load(&pp.runqhead)
6510 t := atomic.Load(&pp.runqtail)
6511 if detailed {
6512 print(" P", i, ": status=", pp.status, " schedtick=", pp.schedtick, " syscalltick=", pp.syscalltick, " m=")
6513 mp := pp.m.ptr()
6514 if mp != nil {
6515 print(mp.id)
6516 } else {
6517 print("nil")
6518 }
6519 print(" runqsize=", t-h, " gfreecnt=", pp.gFree.size, " timerslen=", len(pp.timers.heap), "\n")
6520 } else {
6521
6522
6523 print(" ")
6524 if i == 0 {
6525 print("[ ")
6526 }
6527 print(t - h)
6528 if i == len(allp)-1 {
6529 print(" ]")
6530 }
6531 }
6532 }
6533
6534 if !detailed {
6535
6536 print(" schedticks=[ ")
6537 for _, pp := range allp {
6538 print(pp.schedtick)
6539 print(" ")
6540 }
6541 print("]\n")
6542 }
6543
6544 if !detailed {
6545 unlock(&sched.lock)
6546 return
6547 }
6548
6549 for mp := allm; mp != nil; mp = mp.alllink {
6550 pp := mp.p.ptr()
6551 print(" M", mp.id, ": p=")
6552 if pp != nil {
6553 print(pp.id)
6554 } else {
6555 print("nil")
6556 }
6557 print(" curg=")
6558 if mp.curg != nil {
6559 print(mp.curg.goid)
6560 } else {
6561 print("nil")
6562 }
6563 print(" mallocing=", mp.mallocing, " throwing=", mp.throwing, " preemptoff=", mp.preemptoff, " locks=", mp.locks, " dying=", mp.dying, " spinning=", mp.spinning, " blocked=", mp.blocked, " lockedg=")
6564 if lockedg := mp.lockedg.ptr(); lockedg != nil {
6565 print(lockedg.goid)
6566 } else {
6567 print("nil")
6568 }
6569 print("\n")
6570 }
6571
6572 forEachG(func(gp *g) {
6573 print(" G", gp.goid, ": status=", readgstatus(gp), "(", gp.waitreason.String(), ") m=")
6574 if gp.m != nil {
6575 print(gp.m.id)
6576 } else {
6577 print("nil")
6578 }
6579 print(" lockedm=")
6580 if lockedm := gp.lockedm.ptr(); lockedm != nil {
6581 print(lockedm.id)
6582 } else {
6583 print("nil")
6584 }
6585 print("\n")
6586 })
6587 unlock(&sched.lock)
6588 }
6589
6590 type updateMaxProcsGState struct {
6591 lock mutex
6592 g *g
6593 idle atomic.Bool
6594
6595
6596 procs int32
6597 }
6598
6599 var (
6600
6601
6602 updatemaxprocs = &godebugInc{name: "updatemaxprocs"}
6603
6604
6605
6606 updateMaxProcsG updateMaxProcsGState
6607
6608
6609
6610
6611
6612
6613
6614
6615
6616
6617
6618
6619
6620
6621
6622
6623
6624
6625
6626
6627
6628
6629
6630
6631
6632
6633
6634
6635
6636
6637
6638
6639
6640
6641
6642
6643
6644
6645
6646
6647
6648
6649
6650
6651
6652
6653
6654
6655 computeMaxProcsLock mutex
6656 )
6657
6658
6659
6660
6661 func defaultGOMAXPROCSUpdateEnable() {
6662 if debug.updatemaxprocs == 0 {
6663
6664
6665
6666
6667
6668
6669
6670
6671
6672
6673
6674 updatemaxprocs.IncNonDefault()
6675 return
6676 }
6677
6678 go updateMaxProcsGoroutine()
6679 }
6680
6681 func updateMaxProcsGoroutine() {
6682 updateMaxProcsG.g = getg()
6683 lockInit(&updateMaxProcsG.lock, lockRankUpdateMaxProcsG)
6684 for {
6685 lock(&updateMaxProcsG.lock)
6686 if updateMaxProcsG.idle.Load() {
6687 throw("updateMaxProcsGoroutine: phase error")
6688 }
6689 updateMaxProcsG.idle.Store(true)
6690 goparkunlock(&updateMaxProcsG.lock, waitReasonUpdateGOMAXPROCSIdle, traceBlockSystemGoroutine, 1)
6691
6692
6693 stw := stopTheWorldGC(stwGOMAXPROCS)
6694
6695
6696 lock(&sched.lock)
6697 custom := sched.customGOMAXPROCS
6698 unlock(&sched.lock)
6699 if custom {
6700 startTheWorldGC(stw)
6701 return
6702 }
6703
6704
6705
6706
6707
6708 newprocs = updateMaxProcsG.procs
6709 lock(&sched.lock)
6710 sched.customGOMAXPROCS = false
6711 unlock(&sched.lock)
6712
6713 startTheWorldGC(stw)
6714 }
6715 }
6716
6717 func sysmonUpdateGOMAXPROCS() {
6718
6719 lock(&computeMaxProcsLock)
6720
6721
6722 lock(&sched.lock)
6723 custom := sched.customGOMAXPROCS
6724 curr := gomaxprocs
6725 unlock(&sched.lock)
6726 if custom {
6727 unlock(&computeMaxProcsLock)
6728 return
6729 }
6730
6731
6732 procs := defaultGOMAXPROCS(0)
6733 unlock(&computeMaxProcsLock)
6734 if procs == curr {
6735
6736 return
6737 }
6738
6739
6740
6741
6742 if updateMaxProcsG.idle.Load() {
6743 lock(&updateMaxProcsG.lock)
6744 updateMaxProcsG.procs = procs
6745 updateMaxProcsG.idle.Store(false)
6746 var list gList
6747 list.push(updateMaxProcsG.g)
6748 injectglist(&list)
6749 unlock(&updateMaxProcsG.lock)
6750 }
6751 }
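// The two functions above form a single-waiter handoff: the update goroutine
// parks with idle=true, and sysmon publishes the new procs value and readies
// it again. A minimal standalone sketch of the same shape using a buffered
// channel instead of gopark/injectglist (apply is a placeholder, not a runtime
// function):
//
//	var req = make(chan int32, 1) // at most one pending GOMAXPROCS request
//
//	func worker() { // role of updateMaxProcsGoroutine
//		for procs := range req {
//			apply(procs) // stop the world, set newprocs, start the world
//		}
//	}
//
//	func poll(procs int32) { // role of sysmonUpdateGOMAXPROCS
//		select {
//		case req <- procs: // wake the idle worker
//		default: // an update is already pending; skip this one
//		}
//	}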
6752
6753
6754
6755
6756
6757
6758 func schedEnableUser(enable bool) {
6759 lock(&sched.lock)
6760 if sched.disable.user == !enable {
6761 unlock(&sched.lock)
6762 return
6763 }
6764 sched.disable.user = !enable
6765 if enable {
6766 n := sched.disable.runnable.size
6767 globrunqputbatch(&sched.disable.runnable)
6768 unlock(&sched.lock)
6769 for ; n != 0 && sched.npidle.Load() != 0; n-- {
6770 startm(nil, false, false)
6771 }
6772 } else {
6773 unlock(&sched.lock)
6774 }
6775 }
6776
6777
6778
6779
6780
6781 func schedEnabled(gp *g) bool {
6782 assertLockHeld(&sched.lock)
6783
6784 if sched.disable.user {
6785 return isSystemGoroutine(gp, true)
6786 }
6787 return true
6788 }
6789
6790
6791
6792
6793
6794
6795 func mput(mp *m) {
6796 assertLockHeld(&sched.lock)
6797
6798 mp.schedlink = sched.midle
6799 sched.midle.set(mp)
6800 sched.nmidle++
6801 checkdead()
6802 }
6803
6804
6805
6806
6807
6808
6809 func mget() *m {
6810 assertLockHeld(&sched.lock)
6811
6812 mp := sched.midle.ptr()
6813 if mp != nil {
6814 sched.midle = mp.schedlink
6815 sched.nmidle--
6816 }
6817 return mp
6818 }
6819
6820
6821
6822
6823
6824
6825 func globrunqput(gp *g) {
6826 assertLockHeld(&sched.lock)
6827
6828 sched.runq.pushBack(gp)
6829 }
6830
6831
6832
6833
6834
6835
6836 func globrunqputhead(gp *g) {
6837 assertLockHeld(&sched.lock)
6838
6839 sched.runq.push(gp)
6840 }
6841
6842
6843
6844
6845
6846
6847
6848 func globrunqputbatch(batch *gQueue) {
6849 assertLockHeld(&sched.lock)
6850
6851 sched.runq.pushBackAll(*batch)
6852 *batch = gQueue{}
6853 }
6854
6855
6856
6857 func globrunqget() *g {
6858 assertLockHeld(&sched.lock)
6859
6860 if sched.runq.size == 0 {
6861 return nil
6862 }
6863
6864 return sched.runq.pop()
6865 }
6866
6867
6868
6869 func globrunqgetbatch(n int32) (gp *g, q gQueue) {
6870 assertLockHeld(&sched.lock)
6871
6872 if sched.runq.size == 0 {
6873 return
6874 }
6875
6876 n = min(n, sched.runq.size, sched.runq.size/gomaxprocs+1)
6877
6878 gp = sched.runq.pop()
6879 n--
6880
6881 for ; n > 0; n-- {
6882 gp1 := sched.runq.pop()
6883 q.pushBack(gp1)
6884 }
6885 return
6886 }
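// Worked example of the cap above: with sched.runq.size = 64, gomaxprocs = 8,
// and a caller asking for n = 32, the batch is limited to
// min(32, 64, 64/8+1) = 9 goroutines: one returned directly as gp and eight
// pushed onto q. The size/gomaxprocs+1 term keeps one P from draining the
// global queue while the others go hungry.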
6887
6888
6889 type pMask []uint32
6890
6891
6892 func (p pMask) read(id uint32) bool {
6893 word := id / 32
6894 mask := uint32(1) << (id % 32)
6895 return (atomic.Load(&p[word]) & mask) != 0
6896 }
6897
6898
6899 func (p pMask) set(id int32) {
6900 word := id / 32
6901 mask := uint32(1) << (id % 32)
6902 atomic.Or(&p[word], mask)
6903 }
6904
6905
6906 func (p pMask) clear(id int32) {
6907 word := id / 32
6908 mask := uint32(1) << (id % 32)
6909 atomic.And(&p[word], ^mask)
6910 }
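// Example of the indexing used by read/set/clear: for the P with id 37,
// word = 37/32 = 1 and mask = 1<<(37%32) = 1<<5, so that P is bit 5 of p[1].
// Each uint32 word therefore tracks 32 Ps.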
6911
6912
6913
6914
6915
6916
6917
6918
6919
6920
6921
6922
6923 func pidleput(pp *p, now int64) int64 {
6924 assertLockHeld(&sched.lock)
6925
6926 if !runqempty(pp) {
6927 throw("pidleput: P has non-empty run queue")
6928 }
6929 if now == 0 {
6930 now = nanotime()
6931 }
6932 if pp.timers.len.Load() == 0 {
6933 timerpMask.clear(pp.id)
6934 }
6935 idlepMask.set(pp.id)
6936 pp.link = sched.pidle
6937 sched.pidle.set(pp)
6938 sched.npidle.Add(1)
6939 if !pp.limiterEvent.start(limiterEventIdle, now) {
6940 throw("must be able to track idle limiter event")
6941 }
6942 return now
6943 }
6944
6945
6946
6947
6948
6949
6950
6951
6952 func pidleget(now int64) (*p, int64) {
6953 assertLockHeld(&sched.lock)
6954
6955 pp := sched.pidle.ptr()
6956 if pp != nil {
6957
6958 if now == 0 {
6959 now = nanotime()
6960 }
6961 timerpMask.set(pp.id)
6962 idlepMask.clear(pp.id)
6963 sched.pidle = pp.link
6964 sched.npidle.Add(-1)
6965 pp.limiterEvent.stop(limiterEventIdle, now)
6966 }
6967 return pp, now
6968 }
6969
6970
6971
6972
6973
6974
6975
6976
6977
6978
6979
6980 func pidlegetSpinning(now int64) (*p, int64) {
6981 assertLockHeld(&sched.lock)
6982
6983 pp, now := pidleget(now)
6984 if pp == nil {
6985
6986
6987
6988 sched.needspinning.Store(1)
6989 return nil, now
6990 }
6991
6992 return pp, now
6993 }
6994
6995
6996
6997 func runqempty(pp *p) bool {
6998
6999
7000
7001
7002 for {
7003 head := atomic.Load(&pp.runqhead)
7004 tail := atomic.Load(&pp.runqtail)
7005 runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext)))
7006 if tail == atomic.Load(&pp.runqtail) {
7007 return head == tail && runnext == 0
7008 }
7009 }
7010 }
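// Why the loop above re-reads runqtail instead of trusting one snapshot: a
// single pass could observe
//
//	1. runqhead == runqtail (ring empty) while runnext still holds G1,
//	2. then runqput kicks G1 from runnext into the ring (tail advances),
//	3. then runqget empties runnext,
//	4. and finally the observer reads runnext == 0, wrongly concluding "empty".
//
// Requiring that runqtail did not move between the loads rules this out.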
7011
7012
7013
7014
7015
7016
7017
7018
7019
7020
7021 const randomizeScheduler = raceenabled
7022
7023
7024
7025
7026
7027
7028 func runqput(pp *p, gp *g, next bool) {
7029 if !haveSysmon && next {
7030
7031
7032
7033
7034
7035
7036
7037
7038 next = false
7039 }
7040 if randomizeScheduler && next && randn(2) == 0 {
7041 next = false
7042 }
7043
7044 if next {
7045 retryNext:
7046 oldnext := pp.runnext
7047 if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {
7048 goto retryNext
7049 }
7050 if oldnext == 0 {
7051 return
7052 }
7053
7054 gp = oldnext.ptr()
7055 }
7056
7057 retry:
7058 h := atomic.LoadAcq(&pp.runqhead)
7059 t := pp.runqtail
7060 if t-h < uint32(len(pp.runq)) {
7061 pp.runq[t%uint32(len(pp.runq))].set(gp)
7062 atomic.StoreRel(&pp.runqtail, t+1)
7063 return
7064 }
7065 if runqputslow(pp, gp, h, t) {
7066 return
7067 }
7068
7069 goto retry
7070 }
7071
7072
7073
7074 func runqputslow(pp *p, gp *g, h, t uint32) bool {
7075 var batch [len(pp.runq)/2 + 1]*g
7076
7077
7078 n := t - h
7079 n = n / 2
7080 if n != uint32(len(pp.runq)/2) {
7081 throw("runqputslow: queue is not full")
7082 }
7083 for i := uint32(0); i < n; i++ {
7084 batch[i] = pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7085 }
7086 if !atomic.CasRel(&pp.runqhead, h, h+n) {
7087 return false
7088 }
7089 batch[n] = gp
7090
7091 if randomizeScheduler {
7092 for i := uint32(1); i <= n; i++ {
7093 j := cheaprandn(i + 1)
7094 batch[i], batch[j] = batch[j], batch[i]
7095 }
7096 }
7097
7098
7099 for i := uint32(0); i < n; i++ {
7100 batch[i].schedlink.set(batch[i+1])
7101 }
7102
7103 q := gQueue{batch[0].guintptr(), batch[n].guintptr(), int32(n + 1)}
7104
7105
7106 lock(&sched.lock)
7107 globrunqputbatch(&q)
7108 unlock(&sched.lock)
7109 return true
7110 }
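// Worked example: pp.runq has 256 slots, so when runqput finds the ring full
// (t-h == 256) this slow path takes n = 128 Gs from the head, appends the
// incoming G as batch[128], links all 129 through schedlink into a gQueue, and
// moves them to the global run queue under a single sched.lock acquisition.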
7111
7112
7113
7114
7115 func runqputbatch(pp *p, q *gQueue) {
7116 if q.empty() {
7117 return
7118 }
7119 h := atomic.LoadAcq(&pp.runqhead)
7120 t := pp.runqtail
7121 n := uint32(0)
7122 for !q.empty() && t-h < uint32(len(pp.runq)) {
7123 gp := q.pop()
7124 pp.runq[t%uint32(len(pp.runq))].set(gp)
7125 t++
7126 n++
7127 }
7128
7129 if randomizeScheduler {
7130 off := func(o uint32) uint32 {
7131 return (pp.runqtail + o) % uint32(len(pp.runq))
7132 }
7133 for i := uint32(1); i < n; i++ {
7134 j := cheaprandn(i + 1)
7135 pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)]
7136 }
7137 }
7138
7139 atomic.StoreRel(&pp.runqtail, t)
7140
7141 return
7142 }
7143
7144
7145
7146
7147
7148 func runqget(pp *p) (gp *g, inheritTime bool) {
7149
7150 next := pp.runnext
7151
7152
7153
7154 if next != 0 && pp.runnext.cas(next, 0) {
7155 return next.ptr(), true
7156 }
7157
7158 for {
7159 h := atomic.LoadAcq(&pp.runqhead)
7160 t := pp.runqtail
7161 if t == h {
7162 return nil, false
7163 }
7164 gp := pp.runq[h%uint32(len(pp.runq))].ptr()
7165 if atomic.CasRel(&pp.runqhead, h, h+1) {
7166 return gp, false
7167 }
7168 }
7169 }
7170
7171
7172
7173 func runqdrain(pp *p) (drainQ gQueue) {
7174 oldNext := pp.runnext
7175 if oldNext != 0 && pp.runnext.cas(oldNext, 0) {
7176 drainQ.pushBack(oldNext.ptr())
7177 }
7178
7179 retry:
7180 h := atomic.LoadAcq(&pp.runqhead)
7181 t := pp.runqtail
7182 qn := t - h
7183 if qn == 0 {
7184 return
7185 }
7186 if qn > uint32(len(pp.runq)) {
7187 goto retry
7188 }
7189
7190 if !atomic.CasRel(&pp.runqhead, h, h+qn) {
7191 goto retry
7192 }
7193
7194
7195
7196
7197
7198
7199
7200
7201 for i := uint32(0); i < qn; i++ {
7202 gp := pp.runq[(h+i)%uint32(len(pp.runq))].ptr()
7203 drainQ.pushBack(gp)
7204 }
7205 return
7206 }
7207
7208
7209
7210
7211
7212 func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 {
7213 for {
7214 h := atomic.LoadAcq(&pp.runqhead)
7215 t := atomic.LoadAcq(&pp.runqtail)
7216 n := t - h
7217 n = n - n/2
7218 if n == 0 {
7219 if stealRunNextG {
7220
7221 if next := pp.runnext; next != 0 {
7222 if pp.status == _Prunning {
7223
7224
7225
7226
7227
7228
7229
7230
7231
7232
7233 if !osHasLowResTimer {
7234 usleep(3)
7235 } else {
7236
7237
7238
7239 osyield()
7240 }
7241 }
7242 if !pp.runnext.cas(next, 0) {
7243 continue
7244 }
7245 batch[batchHead%uint32(len(batch))] = next
7246 return 1
7247 }
7248 }
7249 return 0
7250 }
7251 if n > uint32(len(pp.runq)/2) {
7252 continue
7253 }
7254 for i := uint32(0); i < n; i++ {
7255 g := pp.runq[(h+i)%uint32(len(pp.runq))]
7256 batch[(batchHead+i)%uint32(len(batch))] = g
7257 }
7258 if atomic.CasRel(&pp.runqhead, h, h+n) {
7259 return n
7260 }
7261 }
7262 }
7263
7264
7265
7266
7267 func runqsteal(pp, p2 *p, stealRunNextG bool) *g {
7268 t := pp.runqtail
7269 n := runqgrab(p2, &pp.runq, t, stealRunNextG)
7270 if n == 0 {
7271 return nil
7272 }
7273 n--
7274 gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr()
7275 if n == 0 {
7276 return gp
7277 }
7278 h := atomic.LoadAcq(&pp.runqhead)
7279 if t-h+n >= uint32(len(pp.runq)) {
7280 throw("runqsteal: runq overflow")
7281 }
7282 atomic.StoreRel(&pp.runqtail, t+n)
7283 return gp
7284 }
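// Worked example: if victim p2 has 7 runnable Gs, runqgrab takes
// n = 7 - 7/2 = 4 of them into pp's ring starting at pp.runqtail. runqsteal
// then returns the last grabbed G directly and publishes the other three by
// advancing pp.runqtail, so one steal moves roughly half of the victim's work.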
7285
7286
7287
7288 type gQueue struct {
7289 head guintptr
7290 tail guintptr
7291 size int32
7292 }
7293
7294
7295 func (q *gQueue) empty() bool {
7296 return q.head == 0
7297 }
7298
7299
7300 func (q *gQueue) push(gp *g) {
7301 gp.schedlink = q.head
7302 q.head.set(gp)
7303 if q.tail == 0 {
7304 q.tail.set(gp)
7305 }
7306 q.size++
7307 }
7308
7309
7310 func (q *gQueue) pushBack(gp *g) {
7311 gp.schedlink = 0
7312 if q.tail != 0 {
7313 q.tail.ptr().schedlink.set(gp)
7314 } else {
7315 q.head.set(gp)
7316 }
7317 q.tail.set(gp)
7318 q.size++
7319 }
7320
7321
7322
7323 func (q *gQueue) pushBackAll(q2 gQueue) {
7324 if q2.tail == 0 {
7325 return
7326 }
7327 q2.tail.ptr().schedlink = 0
7328 if q.tail != 0 {
7329 q.tail.ptr().schedlink = q2.head
7330 } else {
7331 q.head = q2.head
7332 }
7333 q.tail = q2.tail
7334 q.size += q2.size
7335 }
7336
7337
7338
7339 func (q *gQueue) pop() *g {
7340 gp := q.head.ptr()
7341 if gp != nil {
7342 q.head = gp.schedlink
7343 if q.head == 0 {
7344 q.tail = 0
7345 }
7346 q.size--
7347 }
7348 return gp
7349 }
7350
7351
7352 func (q *gQueue) popList() gList {
7353 stack := gList{q.head, q.size}
7354 *q = gQueue{}
7355 return stack
7356 }
7357
7358
7359
7360 type gList struct {
7361 head guintptr
7362 size int32
7363 }
7364
7365
7366 func (l *gList) empty() bool {
7367 return l.head == 0
7368 }
7369
7370
7371 func (l *gList) push(gp *g) {
7372 gp.schedlink = l.head
7373 l.head.set(gp)
7374 l.size++
7375 }
7376
7377
7378 func (l *gList) pushAll(q gQueue) {
7379 if !q.empty() {
7380 q.tail.ptr().schedlink = l.head
7381 l.head = q.head
7382 l.size += q.size
7383 }
7384 }
7385
7386
7387 func (l *gList) pop() *g {
7388 gp := l.head.ptr()
7389 if gp != nil {
7390 l.head = gp.schedlink
7391 l.size--
7392 }
7393 return gp
7394 }
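// gQueue is a FIFO (head and tail, pushBack/pop) and gList is a LIFO stack
// (head only, push/pop); both thread through g.schedlink, so neither needs to
// allocate. A tiny standalone sketch of the same intrusive-link idea, with a
// toy node type standing in for g:
//
//	type node struct{ next *node }
//
//	type stack struct{ head *node }
//
//	func (s *stack) push(n *node) { n.next = s.head; s.head = n }
//
//	func (s *stack) pop() *node {
//		n := s.head
//		if n != nil {
//			s.head = n.next
//		}
//		return n
//	}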
7395
7396
7397 func setMaxThreads(in int) (out int) {
7398 lock(&sched.lock)
7399 out = int(sched.maxmcount)
7400 if in > 0x7fffffff {
7401 sched.maxmcount = 0x7fffffff
7402 } else {
7403 sched.maxmcount = int32(in)
7404 }
7405 checkmcount()
7406 unlock(&sched.lock)
7407 return
7408 }
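// setMaxThreads is what runtime/debug.SetMaxThreads ends up calling. For
// example,
//
//	old := debug.SetMaxThreads(20000)
//
// raises the limit from its initial value of 10000 and returns the previous
// setting; checkmcount throws if the process already has more threads than
// the new limit allows.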
7409
7410
7411
7412
7413
7414
7415
7416
7417
7418
7419
7420
7421
7422 func procPin() int {
7423 gp := getg()
7424 mp := gp.m
7425
7426 mp.locks++
7427 return int(mp.p.ptr().id)
7428 }
7429
7430
7431
7432
7433
7434
7435
7436
7437
7438
7439
7440
7441
7442 func procUnpin() {
7443 gp := getg()
7444 gp.m.locks--
7445 }
7446
7447
7448
7449 func sync_runtime_procPin() int {
7450 return procPin()
7451 }
7452
7453
7454
7455 func sync_runtime_procUnpin() {
7456 procUnpin()
7457 }
7458
7459
7460
7461 func sync_atomic_runtime_procPin() int {
7462 return procPin()
7463 }
7464
7465
7466
7467 func sync_atomic_runtime_procUnpin() {
7468 procUnpin()
7469 }
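// Sketch of how callers outside the runtime use these pins (the shape behind
// sync.Pool's per-P caches, simplified; shards and work are placeholders):
// procPin disables preemption, so the returned P id stays valid and can index
// a per-P slot without extra locking until procUnpin.
//
//	pid := runtime_procPin() // linknamed to sync_runtime_procPin above
//	shard := &shards[pid%len(shards)]
//	work(shard)
//	runtime_procUnpin()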
7470
7471
7472
7473
7474
7475 func internal_sync_runtime_canSpin(i int) bool {
7476
7477
7478
7479
7480
7481 if i >= active_spin || numCPUStartup <= 1 || gomaxprocs <= sched.npidle.Load()+sched.nmspinning.Load()+1 {
7482 return false
7483 }
7484 if p := getg().m.p.ptr(); !runqempty(p) {
7485 return false
7486 }
7487 return true
7488 }
7489
7490
7491
7492 func internal_sync_runtime_doSpin() {
7493 procyield(active_spin_cnt)
7494 }
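// Sketch of the caller's side (roughly the shape sync.Mutex uses; tryAcquire
// and block are placeholders): canSpin bounds the number of busy-wait bursts,
// and doSpin burns a short, fixed number of PAUSE-like iterations before the
// lock word is examined again.
//
//	iter := 0
//	for !tryAcquire() {
//		if !runtime_canSpin(iter) { // linknamed to the functions above
//			block() // give up spinning and park
//			iter = 0
//			continue
//		}
//		runtime_doSpin() // procyield(active_spin_cnt)
//		iter++
//	}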
7495
7496
7497
7498
7499
7500
7501
7502
7503
7504
7505
7506
7507
7508
7509
7510 func sync_runtime_canSpin(i int) bool {
7511 return internal_sync_runtime_canSpin(i)
7512 }
7513
7514
7515
7516
7517
7518
7519
7520
7521
7522
7523
7524
7525
7526 func sync_runtime_doSpin() {
7527 internal_sync_runtime_doSpin()
7528 }
7529
7530 var stealOrder randomOrder
7531
7532
7533
7534
7535
7536 type randomOrder struct {
7537 count uint32
7538 coprimes []uint32
7539 }
7540
7541 type randomEnum struct {
7542 i uint32
7543 count uint32
7544 pos uint32
7545 inc uint32
7546 }
7547
7548 func (ord *randomOrder) reset(count uint32) {
7549 ord.count = count
7550 ord.coprimes = ord.coprimes[:0]
7551 for i := uint32(1); i <= count; i++ {
7552 if gcd(i, count) == 1 {
7553 ord.coprimes = append(ord.coprimes, i)
7554 }
7555 }
7556 }
7557
7558 func (ord *randomOrder) start(i uint32) randomEnum {
7559 return randomEnum{
7560 count: ord.count,
7561 pos: i % ord.count,
7562 inc: ord.coprimes[i/ord.count%uint32(len(ord.coprimes))],
7563 }
7564 }
7565
7566 func (enum *randomEnum) done() bool {
7567 return enum.i == enum.count
7568 }
7569
7570 func (enum *randomEnum) next() {
7571 enum.i++
7572 enum.pos = (enum.pos + enum.inc) % enum.count
7573 }
7574
7575 func (enum *randomEnum) position() uint32 {
7576 return enum.pos
7577 }
7578
7579 func gcd(a, b uint32) uint32 {
7580 for b != 0 {
7581 a, b = b, a%b
7582 }
7583 return a
7584 }
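// Worked example: with count = 6 the coprimes are {1, 5}. start picks a
// starting pos in [0, 6) and a stride inc from that list, and because
// gcd(inc, 6) == 1 the walk pos, pos+inc, pos+2*inc, ... (mod 6) visits every
// P exactly once before done reports true; e.g. pos = 2, inc = 5 yields
// 2, 1, 0, 5, 4, 3. This is what lets work stealing probe all Ps in a
// pseudo-random order without repeats.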
7585
7586
7587
7588 type initTask struct {
7589 state uint32
7590 nfns uint32
7591
7592 }
7593
7594
7595
7596 var inittrace tracestat
7597
7598 type tracestat struct {
7599 active bool
7600 id uint64
7601 allocs uint64
7602 bytes uint64
7603 }
7604
7605 func doInit(ts []*initTask) {
7606 for _, t := range ts {
7607 doInit1(t)
7608 }
7609 }
7610
7611 func doInit1(t *initTask) {
7612 switch t.state {
7613 case 2:
7614 return
7615 case 1:
7616 throw("recursive call during initialization - linker skew")
7617 default:
7618 t.state = 1
7619
7620 var (
7621 start int64
7622 before tracestat
7623 )
7624
7625 if inittrace.active {
7626 start = nanotime()
7627
7628 before = inittrace
7629 }
7630
7631 if t.nfns == 0 {
7632
7633 throw("inittask with no functions")
7634 }
7635
7636 firstFunc := add(unsafe.Pointer(t), 8)
7637 for i := uint32(0); i < t.nfns; i++ {
7638 p := add(firstFunc, uintptr(i)*goarch.PtrSize)
7639 f := *(*func())(unsafe.Pointer(&p))
7640 f()
7641 }
7642
7643 if inittrace.active {
7644 end := nanotime()
7645
7646 after := inittrace
7647
7648 f := *(*func())(unsafe.Pointer(&firstFunc))
7649 pkg := funcpkgpath(findfunc(abi.FuncPCABIInternal(f)))
7650
7651 var sbuf [24]byte
7652 print("init ", pkg, " @")
7653 print(string(fmtNSAsMS(sbuf[:], uint64(start-runtimeInitTime))), " ms, ")
7654 print(string(fmtNSAsMS(sbuf[:], uint64(end-start))), " ms clock, ")
7655 print(string(itoa(sbuf[:], after.bytes-before.bytes)), " bytes, ")
7656 print(string(itoa(sbuf[:], after.allocs-before.allocs)), " allocs")
7657 print("\n")
7658 }
7659
7660 t.state = 2
7661 }
7662 }
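// Layout assumed by doInit1: an initTask is a two-uint32 header followed
// directly by the init function addresses, which is why the loop reads the
// first PC at offset 8 and steps by goarch.PtrSize:
//
//	offset 0: state (0 = not started, 1 = running, 2 = done)
//	offset 4: nfns
//	offset 8: nfns pointer-sized slots, one PC per init function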
7663