author     Ian Lance Taylor <iant@golang.org>    2020-12-21 20:51:18 -0800
committer  Ian Lance Taylor <iant@golang.org>    2020-12-22 11:56:41 -0800
commit     48357ce4f22c8298ea5fb01d6873bc7bf56180e5 (patch)
tree       bafb432735bae6fde8265a5c2d4a032c56c7780d /libgo
parent     1fa5fc7408d05710cb63a5e91c5df6c787b51019 (diff)
runtime: omit nil-channel cases from selectgo's orders
This is the gofrontend version of https://golang.org/cl/245123. Original CL description:

    Currently, selectgo does an initial pass over the cases array to look
    for entries with nil channels, so they can be easily recognized and
    skipped later on. But this still involves actually visiting the cases.

    This commit changes selectgo to omit cases with nil channels when
    constructing pollorder, so that they'll be skipped over entirely later
    on. It also checks for caseDefault up front, which will facilitate
    changing it to use a "block bool" parameter instead.

    Updates golang/go#40410

This is being brought over to gofrontend as a step toward upgrading to Go1.16beta1, setting up for more compiler changes related to select handling.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/279733
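For illustration, the ordering strategy can be sketched outside the runtime. The following standalone Go program is only a hedged approximation of the change: scase, the case-kind constants, and buildPollOrder are local stand-ins (math/rand replaces the runtime's fastrandn, *int replaces *hchan). It builds a random poll order that omits nil-channel cases and records the default case index up front, mirroring the approach of the CL.

package main

import (
	"fmt"
	"math/rand"
)

const (
	caseRecv = iota
	caseSend
	caseDefault
)

// scase mirrors the runtime's select-case record for illustration only;
// *int stands in for the runtime's *hchan.
type scase struct {
	c    *int
	kind int
}

// buildPollOrder returns a random permutation of the indices of the cases
// that have a non-nil channel, plus the index of the default case (-1 if
// there is none). Nil-channel cases never appear in the result.
func buildPollOrder(scases []scase) ([]uint16, int) {
	pollorder := make([]uint16, len(scases))
	dfli := -1
	norder := 0
	for i := range scases {
		cas := &scases[i]
		if cas.c == nil {
			if cas.kind == caseDefault {
				dfli = i // remember the default case up front
			}
			continue // a nil channel can never become ready; omit it
		}
		// Insert case i at a random position among the cases kept so far,
		// the same incremental shuffle the runtime does with fastrandn.
		j := rand.Intn(norder + 1)
		pollorder[norder] = pollorder[j]
		pollorder[j] = uint16(i)
		norder++
	}
	return pollorder[:norder], dfli
}

func main() {
	ch := new(int)
	cases := []scase{
		{c: nil, kind: caseRecv}, // nil channel: dropped from the order
		{c: ch, kind: caseSend},
		{c: nil, kind: caseDefault}, // default: recorded as dfli, not polled
		{c: ch, kind: caseRecv},
	}
	order, dfli := buildPollOrder(cases)
	fmt.Println("pollorder:", order, "default index:", dfli)
}

Running it prints some permutation of the two non-nil case indices (1 and 3) and a default index of 2; the nil-channel entries never enter the poll order at all.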
Diffstat (limited to 'libgo')
 libgo/go/runtime/select.go | 68
 1 file changed, 28 insertions(+), 40 deletions(-)
diff --git a/libgo/go/runtime/select.go b/libgo/go/runtime/select.go
index 81e00ec..d5087fb 100644
--- a/libgo/go/runtime/select.go
+++ b/libgo/go/runtime/select.go
@@ -41,7 +41,7 @@ func sellock(scases []scase, lockorder []uint16) {
 	var c *hchan
 	for _, o := range lockorder {
 		c0 := scases[o].c
-		if c0 != nil && c0 != c {
+		if c0 != c {
 			c = c0
 			lock(&c.lock)
 		}
@@ -57,11 +57,8 @@ func selunlock(scases []scase, lockorder []uint16) {
 	// the G that calls select runnable again and schedules it for execution.
 	// When the G runs on another M, it locks all the locks and frees sel.
 	// Now if the first M touches sel, it will access freed memory.
-	for i := len(scases) - 1; i >= 0; i-- {
+	for i := len(lockorder) - 1; i >= 0; i-- {
 		c := scases[lockorder[i]].c
-		if c == nil {
-			break
-		}
 		if i > 0 && c == scases[lockorder[i-1]].c {
 			continue // will unlock it on the next iteration
 		}
@@ -138,15 +135,6 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 	pollorder := order1[:ncases:ncases]
 	lockorder := order1[ncases:][:ncases:ncases]
 
-	// Replace send/receive cases involving nil channels with
-	// caseNil so logic below can assume non-nil channel.
-	for i := range scases {
-		cas := &scases[i]
-		if cas.c == nil && cas.kind != caseDefault {
-			*cas = scase{}
-		}
-	}
-
 	var t0 int64
 	if blockprofilerate > 0 {
 		t0 = cputicks()
@@ -166,15 +154,31 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 	}
 
 	// generate permuted order
-	for i := 1; i < ncases; i++ {
-		j := fastrandn(uint32(i + 1))
-		pollorder[i] = pollorder[j]
+	dfli := -1
+	norder := 0
+	for i := range scases {
+		cas := &scases[i]
+
+		// Omit cases without channels from the poll and lock orders.
+		if cas.c == nil {
+			if cas.kind == caseDefault {
+				dfli = i
+			}
+			cas.elem = nil // allow GC
+			continue
+		}
+
+		j := fastrandn(uint32(norder + 1))
+		pollorder[norder] = pollorder[j]
 		pollorder[j] = uint16(i)
+		norder++
 	}
+	pollorder = pollorder[:norder]
+	lockorder = lockorder[:norder]
 
 	// sort the cases by Hchan address to get the locking order.
 	// simple heap sort, to guarantee n log n time and constant stack footprint.
-	for i := 0; i < ncases; i++ {
+	for i := range lockorder {
 		j := i
 		// Start with the pollorder to permute cases on the same channel.
 		c := scases[pollorder[i]].c
@@ -185,7 +189,7 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 		}
 		lockorder[j] = pollorder[i]
 	}
-	for i := ncases - 1; i >= 0; i-- {
+	for i := len(lockorder) - 1; i >= 0; i-- {
 		o := lockorder[i]
 		c := scases[o].c
 		lockorder[i] = lockorder[0]
@@ -209,7 +213,7 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 	}
 
 	if debugSelect {
-		for i := 0; i+1 < ncases; i++ {
+		for i := 0; i+1 < len(lockorder); i++ {
 			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
 				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
 				throw("select: broken sort")
@@ -233,21 +237,16 @@ func selectgo(cas0 *scase, order0 *uint16, ncases int) (int, bool) {
 
 loop:
 	// pass 1 - look for something already waiting
-	var dfli int
-	var dfl *scase
 	var casi int
 	var cas *scase
 	var caseReleaseTime int64 = -1
 	var recvOK bool
-	for i := 0; i < ncases; i++ {
-		casi = int(pollorder[i])
+	for _, casei := range pollorder {
+		casi = int(casei)
 		cas = &scases[casi]
 		c = cas.c
 
 		switch cas.kind {
-		case caseNil:
-			continue
-
 		case caseRecv:
 			sg = c.sendq.dequeue()
 			if sg != nil {
@@ -271,17 +270,12 @@ loop:
 			if c.qcount < c.dataqsiz {
 				goto bufsend
 			}
-
-		case caseDefault:
-			dfli = casi
-			dfl = cas
 		}
 	}
 
-	if dfl != nil {
+	if dfli >= 0 {
 		selunlock(scases, lockorder)
 		casi = dfli
-		cas = dfl
 		goto retc
 	}
 
@@ -294,9 +288,6 @@ loop:
 	for _, casei := range lockorder {
 		casi = int(casei)
 		cas = &scases[casi]
-		if cas.kind == caseNil {
-			continue
-		}
 		c = cas.c
 		sg := acquireSudog()
 		sg.g = gp
@@ -355,9 +346,6 @@ loop:
 
 	for _, casei := range lockorder {
 		k = &scases[casei]
-		if k.kind == caseNil {
-			continue
-		}
 		if sg == sglist {
 			// sg has already been dequeued by the G that woke us up.
 			casi = int(casei)
@@ -468,7 +456,7 @@ retc:
 
 	// Check preemption, since unlike gc we don't check on every call.
 	// A test case for this one is BenchmarkPingPongHog in proc_test.go.
-	if dfl != nil && getg().preempt {
+	if dfli >= 0 && getg().preempt {
 		checkPreempt()
 	}
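
As background on why omitting nil-channel cases is safe, here is a small user-level example (illustrative only, not part of this change): send and receive on a nil channel block forever, so a select case on a nil channel can never be chosen, and only ready cases or the default ever fire.

package main

import "fmt"

func main() {
	var nilCh chan int // a nil channel: send and receive on it block forever
	ready := make(chan int, 1)
	ready <- 42

	select {
	case v := <-nilCh:
		fmt.Println("unreachable:", v) // a nil-channel case can never fire
	case v := <-ready:
		fmt.Println("got:", v) // the ready case wins
	default:
		fmt.Println("default") // chosen only if no case is ready
	}
}

This program always prints "got: 42"; the nil-channel case is unreachable whether or not it participates in the poll order, which is what lets selectgo drop such cases from pollorder and lockorder entirely.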