Diffstat (limited to 'libgo/go/runtime/map_fast64.go')
-rw-r--r--  libgo/go/runtime/map_fast64.go  21
1 file changed, 16 insertions(+), 5 deletions(-)
diff --git a/libgo/go/runtime/map_fast64.go b/libgo/go/runtime/map_fast64.go
index 26c60ae..14bdcee 100644
--- a/libgo/go/runtime/map_fast64.go
+++ b/libgo/go/runtime/map_fast64.go
@@ -122,7 +122,7 @@ again:
 	if h.growing() {
 		growWork_fast64(t, h, bucket)
 	}
-	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
 
 	var insertb *bmap
 	var inserti uintptr
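
Both assignment hunks in this patch replace the open-coded uintptr arithmetic with the runtime's add helper (defined in runtime/stubs.go), which keeps the offset computation inside a single unsafe.Pointer expression, the one pointer-arithmetic pattern the unsafe rules and checkptr tooling sanction. A self-contained sketch of the helper and the bucket-stepping idiom; the surrounding program is illustrative, not runtime code:

package main

import (
	"fmt"
	"unsafe"
)

// add mirrors the runtime helper: it offsets p by x bytes while the
// value stays typed as unsafe.Pointer for the whole expression.
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

func main() {
	buckets := [4]uint64{10, 20, 30, 40}
	// Step to element 2 the same way the runtime steps to bucket 2.
	p := add(unsafe.Pointer(&buckets[0]), 2*unsafe.Sizeof(buckets[0]))
	fmt.Println(*(*uint64)(p)) // 30
}
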
@@ -166,7 +166,7 @@ bucketloop:
 	}
 
 	if insertb == nil {
-		// all current buckets are full, allocate a new one.
+		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
 		insertb = h.newoverflow(t, b)
 		inserti = 0 // not necessary, but avoids needlessly spilling inserti
 	}
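
The reworded comment (also applied to the second copy of this hunk below) states the condition precisely: insertb is only nil after the scan has visited the target bucket and every overflow bucket chained to it without finding a free slot. A simplified, hypothetical sketch of that shape, not the runtime's real bmap layout:

package main

import "fmt"

const bucketCnt = 8 // slots per bucket, as in the runtime

// bucket is a toy stand-in for the runtime's bmap.
type bucket struct {
	keys     [bucketCnt]uint64
	used     [bucketCnt]bool
	overflow *bucket
}

// insert walks b and its overflow chain looking for a free slot and
// allocates a new overflow bucket only when the whole chain is full.
func insert(b *bucket, key uint64) {
	for {
		for i := 0; i < bucketCnt; i++ {
			if !b.used[i] {
				b.keys[i], b.used[i] = key, true
				return
			}
		}
		if b.overflow == nil {
			// The current bucket and all the overflow buckets
			// connected to it are full; allocate a new one.
			b.overflow = new(bucket)
		}
		b = b.overflow
	}
}

func main() {
	var b bucket
	for k := uint64(0); k < 20; k++ {
		insert(&b, k)
	}
	fmt.Println(b.overflow != nil) // true: 20 keys forced an overflow
}
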
@@ -212,7 +212,7 @@ again:
 	if h.growing() {
 		growWork_fast64(t, h, bucket)
 	}
-	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+	b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
 
 	var insertb *bmap
 	var inserti uintptr
@@ -256,7 +256,7 @@ bucketloop:
 	}
 
 	if insertb == nil {
-		// all current buckets are full, allocate a new one.
+		// The current bucket and all the overflow buckets connected to it are full; allocate a new one.
 		insertb = h.newoverflow(t, b)
 		inserti = 0 // not necessary, but avoids needlessly spilling inserti
 	}
@@ -308,7 +308,13 @@ search:
 			}
 			// Only clear key if there are pointers in it.
 			if t.key.ptrdata != 0 {
-				memclrHasPointers(k, t.key.size)
+				if sys.PtrSize == 8 {
+					*(*unsafe.Pointer)(k) = nil
+				} else {
+					// There are three ways to squeeze one or more 32 bit pointers into 64 bits.
+					// Just call memclrHasPointers instead of trying to handle all cases here.
+					memclrHasPointers(k, 8)
+				}
 			}
 			e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
 			if t.elem.ptrdata != 0 {
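
A fast64 key is exactly 8 bytes, so on 64-bit targets (sys.PtrSize == 8) a key containing pointers is exactly one pointer and a single nil store clears it. On 32-bit targets the 8 bytes can hold a 4-byte pointer in the first word, the second word, or both, which is why the code falls back to memclrHasPointers there. Hypothetical key types showing those three 32-bit layouts:

package main

import (
	"fmt"
	"unsafe"
)

// Three hypothetical key layouts that are 8 bytes on a 32-bit target:
// the pointer can sit in the first word, the second word, or both,
// so no single nil store covers every case.
type ptrFirst struct {
	p *byte
	n int32
}

type ptrSecond struct {
	n int32
	p *byte
}

type ptrBoth struct {
	p, q *byte
}

func main() {
	// On a 32-bit target each of these is 8 bytes and eligible for
	// the fast64 map routines; on 64-bit they are larger.
	fmt.Println(unsafe.Sizeof(ptrFirst{}), unsafe.Sizeof(ptrSecond{}), unsafe.Sizeof(ptrBoth{}))
}
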
@@ -348,6 +354,11 @@ search:
 			}
 		notLast:
 			h.count--
+			// Reset the hash seed to make it more difficult for attackers to
+			// repeatedly trigger hash collisions. See issue 25237.
+			if h.count == 0 {
+				h.hash0 = fastrand()
+			}
 			break search
 		}
 	}
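
The final hunk reseeds the map's hash when the last entry is deleted: with zero entries no bucket placement depends on the old seed, so installing a fresh hash0 is free, and any collision set an attacker assembled against the old seed (issue 25237) stops working. A toy, outside-the-runtime sketch of the same reseed-on-empty pattern, built on hash/maphash with hypothetical names:

package main

import (
	"fmt"
	"hash/maphash"
)

// set is a toy seeded hash set illustrating reseed-on-empty;
// none of these names are the runtime's.
type set struct {
	seed  maphash.Seed
	items map[uint64][]string
	count int
}

func newSet() *set {
	return &set{seed: maphash.MakeSeed(), items: map[uint64][]string{}}
}

func (s *set) hash(k string) uint64 { return maphash.String(s.seed, k) }

func (s *set) add(k string) {
	h := s.hash(k)
	for _, v := range s.items[h] {
		if v == k {
			return
		}
	}
	s.items[h] = append(s.items[h], k)
	s.count++
}

func (s *set) delete(k string) {
	h := s.hash(k)
	bucket := s.items[h]
	for i, v := range bucket {
		if v == k {
			s.items[h] = append(bucket[:i], bucket[i+1:]...)
			s.count--
			break
		}
	}
	if s.count == 0 {
		// Same idea as h.hash0 = fastrand(): once the set is empty,
		// pick a fresh seed so learned collisions stop working.
		s.items = map[uint64][]string{}
		s.seed = maphash.MakeSeed()
	}
}

func main() {
	s := newSet()
	s.add("a")
	old := s.seed
	s.delete("a")
	fmt.Println(old != s.seed) // true: seed was reset when the set emptied
}
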