Diffstat (limited to 'libgo/go/runtime/map.go')
-rw-r--r--  libgo/go/runtime/map.go | 19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/libgo/go/runtime/map.go b/libgo/go/runtime/map.go
index 1155fee..5b9d7102 100644
--- a/libgo/go/runtime/map.go
+++ b/libgo/go/runtime/map.go
@@ -177,8 +177,8 @@ type bmap struct {
// If you modify hiter, also change cmd/compile/internal/gc/reflect.go to indicate
// the layout of this structure.
type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/internal/gc/range.go).
- elem unsafe.Pointer // Must be in second position (see cmd/internal/gc/range.go).
+ key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/compile/internal/gc/range.go).
+ elem unsafe.Pointer // Must be in second position (see cmd/compile/internal/gc/range.go).
t *maptype
h *hmap
buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
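For context on the "must be in first/second position" requirement: the compiled form of a range-over-map loop reads hiter.key and hiter.elem at fixed offsets rather than by field name, and ends the loop when key becomes nil, so the field positions are part of the contract between cmd/compile and the runtime. A minimal stand-alone sketch of that offset-based access (iterState is a demo stand-in, not the real hiter; unsafe.Add needs Go 1.17+):

package main

import (
	"fmt"
	"unsafe"
)

// iterState mimics only the first two fields of runtime.hiter. Generated
// range-loop code reads them by offset, so their order is load-bearing.
type iterState struct {
	key  unsafe.Pointer // offset 0: nil here means iteration is done
	elem unsafe.Pointer // offset ptrSize
}

func main() {
	k, v := "answer", 42
	it := iterState{key: unsafe.Pointer(&k), elem: unsafe.Pointer(&v)}

	// Read the fields the way generated code does: by fixed offset,
	// not by name.
	base := unsafe.Pointer(&it)
	keyPtr := *(*unsafe.Pointer)(base)
	elemPtr := *(*unsafe.Pointer)(unsafe.Add(base, unsafe.Sizeof(uintptr(0))))

	fmt.Println(*(*string)(keyPtr), *(*int)(elemPtr)) // answer 42
}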
@@ -634,7 +634,7 @@ again:
if h.growing() {
growWork(t, h, bucket)
}
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+ b := (*bmap)(add(h.buckets, bucket*uintptr(t.bucketsize)))
top := tophash(hash)
var inserti *uint8
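The change just above replaces open-coded pointer arithmetic with the runtime's add helper, which (in runtime/stubs.go, modulo pragmas) performs the same computation:

func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

Behaviorally identical, but it keeps the Pointer-to-uintptr-to-Pointer round trip inside a single expression, the pattern the unsafe.Pointer rules sanction, and it matches the add calls already used elsewhere in this function (e.g. the insertk computation in the next hunk).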
@@ -685,7 +685,7 @@ bucketloop:
}
if inserti == nil {
- // all current buckets are full, allocate a new one.
+ // The current bucket and all the overflow buckets connected to it are full, allocate a new one.
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
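To make the sharpened comment concrete: mapassign scans the target bucket and then each overflow bucket chained off it, and only when every slot in that whole chain is occupied does it call newoverflow. A toy, self-contained model of the chain-walking policy (bucket, insertSlot, and slotsPerBucket are illustrative names, not runtime API; the real bmap holds 8 key/elem slots plus tophash bytes):

package main

import "fmt"

const slotsPerBucket = 8 // matches the runtime's bucketCnt

// bucket is a toy stand-in for runtime.bmap: a fixed number of slots
// plus a link to an overflow bucket.
type bucket struct {
	used     int
	overflow *bucket
}

// insertSlot walks the bucket and its overflow chain looking for a free
// slot; only when the entire chain is full does it allocate a new
// overflow bucket (the case the clarified comment describes).
func insertSlot(b *bucket) *bucket {
	for {
		if b.used < slotsPerBucket {
			b.used++
			return b
		}
		if b.overflow == nil {
			break
		}
		b = b.overflow
	}
	nb := &bucket{used: 1}
	b.overflow = nb
	return nb
}

func main() {
	head := &bucket{}
	for i := 0; i < 20; i++ {
		insertSlot(head)
	}
	for b, depth := head, 0; b != nil; b, depth = b.overflow, depth+1 {
		fmt.Printf("overflow depth %d: %d/%d slots used\n", depth, b.used, slotsPerBucket)
	}
}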
@@ -815,6 +815,11 @@ search:
}
notLast:
h.count--
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ if h.count == 0 {
+ h.hash0 = fastrand()
+ }
break search
}
}
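Why reseed here: Go maps hash with a per-map random seed (hmap.hash0) so that attackers cannot precompute colliding keys, but a long-lived map keeps one seed, so keys observed to collide keep colliding. The moment the map becomes empty is the one point where the seed can change without disturbing the placement of existing entries. The seed-dependence itself is the same idea hash/maphash exposes publicly; a small demonstration (not the runtime's code path; maphash.String needs Go 1.19+):

package main

import (
	"fmt"
	"hash/maphash"
)

func main() {
	key := "crafted-colliding-key"
	s1, s2 := maphash.MakeSeed(), maphash.MakeSeed()
	// The same key hashes differently under different seeds, so a set of
	// keys that collided under the old seed almost certainly stops
	// colliding once the map reseeds itself.
	fmt.Println(maphash.String(s1, key) == maphash.String(s2, key)) // almost certainly false
}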
@@ -1051,6 +1056,10 @@ func mapclear(t *maptype, h *hmap) {
h.noverflow = 0
h.count = 0
+ // Reset the hash seed to make it more difficult for attackers to
+ // repeatedly trigger hash collisions. See issue 25237.
+ h.hash0 = fastrand()
+
// Keep the mapextra allocation but clear any extra information.
if h.extra != nil {
*h.extra = mapextra{}
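mapclear is the other place where the map is known to be empty, so it gets the same reseeding. From ordinary Go code, mapclear is reached through the delete-everything idiom the compiler recognizes; a sketch (the lowering to runtime.mapclear is a compiler optimization, observable only indirectly):

package main

import "fmt"

func main() {
	m := map[string]int{"a": 1, "b": 2, "c": 3}
	// The compiler lowers this exact loop shape to one runtime.mapclear
	// call rather than per-key mapdelete calls; after this change that
	// call also picks a fresh hash seed.
	for k := range m {
		delete(m, k)
	}
	fmt.Println(len(m)) // 0; buckets are kept for reuse
}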
@@ -1429,5 +1438,5 @@ func reflectlite_maplen(h *hmap) int {
return h.count
}
-const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go:zeroValSize
+const maxZero = 1024 // must match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize
var zeroVal [maxZero]byte
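zeroVal is the shared all-zero buffer that a lookup of a missing key copies from when the element type is at most maxZero bytes, which is why reflect/value.go and the compiler's walk.go must agree on its size. The observable contract is just Go's usual missing-key semantics; a small check (the 512-byte size is illustrative, chosen to fit under maxZero):

package main

import "fmt"

func main() {
	type big struct{ buf [512]byte } // 512 <= maxZero, so lookups can copy from zeroVal
	m := map[string]big{}
	v := m["absent"] // missing key: the runtime hands back a pointer into zeroVal to copy from
	fmt.Println(v.buf == [512]byte{}) // true: a byte-for-byte zero value
}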