aboutsummaryrefslogtreecommitdiff
path: root/libgo/go
diff options
context:
space:
mode:
authorIan Lance Taylor <ian@gcc.gnu.org>2017-01-10 03:59:20 +0000
committerIan Lance Taylor <ian@gcc.gnu.org>2017-01-10 03:59:20 +0000
commit0c22e4415fe9e88acaa99e72d33f4500d557ce68 (patch)
treeab834b9b0b4730baa18d49e3c47ae5033b722809 /libgo/go
parent8462d909a7439fb0bf7095144dbf36524c7e61ea (diff)
downloadgcc-0c22e4415fe9e88acaa99e72d33f4500d557ce68.zip
gcc-0c22e4415fe9e88acaa99e72d33f4500d557ce68.tar.gz
gcc-0c22e4415fe9e88acaa99e72d33f4500d557ce68.tar.bz2
compiler, runtime: drop size arguments to hash/equal functions
Drop the size arguments for the hash/equal functions stored in type descriptors. Types know what size they are. To make this work, generate hash/equal functions for types that can use an identity comparison but are not a standard size and alignment. Drop the multiplications by 33 in the generated hash code and the reflect package hash code. They are not necessary since we started passing a seed value around, as the seed includes the hash of the earlier values. Copy the algorithms for standard types from the Go 1.7 runtime, replacing the C functions. Reviewed-on: https://go-review.googlesource.com/34983 From-SVN: r244256
Diffstat (limited to 'libgo/go')
-rw-r--r--libgo/go/reflect/type.go26
-rw-r--r--libgo/go/runtime/alg.go177
-rw-r--r--libgo/go/runtime/hashmap.go30
-rw-r--r--libgo/go/runtime/hashmap_fast.go12
-rw-r--r--libgo/go/runtime/type.go4
5 files changed, 197 insertions, 52 deletions
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index 13b326f..4f13f14 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -254,8 +254,8 @@ type rtype struct {
size uintptr
hash uint32 // hash of type; avoids computation in hash tables
- hashfn func(unsafe.Pointer, uintptr, uintptr) uintptr // hash function
- equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool // equality function
+ hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
+ equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
gc unsafe.Pointer // garbage collection data
string *string // string form; unnecessary but undeniably useful
@@ -2203,23 +2203,20 @@ func StructOf(fields []StructField) Type {
typ.gc = unsafe.Pointer(&gc[0])
}
- typ.hashfn = func(p unsafe.Pointer, seed, size uintptr) uintptr {
+ typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
ret := seed
- for i, ft := range typ.fields {
- if i > 0 {
- ret *= 33
- }
+ for _, ft := range typ.fields {
o := unsafe.Pointer(uintptr(p) + ft.offset)
- ret = ft.typ.hashfn(o, ret, ft.typ.size)
+ ret = ft.typ.hashfn(o, ret)
}
return ret
}
- typ.equalfn = func(p, q unsafe.Pointer, size uintptr) bool {
+ typ.equalfn = func(p, q unsafe.Pointer) bool {
for _, ft := range typ.fields {
pi := unsafe.Pointer(uintptr(p) + ft.offset)
qi := unsafe.Pointer(uintptr(q) + ft.offset)
- if !ft.typ.equalfn(pi, qi, ft.typ.size) {
+ if !ft.typ.equalfn(pi, qi) {
return false
}
}
@@ -2348,19 +2345,18 @@ func ArrayOf(count int, elem Type) Type {
array.kind &^= kindDirectIface
- array.hashfn = func(p unsafe.Pointer, seed, size uintptr) uintptr {
+ array.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
ret := seed
for i := 0; i < count; i++ {
- ret *= 33
- ret = typ.hashfn(p, ret, typ.size)
+ ret = typ.hashfn(p, ret)
p = unsafe.Pointer(uintptr(p) + typ.size)
}
return ret
}
- array.equalfn = func(p1, p2 unsafe.Pointer, size uintptr) bool {
+ array.equalfn = func(p1, p2 unsafe.Pointer) bool {
for i := 0; i < count; i++ {
- if !typ.equalfn(p1, p2, typ.size) {
+ if !typ.equalfn(p1, p2) {
return false
}
p1 = unsafe.Pointer(uintptr(p1) + typ.size)
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go
index 5331231..426b7f6 100644
--- a/libgo/go/runtime/alg.go
+++ b/libgo/go/runtime/alg.go
@@ -12,8 +12,30 @@ import (
// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
+//go:linkname memhash0 runtime.memhash0
+//go:linkname memhash8 runtime.memhash8
+//go:linkname memhash16 runtime.memhash16
+//go:linkname memhash32 runtime.memhash32
+//go:linkname memhash64 runtime.memhash64
+//go:linkname memhash128 runtime.memhash128
+//go:linkname strhash runtime.strhash
+//go:linkname f32hash runtime.f32hash
+//go:linkname f64hash runtime.f64hash
+//go:linkname c64hash runtime.c64hash
+//go:linkname c128hash runtime.c128hash
//go:linkname interhash runtime.interhash
//go:linkname nilinterhash runtime.nilinterhash
+//go:linkname memequal0 runtime.memequal0
+//go:linkname memequal8 runtime.memequal8
+//go:linkname memequal16 runtime.memequal16
+//go:linkname memequal32 runtime.memequal32
+//go:linkname memequal64 runtime.memequal64
+//go:linkname memequal128 runtime.memequal128
+//go:linkname strequal runtime.strequal
+//go:linkname f32equal runtime.f32equal
+//go:linkname f64equal runtime.f64equal
+//go:linkname c64equal runtime.c64equal
+//go:linkname c128equal runtime.c128equal
//go:linkname interequal runtime.interequal
//go:linkname nilinterequal runtime.nilinterequal
//go:linkname efaceeq runtime.efaceeq
@@ -32,6 +54,25 @@ const (
c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
)
+func memhash0(p unsafe.Pointer, h uintptr) uintptr {
+ return h
+}
+func memhash8(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 1)
+}
+func memhash16(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 2)
+}
+func memhash32(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 4)
+}
+func memhash64(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 8)
+}
+func memhash128(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, 16)
+}
+
var useAeshash bool
// in C code
@@ -46,6 +87,50 @@ func aeshashstr(p unsafe.Pointer, h uintptr) uintptr {
return aeshashbody(unsafe.Pointer(ps.str), h, uintptr(ps.len), aeskeysched[:])
}
+func strhash(a unsafe.Pointer, h uintptr) uintptr {
+ x := (*stringStruct)(a)
+ return memhash(x.str, h, uintptr(x.len))
+}
+
+// NOTE: Because NaN != NaN, a map can contain any
+// number of (mostly useless) entries keyed with NaNs.
+// To avoid long hash chains, we assign a random number
+// as the hash value for a NaN.
+
+func f32hash(p unsafe.Pointer, h uintptr) uintptr {
+ f := *(*float32)(p)
+ switch {
+ case f == 0:
+ return c1 * (c0 ^ h) // +0, -0
+ case f != f:
+ return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
+ default:
+ return memhash(p, h, 4)
+ }
+}
+
+func f64hash(p unsafe.Pointer, h uintptr) uintptr {
+ f := *(*float64)(p)
+ switch {
+ case f == 0:
+ return c1 * (c0 ^ h) // +0, -0
+ case f != f:
+ return c1 * (c0 ^ h ^ uintptr(fastrand1())) // any kind of NaN
+ default:
+ return memhash(p, h, 8)
+ }
+}
+
+func c64hash(p unsafe.Pointer, h uintptr) uintptr {
+ x := (*[2]float32)(p)
+ return f32hash(unsafe.Pointer(&x[1]), f32hash(unsafe.Pointer(&x[0]), h))
+}
+
+func c128hash(p unsafe.Pointer, h uintptr) uintptr {
+ x := (*[2]float64)(p)
+ return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h))
+}
+
func interhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr {
a := (*iface)(p)
tab := a.tab
@@ -58,13 +143,13 @@ func interhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr {
panic(errorString("hash of unhashable type " + *t.string))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0, t.size)
+ return c1 * fn(unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0, t.size)
+ return c1 * fn(a.data, h^c0)
}
}
-func nilinterhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr {
+func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
a := (*eface)(p)
t := a._type
if t == nil {
@@ -75,20 +160,51 @@ func nilinterhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr {
panic(errorString("hash of unhashable type " + *t.string))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0, t.size)
+ return c1 * fn(unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0, t.size)
+ return c1 * fn(a.data, h^c0)
}
}
+func memequal0(p, q unsafe.Pointer) bool {
+ return true
+}
+func memequal8(p, q unsafe.Pointer) bool {
+ return *(*int8)(p) == *(*int8)(q)
+}
+func memequal16(p, q unsafe.Pointer) bool {
+ return *(*int16)(p) == *(*int16)(q)
+}
+func memequal32(p, q unsafe.Pointer) bool {
+ return *(*int32)(p) == *(*int32)(q)
+}
+func memequal64(p, q unsafe.Pointer) bool {
+ return *(*int64)(p) == *(*int64)(q)
+}
+func memequal128(p, q unsafe.Pointer) bool {
+ return *(*[2]int64)(p) == *(*[2]int64)(q)
+}
+func f32equal(p, q unsafe.Pointer) bool {
+ return *(*float32)(p) == *(*float32)(q)
+}
+func f64equal(p, q unsafe.Pointer) bool {
+ return *(*float64)(p) == *(*float64)(q)
+}
+func c64equal(p, q unsafe.Pointer) bool {
+ return *(*complex64)(p) == *(*complex64)(q)
+}
+func c128equal(p, q unsafe.Pointer) bool {
+ return *(*complex128)(p) == *(*complex128)(q)
+}
+func strequal(p, q unsafe.Pointer) bool {
+ return *(*string)(p) == *(*string)(q)
+}
func interequal(p, q unsafe.Pointer, size uintptr) bool {
return ifaceeq(*(*iface)(p), *(*iface)(q))
}
-
func nilinterequal(p, q unsafe.Pointer, size uintptr) bool {
return efaceeq(*(*eface)(p), *(*eface)(q))
}
-
func efaceeq(x, y eface) bool {
t := x._type
if !eqtype(t, y._type) {
@@ -104,9 +220,8 @@ func efaceeq(x, y eface) bool {
if isDirectIface(t) {
return x.data == y.data
}
- return eq(x.data, y.data, t.size)
+ return eq(x.data, y.data)
}
-
func ifaceeq(x, y iface) bool {
xtab := x.tab
if xtab == nil && y.tab == nil {
@@ -126,7 +241,7 @@ func ifaceeq(x, y iface) bool {
if isDirectIface(t) {
return x.data == y.data
}
- return eq(x.data, y.data, t.size)
+ return eq(x.data, y.data)
}
func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool {
@@ -144,7 +259,7 @@ func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool {
if isDirectIface(t) {
return x.data == p
}
- return eq(x.data, p, t.size)
+ return eq(x.data, p)
}
func ifaceefaceeq(x iface, y eface) bool {
@@ -165,7 +280,7 @@ func ifaceefaceeq(x iface, y eface) bool {
if isDirectIface(xt) {
return x.data == y.data
}
- return eq(x.data, y.data, xt.size)
+ return eq(x.data, y.data)
}
func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool {
@@ -182,7 +297,7 @@ func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool {
if isDirectIface(t) {
return x.data == p
}
- return eq(x.data, p, t.size)
+ return eq(x.data, p)
}
func eqstring(x, y string) bool {
@@ -213,13 +328,47 @@ func cmpstring(x, y string) int {
return 0
}
+// For the unsafe.Pointer type descriptor in libgo/runtime/go-unsafe-pointer.c.
+
+func pointerhash(p unsafe.Pointer, h uintptr) uintptr {
+ return memhash(p, h, unsafe.Sizeof(unsafe.Pointer(nil)))
+}
+
+func pointerequal(p, q unsafe.Pointer) bool {
+ return *(*unsafe.Pointer)(p) == *(*unsafe.Pointer)(q)
+}
+
// Force the creation of function descriptors for equality and hash
// functions. These will be referenced directly by the compiler.
var _ = memhash
+var _ = memhash0
+var _ = memhash8
+var _ = memhash16
+var _ = memhash32
+var _ = memhash64
+var _ = memhash128
+var _ = strhash
+var _ = f32hash
+var _ = f64hash
+var _ = c64hash
+var _ = c128hash
var _ = interhash
-var _ = interequal
var _ = nilinterhash
+var _ = memequal0
+var _ = memequal8
+var _ = memequal16
+var _ = memequal32
+var _ = memequal64
+var _ = memequal128
+var _ = f32equal
+var _ = f64equal
+var _ = c64equal
+var _ = c128equal
+var _ = strequal
+var _ = interequal
var _ = nilinterequal
+var _ = pointerhash
+var _ = pointerequal
const hashRandomBytes = sys.PtrSize / 4 * 64
diff --git a/libgo/go/runtime/hashmap.go b/libgo/go/runtime/hashmap.go
index aaf4fb4..77b33f3 100644
--- a/libgo/go/runtime/hashmap.go
+++ b/libgo/go/runtime/hashmap.go
@@ -300,7 +300,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
}
hashfn := t.key.hashfn
equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ hash := hashfn(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -322,7 +322,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k, uintptr(t.keysize)) {
+ if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
@@ -355,7 +355,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
}
hashfn := t.key.hashfn
equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ hash := hashfn(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -377,7 +377,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k, uintptr(t.keysize)) {
+ if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
@@ -402,7 +402,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
}
hashfn := t.key.hashfn
equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ hash := hashfn(key, uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -424,7 +424,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if t.indirectkey {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k, uintptr(t.keysize)) {
+ if equalfn(key, k) {
v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
if t.indirectvalue {
v = *((*unsafe.Pointer)(v))
@@ -477,7 +477,7 @@ func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
hashfn := t.key.hashfn
equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ hash := hashfn(key, uintptr(h.hash0))
if h.buckets == nil {
h.buckets = newarray(t.bucket, 1)
@@ -512,7 +512,7 @@ again:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
- if !equalfn(key, k2, uintptr(t.keysize)) {
+ if !equalfn(key, k2) {
continue
}
// already have a mapping for key. Update it.
@@ -592,7 +592,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
hashfn := t.key.hashfn
equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0), uintptr(t.keysize))
+ hash := hashfn(key, uintptr(h.hash0))
bucket := hash & (uintptr(1)<<h.B - 1)
if h.oldbuckets != nil {
growWork(t, h, bucket)
@@ -612,7 +612,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
- if !equalfn(key, k2, uintptr(t.keysize)) {
+ if !equalfn(key, k2) {
continue
}
memclr(k, uintptr(t.keysize))
@@ -760,10 +760,10 @@ next:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
- if t.reflexivekey || equalfn(k2, k2, uintptr(t.keysize)) {
+ if t.reflexivekey || equalfn(k2, k2) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
- hash := hashfn(k2, uintptr(h.hash0), uintptr(t.keysize))
+ hash := hashfn(k2, uintptr(h.hash0))
if hash&(uintptr(1)<<it.B-1) != checkBucket {
continue
}
@@ -797,7 +797,7 @@ next:
if t.indirectkey {
k2 = *((*unsafe.Pointer)(k2))
}
- if t.reflexivekey || equalfn(k2, k2, uintptr(t.keysize)) {
+ if t.reflexivekey || equalfn(k2, k2) {
// Check the current hash table for the data.
// This code handles the case where the key
// has been deleted, updated, or deleted and reinserted.
@@ -913,9 +913,9 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
}
// Compute hash to make our evacuation decision (whether we need
// to send this key/value to bucket x or bucket y).
- hash := hashfn(k2, uintptr(h.hash0), uintptr(t.keysize))
+ hash := hashfn(k2, uintptr(h.hash0))
if h.flags&iterator != 0 {
- if !t.reflexivekey && !equalfn(k2, k2, uintptr(t.keysize)) {
+ if !t.reflexivekey && !equalfn(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
diff --git a/libgo/go/runtime/hashmap_fast.go b/libgo/go/runtime/hashmap_fast.go
index 4850b16..c6cad9d 100644
--- a/libgo/go/runtime/hashmap_fast.go
+++ b/libgo/go/runtime/hashmap_fast.go
@@ -25,7 +25,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -70,7 +70,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -115,7 +115,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -160,7 +160,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0), uintptr(t.keysize))
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -260,7 +260,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return unsafe.Pointer(&zeroVal[0])
}
dohash:
- hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0), uintptr(t.keysize))
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -363,7 +363,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return unsafe.Pointer(&zeroVal[0]), false
}
dohash:
- hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0), uintptr(t.keysize))
+ hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index 4751a59..cfee35a 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -16,8 +16,8 @@ type _type struct {
size uintptr
hash uint32
- hashfn func(unsafe.Pointer, uintptr, uintptr) uintptr
- equalfn func(unsafe.Pointer, unsafe.Pointer, uintptr) bool
+ hashfn func(unsafe.Pointer, uintptr) uintptr
+ equalfn func(unsafe.Pointer, unsafe.Pointer) bool
gc unsafe.Pointer
string *string