author    Ian Lance Taylor <ian@gcc.gnu.org>  2020-01-02 21:55:32 +0000
committer Ian Lance Taylor <ian@gcc.gnu.org>  2020-01-02 21:55:32 +0000
commit    10172a64cedd95b302a9709429a4832a879667da (patch)
tree      736b88eb69baa715a8ac1dbfb4b059eece8a049f /libgo/go/runtime
parent    9279b5ba4538da8041f074db3f5fcd9e8ecff93e (diff)
compiler, runtime, reflect: generate hash functions only for map keys
Right now we generate hash functions for all types, just in case they are used as map keys. That's a lot of wasted effort and binary size for types which will never be used as a map key. Instead, generate hash functions only for types that we know are map keys.

Just doing that is a bit too simple, since maps with an interface type as a key might have to hash any concrete key type that implements that interface. So for that case, implement hashing of such types at runtime (instead of with generated code). It will be slower, but only for maps with interface types as keys, and maybe only a bit slower as the aeshash time probably dominates the dispatch time.

Reorg where we keep the equals and hash functions. Move the hash function from the key type to the map type, saving a field in every non-map type. That leaves only one function in the alg structure, so get rid of that and just keep the equal function in the type descriptor itself.

While we're here, reorganize the rtype struct to more closely match the gc version.

This is the gofrontend version of https://golang.org/cl/191198.

Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/212843

From-SVN: r279848
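To make the tradeoff concrete, here is a small, self-contained example (an illustration, not part of the commit): a concrete key type lets the compiler pick a single hash function at compile time, while an interface key type forces the runtime to hash whatever dynamic type each key holds, which is exactly the case this change moves onto the generic runtime path.

package main

import "fmt"

func main() {
	// Concrete key type: one hash function for string keys,
	// chosen at compile time (fixed or compiler-generated).
	concrete := map[string]int{"a": 1}

	// Interface key type: the dynamic type of each key (string,
	// int, ...) must be hashed, so hashing dispatches at runtime
	// through the generic typehash path this commit adds.
	anyKey := map[interface{}]int{}
	anyKey["a"] = 1
	anyKey[42] = 2

	fmt.Println(concrete["a"], anyKey["a"], anyKey[42])
}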
Diffstat (limited to 'libgo/go/runtime')
-rw-r--r--  libgo/go/runtime/alg.go                |  89
-rw-r--r--  libgo/go/runtime/map.go                |  63
-rw-r--r--  libgo/go/runtime/map_benchmark_test.go |  30
-rw-r--r--  libgo/go/runtime/map_fast32.go         |  18
-rw-r--r--  libgo/go/runtime/map_fast64.go         |  18
-rw-r--r--  libgo/go/runtime/map_faststr.go        |  14
-rw-r--r--  libgo/go/runtime/map_test.go           |  61
-rw-r--r--  libgo/go/runtime/type.go               |  38

8 files changed, 240 insertions(+), 91 deletions(-)
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go
index f96a75d..e802fdd 100644
--- a/libgo/go/runtime/alg.go
+++ b/libgo/go/runtime/alg.go
@@ -69,6 +69,9 @@ func memhash128(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 16)
}
+// runtime variable to check if the processor we're running on
+// actually supports the instructions used by the AES-based
+// hash implementation.
var useAeshash bool
// in C code
@@ -134,14 +137,17 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
return h
}
t := *(**_type)(tab)
- fn := t.hashfn
- if fn == nil {
+ if t.equal == nil {
+ // Check hashability here. We could do this check inside
+ // typehash, but we want to report the topmost type in
+ // the error text (e.g. in a struct with a field of slice type
+ // we want to report the struct, not the slice).
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0)
+ return c1 * typehash(t, a.data, h^c0)
}
}
@@ -151,17 +157,74 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
if t == nil {
return h
}
- fn := t.hashfn
- if fn == nil {
+ if t.equal == nil {
+ // See comment in interhash above.
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0)
+ return c1 * typehash(t, a.data, h^c0)
+ }
+}
+
+// typehash computes the hash of the object of type t at address p.
+// h is the seed.
+// This function is seldom used. Most maps use for hashing either
+// fixed functions (e.g. f32hash) or compiler-generated functions
+// (e.g. for a type like struct { x, y string }). This implementation
+// is slower but more general and is used for hashing interface types
+// (called from interhash or nilinterhash, above) or for hashing in
+// maps generated by reflect.MapOf (reflect_typehash, below).
+func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ if t.tflag&tflagRegularMemory != 0 {
+ return memhash(p, h, t.size)
+ }
+ switch t.kind & kindMask {
+ case kindFloat32:
+ return f32hash(p, h)
+ case kindFloat64:
+ return f64hash(p, h)
+ case kindComplex64:
+ return c64hash(p, h)
+ case kindComplex128:
+ return c128hash(p, h)
+ case kindString:
+ return strhash(p, h)
+ case kindInterface:
+ i := (*interfacetype)(unsafe.Pointer(t))
+ if len(i.methods) == 0 {
+ return nilinterhash(p, h)
+ }
+ return interhash(p, h)
+ case kindArray:
+ a := (*arraytype)(unsafe.Pointer(t))
+ for i := uintptr(0); i < a.len; i++ {
+ h = typehash(a.elem, add(p, i*a.elem.size), h)
+ }
+ return h
+ case kindStruct:
+ s := (*structtype)(unsafe.Pointer(t))
+ for _, f := range s.fields {
+ // TODO: maybe we could hash several contiguous fields all at once.
+ if f.name != nil && *f.name == "_" {
+ continue
+ }
+ h = typehash(f.typ, add(p, f.offset()), h)
+ }
+ return h
+ default:
+ // Should never happen, as typehash should only be called
+ // with comparable types.
+ panic(errorString("hash of unhashable type " + t.string()))
}
}
+//go:linkname reflect_typehash reflect.typehash
+func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ return typehash(t, p, h)
+}
+
func memequal0(p, q unsafe.Pointer) bool {
return true
}
@@ -209,7 +272,7 @@ func efaceeq(x, y eface) bool {
if t == nil {
return true
}
- eq := t.equalfn
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
@@ -230,7 +293,7 @@ func ifaceeq(x, y iface) bool {
if t != *(**_type)(y.tab) {
return false
}
- eq := t.equalfn
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
@@ -251,7 +314,7 @@ func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool {
if xt != t {
return false
}
- eq := t.equalfn
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
@@ -272,7 +335,7 @@ func ifaceefaceeq(x iface, y eface) bool {
if xt != y._type {
return false
}
- eq := xt.equalfn
+ eq := xt.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + xt.string()))
}
@@ -289,7 +352,7 @@ func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool {
if x._type != t {
return false
}
- eq := t.equalfn
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
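The "topmost type" behavior described by the new comment in interhash is observable from ordinary Go code. A minimal sketch (illustration only, not part of the commit): storing a struct with a slice field in an interface-keyed map panics, and the error names the struct, not the slice inside it.

package main

import "fmt"

type T struct{ s []int } // the slice field makes T unhashable

func main() {
	defer func() {
		// Prints: runtime error: hash of unhashable type main.T
		// The topmost type T is reported, not its slice field.
		fmt.Println(recover())
	}()
	m := map[interface{}]bool{}
	m[T{}] = true // panics while hashing the key (nilinterhash above)
}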
diff --git a/libgo/go/runtime/map.go b/libgo/go/runtime/map.go
index 349577b..3672908 100644
--- a/libgo/go/runtime/map.go
+++ b/libgo/go/runtime/map.go
@@ -421,18 +421,16 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if msanenabled && h != nil {
msanread(key, t.key.size)
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- hashfn(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -459,7 +457,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
@@ -486,18 +484,16 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if msanenabled && h != nil {
msanread(key, t.key.size)
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- hashfn(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -524,7 +520,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
@@ -546,9 +542,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if h == nil || h.count == 0 {
return nil, nil
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -575,7 +569,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
@@ -625,11 +619,9 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
- // Set hashWriting after calling alg.hash, since alg.hash may panic,
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write.
h.flags ^= hashWriting
@@ -666,7 +658,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if !equalfn(key, k) {
+ if !t.key.equal(key, k) {
continue
}
// already have a mapping for key. Update it.
@@ -735,11 +727,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if msanenabled && h != nil {
msanread(key, t.key.size)
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- hashfn(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return
}
@@ -747,9 +737,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
throw("concurrent map writes")
}
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
- // Set hashWriting after calling alg.hash, since alg.hash may panic,
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write (delete).
h.flags ^= hashWriting
@@ -774,7 +764,7 @@ search:
if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
- if !equalfn(key, k2) {
+ if !t.key.equal(key, k2) {
continue
}
// Only clear key if there are pointers in it.
@@ -925,8 +915,6 @@ func mapiternext(it *hiter) {
b := it.bptr
i := it.i
checkBucket := it.checkBucket
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
next:
if b == nil {
@@ -980,10 +968,10 @@ next:
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
- if t.reflexivekey() || equalfn(k, k) {
+ if t.reflexivekey() || t.key.equal(k, k) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
- hash := hashfn(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&bucketMask(it.B) != checkBucket {
continue
}
@@ -1001,7 +989,7 @@ next:
}
}
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.reflexivekey() || equalfn(k, k)) {
+ !(t.reflexivekey() || t.key.equal(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
@@ -1238,8 +1226,8 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.hashfn(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equalfn(k2, k2) {
+ hash := t.hasher(k2, uintptr(h.hash0))
+ if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
@@ -1333,16 +1321,12 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
}
}
-func ismapkey(t *_type) bool {
- return t.hashfn != nil
-}
-
// Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
// Check invariants and reflects math.
- if !ismapkey(t.key) {
+ if t.key.equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
@@ -1445,10 +1429,5 @@ func reflectlite_maplen(h *hmap) int {
return h.count
}
-//go:linkname reflect_ismapkey reflect.ismapkey
-func reflect_ismapkey(t *_type) bool {
- return ismapkey(t)
-}
-
const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte
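A note on the "see issue 23734" lines above: even when the map is nil or empty, the key is still hashed first, so an unhashable key panics consistently regardless of map state. A small demonstration (illustrative, not from the commit):

package main

import "fmt"

type T struct{ f func() } // the func field makes T unhashable

func main() {
	defer func() {
		// Prints: runtime error: hash of unhashable type main.T
		fmt.Println(recover())
	}()
	var m map[interface{}]bool // nil map, never written to
	// The key is hashed even though the map is nil, so the
	// unhashable key still panics here (see issue 23734).
	delete(m, T{})
}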
diff --git a/libgo/go/runtime/map_benchmark_test.go b/libgo/go/runtime/map_benchmark_test.go
index d37dadc..cf04ead 100644
--- a/libgo/go/runtime/map_benchmark_test.go
+++ b/libgo/go/runtime/map_benchmark_test.go
@@ -483,3 +483,33 @@ func BenchmarkMapStringConversion(b *testing.B) {
})
}
}
+
+var BoolSink bool
+
+func BenchmarkMapInterfaceString(b *testing.B) {
+ m := map[interface{}]bool{}
+
+ for i := 0; i < 100; i++ {
+ m[fmt.Sprintf("%d", i)] = true
+ }
+
+ key := (interface{})("A")
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ BoolSink = m[key]
+ }
+}
+func BenchmarkMapInterfacePtr(b *testing.B) {
+ m := map[interface{}]bool{}
+
+ for i := 0; i < 100; i++ {
+ i := i
+ m[&i] = true
+ }
+
+ key := new(int)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ BoolSink = m[key]
+ }
+}
diff --git a/libgo/go/runtime/map_fast32.go b/libgo/go/runtime/map_fast32.go
index 57b3c0f..fdc7f0e 100644
--- a/libgo/go/runtime/map_fast32.go
+++ b/libgo/go/runtime/map_fast32.go
@@ -33,7 +33,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -73,7 +73,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -108,9 +108,9 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -198,9 +198,9 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -289,9 +289,9 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
@@ -408,7 +408,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.hashfn(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
diff --git a/libgo/go/runtime/map_fast64.go b/libgo/go/runtime/map_fast64.go
index af86f74..26c60ae 100644
--- a/libgo/go/runtime/map_fast64.go
+++ b/libgo/go/runtime/map_fast64.go
@@ -33,7 +33,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -73,7 +73,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -108,9 +108,9 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -198,9 +198,9 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -289,9 +289,9 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
@@ -408,7 +408,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.hashfn(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
diff --git a/libgo/go/runtime/map_faststr.go b/libgo/go/runtime/map_faststr.go
index 3c5175d..1775214 100644
--- a/libgo/go/runtime/map_faststr.go
+++ b/libgo/go/runtime/map_faststr.go
@@ -83,7 +83,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return unsafe.Pointer(&zeroVal[0])
}
dohash:
- hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -178,7 +178,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return unsafe.Pointer(&zeroVal[0]), false
}
dohash:
- hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -218,9 +218,9 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
throw("concurrent map writes")
}
key := stringStructOf(&s)
- hash := t.key.hashfn(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -314,9 +314,9 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
}
key := stringStructOf(&ky)
- hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
@@ -436,7 +436,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.hashfn(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index bc5f738..b13d269 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -1172,3 +1172,64 @@ func TestMapTombstones(t *testing.T) {
}
runtime.MapTombstoneCheck(m)
}
+
+type canString int
+
+func (c canString) String() string {
+ return fmt.Sprintf("%d", int(c))
+}
+
+func TestMapInterfaceKey(t *testing.T) {
+ // Test all the special cases in runtime.typehash.
+ type GrabBag struct {
+ f32 float32
+ f64 float64
+ c64 complex64
+ c128 complex128
+ s string
+ i0 interface{}
+ i1 interface {
+ String() string
+ }
+ a [4]string
+ }
+
+ m := map[interface{}]bool{}
+ // Put a bunch of data in m, so that a bad hash is likely to
+ // lead to a bad bucket, which will lead to a missed lookup.
+ for i := 0; i < 1000; i++ {
+ m[i] = true
+ }
+ m[GrabBag{f32: 1.0}] = true
+ if !m[GrabBag{f32: 1.0}] {
+ panic("f32 not found")
+ }
+ m[GrabBag{f64: 1.0}] = true
+ if !m[GrabBag{f64: 1.0}] {
+ panic("f64 not found")
+ }
+ m[GrabBag{c64: 1.0i}] = true
+ if !m[GrabBag{c64: 1.0i}] {
+ panic("c64 not found")
+ }
+ m[GrabBag{c128: 1.0i}] = true
+ if !m[GrabBag{c128: 1.0i}] {
+ panic("c128 not found")
+ }
+ m[GrabBag{s: "foo"}] = true
+ if !m[GrabBag{s: "foo"}] {
+ panic("string not found")
+ }
+ m[GrabBag{i0: "foo"}] = true
+ if !m[GrabBag{i0: "foo"}] {
+ panic("interface{} not found")
+ }
+ m[GrabBag{i1: canString(5)}] = true
+ if !m[GrabBag{i1: canString(5)}] {
+ panic("interface{String() string} not found")
+ }
+ m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true
+ if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] {
+ panic("array not found")
+ }
+}
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index 63ad310..94abbb8 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -12,18 +12,32 @@ import (
"unsafe"
)
+// tflag is documented in reflect/type.go.
+//
+// tflag values must be kept in sync with copies in:
+// go/types.cc
+// reflect/type.go
+// internal/reflectlite/type.go
+type tflag uint8
+
+const (
+ tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
+)
+
type _type struct {
size uintptr
ptrdata uintptr
hash uint32
- kind uint8
- align int8
+ tflag tflag
+ align uint8
fieldAlign uint8
- _ uint8
-
- hashfn func(unsafe.Pointer, uintptr) uintptr
- equalfn func(unsafe.Pointer, unsafe.Pointer) bool
-
+ kind uint8
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ // gcdata stores the GC type data for the garbage collector.
+ // If the KindGCProg bit is set in kind, gcdata is a GC program.
+ // Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
gcdata *byte
_string *string
*uncommontype
@@ -74,10 +88,12 @@ type interfacetype struct {
}
type maptype struct {
- typ _type
- key *_type
- elem *_type
- bucket *_type // internal type representing a hash bucket
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type // internal type representing a hash bucket
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot
elemsize uint8 // size of elem slot
bucketsize uint16 // size of bucket