author	Ian Lance Taylor <ian@gcc.gnu.org>	2016-12-08 16:37:54 +0000
committer	Ian Lance Taylor <ian@gcc.gnu.org>	2016-12-08 16:37:54 +0000
commit	453060a9062959ceb1522b8b99adeb01b2a3f7b7 (patch)
tree	34a4ee9d0eb2d5e59c8a8ffc86e75b6e7b9a572c /libgo/go
parent	b2264b0964560e724010aac2faf4f6a3ec2729f7 (diff)
runtime: copy memory hash code from Go 1.7
Rewrite the AES hashing code from gc assembler to C code using
intrinsics.  The resulting code generates the same hash code for the
same input as the gc code--that doesn't matter as such, but testing it
ensures that the C code does something useful.

Also change mips64pe32le to mips64p32le in configure script--noticed
during CL review.

Reviewed-on: https://go-review.googlesource.com/34022

From-SVN: r243445
Diffstat (limited to 'libgo/go')
-rw-r--r--	libgo/go/runtime/alg.go	45
-rw-r--r--	libgo/go/runtime/hash32.go	94
-rw-r--r--	libgo/go/runtime/hash64.go	94
-rw-r--r--	libgo/go/runtime/os_gccgo.go	23
-rw-r--r--	libgo/go/runtime/runtime2.go	12
-rw-r--r--	libgo/go/runtime/stubs.go	6
-rw-r--r--	libgo/go/runtime/unaligned1.go	17
-rw-r--r--	libgo/go/runtime/unaligned2.go	20
8 files changed, 305 insertions(+), 6 deletions(-)
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go
index 8f7c3c0..5ec19d0 100644
--- a/libgo/go/runtime/alg.go
+++ b/libgo/go/runtime/alg.go
@@ -23,12 +23,29 @@ import (
//go:linkname efacevaleq runtime.efacevaleq
//go:linkname eqstring runtime.eqstring
//go:linkname cmpstring runtime.cmpstring
+//
+// Temporary to be called from C code.
+//go:linkname alginit runtime.alginit
const (
c0 = uintptr((8-sys.PtrSize)/4*2860486313 + (sys.PtrSize-4)/4*33054211828000289)
c1 = uintptr((8-sys.PtrSize)/4*3267000013 + (sys.PtrSize-4)/4*23344194077549503)
)
+var useAeshash bool
+
+// in C code
+func aeshashbody(p unsafe.Pointer, h, s uintptr, sched []byte) uintptr
+
+func aeshash(p unsafe.Pointer, h, s uintptr) uintptr {
+ return aeshashbody(p, h, s, aeskeysched[:])
+}
+
+func aeshashstr(p unsafe.Pointer, h uintptr) uintptr {
+ ps := (*stringStruct)(p)
+ return aeshashbody(unsafe.Pointer(ps.str), h, uintptr(ps.len), aeskeysched[:])
+}
+
func interhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr {
a := (*iface)(p)
tab := a.tab
@@ -198,7 +215,35 @@ func cmpstring(x, y string) int {
// Force the creation of function descriptors for equality and hash
// functions. These will be referenced directly by the compiler.
+var _ = memhash
var _ = interhash
var _ = interequal
var _ = nilinterhash
var _ = nilinterequal
+
+const hashRandomBytes = sys.PtrSize / 4 * 64
+
+// used in asm_{386,amd64}.s to seed the hash function
+var aeskeysched [hashRandomBytes]byte
+
+// used in hash{32,64}.go to seed the hash function
+var hashkey [4]uintptr
+
+func alginit() {
+ // Install aes hash algorithm if we have the instructions we need
+ if (GOARCH == "386" || GOARCH == "amd64") &&
+ GOOS != "nacl" &&
+ cpuid_ecx&(1<<25) != 0 && // aes (aesenc)
+ cpuid_ecx&(1<<9) != 0 && // ssse3 (pshufb)
+ cpuid_ecx&(1<<19) != 0 { // sse4.1 (pinsr{d,q})
+ useAeshash = true
+ // Initialize with random data so hash collisions will be hard to engineer.
+ getRandomData(aeskeysched[:])
+ return
+ }
+ getRandomData((*[len(hashkey) * sys.PtrSize]byte)(unsafe.Pointer(&hashkey))[:])
+ hashkey[0] |= 1 // make sure these numbers are odd
+ hashkey[1] |= 1
+ hashkey[2] |= 1
+ hashkey[3] |= 1
+}
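
Note: alginit enables the AES hash only when three CPUID.1:ECX feature
bits are all set.  A minimal standalone sketch of the same bit tests;
the hard-coded ECX value below is hypothetical, whereas in gccgo the
real value arrives from the C startup code via setCpuidECX:

package main

import "fmt"

// Hypothetical CPUID.1:ECX value; on real hardware this comes from
// the C startup code through runtime.setCpuidECX.
const cpuidECX = 1<<25 | 1<<19 | 1<<9

func main() {
	hasAES := cpuidECX&(1<<25) != 0   // aes (aesenc)
	hasSSSE3 := cpuidECX&(1<<9) != 0  // ssse3 (pshufb)
	hasSSE41 := cpuidECX&(1<<19) != 0 // sse4.1 (pinsr{d,q})
	// aeshash is installed only when all three are available.
	fmt.Println(hasAES && hasSSSE3 && hasSSE41)
}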
diff --git a/libgo/go/runtime/hash32.go b/libgo/go/runtime/hash32.go
new file mode 100644
index 0000000..cfb3a58
--- /dev/null
+++ b/libgo/go/runtime/hash32.go
@@ -0,0 +1,94 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Hashing algorithm inspired by
+// xxhash: https://code.google.com/p/xxhash/
+// cityhash: https://code.google.com/p/cityhash/
+
+// +build 386 arm armbe m68k mipso32 mipsn32 mips mipsle ppc s390 sparc
+
+package runtime
+
+import "unsafe"
+
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname memhash runtime.memhash
+
+const (
+ // Constants for multiplication: four random odd 32-bit numbers.
+ m1 = 3168982561
+ m2 = 3339683297
+ m3 = 832293441
+ m4 = 2336365089
+)
+
+func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
+ if GOARCH == "386" && GOOS != "nacl" && useAeshash {
+ return aeshash(p, seed, s)
+ }
+ h := uint32(seed + s*hashkey[0])
+tail:
+ switch {
+ case s == 0:
+ case s < 4:
+ h ^= uint32(*(*byte)(p))
+ h ^= uint32(*(*byte)(add(p, s>>1))) << 8
+ h ^= uint32(*(*byte)(add(p, s-1))) << 16
+ h = rotl_15(h*m1) * m2
+ case s == 4:
+ h ^= readUnaligned32(p)
+ h = rotl_15(h*m1) * m2
+ case s <= 8:
+ h ^= readUnaligned32(p)
+ h = rotl_15(h*m1) * m2
+ h ^= readUnaligned32(add(p, s-4))
+ h = rotl_15(h*m1) * m2
+ case s <= 16:
+ h ^= readUnaligned32(p)
+ h = rotl_15(h*m1) * m2
+ h ^= readUnaligned32(add(p, 4))
+ h = rotl_15(h*m1) * m2
+ h ^= readUnaligned32(add(p, s-8))
+ h = rotl_15(h*m1) * m2
+ h ^= readUnaligned32(add(p, s-4))
+ h = rotl_15(h*m1) * m2
+ default:
+ v1 := h
+ v2 := uint32(seed * hashkey[1])
+ v3 := uint32(seed * hashkey[2])
+ v4 := uint32(seed * hashkey[3])
+ for s >= 16 {
+ v1 ^= readUnaligned32(p)
+ v1 = rotl_15(v1*m1) * m2
+ p = add(p, 4)
+ v2 ^= readUnaligned32(p)
+ v2 = rotl_15(v2*m2) * m3
+ p = add(p, 4)
+ v3 ^= readUnaligned32(p)
+ v3 = rotl_15(v3*m3) * m4
+ p = add(p, 4)
+ v4 ^= readUnaligned32(p)
+ v4 = rotl_15(v4*m4) * m1
+ p = add(p, 4)
+ s -= 16
+ }
+ h = v1 ^ v2 ^ v3 ^ v4
+ goto tail
+ }
+ h ^= h >> 17
+ h *= m3
+ h ^= h >> 13
+ h *= m4
+ h ^= h >> 16
+ return uintptr(h)
+}
+
+// Note: in order to get the compiler to issue rotl instructions, we
+// need to constant fold the shift amount by hand.
+// TODO: convince the compiler to issue rotl instructions after inlining.
+func rotl_15(x uint32) uint32 {
+ return (x << 15) | (x >> (32 - 15))
+}
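
As an illustration of the mixing round used throughout memhash above
(xor in a word, multiply, rotate by a constant, multiply again), here
is a minimal standalone sketch with arbitrary sample values, reusing
the m1/m2 constants from this file:

package main

import "fmt"

const (
	m1 = 3168982561
	m2 = 3339683297
)

// Same operation as rotl_15 above; the constant shift amount is what
// lets the compiler emit a single rotate instruction.
func rotl15(x uint32) uint32 { return (x << 15) | (x >> (32 - 15)) }

func main() {
	h := uint32(0x12345678) // running hash state (arbitrary sample)
	w := uint32(0xdeadbeef) // next 4-byte input word (arbitrary sample)
	h ^= w
	h = rotl15(h*m1) * m2 // one round, as in each case of memhash
	fmt.Printf("%#x\n", h)
}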
diff --git a/libgo/go/runtime/hash64.go b/libgo/go/runtime/hash64.go
new file mode 100644
index 0000000..551d5b5
--- /dev/null
+++ b/libgo/go/runtime/hash64.go
@@ -0,0 +1,94 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Hashing algorithm inspired by
+// xxhash: https://code.google.com/p/xxhash/
+// cityhash: https://code.google.com/p/cityhash/
+
+// +build amd64 amd64p32 arm64 mips64 mips64le ppc64 ppc64le s390x alpha arm64be ia64 mipso64 mipsn64 mips64p32 mips64p32le sparc64
+
+package runtime
+
+import "unsafe"
+
+// For gccgo, use go:linkname to rename compiler-called functions to
+// themselves, so that the compiler will export them.
+//
+//go:linkname memhash runtime.memhash
+
+const (
+ // Constants for multiplication: four random odd 64-bit numbers.
+ m1 = 16877499708836156737
+ m2 = 2820277070424839065
+ m3 = 9497967016996688599
+ m4 = 15839092249703872147
+)
+
+func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
+ if GOARCH == "amd64" && GOOS != "nacl" && useAeshash {
+ return aeshash(p, seed, s)
+ }
+ h := uint64(seed + s*hashkey[0])
+tail:
+ switch {
+ case s == 0:
+ case s < 4:
+ h ^= uint64(*(*byte)(p))
+ h ^= uint64(*(*byte)(add(p, s>>1))) << 8
+ h ^= uint64(*(*byte)(add(p, s-1))) << 16
+ h = rotl_31(h*m1) * m2
+ case s <= 8:
+ h ^= uint64(readUnaligned32(p))
+ h ^= uint64(readUnaligned32(add(p, s-4))) << 32
+ h = rotl_31(h*m1) * m2
+ case s <= 16:
+ h ^= readUnaligned64(p)
+ h = rotl_31(h*m1) * m2
+ h ^= readUnaligned64(add(p, s-8))
+ h = rotl_31(h*m1) * m2
+ case s <= 32:
+ h ^= readUnaligned64(p)
+ h = rotl_31(h*m1) * m2
+ h ^= readUnaligned64(add(p, 8))
+ h = rotl_31(h*m1) * m2
+ h ^= readUnaligned64(add(p, s-16))
+ h = rotl_31(h*m1) * m2
+ h ^= readUnaligned64(add(p, s-8))
+ h = rotl_31(h*m1) * m2
+ default:
+ v1 := h
+ v2 := uint64(seed * hashkey[1])
+ v3 := uint64(seed * hashkey[2])
+ v4 := uint64(seed * hashkey[3])
+ for s >= 32 {
+ v1 ^= readUnaligned64(p)
+ v1 = rotl_31(v1*m1) * m2
+ p = add(p, 8)
+ v2 ^= readUnaligned64(p)
+ v2 = rotl_31(v2*m2) * m3
+ p = add(p, 8)
+ v3 ^= readUnaligned64(p)
+ v3 = rotl_31(v3*m3) * m4
+ p = add(p, 8)
+ v4 ^= readUnaligned64(p)
+ v4 = rotl_31(v4*m4) * m1
+ p = add(p, 8)
+ s -= 32
+ }
+ h = v1 ^ v2 ^ v3 ^ v4
+ goto tail
+ }
+
+ h ^= h >> 29
+ h *= m3
+ h ^= h >> 32
+ return uintptr(h)
+}
+
+// Note: in order to get the compiler to issue rotl instructions, we
+// need to constant fold the shift amount by hand.
+// TODO: convince the compiler to issue rotl instructions after inlining.
+func rotl_31(x uint64) uint64 {
+ return (x << 31) | (x >> (64 - 31))
+}
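
The default case above processes long inputs in four independent lanes
so the multiplies can overlap.  A standalone sketch of that bulk loop,
assuming a little-endian target (where readUnaligned64 matches
binary.LittleEndian.Uint64) and arbitrary seeds in place of hashkey:

package main

import (
	"encoding/binary"
	"fmt"
)

const (
	m1 = 16877499708836156737
	m2 = 2820277070424839065
	m3 = 9497967016996688599
	m4 = 15839092249703872147
)

func rotl31(x uint64) uint64 { return (x << 31) | (x >> (64 - 31)) }

// bulk mirrors the 32-byte-per-iteration loop in memhash's default
// case: four lanes, each doing xor, multiply, rotate, multiply.
func bulk(b []byte, v1, v2, v3, v4 uint64) uint64 {
	for len(b) >= 32 {
		v1 = rotl31((v1^binary.LittleEndian.Uint64(b[0:]))*m1) * m2
		v2 = rotl31((v2^binary.LittleEndian.Uint64(b[8:]))*m2) * m3
		v3 = rotl31((v3^binary.LittleEndian.Uint64(b[16:]))*m3) * m4
		v4 = rotl31((v4^binary.LittleEndian.Uint64(b[24:]))*m4) * m1
		b = b[32:]
	}
	return v1 ^ v2 ^ v3 ^ v4
}

func main() {
	data := make([]byte, 64)
	for i := range data {
		data[i] = byte(i)
	}
	fmt.Printf("%#x\n", bulk(data, 1, 2, 3, 4)) // seeds are arbitrary
}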
diff --git a/libgo/go/runtime/os_gccgo.go b/libgo/go/runtime/os_gccgo.go
new file mode 100644
index 0000000..4609432
--- /dev/null
+++ b/libgo/go/runtime/os_gccgo.go
@@ -0,0 +1,23 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package runtime
+
+import (
+ "unsafe"
+)
+
+var urandom_dev = []byte("/dev/urandom\x00")
+
+func getRandomData(r []byte) {
+ if startupRandomData != nil {
+ n := copy(r, startupRandomData)
+ extendRandom(r, n)
+ return
+ }
+ fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0)
+ n := read(fd, unsafe.Pointer(&r[0]), int32(len(r)))
+ closefd(fd)
+ extendRandom(r, int(n))
+}
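
getRandomData prefers the startup-provided bytes (the ELF AT_RANDOM
vector) and otherwise reads /dev/urandom through raw file-descriptor
calls.  A rough user-space equivalent of that fallback path, sketched
with the standard library; unlike this sketch, the runtime version
tolerates short reads and stretches them with extendRandom:

package main

import (
	"fmt"
	"io"
	"os"
)

// fillRandom is a stand-in for getRandomData's fallback path:
// fill r from /dev/urandom.
func fillRandom(r []byte) error {
	f, err := os.Open("/dev/urandom")
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.ReadFull(f, r)
	return err
}

func main() {
	buf := make([]byte, 32)
	if err := fillRandom(buf); err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Printf("%x\n", buf)
}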
diff --git a/libgo/go/runtime/runtime2.go b/libgo/go/runtime/runtime2.go
index c8db7ad..4712318 100644
--- a/libgo/go/runtime/runtime2.go
+++ b/libgo/go/runtime/runtime2.go
@@ -5,6 +5,7 @@
package runtime
import (
+ "runtime/internal/sys"
"unsafe"
)
@@ -668,7 +669,6 @@ type forcegcstate struct {
// the ELF AT_RANDOM auxiliary vector (vdso_linux_amd64.go or os_linux_386.go).
var startupRandomData []byte
-/*
// extendRandom extends the random numbers in r[:n] to the whole slice r.
// Treats n<0 as n==0.
func extendRandom(r []byte, n int) {
@@ -689,7 +689,6 @@ func extendRandom(r []byte, n int) {
}
}
}
-*/
// deferred subroutine calls
// This is the gccgo version.
@@ -770,11 +769,12 @@ var (
sched schedt
-// newprocs int32
+ // newprocs int32
+
+ // Information about what cpu features are available.
+ // Set on startup.
+ cpuid_ecx uint32
-// Information about what cpu features are available.
-// Set on startup in asm_{x86,amd64}.s.
-// cpuid_ecx uint32
// cpuid_edx uint32
// cpuid_ebx7 uint32
// lfenceBeforeRdtsc bool
diff --git a/libgo/go/runtime/stubs.go b/libgo/go/runtime/stubs.go
index b2f1829..b4fee6b 100644
--- a/libgo/go/runtime/stubs.go
+++ b/libgo/go/runtime/stubs.go
@@ -248,6 +248,12 @@ func funcPC(f interface{}) uintptr {
return **(**uintptr)(i.data)
}
+// For gccgo, to communicate from the C code to the Go code.
+//go:linkname setCpuidECX runtime.setCpuidECX
+func setCpuidECX(v uint32) {
+ cpuid_ecx = v
+}
+
// typedmemmove copies a typed value.
// For gccgo for now.
//go:nosplit
diff --git a/libgo/go/runtime/unaligned1.go b/libgo/go/runtime/unaligned1.go
new file mode 100644
index 0000000..c94f19e
--- /dev/null
+++ b/libgo/go/runtime/unaligned1.go
@@ -0,0 +1,17 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build 386 amd64 amd64p32 arm64 ppc64 ppc64le s390x ppc s390 arm64be
+
+package runtime
+
+import "unsafe"
+
+func readUnaligned32(p unsafe.Pointer) uint32 {
+ return *(*uint32)(p)
+}
+
+func readUnaligned64(p unsafe.Pointer) uint64 {
+ return *(*uint64)(p)
+}
diff --git a/libgo/go/runtime/unaligned2.go b/libgo/go/runtime/unaligned2.go
new file mode 100644
index 0000000..e52d6ce
--- /dev/null
+++ b/libgo/go/runtime/unaligned2.go
@@ -0,0 +1,20 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build arm mips64 mips64le armbe m68k mipso32 mipsn32 mips mipsle sparc alpha ia64 mipso64 mipsn64 mips64p32 mips64p32le sparc64
+
+package runtime
+
+import "unsafe"
+
+// Note: These routines perform the read with an unspecified endianness.
+func readUnaligned32(p unsafe.Pointer) uint32 {
+ q := (*[4]byte)(p)
+ return uint32(q[0]) + uint32(q[1])<<8 + uint32(q[2])<<16 + uint32(q[3])<<24
+}
+
+func readUnaligned64(p unsafe.Pointer) uint64 {
+ q := (*[8]byte)(p)
+ return uint64(q[0]) + uint64(q[1])<<8 + uint64(q[2])<<16 + uint64(q[3])<<24 + uint64(q[4])<<32 + uint64(q[5])<<40 + uint64(q[6])<<48 + uint64(q[7])<<56
+}
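
To see why two variants exist, compare a native unaligned load
(unaligned1.go) with the byte-compose version above on the same
misaligned address.  A minimal sketch, runnable on a target that
tolerates unaligned loads such as amd64; on a little-endian machine
the two values agree, while on a big-endian one they would differ,
which is why the comment above calls the endianness unspecified:

package main

import (
	"fmt"
	"unsafe"
)

// Native load, as in unaligned1.go.
func readNative32(p unsafe.Pointer) uint32 { return *(*uint32)(p) }

// Byte-by-byte compose, as in unaligned2.go; safe on targets that
// fault on unaligned loads.
func readBytes32(p unsafe.Pointer) uint32 {
	q := (*[4]byte)(p)
	return uint32(q[0]) + uint32(q[1])<<8 + uint32(q[2])<<16 + uint32(q[3])<<24
}

func main() {
	b := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
	p := unsafe.Pointer(&b[1]) // deliberately misaligned address
	fmt.Printf("%#x %#x\n", readNative32(p), readBytes32(p))
}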