author     Ian Lance Taylor <ian@gcc.gnu.org>   2019-05-03 16:44:50 +0000
committer  Ian Lance Taylor <ian@gcc.gnu.org>   2019-05-03 16:44:50 +0000
commit     b8754cd88a24097bdc0c018ea480e6f726f6baa7 (patch)
tree       c74567ce247b0b3ab51c2b111256694ba980a0b5 /libgo
parent     b34bfeb1979da5a970ef9ac5213178d67f269dbb (diff)
parent     b119c05542a9574a1c27f9bba63efbafc12b248b (diff)
download   gcc-b8754cd88a24097bdc0c018ea480e6f726f6baa7.zip
           gcc-b8754cd88a24097bdc0c018ea480e6f726f6baa7.tar.gz
           gcc-b8754cd88a24097bdc0c018ea480e6f726f6baa7.tar.bz2
Merge from trunk revision 270851.
From-SVN: r270855
Diffstat (limited to 'libgo')
-rw-r--r--  libgo/go/reflect/all_test.go              5
-rw-r--r--  libgo/go/reflect/type.go                 17
-rw-r--r--  libgo/go/runtime/iface.go               275
-rw-r--r--  libgo/go/runtime/map.go                   1
-rw-r--r--  libgo/go/runtime/pprof/proto.go           2
-rw-r--r--  libgo/go/runtime/proc.go                  2
-rw-r--r--  libgo/go/runtime/runtime_mmap_test.go     5
-rw-r--r--  libgo/runtime/go-callers.c                2

8 files changed, 262 insertions, 47 deletions
diff --git a/libgo/go/reflect/all_test.go b/libgo/go/reflect/all_test.go
index 599ab27..9452255 100644
--- a/libgo/go/reflect/all_test.go
+++ b/libgo/go/reflect/all_test.go
@@ -4136,7 +4136,6 @@ func TestArrayOfGenericAlg(t *testing.T) {
}

func TestArrayOfDirectIface(t *testing.T) {
-    t.Skip("skipping test because gccgo uses a different directiface value")
    {
        type T [1]*byte
        i1 := Zero(TypeOf(T{})).Interface()
@@ -4775,9 +4774,6 @@ func TestStructOfGenericAlg(t *testing.T) {
    }
}

-/*
-gccgo does not use the same directiface settings as gc.
-
func TestStructOfDirectIface(t *testing.T) {
    {
        type T struct{ X [1]*byte }
@@ -4826,7 +4822,6 @@ func TestStructOfDirectIface(t *testing.T) {
        }
    }
}
-*/

type StructI int
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index ea97b7d9..fb2e5d4 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -2204,7 +2204,14 @@ func StructOf(fields []StructField) Type {
        typ.equalfn = nil
    }

-    typ.kind &^= kindDirectIface
+    switch {
+    case len(fs) == 1 && !ifaceIndir(fs[0].typ):
+        // structs of 1 direct iface type can be direct
+        typ.kind |= kindDirectIface
+    default:
+        typ.kind &^= kindDirectIface
+    }
+
    typ.uncommonType = nil
    typ.ptrToThis = nil
@@ -2405,7 +2412,13 @@ func ArrayOf(count int, elem Type) Type {
        array.ptrdata = array.size // overestimate but ok; must match program
    }

-    array.kind &^= kindDirectIface
+    switch {
+    case count == 1 && !ifaceIndir(typ):
+        // array of 1 direct iface type can be direct
+        array.kind |= kindDirectIface
+    default:
+        array.kind &^= kindDirectIface
+    }

    esize := typ.size
diff --git a/libgo/go/runtime/iface.go b/libgo/go/runtime/iface.go
index 8ed67c1..1c3a5f3 100644
--- a/libgo/go/runtime/iface.go
+++ b/libgo/go/runtime/iface.go
@@ -5,6 +5,8 @@
package runtime

import (
+    "runtime/internal/atomic"
+    "runtime/internal/sys"
    "unsafe"
)

@@ -66,54 +68,166 @@ import (
// pointer to memory that holds the value. It follows from this that
// kindDirectIface can only be set for a type whose representation is
// simply a pointer. In the current gccgo implementation, this is set
-// only for pointer types (including unsafe.Pointer). In the future it
-// could also be set for other types: channels, maps, functions,
-// single-field structs and single-element arrays whose single field
-// is simply a pointer.
+// for types that are pointer-shaped, including unsafe.Pointer, channels,
+// maps, functions, single-field structs and single-element arrays whose
+// single field is simply a pointer-shaped type.
// For a nil interface value both fields in the interface struct are nil.

-// Return the interface method table for a value of type rhs converted
-// to an interface of type lhs.
-func getitab(lhs, rhs *_type, canfail bool) unsafe.Pointer {
-    if rhs == nil {
-        return nil
-    }
+// itabs are statically allocated or persistently allocated. They are
+// never freed. For itabs allocated at run time, they are cached in
+// itabTable, so we reuse the same itab for the same (interface, concrete)
+// type pair. The gc runtime prepopulates the cache with statically
+// allocated itabs. Currently we don't do that as we don't have a way to
+// find all the statically allocated itabs.

-    if lhs.kind&kindMask != kindInterface {
-        throw("getitab called for non-interface type")
+const itabInitSize = 512
+
+var (
+    itabLock      mutex                               // lock for accessing itab table
+    itabTable     = &itabTableInit                    // pointer to current table
+    itabTableInit = itabTableType{size: itabInitSize} // starter table
+)
+
+// Cache entry type of itab table.
+// For gccgo, this is not the data type we used in the interface header.
+type itab struct {
+    inter   *interfacetype
+    methods [2]unsafe.Pointer // method table. variable sized. first entry is the type descriptor.
+}
+
+func (m *itab) _type() *_type {
+    return (*_type)(m.methods[0])
+}
+
+// Note: change the formula in the mallocgc call in itabAdd if you change these fields.
+type itabTableType struct {
+    size    uintptr             // length of entries array. Always a power of 2.
+    count   uintptr             // current number of filled entries.
+    entries [itabInitSize]*itab // really [size] large
+}
+
+func itabHashFunc(inter *interfacetype, typ *_type) uintptr {
+    // compiler has provided some good hash codes for us.
+    return uintptr(inter.typ.hash ^ typ.hash)
+}
+
+// find finds the given interface/type pair in t.
+// Returns nil if the given interface/type pair isn't present.
+func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab {
+    // Implemented using quadratic probing.
+    // Probe sequence is h(i) = h0 + i*(i+1)/2 mod 2^k.
+    // We're guaranteed to hit all table entries using this probe sequence.
+    mask := t.size - 1
+    h := itabHashFunc(inter, typ) & mask
+    for i := uintptr(1); ; i++ {
+        p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
+        // Use atomic read here so if we see m != nil, we also see
+        // the initializations of the fields of m.
+        // m := *p
+        m := (*itab)(atomic.Loadp(unsafe.Pointer(p)))
+        if m == nil {
+            return nil
+        }
+        if m.inter == inter && m._type() == typ {
+            return m
+        }
+        h += i
+        h &= mask
    }
}

-    lhsi := (*interfacetype)(unsafe.Pointer(lhs))
+// itabAdd adds the given itab to the itab hash table.
+// itabLock must be held.
+func itabAdd(m *itab) {
+    // Bugs can lead to calling this while mallocing is set,
+    // typically because this is called while panicing.
+    // Crash reliably, rather than only when we need to grow
+    // the hash table.
+    if getg().m.mallocing != 0 {
+        throw("malloc deadlock")
+    }

-    if len(lhsi.methods) == 0 {
-        throw("getitab called for empty interface type")
+    t := itabTable
+    if t.count >= 3*(t.size/4) { // 75% load factor
+        // Grow hash table.
+        // t2 = new(itabTableType) + some additional entries
+        // We lie and tell malloc we want pointer-free memory because
+        // all the pointed-to values are not in the heap.
+        t2 := (*itabTableType)(mallocgc((2+2*t.size)*sys.PtrSize, nil, true))
+        t2.size = t.size * 2
+
+        // Copy over entries.
+        // Note: while copying, other threads may look for an itab and
+        // fail to find it. That's ok, they will then try to get the itab lock
+        // and as a consequence wait until this copying is complete.
+        iterate_itabs(t2.add)
+        if t2.count != t.count {
+            throw("mismatched count during itab table copy")
+        }
+        // Publish new hash table. Use an atomic write: see comment in getitab.
+        atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2))
+        // Adopt the new table as our own.
+        t = itabTable
+        // Note: the old table can be GC'ed here.
    }
+    t.add(m)
+}

-    if rhs.uncommontype == nil || len(rhs.methods) == 0 {
-        if canfail {
-            return nil
+// add adds the given itab to itab table t.
+// itabLock must be held.
+func (t *itabTableType) add(m *itab) {
+    // See comment in find about the probe sequence.
+    // Insert new itab in the first empty spot in the probe sequence.
+    mask := t.size - 1
+    h := itabHashFunc(m.inter, m._type()) & mask
+    for i := uintptr(1); ; i++ {
+        p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize))
+        m2 := *p
+        if m2 == m {
+            // A given itab may be used in more than one module
+            // and thanks to the way global symbol resolution works, the
+            // pointed-to itab may already have been inserted into the
+            // global 'hash'.
+            return
        }
-        panic(&TypeAssertionError{nil, rhs, lhs, *lhsi.methods[0].name})
+        if m2 == nil {
+            // Use atomic write here so if a reader sees m, it also
+            // sees the correctly initialized fields of m.
+            // NoWB is ok because m is not in heap memory.
+            // *p = m
+            atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m))
+            t.count++
+            return
+        }
+        h += i
+        h &= mask
    }
}

-    methods := make([]unsafe.Pointer, len(lhsi.methods)+1)
-    methods[0] = unsafe.Pointer(rhs)
+// init fills in the m.methods array with all the code pointers for
+// the m.inter/m._type pair. If the type does not implement the interface,
+// it sets m.methods[1] to nil and returns the name of an interface function that is missing.
+// It is ok to call this multiple times on the same m, even concurrently.
+func (m *itab) init() string {
+    inter := m.inter
+    typ := m._type()
+    ni := len(inter.methods) + 1
+    methods := (*[1 << 16]unsafe.Pointer)(unsafe.Pointer(&m.methods[0]))[:ni:ni]
+    var m1 unsafe.Pointer

    ri := 0
-    for li := range lhsi.methods {
-        lhsMethod := &lhsi.methods[li]
+    for li := range inter.methods {
+        lhsMethod := &inter.methods[li]
        var rhsMethod *method

        for {
-            if ri >= len(rhs.methods) {
-                if canfail {
-                    return nil
-                }
-                panic(&TypeAssertionError{nil, rhs, lhs, *lhsMethod.name})
+            if ri >= len(typ.methods) {
+                m.methods[1] = nil
+                return *lhsMethod.name
            }

-            rhsMethod = &rhs.methods[ri]
+            rhsMethod = &typ.methods[ri]
            if (lhsMethod.name == rhsMethod.name || *lhsMethod.name == *rhsMethod.name) &&
                (lhsMethod.pkgPath == rhsMethod.pkgPath || *lhsMethod.pkgPath == *rhsMethod.pkgPath) {
                break
@@ -123,17 +237,96 @@ func getitab(lhs, rhs *_type, canfail bool) unsafe.Pointer {
        }

        if !eqtype(lhsMethod.typ, rhsMethod.mtyp) {
-            if canfail {
-                return nil
-            }
-            panic(&TypeAssertionError{nil, rhs, lhs, *lhsMethod.name})
+            m.methods[1] = nil
+            return *lhsMethod.name
        }

-        methods[li+1] = unsafe.Pointer(rhsMethod.tfn)
+        if li == 0 {
+            m1 = rhsMethod.tfn // we'll set m.methods[1] at the end
+        } else {
+            methods[li+1] = rhsMethod.tfn
+        }
        ri++
    }
+    m.methods[1] = m1
+    return ""
+}

-    return unsafe.Pointer(&methods[0])
+func iterate_itabs(fn func(*itab)) {
+    // Note: only runs during stop the world or with itabLock held,
+    // so no other locks/atomics needed.
+    t := itabTable
+    for i := uintptr(0); i < t.size; i++ {
+        m := *(**itab)(add(unsafe.Pointer(&t.entries), i*sys.PtrSize))
+        if m != nil {
+            fn(m)
+        }
+    }
+}
+
+// Return the interface method table for a value of type rhs converted
+// to an interface of type lhs.
+func getitab(lhs, rhs *_type, canfail bool) unsafe.Pointer {
+    if rhs == nil {
+        return nil
+    }
+
+    if lhs.kind&kindMask != kindInterface {
+        throw("getitab called for non-interface type")
+    }
+
+    lhsi := (*interfacetype)(unsafe.Pointer(lhs))
+
+    if len(lhsi.methods) == 0 {
+        throw("getitab called for empty interface type")
+    }
+
+    if rhs.uncommontype == nil || len(rhs.methods) == 0 {
+        if canfail {
+            return nil
+        }
+        panic(&TypeAssertionError{nil, rhs, lhs, *lhsi.methods[0].name})
+    }
+
+    var m *itab
+
+    // First, look in the existing table to see if we can find the itab we need.
+    // This is by far the most common case, so do it without locks.
+    // Use atomic to ensure we see any previous writes done by the thread
+    // that updates the itabTable field (with atomic.Storep in itabAdd).
+    t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable)))
+    if m = t.find(lhsi, rhs); m != nil {
+        goto finish
+    }
+
+    // Not found. Grab the lock and try again.
+    lock(&itabLock)
+    if m = itabTable.find(lhsi, rhs); m != nil {
+        unlock(&itabLock)
+        goto finish
+    }
+
+    // Entry doesn't exist yet. Make a new entry & add it.
+    m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(lhsi.methods)-1)*sys.PtrSize, 0, &memstats.other_sys))
+    m.inter = lhsi
+    m.methods[0] = unsafe.Pointer(rhs)
+    m.init()
+    itabAdd(m)
+    unlock(&itabLock)
+finish:
+    if m.methods[1] != nil {
+        return unsafe.Pointer(&m.methods[0])
+    }
+    if canfail {
+        return nil
+    }
+    // this can only happen if the conversion
+    // was already done once using the , ok form
+    // and we have a cached negative result.
+    // The cached result doesn't record which
+    // interface function was missing, so initialize
+    // the itab again to get the missing function name.
+    panic(&TypeAssertionError{nil, rhs, lhs, m.init()})
}

// Return the interface method table for a value of type rhs converted
@@ -264,7 +457,11 @@ func ifaceE2T2(t *_type, e eface, ret unsafe.Pointer) bool {
        typedmemclr(t, ret)
        return false
    } else {
-        typedmemmove(t, ret, e.data)
+        if isDirectIface(t) {
+            *(*unsafe.Pointer)(ret) = e.data
+        } else {
+            typedmemmove(t, ret, e.data)
+        }
        return true
    }
}
@@ -275,7 +472,11 @@ func ifaceI2T2(t *_type, i iface, ret unsafe.Pointer) bool {
        typedmemclr(t, ret)
        return false
    } else {
-        typedmemmove(t, ret, i.data)
+        if isDirectIface(t) {
+            *(*unsafe.Pointer)(ret) = i.data
+        } else {
+            typedmemmove(t, ret, i.data)
+        }
        return true
    }
}
diff --git a/libgo/go/runtime/map.go b/libgo/go/runtime/map.go
index 5dd5283..b210f5a 100644
--- a/libgo/go/runtime/map.go
+++ b/libgo/go/runtime/map.go
@@ -72,6 +72,7 @@ import (
//go:linkname mapaccess2_fat runtime.mapaccess2_fat
//go:linkname mapassign runtime.mapassign
//go:linkname mapdelete runtime.mapdelete
+//go:linkname mapclear runtime.mapclear
//go:linkname mapiterinit runtime.mapiterinit
//go:linkname mapiternext runtime.mapiternext
diff --git a/libgo/go/runtime/pprof/proto.go b/libgo/go/runtime/pprof/proto.go
index 27cd09e..ef3eeb1 100644
--- a/libgo/go/runtime/pprof/proto.go
+++ b/libgo/go/runtime/pprof/proto.go
@@ -29,7 +29,7 @@ func funcPC(f interface{}) uintptr {
        data unsafe.Pointer
    }
    i := (*iface)(unsafe.Pointer(&f))
-    r := **(**uintptr)(i.data)
+    r := *(*uintptr)(i.data)
    if internalcpu.FunctionDescriptors {
        // With PPC64 ELF ABI v1 function descriptors the
        // function address is a pointer to a struct whose
diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go
index 0e6c9e1..8146c1d 100644
--- a/libgo/go/runtime/proc.go
+++ b/libgo/go/runtime/proc.go
@@ -446,7 +446,7 @@ func releaseSudog(s *sudog) {
//go:nosplit
func funcPC(f interface{}) uintptr {
    i := (*iface)(unsafe.Pointer(&f))
-    r := **(**uintptr)(i.data)
+    r := *(*uintptr)(i.data)
    if cpu.FunctionDescriptors {
        // With PPC64 ELF ABI v1 function descriptors the
        // function address is a pointer to a struct whose
diff --git a/libgo/go/runtime/runtime_mmap_test.go b/libgo/go/runtime/runtime_mmap_test.go
index c004041..188fd5d 100644
--- a/libgo/go/runtime/runtime_mmap_test.go
+++ b/libgo/go/runtime/runtime_mmap_test.go
@@ -32,6 +32,11 @@ func TestPhysPageSize(t *testing.T) {
        t.Fatalf("Mmap: %v", err)
    }

+    if runtime.GOOS == "aix" {
+        // AIX does not allow mapping a range that is already mapped.
+        runtime.Munmap(unsafe.Pointer(uintptr(b)), 2*ps)
+    }
+
    // Mmap should fail at a half page into the buffer.
    _, err = runtime.Mmap(unsafe.Pointer(uintptr(b)+ps/2), ps, 0, runtime.MAP_ANON|runtime.MAP_PRIVATE|runtime.MAP_FIXED, -1, 0)
    if err == 0 {
diff --git a/libgo/runtime/go-callers.c b/libgo/runtime/go-callers.c
index a72b4e8..31ff474 100644
--- a/libgo/runtime/go-callers.c
+++ b/libgo/runtime/go-callers.c
@@ -75,7 +75,7 @@ callback (void *data, uintptr_t pc, const char *filename, int lineno,
        return 0;
      if (p - function > 3 && __builtin_strcmp (p - 3, "..r") == 0)
        return 0;
-      if (p - function > 6 && __builtin_strcmp (p - 6, "..stub") == 0)
+      if (p - function > 6 && __builtin_strncmp (p - 6, "..stub", 6) == 0)
        return 0;
    }
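The two recurring ideas in the libgo part of this merge are the run-time itab cache probed with a quadratic sequence (runtime/iface.go) and direct, pointer-word storage of pointer-shaped single-field structs and single-element arrays in interfaces (reflect/type.go and the ifaceE2T2/ifaceI2T2 fast path). The two sketches below are standalone illustrations only, not part of the patch; names such as size, h0 and wrapper are hypothetical.

The first sketch checks the property the find and add comments rely on: the incremental update h += i; h &= mask follows the documented probe sequence h(i) = h0 + i*(i+1)/2 mod 2^k, and for a power-of-two table that sequence visits every slot.

```go
package main

import "fmt"

func main() {
	const size = 512 // a power of two, like itabInitSize in the patch
	mask := uintptr(size - 1)
	h0 := uintptr(0xdeadbeef) & mask // stand-in for itabHashFunc(...) & mask

	seen := make(map[uintptr]bool)
	h := h0
	for i := uintptr(0); i < size; i++ {
		// Closed-form position of probe i, from the comment in find().
		closed := (h0 + i*(i+1)/2) & mask
		if h != closed {
			fmt.Println("mismatch at probe", i)
			return
		}
		seen[h] = true
		// Incremental update, as in itabTableType.find and add.
		h += i + 1
		h &= mask
	}
	// Prints: 512 distinct slots out of 512
	fmt.Println(len(seen), "distinct slots out of", size)
}
```

The second sketch shows the user-visible effect of the direct-iface change: a struct whose only field is a pointer is stored directly in the interface data word, so converting it to an interface needs no extra allocation, and the type-assertion fast path can copy a single word. It inspects the empty-interface header with unsafe, assuming the two-word (type descriptor, data) layout described in iface.go; on a toolchain that still uses the indirect representation it prints false instead of true.

```go
package main

import (
	"fmt"
	"unsafe"
)

// wrapper is a hypothetical pointer-shaped type: a single pointer field.
type wrapper struct{ p *int }

func main() {
	x := 42
	var i interface{} = wrapper{&x}

	// Empty-interface header: type descriptor word and data word.
	type eface struct {
		typ, data unsafe.Pointer
	}
	e := (*eface)(unsafe.Pointer(&i))

	// With the direct representation the data word is the pointer itself.
	fmt.Println(e.data == unsafe.Pointer(&x))
}
```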