From 8d266165b9c1b303efab1881120b9d5a6feb437a Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 26 Apr 2019 17:20:55 +0000 Subject: runtime: fix TestPhysPageSize on AIX AIX doesn't allow to mmap an address range which is already mmap. Therefore, once the region has been allocated, it must munmap before being able to play with it. The corresponding Go Toolchain patch is CL 174059. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/174138 From-SVN: r270615 --- libgo/go/runtime/runtime_mmap_test.go | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'libgo/go') diff --git a/libgo/go/runtime/runtime_mmap_test.go b/libgo/go/runtime/runtime_mmap_test.go index c004041..188fd5d 100644 --- a/libgo/go/runtime/runtime_mmap_test.go +++ b/libgo/go/runtime/runtime_mmap_test.go @@ -32,6 +32,11 @@ func TestPhysPageSize(t *testing.T) { t.Fatalf("Mmap: %v", err) } + if runtime.GOOS == "aix" { + // AIX does not allow mapping a range that is already mapped. + runtime.Munmap(unsafe.Pointer(uintptr(b)), 2*ps) + } + // Mmap should fail at a half page into the buffer. _, err = runtime.Mmap(unsafe.Pointer(uintptr(b)+ps/2), ps, 0, runtime.MAP_ANON|runtime.MAP_PRIVATE|runtime.MAP_FIXED, -1, 0) if err == 0 { -- cgit v1.1 From 1da37f43b21e0c35e57b627edfa99ec80d2976ee Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 1 May 2019 20:27:36 +0000 Subject: runtime: persistentalloc and cache itabs Previously, each time we do an interface conversion for which the method table is not known at compile time, we allocate a new method table. This CL ports the mechanism of itab caching from the gc runtime, adapted to our itab representation and method finding mechanism. With the cache, we reuse the same itab for the same (interface, concrete) type pair. This reduces allocations in interface conversions. Unlike the gc runtime, we don't prepopulate the cache with statically allocated itabs, as currently we don't have a way to find them. This means we don't deduplicate run-time allocated itabs with compile-time allocated ones. But that is not too bad -- it is just a cache anyway. As now itabs are never freed, it is also possible to drop the write barrier for writing the first word of an interface header. I'll leave this optimization for the future. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/171617 From-SVN: r270778 --- libgo/go/runtime/iface.go | 256 ++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 225 insertions(+), 31 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/runtime/iface.go b/libgo/go/runtime/iface.go index 8ed67c1..dc92476 100644 --- a/libgo/go/runtime/iface.go +++ b/libgo/go/runtime/iface.go @@ -5,6 +5,8 @@ package runtime import ( + "runtime/internal/atomic" + "runtime/internal/sys" "unsafe" ) @@ -73,47 +75,160 @@ import ( // For a nil interface value both fields in the interface struct are nil. -// Return the interface method table for a value of type rhs converted -// to an interface of type lhs. -func getitab(lhs, rhs *_type, canfail bool) unsafe.Pointer { - if rhs == nil { - return nil - } +// itabs are statically allocated or persistently allocated. They are +// never freed. For itabs allocated at run time, they are cached in +// itabTable, so we reuse the same itab for the same (interface, concrete) +// type pair. The gc runtime prepopulates the cache with statically +// allocated itabs. Currently we don't do that as we don't have a way to +// find all the statically allocated itabs. 
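As a rough illustration of what the cache buys (a hypothetical snippet, not part of this patch): a conversion whose method table is only known at run time, such as asserting a value held in an empty interface to a non-empty interface, previously allocated a fresh method table on every conversion. With the cache, the same persistently allocated itab is returned each time, which testing.AllocsPerRun can make visible. The package layout and names below are invented for the sketch.

	package main

	import (
		"fmt"
		"testing"
	)

	type stringer interface{ String() string }

	type myInt int

	func (myInt) String() string { return "myInt" }

	func main() {
		var e interface{} = myInt(0) // boxing to interface{} needs no method table
		ok := false
		allocs := testing.AllocsPerRun(10000, func() {
			// Asserting to a non-empty interface goes through getitab.
			// With the itab cache the same persistently allocated itab
			// is reused, so this stops allocating per assertion.
			_, ok = e.(stringer)
		})
		fmt.Println(ok, "allocations per assertion:", allocs)
	}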
- if lhs.kind&kindMask != kindInterface { - throw("getitab called for non-interface type") +const itabInitSize = 512 + +var ( + itabLock mutex // lock for accessing itab table + itabTable = &itabTableInit // pointer to current table + itabTableInit = itabTableType{size: itabInitSize} // starter table +) + +// Cache entry type of itab table. +// For gccgo, this is not the data type we used in the interface header. +type itab struct { + inter *interfacetype + methods [2]unsafe.Pointer // method table. variable sized. first entry is the type descriptor. +} + +func (m *itab) _type() *_type { + return (*_type)(m.methods[0]) +} + +// Note: change the formula in the mallocgc call in itabAdd if you change these fields. +type itabTableType struct { + size uintptr // length of entries array. Always a power of 2. + count uintptr // current number of filled entries. + entries [itabInitSize]*itab // really [size] large +} + +func itabHashFunc(inter *interfacetype, typ *_type) uintptr { + // compiler has provided some good hash codes for us. + return uintptr(inter.typ.hash ^ typ.hash) +} + +// find finds the given interface/type pair in t. +// Returns nil if the given interface/type pair isn't present. +func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab { + // Implemented using quadratic probing. + // Probe sequence is h(i) = h0 + i*(i+1)/2 mod 2^k. + // We're guaranteed to hit all table entries using this probe sequence. + mask := t.size - 1 + h := itabHashFunc(inter, typ) & mask + for i := uintptr(1); ; i++ { + p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize)) + // Use atomic read here so if we see m != nil, we also see + // the initializations of the fields of m. + // m := *p + m := (*itab)(atomic.Loadp(unsafe.Pointer(p))) + if m == nil { + return nil + } + if m.inter == inter && m._type() == typ { + return m + } + h += i + h &= mask } +} - lhsi := (*interfacetype)(unsafe.Pointer(lhs)) +// itabAdd adds the given itab to the itab hash table. +// itabLock must be held. +func itabAdd(m *itab) { + // Bugs can lead to calling this while mallocing is set, + // typically because this is called while panicing. + // Crash reliably, rather than only when we need to grow + // the hash table. + if getg().m.mallocing != 0 { + throw("malloc deadlock") + } - if len(lhsi.methods) == 0 { - throw("getitab called for empty interface type") + t := itabTable + if t.count >= 3*(t.size/4) { // 75% load factor + // Grow hash table. + // t2 = new(itabTableType) + some additional entries + // We lie and tell malloc we want pointer-free memory because + // all the pointed-to values are not in the heap. + t2 := (*itabTableType)(mallocgc((2+2*t.size)*sys.PtrSize, nil, true)) + t2.size = t.size * 2 + + // Copy over entries. + // Note: while copying, other threads may look for an itab and + // fail to find it. That's ok, they will then try to get the itab lock + // and as a consequence wait until this copying is complete. + iterate_itabs(t2.add) + if t2.count != t.count { + throw("mismatched count during itab table copy") + } + // Publish new hash table. Use an atomic write: see comment in getitab. + atomicstorep(unsafe.Pointer(&itabTable), unsafe.Pointer(t2)) + // Adopt the new table as our own. + t = itabTable + // Note: the old table can be GC'ed here. } + t.add(m) +} - if rhs.uncommontype == nil || len(rhs.methods) == 0 { - if canfail { - return nil +// add adds the given itab to itab table t. +// itabLock must be held. 
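The probe sequence used by find above (and by add below) is worth spelling out: with increments of 1, 2, 3, ... the offsets from the starting hash are the triangular numbers i*(i+1)/2, and modulo a power-of-two table size these visit every slot, so both loops are guaranteed to terminate at a match or an empty slot. A small self-contained check of that property (illustration only, not runtime code):

	package main

	import "fmt"

	func main() {
		const size = 16 // table size; must be a power of two, like itabTable.size
		mask := uint(size - 1)
		h := uint(11) & mask // arbitrary starting hash h0
		seen := make(map[uint]bool)
		for i := uint(1); i <= size; i++ {
			seen[h] = true // current slot is h0 + i*(i-1)/2 mod size
			h += i         // same update as find/add: h += i; h &= mask
			h &= mask
		}
		// All 16 slots are visited before the sequence repeats.
		fmt.Printf("visited %d of %d slots\n", len(seen), size)
	}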
+func (t *itabTableType) add(m *itab) { + // See comment in find about the probe sequence. + // Insert new itab in the first empty spot in the probe sequence. + mask := t.size - 1 + h := itabHashFunc(m.inter, m._type()) & mask + for i := uintptr(1); ; i++ { + p := (**itab)(add(unsafe.Pointer(&t.entries), h*sys.PtrSize)) + m2 := *p + if m2 == m { + // A given itab may be used in more than one module + // and thanks to the way global symbol resolution works, the + // pointed-to itab may already have been inserted into the + // global 'hash'. + return } - panic(&TypeAssertionError{nil, rhs, lhs, *lhsi.methods[0].name}) + if m2 == nil { + // Use atomic write here so if a reader sees m, it also + // sees the correctly initialized fields of m. + // NoWB is ok because m is not in heap memory. + // *p = m + atomic.StorepNoWB(unsafe.Pointer(p), unsafe.Pointer(m)) + t.count++ + return + } + h += i + h &= mask } +} - methods := make([]unsafe.Pointer, len(lhsi.methods)+1) - methods[0] = unsafe.Pointer(rhs) +// init fills in the m.methods array with all the code pointers for +// the m.inter/m._type pair. If the type does not implement the interface, +// it sets m.methods[1] to nil and returns the name of an interface function that is missing. +// It is ok to call this multiple times on the same m, even concurrently. +func (m *itab) init() string { + inter := m.inter + typ := m._type() + ni := len(inter.methods) + 1 + methods := (*[1 << 16]unsafe.Pointer)(unsafe.Pointer(&m.methods[0]))[:ni:ni] + var m1 unsafe.Pointer ri := 0 - for li := range lhsi.methods { - lhsMethod := &lhsi.methods[li] + for li := range inter.methods { + lhsMethod := &inter.methods[li] var rhsMethod *method for { - if ri >= len(rhs.methods) { - if canfail { - return nil - } - panic(&TypeAssertionError{nil, rhs, lhs, *lhsMethod.name}) + if ri >= len(typ.methods) { + m.methods[1] = nil + return *lhsMethod.name } - rhsMethod = &rhs.methods[ri] + rhsMethod = &typ.methods[ri] if (lhsMethod.name == rhsMethod.name || *lhsMethod.name == *rhsMethod.name) && (lhsMethod.pkgPath == rhsMethod.pkgPath || *lhsMethod.pkgPath == *rhsMethod.pkgPath) { break @@ -123,17 +238,96 @@ func getitab(lhs, rhs *_type, canfail bool) unsafe.Pointer { } if !eqtype(lhsMethod.typ, rhsMethod.mtyp) { - if canfail { - return nil - } - panic(&TypeAssertionError{nil, rhs, lhs, *lhsMethod.name}) + m.methods[1] = nil + return *lhsMethod.name } - methods[li+1] = unsafe.Pointer(rhsMethod.tfn) + if li == 0 { + m1 = rhsMethod.tfn // we'll set m.methods[1] at the end + } else { + methods[li+1] = rhsMethod.tfn + } ri++ } + m.methods[1] = m1 + return "" +} + +func iterate_itabs(fn func(*itab)) { + // Note: only runs during stop the world or with itabLock held, + // so no other locks/atomics needed. + t := itabTable + for i := uintptr(0); i < t.size; i++ { + m := *(**itab)(add(unsafe.Pointer(&t.entries), i*sys.PtrSize)) + if m != nil { + fn(m) + } + } +} - return unsafe.Pointer(&methods[0]) +// Return the interface method table for a value of type rhs converted +// to an interface of type lhs. 
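getitab below follows the usual shape for such caches: an atomic, lock-free read of the table for the common hit case, then a re-check and insert under itabLock. A minimal standalone sketch of that pattern follows; the types and names are stand-ins invented for the sketch, and it simplifies by copying the map on insert (the real code grows and mutates its table in place with per-entry atomic stores). It needs a Go release with sync/atomic's generic Pointer type.

	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	// pair stands in for the (interface type, concrete type) key.
	type pair struct{ iface, concrete string }

	type cache struct{ m map[pair]*int }

	var (
		tab  atomic.Pointer[cache] // read without the lock, like itabTable
		lock sync.Mutex            // writers hold this, like itabLock
	)

	func init() { tab.Store(&cache{m: map[pair]*int{}}) }

	// lookupOrAdd has the same shape as getitab: a lock-free read for
	// the common hit case, then a re-check and insert under the lock.
	func lookupOrAdd(k pair) *int {
		if v, ok := tab.Load().m[k]; ok {
			return v // fast path, no lock
		}
		lock.Lock()
		defer lock.Unlock()
		old := tab.Load()
		if v, ok := old.m[k]; ok {
			return v // someone else inserted it while we waited
		}
		next := &cache{m: make(map[pair]*int, len(old.m)+1)}
		for kk, vv := range old.m {
			next.m[kk] = vv
		}
		v := new(int)
		next.m[k] = v
		tab.Store(next) // publish atomically so lock-free readers see a complete map
		return v
	}

	func main() {
		a := lookupOrAdd(pair{"Stringer", "MyInt"})
		b := lookupOrAdd(pair{"Stringer", "MyInt"})
		fmt.Println(a == b) // true: the same cached entry is reused
	}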
+func getitab(lhs, rhs *_type, canfail bool) unsafe.Pointer { + if rhs == nil { + return nil + } + + if lhs.kind&kindMask != kindInterface { + throw("getitab called for non-interface type") + } + + lhsi := (*interfacetype)(unsafe.Pointer(lhs)) + + if len(lhsi.methods) == 0 { + throw("getitab called for empty interface type") + } + + if rhs.uncommontype == nil || len(rhs.methods) == 0 { + if canfail { + return nil + } + panic(&TypeAssertionError{nil, rhs, lhs, *lhsi.methods[0].name}) + } + + var m *itab + + // First, look in the existing table to see if we can find the itab we need. + // This is by far the most common case, so do it without locks. + // Use atomic to ensure we see any previous writes done by the thread + // that updates the itabTable field (with atomic.Storep in itabAdd). + t := (*itabTableType)(atomic.Loadp(unsafe.Pointer(&itabTable))) + if m = t.find(lhsi, rhs); m != nil { + goto finish + } + + // Not found. Grab the lock and try again. + lock(&itabLock) + if m = itabTable.find(lhsi, rhs); m != nil { + unlock(&itabLock) + goto finish + } + + // Entry doesn't exist yet. Make a new entry & add it. + m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(lhsi.methods)-1)*sys.PtrSize, 0, &memstats.other_sys)) + m.inter = lhsi + m.methods[0] = unsafe.Pointer(rhs) + m.init() + itabAdd(m) + unlock(&itabLock) +finish: + if m.methods[1] != nil { + return unsafe.Pointer(&m.methods[0]) + } + if canfail { + return nil + } + // this can only happen if the conversion + // was already done once using the , ok form + // and we have a cached negative result. + // The cached result doesn't record which + // interface function was missing, so initialize + // the itab again to get the missing function name. + panic(&TypeAssertionError{nil, rhs, lhs, m.init()}) } // Return the interface method table for a value of type rhs converted -- cgit v1.1 From 5e87c2806ff4e9057c4c46fa1d9c8ac91ce3dae9 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 1 May 2019 21:34:16 +0000 Subject: compiler,runtime: do more direct interfaces A direct interface is an interface whose data word contains the actual data value, instead of a pointer to it. The gc toolchain creates a direct interface if the value is pointer shaped, that includes pointers (including unsafe.Pointer), functions, channels, maps, and structs and arrays containing a single pointer-shaped field. In gccgo, we only do this for pointers. This CL unifies direct interface types with gc. This reduces allocations when converting such types to interfaces. Our method functions used to always take pointer receivers, to make interface calls easy. Now for direct interface types, their value methods will take value receivers. For a pointer to those types, when converted to interface, the interface data contains the pointer. For that interface to call a value method, it will need a wrapper method that dereference the pointer and invokes the value method. The wrapper method, instead of the actual one, is put into the itable of the pointer type. In the runtime, adjust funcPC for the new layout of interfaces of functions. 
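To make the wrapper-method point concrete, here is a hand-written analogue (hypothetical code, not compiler output): a channel type is pointer-shaped, so under this change its value method takes a value receiver and a converted channel is stored directly in the interface data word. For a pointer to that type stored in an interface, the compiler emits a small wrapper that dereferences the pointer and calls the value method, and that wrapper is what goes into the pointer type's itable.

	package main

	import "fmt"

	// chanID is pointer-shaped (a channel), so a chanID converted to an
	// interface stores the channel itself in the data word.
	type chanID chan int

	func (c chanID) String() string { return fmt.Sprintf("chan %p", c) }

	// stringWrapper is the hand-written equivalent of the wrapper the
	// compiler generates for *chanID: dereference, then call the value
	// method.
	func stringWrapper(p *chanID) string { return (*p).String() }

	func main() {
		c := make(chanID)
		var byValue fmt.Stringer = c // direct: data word holds the channel
		var byPtr fmt.Stringer = &c  // itable entry for *chanID is the wrapper
		fmt.Println(byValue.String() == byPtr.String()) // true
		fmt.Println(stringWrapper(&c) == c.String())    // true
	}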
Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/168409 From-SVN: r270779 --- libgo/go/reflect/all_test.go | 5 ----- libgo/go/reflect/type.go | 17 +++++++++++++++-- libgo/go/runtime/iface.go | 19 +++++++++++++------ libgo/go/runtime/pprof/proto.go | 2 +- libgo/go/runtime/proc.go | 2 +- 5 files changed, 30 insertions(+), 15 deletions(-) (limited to 'libgo/go') diff --git a/libgo/go/reflect/all_test.go b/libgo/go/reflect/all_test.go index 599ab27..9452255 100644 --- a/libgo/go/reflect/all_test.go +++ b/libgo/go/reflect/all_test.go @@ -4136,7 +4136,6 @@ func TestArrayOfGenericAlg(t *testing.T) { } func TestArrayOfDirectIface(t *testing.T) { - t.Skip("skipping test because gccgo uses a different directiface value") { type T [1]*byte i1 := Zero(TypeOf(T{})).Interface() @@ -4775,9 +4774,6 @@ func TestStructOfGenericAlg(t *testing.T) { } } -/* -gccgo does not use the same directiface settings as gc. - func TestStructOfDirectIface(t *testing.T) { { type T struct{ X [1]*byte } @@ -4826,7 +4822,6 @@ func TestStructOfDirectIface(t *testing.T) { } } } -*/ type StructI int diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go index ea97b7d..fb2e5d4 100644 --- a/libgo/go/reflect/type.go +++ b/libgo/go/reflect/type.go @@ -2204,7 +2204,14 @@ func StructOf(fields []StructField) Type { typ.equalfn = nil } - typ.kind &^= kindDirectIface + switch { + case len(fs) == 1 && !ifaceIndir(fs[0].typ): + // structs of 1 direct iface type can be direct + typ.kind |= kindDirectIface + default: + typ.kind &^= kindDirectIface + } + typ.uncommonType = nil typ.ptrToThis = nil @@ -2405,7 +2412,13 @@ func ArrayOf(count int, elem Type) Type { array.ptrdata = array.size // overestimate but ok; must match program } - array.kind &^= kindDirectIface + switch { + case count == 1 && !ifaceIndir(typ): + // array of 1 direct iface type can be direct + array.kind |= kindDirectIface + default: + array.kind &^= kindDirectIface + } esize := typ.size diff --git a/libgo/go/runtime/iface.go b/libgo/go/runtime/iface.go index dc92476..1c3a5f3 100644 --- a/libgo/go/runtime/iface.go +++ b/libgo/go/runtime/iface.go @@ -68,10 +68,9 @@ import ( // pointer to memory that holds the value. It follows from this that // kindDirectIface can only be set for a type whose representation is // simply a pointer. In the current gccgo implementation, this is set -// only for pointer types (including unsafe.Pointer). In the future it -// could also be set for other types: channels, maps, functions, -// single-field structs and single-element arrays whose single field -// is simply a pointer. +// for types that are pointer-shaped, including unsafe.Pointer, channels, +// maps, functions, single-field structs and single-element arrays whose +// single field is simply a pointer-shaped type. // For a nil interface value both fields in the interface struct are nil. 
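The comment above can be checked from ordinary Go code by peeking at the interface header. The sketch below assumes the conventional two-word empty-interface layout (type descriptor word plus data word) and only prints true on a toolchain that makes single-pointer-field structs direct, as this change does; the eface struct and dataWord helper are illustration-only assumptions, not a public API.

	package main

	import (
		"fmt"
		"reflect"
		"unsafe"
	)

	// eface mirrors the runtime's empty-interface header: one word for
	// the type descriptor and one data word (layout assumed here).
	type eface struct {
		typ, data unsafe.Pointer
	}

	func dataWord(i interface{}) unsafe.Pointer {
		return (*eface)(unsafe.Pointer(&i)).data
	}

	func main() {
		// A struct whose only field is a pointer is pointer-shaped, so
		// converting it to an interface stores the pointer itself in the
		// data word instead of heap-allocating a copy of the struct.
		type T struct{ X *byte }
		b := new(byte)
		fmt.Println(dataWord(T{X: b}) == unsafe.Pointer(b)) // true when T is direct

		// reflect.StructOf in this patch applies the same rule to
		// dynamically built one-field struct types.
		st := reflect.StructOf([]reflect.StructField{{
			Name: "X", Type: reflect.TypeOf((*byte)(nil)),
		}})
		fmt.Println(st, reflect.Zero(st).Interface())
	}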
@@ -458,7 +457,11 @@ func ifaceE2T2(t *_type, e eface, ret unsafe.Pointer) bool { typedmemclr(t, ret) return false } else { - typedmemmove(t, ret, e.data) + if isDirectIface(t) { + *(*unsafe.Pointer)(ret) = e.data + } else { + typedmemmove(t, ret, e.data) + } return true } } @@ -469,7 +472,11 @@ func ifaceI2T2(t *_type, i iface, ret unsafe.Pointer) bool { typedmemclr(t, ret) return false } else { - typedmemmove(t, ret, i.data) + if isDirectIface(t) { + *(*unsafe.Pointer)(ret) = i.data + } else { + typedmemmove(t, ret, i.data) + } return true } } diff --git a/libgo/go/runtime/pprof/proto.go b/libgo/go/runtime/pprof/proto.go index 27cd09e..ef3eeb1 100644 --- a/libgo/go/runtime/pprof/proto.go +++ b/libgo/go/runtime/pprof/proto.go @@ -29,7 +29,7 @@ func funcPC(f interface{}) uintptr { data unsafe.Pointer } i := (*iface)(unsafe.Pointer(&f)) - r := **(**uintptr)(i.data) + r := *(*uintptr)(i.data) if internalcpu.FunctionDescriptors { // With PPC64 ELF ABI v1 function descriptors the // function address is a pointer to a struct whose diff --git a/libgo/go/runtime/proc.go b/libgo/go/runtime/proc.go index 0e6c9e1..8146c1d 100644 --- a/libgo/go/runtime/proc.go +++ b/libgo/go/runtime/proc.go @@ -446,7 +446,7 @@ func releaseSudog(s *sudog) { //go:nosplit func funcPC(f interface{}) uintptr { i := (*iface)(unsafe.Pointer(&f)) - r := **(**uintptr)(i.data) + r := *(*uintptr)(i.data) if cpu.FunctionDescriptors { // With PPC64 ELF ABI v1 function descriptors the // function address is a pointer to a struct whose -- cgit v1.1 From 58dbd45339823deb30fe4f1e97f6664f118b2f62 Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Wed, 1 May 2019 21:37:00 +0000 Subject: compiler: recognize and optimize map range clear Recognize for k := range m { delete(m, k) } for map m, and rewrite it to runtime.mapclear, as the gc compiler does. Reviewed-on: https://go-review.googlesource.com/c/gofrontend/+/169397 From-SVN: r270780 --- libgo/go/runtime/map.go | 1 + 1 file changed, 1 insertion(+) (limited to 'libgo/go') diff --git a/libgo/go/runtime/map.go b/libgo/go/runtime/map.go index 5dd5283..b210f5a 100644 --- a/libgo/go/runtime/map.go +++ b/libgo/go/runtime/map.go @@ -72,6 +72,7 @@ import ( //go:linkname mapaccess2_fat runtime.mapaccess2_fat //go:linkname mapassign runtime.mapassign //go:linkname mapdelete runtime.mapdelete +//go:linkname mapclear runtime.mapclear //go:linkname mapiterinit runtime.mapiterinit //go:linkname mapiternext runtime.mapiternext -- cgit v1.1
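The map-clear pattern the last change recognizes is the idiomatic loop below: a range over the map whose body is a single delete of the ranged key. On a compiler with this optimization the whole loop is lowered to one runtime.mapclear call instead of per-key deletions; the snippet just shows the source shape that triggers the rewrite, not the rewrite itself.

	package main

	import "fmt"

	// clearMap uses the exact shape the compiler now recognizes: ranging
	// over m and deleting each key, with nothing else in the loop body.
	func clearMap(m map[string]int) {
		for k := range m {
			delete(m, k)
		}
	}

	func main() {
		m := map[string]int{"a": 1, "b": 2, "c": 3}
		clearMap(m)
		fmt.Println(len(m)) // 0
	}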