-rw-r--r--  gcc/go/gofrontend/MERGE                  |   2
-rw-r--r--  gcc/go/gofrontend/gogo.cc                |  26
-rw-r--r--  gcc/go/gofrontend/gogo.h                 |   5
-rw-r--r--  gcc/go/gofrontend/names.cc               |  10
-rw-r--r--  gcc/go/gofrontend/types.cc               | 209
-rw-r--r--  gcc/go/gofrontend/types.h                |  15
-rw-r--r--  libgo/go/internal/reflectlite/type.go    |  40
-rw-r--r--  libgo/go/reflect/type.go                 | 141
-rw-r--r--  libgo/go/reflect/value.go                |   3
-rw-r--r--  libgo/go/runtime/alg.go                  |  89
-rw-r--r--  libgo/go/runtime/map.go                  |  63
-rw-r--r--  libgo/go/runtime/map_benchmark_test.go   |  30
-rw-r--r--  libgo/go/runtime/map_fast32.go           |  18
-rw-r--r--  libgo/go/runtime/map_fast64.go           |  18
-rw-r--r--  libgo/go/runtime/map_faststr.go          |  14
-rw-r--r--  libgo/go/runtime/map_test.go             |  61
-rw-r--r--  libgo/go/runtime/type.go                 |  38
-rw-r--r--  libgo/runtime/go-unsafe-pointer.c        |  24
18 files changed, 421 insertions, 385 deletions
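
The heart of the patch is a layout change: hash functions move out of the
per-type descriptor (_type loses hashfn/equalfn, gains tflag and equal) and
into the map type as a new hasher field, with runtime.typehash serving as
the general fallback for interface keys and reflect-built maps. The Go
program below is a minimal sketch of that shape for readers skimming the
diff; the simplified struct fields, the toy memhash, and the omission of the
kind-specific dispatch are assumptions of the sketch, not the real runtime
definitions in libgo/go/runtime/type.go and alg.go.

package main

import (
	"fmt"
	"unsafe"
)

const tflagRegularMemory = 1 << 3 // hash/compare as a single region of size bytes

// Simplified stand-in for the runtime type descriptor after this patch:
// no hashfn field, only an equality function plus tflag.
type _type struct {
	size  uintptr
	tflag uint8
	equal func(p, q unsafe.Pointer) bool
}

// Simplified stand-in for maptype: the key hash function now hangs off the
// map type instead of the key's type descriptor.
type maptype struct {
	key    *_type
	hasher func(key unsafe.Pointer, seed uintptr) uintptr
}

// memhash stand-in: a toy byte-at-a-time hash, not the runtime's real one.
func memhash(p unsafe.Pointer, seed, size uintptr) uintptr {
	h := seed
	for i := uintptr(0); i < size; i++ {
		h = h*131 + uintptr(*(*byte)(unsafe.Pointer(uintptr(p) + i)))
	}
	return h
}

// typehash mirrors the fallback added in runtime/alg.go: regular-memory
// types hash as raw bytes; the per-kind cases (floats, strings, structs,
// arrays, interfaces) are omitted from this sketch.
func typehash(t *_type, p unsafe.Pointer, seed uintptr) uintptr {
	if t.tflag&tflagRegularMemory != 0 {
		return memhash(p, seed, t.size)
	}
	panic("sketch: kind-specific hashing omitted")
}

func main() {
	keyType := &_type{size: unsafe.Sizeof(uint32(0)), tflag: tflagRegularMemory}
	mt := &maptype{
		key: keyType,
		// This is the shape MapOf and the compiler's map descriptor now
		// emit: a hasher that closes over the key type and calls typehash.
		hasher: func(p unsafe.Pointer, seed uintptr) uintptr {
			return typehash(keyType, p, seed)
		},
	}
	k := uint32(42)
	fmt.Printf("hash=%#x\n", mt.hasher(unsafe.Pointer(&k), 0))
}
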
diff --git a/gcc/go/gofrontend/MERGE b/gcc/go/gofrontend/MERGE
index 5e00d4f..ea09abe 100644
--- a/gcc/go/gofrontend/MERGE
+++ b/gcc/go/gofrontend/MERGE
@@ -1,4 +1,4 @@
-b5c950fb98042fe434edca0c2403234692f25cd4
+9163fa28b89222cd851c0d24bd6a1384d1379c55
The first line of this file holds the git revision number of the last
merge done from the gofrontend repository.
diff --git a/gcc/go/gofrontend/gogo.cc b/gcc/go/gofrontend/gogo.cc
index db533bb..b50d8a2 100644
--- a/gcc/go/gofrontend/gogo.cc
+++ b/gcc/go/gofrontend/gogo.cc
@@ -2722,7 +2722,7 @@ Gogo::clear_file_scope()
// parse tree is lowered.
void
-Gogo::queue_hash_function(Type* type, Named_type* name, int64_t size,
+Gogo::queue_hash_function(Type* type, int64_t size,
const std::string& hash_name,
Function_type* hash_fntype)
{
@@ -2730,7 +2730,7 @@ Gogo::queue_hash_function(Type* type, Named_type* name, int64_t size,
go_assert(!this->in_global_scope());
Specific_type_function::Specific_type_function_kind kind =
Specific_type_function::SPECIFIC_HASH;
- Specific_type_function* tsf = new Specific_type_function(type, name, size,
+ Specific_type_function* tsf = new Specific_type_function(type, NULL, size,
kind, hash_name,
hash_fntype);
this->specific_type_functions_.push_back(tsf);
@@ -2783,10 +2783,7 @@ Specific_type_functions::type(Type* t)
if (nt->is_alias())
return TRAVERSE_CONTINUE;
if (t->needs_specific_type_functions(this->gogo_))
- {
- t->equal_function(this->gogo_, nt, NULL);
- t->hash_function(this->gogo_, nt, NULL);
- }
+ t->equal_function(this->gogo_, nt, NULL);
// If this is a struct type, we don't want to make functions
// for the unnamed struct.
@@ -2820,10 +2817,15 @@ Specific_type_functions::type(Type* t)
case Type::TYPE_STRUCT:
case Type::TYPE_ARRAY:
if (t->needs_specific_type_functions(this->gogo_))
- {
- t->equal_function(this->gogo_, NULL, NULL);
- t->hash_function(this->gogo_, NULL, NULL);
- }
+ t->equal_function(this->gogo_, NULL, NULL);
+ break;
+
+ case Type::TYPE_MAP:
+ {
+ Type* key_type = t->map_type()->key_type();
+ if (key_type->needs_specific_type_functions(this->gogo_))
+ key_type->hash_function(this->gogo_, NULL);
+ }
break;
default:
@@ -2846,8 +2848,8 @@ Gogo::write_specific_type_functions()
Specific_type_function* tsf = this->specific_type_functions_.back();
this->specific_type_functions_.pop_back();
if (tsf->kind == Specific_type_function::SPECIFIC_HASH)
- tsf->type->write_hash_function(this, tsf->name, tsf->size,
- tsf->fnname, tsf->fntype);
+ tsf->type->write_hash_function(this, tsf->size, tsf->fnname,
+ tsf->fntype);
else
tsf->type->write_equal_function(this, tsf->name, tsf->size,
tsf->fnname, tsf->fntype);
diff --git a/gcc/go/gofrontend/gogo.h b/gcc/go/gofrontend/gogo.h
index cd4e38f..27d7b4c 100644
--- a/gcc/go/gofrontend/gogo.h
+++ b/gcc/go/gofrontend/gogo.h
@@ -602,8 +602,7 @@ class Gogo
// is used when a type-specific hash function is needed when not at
// top level.
void
- queue_hash_function(Type* type, Named_type* name, int64_t size,
- const std::string& hash_name,
+ queue_hash_function(Type* type, int64_t size, const std::string& hash_name,
Function_type* hash_fntype);
// Queue up a type-specific equal function to be written out. This
@@ -879,7 +878,7 @@ class Gogo
// Return the name of the hash function for TYPE.
std::string
- hash_function_name(const Type*, const Named_type*);
+ hash_function_name(const Type*);
// Return the name of the equal function for TYPE.
std::string
diff --git a/gcc/go/gofrontend/names.cc b/gcc/go/gofrontend/names.cc
index 959415a..f4ad1815 100644
--- a/gcc/go/gofrontend/names.cc
+++ b/gcc/go/gofrontend/names.cc
@@ -287,16 +287,12 @@ Gogo::stub_method_name(const Package* package, const std::string& mname)
return ret;
}
-// Return the name of the hash function for TYPE. If NAME is not NULL
-// it is the name of the type.
+// Return the name of the hash function for TYPE.
std::string
-Gogo::hash_function_name(const Type* type, const Named_type* name)
+Gogo::hash_function_name(const Type* type)
{
- const Type* rtype = type;
- if (name != NULL)
- rtype = name;
- std::string tname = rtype->mangled_name(this);
+ std::string tname = type->mangled_name(this);
return tname + "..hash";
}
diff --git a/gcc/go/gofrontend/types.cc b/gcc/go/gofrontend/types.cc
index 1004040..27d53df 100644
--- a/gcc/go/gofrontend/types.cc
+++ b/gcc/go/gofrontend/types.cc
@@ -1540,6 +1540,11 @@ Type::convert_builtin_named_types(Gogo* gogo)
}
}
+// Values to store in the tflag field of a type descriptor. This must
+// match the definitions in libgo/go/runtime/type.go.
+
+const int TFLAG_REGULAR_MEMORY = 1 << 3;
+
// Return the type of a type descriptor. We should really tie this to
// runtime.Type rather than copying it. This must match the struct "_type"
// declared in libgo/go/runtime/type.go.
@@ -1566,21 +1571,11 @@ Type::make_type_descriptor_type()
Type* void_type = Type::make_void_type();
Type* unsafe_pointer_type = Type::make_pointer_type(void_type);
- Typed_identifier_list *params = new Typed_identifier_list();
- params->push_back(Typed_identifier("key", unsafe_pointer_type, bloc));
- params->push_back(Typed_identifier("seed", uintptr_type, bloc));
-
- Typed_identifier_list* results = new Typed_identifier_list();
- results->push_back(Typed_identifier("", uintptr_type, bloc));
-
- Type* hash_fntype = Type::make_function_type(NULL, params, results,
- bloc);
-
- params = new Typed_identifier_list();
+ Typed_identifier_list* params = new Typed_identifier_list();
params->push_back(Typed_identifier("key1", unsafe_pointer_type, bloc));
params->push_back(Typed_identifier("key2", unsafe_pointer_type, bloc));
- results = new Typed_identifier_list();
+ Typed_identifier_list* results = new Typed_identifier_list();
results->push_back(Typed_identifier("", Type::lookup_bool_type(), bloc));
Type* equal_fntype = Type::make_function_type(NULL, params, results,
@@ -1624,11 +1619,11 @@ Type::make_type_descriptor_type()
"size", uintptr_type,
"ptrdata", uintptr_type,
"hash", uint32_type,
- "kind", uint8_type,
+ "tflag", uint8_type,
"align", uint8_type,
"fieldAlign", uint8_type,
- "hashfn", hash_fntype,
- "equalfn", equal_fntype,
+ "kind", uint8_type,
+ "equal", equal_fntype,
"gcdata", pointer_uint8_type,
"string", pointer_string_type,
"", pointer_uncommon_type,
@@ -1741,18 +1736,13 @@ Type::needs_specific_type_functions(Gogo* gogo)
}
// Return the runtime function that computes the hash of this type.
-// If NAME is not NULL it is the name of this type. HASH_FNTYPE is
-// the type of the hash function function, for convenience; it may be
-// NULL. This returns NULL if the type is not comparable.
+// HASH_FNTYPE is the type of the hash function function, for
+// convenience; it may be NULL. This returns NULL if the type is not
+// comparable.
Named_object*
-Type::hash_function(Gogo* gogo, Named_type* name, Function_type* hash_fntype)
+Type::hash_function(Gogo* gogo, Function_type* hash_fntype)
{
- // If the unaliased type is not a named type, then the type does not
- // have a name after all.
- if (name != NULL)
- name = name->unalias()->named_type();
-
if (!this->is_comparable())
return NULL;
@@ -1803,7 +1793,7 @@ Type::hash_function(Gogo* gogo, Named_type* name, Function_type* hash_fntype)
// We don't have a built-in function for a type of this
// size. Build a function to use that calls the generic
// hash functions for identity, passing the size.
- return this->build_hash_function(gogo, name, size, hash_fntype);
+ return this->build_hash_function(gogo, size, hash_fntype);
}
}
else
@@ -1861,7 +1851,7 @@ Type::hash_function(Gogo* gogo, Named_type* name, Function_type* hash_fntype)
// This is a struct which can not be compared using a simple
// identity function. We need to build a function to
// compute the hash.
- return this->build_hash_function(gogo, name, -1, hash_fntype);
+ return this->build_hash_function(gogo, -1, hash_fntype);
case Type::TYPE_ARRAY:
if (this->is_slice_type())
@@ -1875,7 +1865,7 @@ Type::hash_function(Gogo* gogo, Named_type* name, Function_type* hash_fntype)
// This is an array which can not be compared using a
// simple identity function. We need to build a
// function to compute the hash.
- return this->build_hash_function(gogo, name, -1, hash_fntype);
+ return this->build_hash_function(gogo, -1, hash_fntype);
}
break;
@@ -1895,7 +1885,6 @@ Type::hash_function(Gogo* gogo, Named_type* name, Function_type* hash_fntype)
}
}
-
Location bloc = Linemap::predeclared_location();
Named_object *hash_fn = Named_object::make_function_declaration(hash_fnname,
NULL,
@@ -1913,12 +1902,20 @@ Type::Type_function Type::type_hash_functions_table;
// this is a struct or array type that cannot use an identity
// comparison. Otherwise, it is a type that uses an identity
// comparison but is not one of the standard supported sizes.
+//
+// Unlike an equality function, hash functions are not in type
+// descriptors, so we can't assume that a named type has defined a
+// hash function in the package that defines the type. So hash
+// functions are always defined locally. FIXME: It would be better to
+// define hash functions with comdat linkage so that duplicate hash
+// functions can be coalesced at link time.
Named_object*
-Type::build_hash_function(Gogo* gogo, Named_type* name, int64_t size,
- Function_type* hash_fntype)
+Type::build_hash_function(Gogo* gogo, int64_t size, Function_type* hash_fntype)
{
- std::pair<Type*, Named_object*> val(name != NULL ? name : this, NULL);
+ Type* type = this->base();
+
+ std::pair<Type*, Named_object*> val(type, NULL);
std::pair<Type_function::iterator, bool> ins =
Type::type_hash_functions_table.insert(val);
if (!ins.second)
@@ -1927,30 +1924,19 @@ Type::build_hash_function(Gogo* gogo, Named_type* name, int64_t size,
return ins.first->second;
}
- std::string hash_name = gogo->hash_function_name(this, name);
+ std::string hash_name = gogo->hash_function_name(type);
Location bloc = Linemap::predeclared_location();
- const Package* package = NULL;
- bool is_defined_elsewhere =
- this->type_descriptor_defined_elsewhere(name, &package);
-
- Named_object* hash_fn;
- if (is_defined_elsewhere)
- hash_fn = Named_object::make_function_declaration(hash_name, package,
- hash_fntype, bloc);
- else
- hash_fn = gogo->declare_package_function(hash_name, hash_fntype, bloc);
+ Named_object* hash_fn = gogo->declare_package_function(hash_name,
+ hash_fntype, bloc);
ins.first->second = hash_fn;
- if (!is_defined_elsewhere)
- {
- if (gogo->in_global_scope())
- this->write_hash_function(gogo, name, size, hash_name, hash_fntype);
- else
- gogo->queue_hash_function(this, name, size, hash_name, hash_fntype);
- }
+ if (gogo->in_global_scope())
+ type->write_hash_function(gogo, size, hash_name, hash_fntype);
+ else
+ gogo->queue_hash_function(type, size, hash_name, hash_fntype);
return hash_fn;
}
@@ -1958,7 +1944,7 @@ Type::build_hash_function(Gogo* gogo, Named_type* name, int64_t size,
// Write the hash function for a type that needs it written specially.
void
-Type::write_hash_function(Gogo* gogo, Named_type* name, int64_t size,
+Type::write_hash_function(Gogo* gogo, int64_t size,
const std::string& hash_name,
Function_type* hash_fntype)
{
@@ -1979,12 +1965,10 @@ Type::write_hash_function(Gogo* gogo, Named_type* name, int64_t size,
if (size != -1)
this->write_identity_hash(gogo, size);
- else if (name != NULL && name->real_type()->named_type() != NULL)
- this->write_named_hash(gogo, name, hash_fntype);
else if (this->struct_type() != NULL)
- this->struct_type()->write_hash_function(gogo, name, hash_fntype);
+ this->struct_type()->write_hash_function(gogo, hash_fntype);
else if (this->array_type() != NULL)
- this->array_type()->write_hash_function(gogo, name, hash_fntype);
+ this->array_type()->write_hash_function(gogo, hash_fntype);
else
go_unreachable();
@@ -2052,54 +2036,6 @@ Type::write_identity_hash(Gogo* gogo, int64_t size)
gogo->add_statement(s);
}
-// Write a hash function that simply calls the hash function for a
-// named type. This is used when one named type is defined as
-// another. This ensures that this case works when the other named
-// type is defined in another package and relies on calling hash
-// functions defined only in that package.
-
-void
-Type::write_named_hash(Gogo* gogo, Named_type* name,
- Function_type* hash_fntype)
-{
- Location bloc = Linemap::predeclared_location();
-
- Named_type* base_type = name->real_type()->named_type();
- while (base_type->is_alias())
- {
- base_type = base_type->real_type()->named_type();
- go_assert(base_type != NULL);
- }
- go_assert(base_type != NULL);
-
- // The pointer to the type we are going to hash. This is an
- // unsafe.Pointer.
- Named_object* key_arg = gogo->lookup("key", NULL);
- go_assert(key_arg != NULL);
-
- // The seed argument to the hash function.
- Named_object* seed_arg = gogo->lookup("seed", NULL);
- go_assert(seed_arg != NULL);
-
- Named_object* hash_fn = name->real_type()->hash_function(gogo, base_type,
- hash_fntype);
-
- // Call the hash function for the base type.
- Expression* key_ref = Expression::make_var_reference(key_arg, bloc);
- Expression* seed_ref = Expression::make_var_reference(seed_arg, bloc);
- Expression_list* args = new Expression_list();
- args->push_back(key_ref);
- args->push_back(seed_ref);
- Expression* func = Expression::make_func_reference(hash_fn, NULL, bloc);
- Expression* call = Expression::make_call(func, args, false, bloc);
-
- // Return the hash of the base type.
- Expression_list* vals = new Expression_list();
- vals->push_back(call);
- Statement* s = Statement::make_return_statement(vals, bloc);
- gogo->add_statement(s);
-}
-
// Return the runtime function that compares whether two values of
// this type are equal. If NAME is not NULL it is the name of this
// type. EQUAL_FNTYPE is the type of the equality function, for
@@ -2572,9 +2508,11 @@ Type::type_descriptor_constructor(Gogo* gogo, int runtime_type_kind,
vals->push_back(Expression::make_integer_ul(h, p->type(), bloc));
++p;
- go_assert(p->is_field_name("kind"));
- vals->push_back(Expression::make_integer_ul(runtime_type_kind, p->type(),
- bloc));
+ go_assert(p->is_field_name("tflag"));
+ unsigned long tflag = 0;
+ if (this->compare_is_identity(gogo))
+ tflag |= TFLAG_REGULAR_MEMORY;
+ vals->push_back(Expression::make_integer_ul(tflag, p->type(), bloc));
++p;
go_assert(p->is_field_name("align"));
@@ -2587,18 +2525,12 @@ Type::type_descriptor_constructor(Gogo* gogo, int runtime_type_kind,
vals->push_back(Expression::make_type_info(this, type_info));
++p;
- go_assert(p->is_field_name("hashfn"));
- Function_type* hash_fntype = p->type()->function_type();
- Named_object* hash_fn = this->hash_function(gogo, name, hash_fntype);
- if (hash_fn == NULL)
- vals->push_back(Expression::make_cast(hash_fntype,
- Expression::make_nil(bloc),
- bloc));
- else
- vals->push_back(Expression::make_func_reference(hash_fn, NULL, bloc));
+ go_assert(p->is_field_name("kind"));
+ vals->push_back(Expression::make_integer_ul(runtime_type_kind, p->type(),
+ bloc));
++p;
- go_assert(p->is_field_name("equalfn"));
+ go_assert(p->is_field_name("equal"));
Function_type* equal_fntype = p->type()->function_type();
Named_object* equal_fn = this->equal_function(gogo, name, equal_fntype);
if (equal_fn == NULL)
@@ -6603,8 +6535,7 @@ Struct_type::do_type_descriptor(Gogo* gogo, Named_type* name)
// function.
void
-Struct_type::write_hash_function(Gogo* gogo, Named_type*,
- Function_type* hash_fntype)
+Struct_type::write_hash_function(Gogo* gogo, Function_type* hash_fntype)
{
Location bloc = Linemap::predeclared_location();
@@ -6650,8 +6581,7 @@ Struct_type::write_hash_function(Gogo* gogo, Named_type*,
subkey = Expression::make_cast(key_arg_type, subkey, bloc);
// Get the hash function to use for the type of this field.
- Named_object* hash_fn =
- pf->type()->hash_function(gogo, pf->type()->named_type(), hash_fntype);
+ Named_object* hash_fn = pf->type()->hash_function(gogo, hash_fntype);
// Call the hash function for the field, passing retval as the seed.
ref = Expression::make_temporary_reference(retval, bloc);
@@ -7447,8 +7377,7 @@ Array_type::do_hash_for_method(Gogo* gogo, int flags) const
// function.
void
-Array_type::write_hash_function(Gogo* gogo, Named_type* name,
- Function_type* hash_fntype)
+Array_type::write_hash_function(Gogo* gogo, Function_type* hash_fntype)
{
Location bloc = Linemap::predeclared_location();
@@ -7485,9 +7414,7 @@ Array_type::write_hash_function(Gogo* gogo, Named_type* name,
Expression* iref = Expression::make_temporary_reference(index, bloc);
Expression* aref = Expression::make_var_reference(key_arg, bloc);
- Type* pt = Type::make_pointer_type(name != NULL
- ? static_cast<Type*>(name)
- : static_cast<Type*>(this));
+ Type* pt = Type::make_pointer_type(static_cast<Type*>(this));
aref = Expression::make_cast(pt, aref, bloc);
For_range_statement* for_range = Statement::make_for_range_statement(iref,
NULL,
@@ -7497,9 +7424,8 @@ Array_type::write_hash_function(Gogo* gogo, Named_type* name,
gogo->start_block(bloc);
// Get the hash function for the element type.
- Named_object* hash_fn =
- this->element_type_->hash_function(gogo, this->element_type_->named_type(),
- hash_fntype);
+ Named_object* hash_fn = this->element_type_->hash_function(gogo,
+ hash_fntype);
// Get a pointer to this element in the loop.
Expression* subkey = Expression::make_temporary_reference(key, bloc);
@@ -8291,13 +8217,28 @@ Map_type::make_map_type_descriptor_type()
Type* uint8_type = Type::lookup_integer_type("uint8");
Type* uint16_type = Type::lookup_integer_type("uint16");
Type* uint32_type = Type::lookup_integer_type("uint32");
+ Type* uintptr_type = Type::lookup_integer_type("uintptr");
+ Type* void_type = Type::make_void_type();
+ Type* unsafe_pointer_type = Type::make_pointer_type(void_type);
+
+ Location bloc = Linemap::predeclared_location();
+ Typed_identifier_list *params = new Typed_identifier_list();
+ params->push_back(Typed_identifier("key", unsafe_pointer_type, bloc));
+ params->push_back(Typed_identifier("seed", uintptr_type, bloc));
+
+ Typed_identifier_list* results = new Typed_identifier_list();
+ results->push_back(Typed_identifier("", uintptr_type, bloc));
+
+ Type* hasher_fntype = Type::make_function_type(NULL, params, results,
+ bloc);
Struct_type* sf =
- Type::make_builtin_struct_type(8,
+ Type::make_builtin_struct_type(9,
"", tdt,
"key", ptdt,
"elem", ptdt,
"bucket", ptdt,
+ "hasher", hasher_fntype,
"keysize", uint8_type,
"valuesize", uint8_type,
"bucketsize", uint16_type,
@@ -8380,6 +8321,18 @@ Map_type::do_type_descriptor(Gogo* gogo, Named_type* name)
vals->push_back(Expression::make_type_descriptor(bucket_type, bloc));
++p;
+ go_assert(p->is_field_name("hasher"));
+ Function_type* hasher_fntype = p->type()->function_type();
+ Named_object* hasher_fn = this->key_type_->hash_function(gogo,
+ hasher_fntype);
+ if (hasher_fn == NULL)
+ vals->push_back(Expression::make_cast(hasher_fntype,
+ Expression::make_nil(bloc),
+ bloc));
+ else
+ vals->push_back(Expression::make_func_reference(hasher_fn, NULL, bloc));
+
+ ++p;
go_assert(p->is_field_name("keysize"));
if (keysize > Map_type::max_key_size)
vals->push_back(Expression::make_integer_int64(ptrsize, uint8_type, bloc));
diff --git a/gcc/go/gofrontend/types.h b/gcc/go/gofrontend/types.h
index 55c5912..ef81589 100644
--- a/gcc/go/gofrontend/types.h
+++ b/gcc/go/gofrontend/types.h
@@ -1061,7 +1061,7 @@ class Type
// Get the hash function for a type. Returns NULL if the type is
// not comparable.
Named_object*
- hash_function(Gogo*, Named_type* name, Function_type* hash_fntype);
+ hash_function(Gogo*, Function_type* hash_fntype);
// Write the equal function for a type.
void
@@ -1071,8 +1071,7 @@ class Type
// Write the hash function for a type.
void
- write_hash_function(Gogo*, Named_type*, int64_t size,
- const std::string& hash_name,
+ write_hash_function(Gogo*, int64_t size, const std::string& hash_name,
Function_type* hash_fntype);
// Return the alignment required by the memequalN function.
@@ -1284,8 +1283,7 @@ class Type
// Build the hash function for a type that needs specific functions.
Named_object*
- build_hash_function(Gogo*, Named_type*, int64_t size,
- Function_type* hash_fntype);
+ build_hash_function(Gogo*, int64_t size, Function_type* hash_fntype);
// Build the equal function for a type that needs specific functions.
Named_object*
@@ -1299,9 +1297,6 @@ class Type
write_identity_equal(Gogo*, int64_t size);
void
- write_named_hash(Gogo*, Named_type*, Function_type* hash_fntype);
-
- void
write_named_equal(Gogo*, Named_type*);
// Build a composite literal for the uncommon type information.
@@ -2628,7 +2623,7 @@ class Struct_type : public Type
// Write the hash function for this type.
void
- write_hash_function(Gogo*, Named_type*, Function_type*);
+ write_hash_function(Gogo*, Function_type*);
// Write the equality function for this type.
void
@@ -2815,7 +2810,7 @@ class Array_type : public Type
// Write the hash function for this type.
void
- write_hash_function(Gogo*, Named_type*, Function_type*);
+ write_hash_function(Gogo*, Function_type*);
// Write the equality function for this type.
void
diff --git a/libgo/go/internal/reflectlite/type.go b/libgo/go/internal/reflectlite/type.go
index 02b2aee..35cf1a4 100644
--- a/libgo/go/internal/reflectlite/type.go
+++ b/libgo/go/internal/reflectlite/type.go
@@ -110,33 +110,14 @@ const (
// available in the memory directly following the rtype value.
//
// tflag values must be kept in sync with copies in:
-// cmd/compile/internal/gc/reflect.go
-// cmd/link/internal/ld/decodesym.go
+// go/types.cc
// runtime/type.go
type tflag uint8
const (
- // tflagUncommon means that there is a pointer, *uncommonType,
- // just beyond the outer type structure.
- //
- // For example, if t.Kind() == Struct and t.tflag&tflagUncommon != 0,
- // then t has uncommonType data and it can be accessed as:
- //
- // type tUncommon struct {
- // structType
- // u uncommonType
- // }
- // u := &(*tUncommon)(unsafe.Pointer(t)).u
- tflagUncommon tflag = 1 << 0
-
- // tflagExtraStar means the name in the str field has an
- // extraneous '*' prefix. This is because for most types T in
- // a program, the type *T also exists and reusing the str data
- // saves binary size.
- tflagExtraStar tflag = 1 << 1
-
- // tflagNamed means the type has a name.
- tflagNamed tflag = 1 << 2
+ // tflagRegularMemory means that equal and hash functions can treat
+ // this type as a single region of t.size bytes.
+ tflagRegularMemory tflag = 1 << 3
)
// rtype is the common implementation of most values.
@@ -147,16 +128,15 @@ type rtype struct {
size uintptr
ptrdata uintptr // number of bytes in the type that can contain pointers
hash uint32 // hash of type; avoids computation in hash tables
- kind uint8 // enumeration for C
+ tflag tflag // extra type information flags
align uint8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type
- _ uint8 // unused/padding
-
- hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
- equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
-
+ kind uint8 // enumeration for C
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
gcdata *byte // garbage collection data
- string *string // string form; unnecessary but undeniably useful
+ string *string // string form; unnecessary but undeniably useful
*uncommonType // (relatively) uncommon fields
ptrToThis *rtype // type for pointer to this type, may be zero
}
diff --git a/libgo/go/reflect/type.go b/libgo/go/reflect/type.go
index f82f5eb..41ed383 100644
--- a/libgo/go/reflect/type.go
+++ b/libgo/go/reflect/type.go
@@ -261,6 +261,20 @@ const (
UnsafePointer
)
+// tflag is used by an rtype to signal what extra type information is
+// available in the memory directly following the rtype value.
+//
+// tflag values must be kept in sync with copies in:
+// go/types.cc
+// runtime/type.go
+type tflag uint8
+
+const (
+ // tflagRegularMemory means that equal and hash functions can treat
+ // this type as a single region of t.size bytes.
+ tflagRegularMemory tflag = 1 << 3
+)
+
// rtype is the common implementation of most values.
// It is embedded in other struct types.
//
@@ -269,16 +283,15 @@ type rtype struct {
size uintptr
ptrdata uintptr // size of memory prefix holding all pointers
hash uint32 // hash of type; avoids computation in hash tables
- kind uint8 // enumeration for C
- align int8 // alignment of variable with this type
+ tflag tflag // extra type information flags
+ align uint8 // alignment of variable with this type
fieldAlign uint8 // alignment of struct field with this type
- _ uint8 // unused/padding
-
- hashfn func(unsafe.Pointer, uintptr) uintptr // hash function
- equalfn func(unsafe.Pointer, unsafe.Pointer) bool // equality function
-
+ kind uint8 // enumeration for C
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
gcdata *byte // garbage collection data
- string *string // string form; unnecessary but undeniably useful
+ string *string // string form; unnecessary but undeniably useful
*uncommonType // (relatively) uncommon fields
ptrToThis *rtype // type for pointer to this type, if used in binary or has methods
}
@@ -350,9 +363,11 @@ type interfaceType struct {
// mapType represents a map type.
type mapType struct {
rtype
- key *rtype // map key type
- elem *rtype // map element (value) type
- bucket *rtype // internal bucket structure
+ key *rtype // map key type
+ elem *rtype // map element (value) type
+ bucket *rtype // internal bucket structure
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot
valuesize uint8 // size of value slot
bucketsize uint16 // size of bucket
@@ -1178,31 +1193,7 @@ func (t *rtype) ConvertibleTo(u Type) bool {
}
func (t *rtype) Comparable() bool {
- switch t.Kind() {
- case Bool, Int, Int8, Int16, Int32, Int64,
- Uint, Uint8, Uint16, Uint32, Uint64, Uintptr,
- Float32, Float64, Complex64, Complex128,
- Chan, Interface, Ptr, String, UnsafePointer:
- return true
-
- case Func, Map, Slice:
- return false
-
- case Array:
- return (*arrayType)(unsafe.Pointer(t)).elem.Comparable()
-
- case Struct:
- tt := (*structType)(unsafe.Pointer(t))
- for i := range tt.fields {
- if !tt.fields[i].typ.Comparable() {
- return false
- }
- }
- return true
-
- default:
- panic("reflect: impossible")
- }
+ return t.equal != nil
}
// implements reports whether the type V implements the interface type T.
@@ -1457,6 +1448,7 @@ func ChanOf(dir ChanDir, t Type) Type {
var ichan interface{} = (chan unsafe.Pointer)(nil)
prototype := *(**chanType)(unsafe.Pointer(&ichan))
ch := *prototype
+ ch.tflag = tflagRegularMemory
ch.dir = uintptr(dir)
ch.string = &s
@@ -1481,8 +1473,6 @@ func ChanOf(dir ChanDir, t Type) Type {
return ti.(Type)
}
-func ismapkey(*rtype) bool // implemented in runtime
-
// MapOf returns the map type with the given key and element types.
// For example, if k represents int and e represents string,
// MapOf(k, e) represents map[int]string.
@@ -1493,7 +1483,7 @@ func MapOf(key, elem Type) Type {
ktyp := key.(*rtype)
etyp := elem.(*rtype)
- if !ismapkey(ktyp) {
+ if ktyp.equal == nil {
panic("reflect.MapOf: invalid key type " + ktyp.String())
}
@@ -1530,6 +1520,9 @@ func MapOf(key, elem Type) Type {
mt.ptrToThis = nil
mt.bucket = bucketOf(ktyp, etyp)
+ mt.hasher = func(p unsafe.Pointer, seed uintptr) uintptr {
+ return typehash(ktyp, p, seed)
+ }
mt.flags = 0
if ktyp.size > maxKeySize {
mt.keysize = uint8(ptrSize)
@@ -1851,7 +1844,7 @@ func bucketOf(ktyp, etyp *rtype) *rtype {
}
b := &rtype{
- align: int8(maxAlign),
+ align: uint8(maxAlign),
fieldAlign: uint8(maxAlign),
size: size,
kind: uint8(Struct),
@@ -1949,9 +1942,8 @@ func StructOf(fields []StructField) Type {
var (
hash = uint32(12)
size uintptr
- typalign int8
+ typalign uint8
comparable = true
- hashable = true
fs = make([]structField, len(fields))
repr = make([]byte, 0, 64)
@@ -2036,12 +2028,11 @@ func StructOf(fields []StructField) Type {
repr = append(repr, ';')
}
- comparable = comparable && (ft.equalfn != nil)
- hashable = hashable && (ft.hashfn != nil)
+ comparable = comparable && (ft.equal != nil)
offset := align(size, uintptr(ft.fieldAlign))
- if int8(ft.fieldAlign) > typalign {
- typalign = int8(ft.fieldAlign)
+ if ft.fieldAlign > typalign {
+ typalign = ft.fieldAlign
}
size = offset + ft.size
f.offsetEmbed |= offset << 1
@@ -2118,11 +2109,12 @@ func StructOf(fields []StructField) Type {
}
typ.string = &str
+ typ.tflag = 0 // TODO: set tflagRegularMemory
typ.hash = hash
typ.size = size
typ.ptrdata = typeptrdata(typ.common())
typ.align = typalign
- typ.fieldAlign = uint8(typalign)
+ typ.fieldAlign = typalign
if hasGCProg {
lastPtrField := 0
@@ -2189,32 +2181,18 @@ func StructOf(fields []StructField) Type {
}
typ.ptrdata = typeptrdata(typ.common())
- if hashable {
- typ.hashfn = func(p unsafe.Pointer, seed uintptr) uintptr {
- o := seed
- for _, ft := range typ.fields {
- pi := add(p, ft.offset(), "&x.field safe")
- o = ft.typ.hashfn(pi, o)
- }
- return o
- }
- } else {
- typ.hashfn = nil
- }
-
+ typ.equal = nil
if comparable {
- typ.equalfn = func(p, q unsafe.Pointer) bool {
+ typ.equal = func(p, q unsafe.Pointer) bool {
for _, ft := range typ.fields {
pi := add(p, ft.offset(), "&x.field safe")
qi := add(q, ft.offset(), "&x.field safe")
- if !ft.typ.equalfn(pi, qi) {
+ if !ft.typ.equal(pi, qi) {
return false
}
}
return true
}
- } else {
- typ.equalfn = nil
}
switch {
@@ -2322,6 +2300,7 @@ func ArrayOf(count int, elem Type) Type {
var iarray interface{} = [1]unsafe.Pointer{}
prototype := *(**arrayType)(unsafe.Pointer(&iarray))
array := *prototype
+ array.tflag = typ.tflag & tflagRegularMemory
array.string = &s
// gccgo uses a different hash.
@@ -2427,21 +2406,12 @@ func ArrayOf(count int, elem Type) Type {
array.ptrdata = array.size // overestimate but ok; must match program
}
- switch {
- case count == 1 && !ifaceIndir(typ):
- // array of 1 direct iface type can be direct
- array.kind |= kindDirectIface
- default:
- array.kind &^= kindDirectIface
- }
-
+ etyp := typ.common()
esize := typ.size
- if typ.equalfn == nil {
- array.equalfn = nil
- } else {
- eequal := typ.equalfn
- array.equalfn = func(p, q unsafe.Pointer) bool {
+ array.equal = nil
+ if eequal := etyp.equal; eequal != nil {
+ array.equal = func(p, q unsafe.Pointer) bool {
for i := 0; i < count; i++ {
pi := arrayAt(p, i, esize, "i < count")
qi := arrayAt(q, i, esize, "i < count")
@@ -2453,17 +2423,12 @@ func ArrayOf(count int, elem Type) Type {
}
}
- if typ.hashfn == nil {
- array.hashfn = nil
- } else {
- ehash := typ.hashfn
- array.hashfn = func(ptr unsafe.Pointer, seed uintptr) uintptr {
- o := seed
- for i := 0; i < count; i++ {
- o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
- }
- return o
- }
+ switch {
+ case count == 1 && !ifaceIndir(typ):
+ // array of 1 direct iface type can be direct
+ array.kind |= kindDirectIface
+ default:
+ array.kind &^= kindDirectIface
}
ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
diff --git a/libgo/go/reflect/value.go b/libgo/go/reflect/value.go
index c4a62c3..15a5024 100644
--- a/libgo/go/reflect/value.go
+++ b/libgo/go/reflect/value.go
@@ -2543,6 +2543,9 @@ func typedmemmove(t *rtype, dst, src unsafe.Pointer)
//go:noescape
func typedslicecopy(elemType *rtype, dst, src sliceHeader) int
+//go:noescape
+func typehash(t *rtype, p unsafe.Pointer, h uintptr) uintptr
+
// Dummy annotation marking that the value x escapes,
// for use in cases where the reflect code is so clever that
// the compiler cannot follow.
diff --git a/libgo/go/runtime/alg.go b/libgo/go/runtime/alg.go
index f96a75d..e802fdd 100644
--- a/libgo/go/runtime/alg.go
+++ b/libgo/go/runtime/alg.go
@@ -69,6 +69,9 @@ func memhash128(p unsafe.Pointer, h uintptr) uintptr {
return memhash(p, h, 16)
}
+// runtime variable to check if the processor we're running on
+// actually supports the instructions used by the AES-based
+// hash implementation.
var useAeshash bool
// in C code
@@ -134,14 +137,17 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr {
return h
}
t := *(**_type)(tab)
- fn := t.hashfn
- if fn == nil {
+ if t.equal == nil {
+ // Check hashability here. We could do this check inside
+ // typehash, but we want to report the topmost type in
+ // the error text (e.g. in a struct with a field of slice type
+ // we want to report the struct, not the slice).
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0)
+ return c1 * typehash(t, a.data, h^c0)
}
}
@@ -151,17 +157,74 @@ func nilinterhash(p unsafe.Pointer, h uintptr) uintptr {
if t == nil {
return h
}
- fn := t.hashfn
- if fn == nil {
+ if t.equal == nil {
+ // See comment in interhash above.
panic(errorString("hash of unhashable type " + t.string()))
}
if isDirectIface(t) {
- return c1 * fn(unsafe.Pointer(&a.data), h^c0)
+ return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0)
} else {
- return c1 * fn(a.data, h^c0)
+ return c1 * typehash(t, a.data, h^c0)
+ }
+}
+
+// typehash computes the hash of the object of type t at address p.
+// h is the seed.
+// This function is seldom used. Most maps use for hashing either
+// fixed functions (e.g. f32hash) or compiler-generated functions
+// (e.g. for a type like struct { x, y string }). This implementation
+// is slower but more general and is used for hashing interface types
+// (called from interhash or nilinterhash, above) or for hashing in
+// maps generated by reflect.MapOf (reflect_typehash, below).
+func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ if t.tflag&tflagRegularMemory != 0 {
+ return memhash(p, h, t.size)
+ }
+ switch t.kind & kindMask {
+ case kindFloat32:
+ return f32hash(p, h)
+ case kindFloat64:
+ return f64hash(p, h)
+ case kindComplex64:
+ return c64hash(p, h)
+ case kindComplex128:
+ return c128hash(p, h)
+ case kindString:
+ return strhash(p, h)
+ case kindInterface:
+ i := (*interfacetype)(unsafe.Pointer(t))
+ if len(i.methods) == 0 {
+ return nilinterhash(p, h)
+ }
+ return interhash(p, h)
+ case kindArray:
+ a := (*arraytype)(unsafe.Pointer(t))
+ for i := uintptr(0); i < a.len; i++ {
+ h = typehash(a.elem, add(p, i*a.elem.size), h)
+ }
+ return h
+ case kindStruct:
+ s := (*structtype)(unsafe.Pointer(t))
+ for _, f := range s.fields {
+ // TODO: maybe we could hash several contiguous fields all at once.
+ if f.name != nil && *f.name == "_" {
+ continue
+ }
+ h = typehash(f.typ, add(p, f.offset()), h)
+ }
+ return h
+ default:
+ // Should never happen, as typehash should only be called
+ // with comparable types.
+ panic(errorString("hash of unhashable type " + t.string()))
}
}
+//go:linkname reflect_typehash reflect.typehash
+func reflect_typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr {
+ return typehash(t, p, h)
+}
+
func memequal0(p, q unsafe.Pointer) bool {
return true
}
@@ -209,7 +272,7 @@ func efaceeq(x, y eface) bool {
if t == nil {
return true
}
- eq := t.equalfn
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
@@ -230,7 +293,7 @@ func ifaceeq(x, y iface) bool {
if t != *(**_type)(y.tab) {
return false
}
- eq := t.equalfn
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
@@ -251,7 +314,7 @@ func ifacevaleq(x iface, t *_type, p unsafe.Pointer) bool {
if xt != t {
return false
}
- eq := t.equalfn
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
@@ -272,7 +335,7 @@ func ifaceefaceeq(x iface, y eface) bool {
if xt != y._type {
return false
}
- eq := xt.equalfn
+ eq := xt.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + xt.string()))
}
@@ -289,7 +352,7 @@ func efacevaleq(x eface, t *_type, p unsafe.Pointer) bool {
if x._type != t {
return false
}
- eq := t.equalfn
+ eq := t.equal
if eq == nil {
panic(errorString("comparing uncomparable type " + t.string()))
}
diff --git a/libgo/go/runtime/map.go b/libgo/go/runtime/map.go
index 349577b..3672908 100644
--- a/libgo/go/runtime/map.go
+++ b/libgo/go/runtime/map.go
@@ -421,18 +421,16 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if msanenabled && h != nil {
msanread(key, t.key.size)
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- hashfn(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -459,7 +457,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
@@ -486,18 +484,16 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool)
if msanenabled && h != nil {
msanread(key, t.key.size)
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- hashfn(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -524,7 +520,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
@@ -546,9 +542,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe
if h == nil || h.count == 0 {
return nil, nil
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -575,7 +569,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if equalfn(key, k) {
+ if t.key.equal(key, k) {
e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
if t.indirectelem() {
e = *((*unsafe.Pointer)(e))
@@ -625,11 +619,9 @@ func mapassign(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
- // Set hashWriting after calling alg.hash, since alg.hash may panic,
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write.
h.flags ^= hashWriting
@@ -666,7 +658,7 @@ bucketloop:
if t.indirectkey() {
k = *((*unsafe.Pointer)(k))
}
- if !equalfn(key, k) {
+ if !t.key.equal(key, k) {
continue
}
// already have a mapping for key. Update it.
@@ -735,11 +727,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
if msanenabled && h != nil {
msanread(key, t.key.size)
}
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
if h == nil || h.count == 0 {
if t.hashMightPanic() {
- hashfn(key, 0) // see issue 23734
+ t.hasher(key, 0) // see issue 23734
}
return
}
@@ -747,9 +737,9 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
throw("concurrent map writes")
}
- hash := hashfn(key, uintptr(h.hash0))
+ hash := t.hasher(key, uintptr(h.hash0))
- // Set hashWriting after calling alg.hash, since alg.hash may panic,
+ // Set hashWriting after calling t.hasher, since t.hasher may panic,
// in which case we have not actually done a write (delete).
h.flags ^= hashWriting
@@ -774,7 +764,7 @@ search:
if t.indirectkey() {
k2 = *((*unsafe.Pointer)(k2))
}
- if !equalfn(key, k2) {
+ if !t.key.equal(key, k2) {
continue
}
// Only clear key if there are pointers in it.
@@ -925,8 +915,6 @@ func mapiternext(it *hiter) {
b := it.bptr
i := it.i
checkBucket := it.checkBucket
- hashfn := t.key.hashfn
- equalfn := t.key.equalfn
next:
if b == nil {
@@ -980,10 +968,10 @@ next:
// through the oldbucket, skipping any keys that will go
// to the other new bucket (each oldbucket expands to two
// buckets during a grow).
- if t.reflexivekey() || equalfn(k, k) {
+ if t.reflexivekey() || t.key.equal(k, k) {
// If the item in the oldbucket is not destined for
// the current new bucket in the iteration, skip it.
- hash := hashfn(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&bucketMask(it.B) != checkBucket {
continue
}
@@ -1001,7 +989,7 @@ next:
}
}
if (b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY) ||
- !(t.reflexivekey() || equalfn(k, k)) {
+ !(t.reflexivekey() || t.key.equal(k, k)) {
// This is the golden data, we can return it.
// OR
// key!=key, so the entry can't be deleted or updated, so we can just return it.
@@ -1238,8 +1226,8 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.hashfn(k2, uintptr(h.hash0))
- if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equalfn(k2, k2) {
+ hash := t.hasher(k2, uintptr(h.hash0))
+ if h.flags&iterator != 0 && !t.reflexivekey() && !t.key.equal(k2, k2) {
// If key != key (NaNs), then the hash could be (and probably
// will be) entirely different from the old hash. Moreover,
// it isn't reproducible. Reproducibility is required in the
@@ -1333,16 +1321,12 @@ func advanceEvacuationMark(h *hmap, t *maptype, newbit uintptr) {
}
}
-func ismapkey(t *_type) bool {
- return t.hashfn != nil
-}
-
// Reflect stubs. Called from ../reflect/asm_*.s
//go:linkname reflect_makemap reflect.makemap
func reflect_makemap(t *maptype, cap int) *hmap {
// Check invariants and reflects math.
- if !ismapkey(t.key) {
+ if t.key.equal == nil {
throw("runtime.reflect_makemap: unsupported map key type")
}
if t.key.size > maxKeySize && (!t.indirectkey() || t.keysize != uint8(sys.PtrSize)) ||
@@ -1445,10 +1429,5 @@ func reflectlite_maplen(h *hmap) int {
return h.count
}
-//go:linkname reflect_ismapkey reflect.ismapkey
-func reflect_ismapkey(t *_type) bool {
- return ismapkey(t)
-}
-
const maxZero = 1024 // must match value in cmd/compile/internal/gc/walk.go
var zeroVal [maxZero]byte
diff --git a/libgo/go/runtime/map_benchmark_test.go b/libgo/go/runtime/map_benchmark_test.go
index d37dadc..cf04ead 100644
--- a/libgo/go/runtime/map_benchmark_test.go
+++ b/libgo/go/runtime/map_benchmark_test.go
@@ -483,3 +483,33 @@ func BenchmarkMapStringConversion(b *testing.B) {
})
}
}
+
+var BoolSink bool
+
+func BenchmarkMapInterfaceString(b *testing.B) {
+ m := map[interface{}]bool{}
+
+ for i := 0; i < 100; i++ {
+ m[fmt.Sprintf("%d", i)] = true
+ }
+
+ key := (interface{})("A")
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ BoolSink = m[key]
+ }
+}
+func BenchmarkMapInterfacePtr(b *testing.B) {
+ m := map[interface{}]bool{}
+
+ for i := 0; i < 100; i++ {
+ i := i
+ m[&i] = true
+ }
+
+ key := new(int)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ BoolSink = m[key]
+ }
+}
diff --git a/libgo/go/runtime/map_fast32.go b/libgo/go/runtime/map_fast32.go
index 57b3c0f..fdc7f0e 100644
--- a/libgo/go/runtime/map_fast32.go
+++ b/libgo/go/runtime/map_fast32.go
@@ -33,7 +33,7 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -73,7 +73,7 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -108,9 +108,9 @@ func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -198,9 +198,9 @@ func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -289,9 +289,9 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
@@ -408,7 +408,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.hashfn(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
diff --git a/libgo/go/runtime/map_fast64.go b/libgo/go/runtime/map_fast64.go
index af86f74..26c60ae 100644
--- a/libgo/go/runtime/map_fast64.go
+++ b/libgo/go/runtime/map_fast64.go
@@ -33,7 +33,7 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -73,7 +73,7 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := bucketMask(h.B)
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -108,9 +108,9 @@ func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -198,9 +198,9 @@ func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -289,9 +289,9 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
throw("concurrent map writes")
}
- hash := t.key.hashfn(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
@@ -408,7 +408,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.hashfn(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
diff --git a/libgo/go/runtime/map_faststr.go b/libgo/go/runtime/map_faststr.go
index 3c5175d..1775214 100644
--- a/libgo/go/runtime/map_faststr.go
+++ b/libgo/go/runtime/map_faststr.go
@@ -83,7 +83,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
return unsafe.Pointer(&zeroVal[0])
}
dohash:
- hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -178,7 +178,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
return unsafe.Pointer(&zeroVal[0]), false
}
dohash:
- hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := bucketMask(h.B)
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
@@ -218,9 +218,9 @@ func mapassign_faststr(t *maptype, h *hmap, s string) unsafe.Pointer {
throw("concurrent map writes")
}
key := stringStructOf(&s)
- hash := t.key.hashfn(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&s)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapassign.
+ // Set hashWriting after calling t.hasher for consistency with mapassign.
h.flags ^= hashWriting
if h.buckets == nil {
@@ -314,9 +314,9 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) {
}
key := stringStructOf(&ky)
- hash := t.key.hashfn(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
+ hash := t.hasher(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
- // Set hashWriting after calling alg.hash for consistency with mapdelete
+ // Set hashWriting after calling t.hasher for consistency with mapdelete
h.flags ^= hashWriting
bucket := hash & bucketMask(h.B)
@@ -436,7 +436,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
if !h.sameSizeGrow() {
// Compute hash to make our evacuation decision (whether we need
// to send this key/elem to bucket x or bucket y).
- hash := t.key.hashfn(k, uintptr(h.hash0))
+ hash := t.hasher(k, uintptr(h.hash0))
if hash&newbit != 0 {
useY = 1
}
diff --git a/libgo/go/runtime/map_test.go b/libgo/go/runtime/map_test.go
index bc5f738..b13d269 100644
--- a/libgo/go/runtime/map_test.go
+++ b/libgo/go/runtime/map_test.go
@@ -1172,3 +1172,64 @@ func TestMapTombstones(t *testing.T) {
}
runtime.MapTombstoneCheck(m)
}
+
+type canString int
+
+func (c canString) String() string {
+ return fmt.Sprintf("%d", int(c))
+}
+
+func TestMapInterfaceKey(t *testing.T) {
+ // Test all the special cases in runtime.typehash.
+ type GrabBag struct {
+ f32 float32
+ f64 float64
+ c64 complex64
+ c128 complex128
+ s string
+ i0 interface{}
+ i1 interface {
+ String() string
+ }
+ a [4]string
+ }
+
+ m := map[interface{}]bool{}
+ // Put a bunch of data in m, so that a bad hash is likely to
+ // lead to a bad bucket, which will lead to a missed lookup.
+ for i := 0; i < 1000; i++ {
+ m[i] = true
+ }
+ m[GrabBag{f32: 1.0}] = true
+ if !m[GrabBag{f32: 1.0}] {
+ panic("f32 not found")
+ }
+ m[GrabBag{f64: 1.0}] = true
+ if !m[GrabBag{f64: 1.0}] {
+ panic("f64 not found")
+ }
+ m[GrabBag{c64: 1.0i}] = true
+ if !m[GrabBag{c64: 1.0i}] {
+ panic("c64 not found")
+ }
+ m[GrabBag{c128: 1.0i}] = true
+ if !m[GrabBag{c128: 1.0i}] {
+ panic("c128 not found")
+ }
+ m[GrabBag{s: "foo"}] = true
+ if !m[GrabBag{s: "foo"}] {
+ panic("string not found")
+ }
+ m[GrabBag{i0: "foo"}] = true
+ if !m[GrabBag{i0: "foo"}] {
+ panic("interface{} not found")
+ }
+ m[GrabBag{i1: canString(5)}] = true
+ if !m[GrabBag{i1: canString(5)}] {
+ panic("interface{String() string} not found")
+ }
+ m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] = true
+ if !m[GrabBag{a: [4]string{"foo", "bar", "baz", "bop"}}] {
+ panic("array not found")
+ }
+}
diff --git a/libgo/go/runtime/type.go b/libgo/go/runtime/type.go
index 63ad310..94abbb8 100644
--- a/libgo/go/runtime/type.go
+++ b/libgo/go/runtime/type.go
@@ -12,18 +12,32 @@ import (
"unsafe"
)
+// tflag is documented in reflect/type.go.
+//
+// tflag values must be kept in sync with copies in:
+// go/types.cc
+// reflect/type.go
+// internal/reflectlite/type.go
+type tflag uint8
+
+const (
+ tflagRegularMemory tflag = 1 << 3 // equal and hash can treat values of this type as a single region of t.size bytes
+)
+
type _type struct {
size uintptr
ptrdata uintptr
hash uint32
- kind uint8
- align int8
+ tflag tflag
+ align uint8
fieldAlign uint8
- _ uint8
-
- hashfn func(unsafe.Pointer, uintptr) uintptr
- equalfn func(unsafe.Pointer, unsafe.Pointer) bool
-
+ kind uint8
+ // function for comparing objects of this type
+ // (ptr to object A, ptr to object B) -> ==?
+ equal func(unsafe.Pointer, unsafe.Pointer) bool
+ // gcdata stores the GC type data for the garbage collector.
+ // If the KindGCProg bit is set in kind, gcdata is a GC program.
+ // Otherwise it is a ptrmask bitmap. See mbitmap.go for details.
gcdata *byte
_string *string
*uncommontype
@@ -74,10 +88,12 @@ type interfacetype struct {
}
type maptype struct {
- typ _type
- key *_type
- elem *_type
- bucket *_type // internal type representing a hash bucket
+ typ _type
+ key *_type
+ elem *_type
+ bucket *_type // internal type representing a hash bucket
+ // function for hashing keys (ptr to key, seed) -> hash
+ hasher func(unsafe.Pointer, uintptr) uintptr
keysize uint8 // size of key slot
elemsize uint8 // size of elem slot
bucketsize uint16 // size of bucket
diff --git a/libgo/runtime/go-unsafe-pointer.c b/libgo/runtime/go-unsafe-pointer.c
index d987aca..364878e 100644
--- a/libgo/runtime/go-unsafe-pointer.c
+++ b/libgo/runtime/go-unsafe-pointer.c
@@ -36,8 +36,6 @@ static const String reflection_string =
const byte unsafe_Pointer_gc[] = { 1 };
-extern const FuncVal runtime_pointerhash_descriptor
- __asm__ (GOSYM_PREFIX "runtime.pointerhash..f");
extern const FuncVal runtime_pointerequal_descriptor
__asm__ (GOSYM_PREFIX "runtime.pointerequal..f");
@@ -49,17 +47,15 @@ const struct _type unsafe_Pointer =
sizeof (void *),
/* hash */
78501163U,
- /* kind */
- kindUnsafePointer | kindDirectIface,
+ /* tflag */
+ tflagRegularMemory,
/* align */
__alignof (void *),
/* fieldAlign */
offsetof (struct field_align, p) - 1,
- /* _ */
- 0,
- /* hashfn */
- &runtime_pointerhash_descriptor,
- /* equalfn */
+ /* kind */
+ kindUnsafePointer | kindDirectIface,
+ /* equal */
&runtime_pointerequal_descriptor,
/* gcdata */
unsafe_Pointer_gc,
@@ -101,16 +97,14 @@ const struct ptrtype pointer_unsafe_Pointer =
sizeof (void *),
/* hash */
1256018616U,
- /* kind */
- kindPtr | kindDirectIface,
+ /* tflag */
+ tflagRegularMemory,
/* align */
__alignof (void *),
/* fieldAlign */
offsetof (struct field_align, p) - 1,
- /* _ */
- 0,
- /*_hashfn */
- &runtime_pointerhash_descriptor,
+ /* kind */
+ kindPtr | kindDirectIface,
/* equalfn */
&runtime_pointerequal_descriptor,
/* gcdata */