PARAMS ((struct elf_link_hash_entry *, PTR));
static boolean elf_link_renumber_dynsyms
PARAMS ((struct elf_link_hash_entry *, PTR));
+static boolean elf_collect_hash_codes
+ PARAMS ((struct elf_link_hash_entry *, PTR));
/* Given an ELF BFD, add symbols to the global hash table as
appropriate. */
{
h->elf_link_hash_flags &=~ ELF_LINK_HASH_DEF_DYNAMIC;
hi->elf_link_hash_flags |= ELF_LINK_HASH_REF_DYNAMIC;
-#if 0
- /* Don't record the unversioned alias. This
- produces bogus ABS symbols on export as
- they never get defined peroperly. */
- if (! _bfd_elf_link_record_dynamic_symbol (info, hi))
- goto error_return;
-#endif
+ if (hi->elf_link_hash_flags
+ & (ELF_LINK_HASH_REF_REGULAR
+ | ELF_LINK_HASH_DEF_REGULAR))
+ {
+ if (! _bfd_elf_link_record_dynamic_symbol (info,
+ hi))
+ goto error_return;
+ }
}
/* Now set HI to H, so that the following code
16411, 32771, 0
};
+/* Compute bucket count for hashing table.  We do not use a static set
+   of possible tables sizes anymore.  Instead we determine for all
+   possible reasonable sizes of the table the outcome (i.e., the
+   number of collisions etc) and choose the best solution.  The
+   weighting functions are not too simple to allow the table to grow
+   without bounds.  Instead one of the weighting factors is the size.
+   Therefore the result is always a good payoff between few collisions
+   (= short chain lengths) and table size.  Returns the chosen bucket
+   count, or 0 if memory for the intermediate arrays could not be
+   allocated.  */
+static size_t
+compute_bucket_count (info)
+     struct bfd_link_info *info;
+{
+  size_t dynsymcount = elf_hash_table (info)->dynsymcount;
+  size_t best_size;
+  unsigned long int *hashcodes;
+  unsigned long int *hashcodesp;
+  unsigned long int i;
+
+  /* Compute the hash values for all exported symbols.  At the same
+     time store the values in an array so that we could use them for
+     optimizations.  */
+  hashcodes = (unsigned long int *) bfd_malloc (dynsymcount
+						* sizeof (unsigned long int));
+  if (hashcodes == NULL)
+    return 0;
+  hashcodesp = hashcodes;
+
+  /* Put all hash values in HASHCODES.  */
+  elf_link_hash_traverse (elf_hash_table (info),
+			  elf_collect_hash_codes, &hashcodesp);
+
+/* We have a problem here.  The following code to optimize the table size
+   requires an integer type with more the 32 bits.  If BFD_HOST_U_64_BIT
+   is set or GCC 2 is used we know about such a type.  */
+#if defined BFD_HOST_U_64_BIT || __GNUC__ >= 2
+# ifndef BFD_HOST_U_64_BIT
+# define BFD_HOST_U_64_BIT unsigned long long int
+# endif
+  if (info->optimize == true)
+    {
+      /* Only the symbols which actually made it into the dynamic
+	 symbol table were stored, so NSYMS may be smaller than
+	 DYNSYMCOUNT.  */
+      unsigned long int nsyms = hashcodesp - hashcodes;
+      size_t minsize;
+      size_t maxsize;
+      BFD_HOST_U_64_BIT best_chlen = ~((BFD_HOST_U_64_BIT) 0);
+      unsigned long int *counts;
+
+      /* Possible optimization parameters: if we have NSYMS symbols we say
+	 that the hashing table must at least have NSYMS/4 and at most
+	 2*NSYMS buckets.  */
+      minsize = nsyms / 4;
+      if (minsize == 0)
+	minsize = 1;
+      best_size = maxsize = nsyms * 2;
+      /* Guard against NSYMS == 0: otherwise MAXSIZE is 0, the search
+	 loop below never runs, BEST_SIZE stays 0, and our zero return
+	 value would wrongly be taken as an out-of-memory failure by
+	 the caller.  */
+      if (maxsize < minsize)
+	best_size = maxsize = minsize;
+
+      /* Create array where we count the collisions in.  We must use bfd_malloc
+	 since the size could be large.  */
+      counts = (unsigned long int *) bfd_malloc (maxsize
+						 * sizeof (unsigned long int));
+      if (counts == NULL)
+	{
+	  free (hashcodes);
+	  return 0;
+	}
+
+      /* Compute the "optimal" size for the hash table.  The criteria is a
+	 minimal chain length.  The minor criteria is (of course) the size
+	 of the table.  */
+      for (i = minsize; i < maxsize; ++i)
+	{
+	  /* Walk through the array of hashcodes and count the collisions.  */
+	  BFD_HOST_U_64_BIT max;
+	  unsigned long int j;
+	  unsigned long int fact;
+
+	  memset (counts, '\0', i * sizeof (unsigned long int));
+
+	  /* Determine how often each hash bucket is used.  */
+	  for (j = 0; j < nsyms; ++j)
+	    ++counts[hashcodes[j] % i];
+
+	  /* For the weight function we need some information about the
+	     pagesize on the target.  This information need not be 100%
+	     accurate.  Since this information is not available (so far) we
+	     define it here to a reasonable default value.  If it is crucial
+	     to have a better value some day simply define this value.  */
+# ifndef BFD_TARGET_PAGESIZE
+# define BFD_TARGET_PAGESIZE	(4096)
+# endif
+
+	  /* We in any case need 2 + NSYMS entries for the size values and
+	     the chains.  */
+	  max = (2 + nsyms) * (ARCH_SIZE / 8);
+
+# if 1
+	  /* Variant 1: optimize for short chains.  We add the squares
+	     of all the chain lengths (which favours many small chains
+	     over a few long chains).  */
+	  for (j = 0; j < i; ++j)
+	    max += counts[j] * counts[j];
+
+	  /* This adds penalties for the overall size of the table.  */
+	  fact = i / (BFD_TARGET_PAGESIZE / (ARCH_SIZE / 8)) + 1;
+	  max *= fact * fact;
+# else
+	  /* Variant 2: Optimize a lot more for small table.  Here we
+	     also add squares of the size but we also add penalties for
+	     empty slots (the +1 term).  */
+	  for (j = 0; j < i; ++j)
+	    max += (1 + counts[j]) * (1 + counts[j]);
+
+	  /* The overall size of the table is considered, but not as
+	     strong as in variant 1, where it is squared.  */
+	  fact = i / (BFD_TARGET_PAGESIZE / (ARCH_SIZE / 8)) + 1;
+	  max *= fact;
+# endif
+
+	  /* Compare with current best results.  */
+	  if (max < best_chlen)
+	    {
+	      best_chlen = max;
+	      best_size = i;
+	    }
+	}
+
+      free (counts);
+    }
+  else
+#endif
+    {
+      /* This is the fallback solution if no 64bit type is available or if we
+	 are not supposed to spend much time on optimizations.  We select the
+	 bucket count using a fixed set of numbers.  */
+      for (i = 0; elf_buckets[i] != 0; i++)
+	{
+	  best_size = elf_buckets[i];
+	  if (dynsymcount < elf_buckets[i + 1])
+	    break;
+	}
+    }
+
+  /* Free the arrays we needed.  */
+  free (hashcodes);
+
+  return best_size;
+}
+
/* Set up the sizes and contents of the ELF dynamic sections. This is
called by the ELF linker emulation before_allocation routine. We
must set the sizes of the sections before the linker sets the
elf_swap_symbol_out (output_bfd, &isym,
(PTR) (Elf_External_Sym *) s->contents);
- for (i = 0; elf_buckets[i] != 0; i++)
- {
- bucketcount = elf_buckets[i];
- if (dynsymcount < elf_buckets[i + 1])
- break;
- }
+ /* Compute the size of the hashing table. As a side effect this
+ computes the hash values for all the names we export. */
+ bucketcount = compute_bucket_count (info);
s = bfd_get_section_by_name (dynobj, ".hash");
BFD_ASSERT (s != NULL);
}
}
+ /* If a symbol has no type and no size and does not require a PLT
+ entry, then we are probably about to do the wrong thing here: we
+ are probably going to create a COPY reloc for an empty object.
+ This case can arise when a shared object is built with assembly
+ code, and the assembly code fails to set the symbol type. */
+ if (h->size == 0
+ && h->type == STT_NOTYPE
+ && (h->elf_link_hash_flags & ELF_LINK_HASH_NEEDS_PLT) == 0)
+ (*_bfd_error_handler)
+ (_("warning: type and size of dynamic symbol `%s' are not defined"),
+ h->root.root.string);
+
dynobj = elf_hash_table (eif->info)->dynobj;
bed = get_elf_backend_data (dynobj);
if (! (*bed->elf_backend_adjust_dynamic_symbol) (eif->info, h))
if (h->dynindx != -1
&& elf_hash_table (finfo->info)->dynamic_sections_created)
{
- char *p, *copy;
- const char *name;
size_t bucketcount;
size_t bucket;
bfd_byte *bucketpos;
finfo->dynsym_sec->contents)
+ h->dynindx));
- /* We didn't include the version string in the dynamic string
- table, so we must not consider it in the hash table. */
- name = h->root.root.string;
- p = strchr (name, ELF_VER_CHR);
- if (p == NULL)
- copy = NULL;
- else
- {
- copy = bfd_alloc (finfo->output_bfd, p - name + 1);
- strncpy (copy, name, p - name);
- copy[p - name] = '\0';
- name = copy;
- }
-
bucketcount = elf_hash_table (finfo->info)->bucketcount;
- bucket = bfd_elf_hash ((const unsigned char *) name) % bucketcount;
+ bucket = h->elf_hash_value % bucketcount;
bucketpos = ((bfd_byte *) finfo->hash_sec->contents
+ (bucket + 2) * (ARCH_SIZE / 8));
chain = get_word (finfo->output_bfd, bucketpos);
((bfd_byte *) finfo->hash_sec->contents
+ (bucketcount + 2 + h->dynindx) * (ARCH_SIZE / 8)));
- if (copy != NULL)
- bfd_release (finfo->output_bfd, copy);
-
if (finfo->symver_sec != NULL && finfo->symver_sec->contents != NULL)
{
Elf_Internal_Versym iversym;
\f
/* Garbage collect unused sections. */
-static boolean elf_gc_mark
+static boolean elf_gc_mark
PARAMS ((struct bfd_link_info *info, asection *sec,
asection * (*gc_mark_hook)
PARAMS ((bfd *, struct bfd_link_info *, Elf_Internal_Rela *,
struct elf_link_hash_entry *, Elf_Internal_Sym *));
{
boolean ret = true;
-
+
sec->gc_mark = 1;
/* Look through the section relocs. */
/* None of this table's entries were referenced. Re-use the
parent's table. */
h->vtable_entries_used = h->vtable_parent->vtable_entries_used;
+ h->vtable_entries_size = h->vtable_parent->vtable_entries_size;
}
else
{
pu = h->vtable_parent->vtable_entries_used;
if (pu != NULL)
{
- n = h->vtable_parent->size / FILE_ALIGN;
+ n = h->vtable_parent->vtable_entries_size / FILE_ALIGN;
while (--n != 0)
{
if (*pu) *cu = true;
if (h->vtable_parent == NULL)
return true;
- BFD_ASSERT (h->root.type == bfd_link_hash_defined
+ BFD_ASSERT (h->root.type == bfd_link_hash_defined
|| h->root.type == bfd_link_hash_defweak);
sec = h->root.u.def.section;
if (rel->r_offset >= hstart && rel->r_offset < hend)
{
/* If the entry is in use, do nothing. */
- if (h->vtable_entries_used)
+ if (h->vtable_entries_used
+ && (rel->r_offset - hstart) < h->vtable_entries_size)
{
bfd_vma entry = (rel->r_offset - hstart) / FILE_ALIGN;
if (h->vtable_entries_used[entry])
struct elf_link_hash_entry *h, Elf_Internal_Sym *));
if (!get_elf_backend_data (abfd)->can_gc_sections
- || info->relocateable)
+ || info->relocateable
+ || elf_hash_table (info)->dynamic_sections_created)
return true;
/* Apply transitive closure to the vtable entry usage info. */
struct elf_link_hash_entry *h;
bfd_vma addend;
{
- if (h->vtable_entries_used == NULL)
+ if (addend >= h->vtable_entries_size)
{
+ size_t size, bytes;
+ boolean *ptr = h->vtable_entries_used;
+
+ /* While the symbol is undefined, we have to be prepared to handle
+ a zero size. */
+ if (h->root.type == bfd_link_hash_undefined)
+ size = addend;
+ else
+ {
+ size = h->size;
+ if (size < addend)
+ {
+ /* Oops! We've got a reference past the defined end of
+ the table. This is probably a bug -- shall we warn? */
+ size = addend;
+ }
+ }
+
/* Allocate one extra entry for use as a "done" flag for the
consolidation pass. */
- size_t size = (h->size / FILE_ALIGN + 1) * sizeof(boolean);
- h->vtable_entries_used = (boolean *) bfd_alloc (abfd, size);
- if (h->vtable_entries_used == NULL)
- return false;
+ bytes = (size / FILE_ALIGN + 1) * sizeof(boolean);
+
+ if (ptr)
+ {
+ size_t oldbytes;
+
+ ptr = realloc (ptr-1, bytes);
+ if (ptr == NULL)
+ return false;
+
+ oldbytes = (h->vtable_entries_size/FILE_ALIGN + 1) * sizeof(boolean);
+ memset (ptr + oldbytes, 0, bytes - oldbytes);
+ }
+ else
+ {
+ ptr = calloc (1, bytes);
+ if (ptr == NULL)
+ return false;
+ }
/* And arrange for that done flag to be at index -1. */
- memset (h->vtable_entries_used++, 0, size);
+ h->vtable_entries_used = ptr+1;
+ h->vtable_entries_size = size;
}
h->vtable_entries_used[addend / FILE_ALIGN] = true;
return true;
}
-/* And an accompanying bit to work out final got entry offsets once
+/* And an accompanying bit to work out final got entry offsets once
we're done. Should be called from final_link. */
boolean
/* Invoke the regular ELF backend linker to do all the work. */
return elf_bfd_final_link (abfd, info);
}
+
+/* This function will be called through elf_link_hash_traverse to store
+   the hash value of each exported symbol both in the array whose
+   cursor is passed (via pointer-to-pointer) in DATA and in the hash
+   entry itself, for later use when filling in the .hash section.
+   Returns false only on an out-of-memory condition, which stops the
+   traversal.  */
+
+static boolean
+elf_collect_hash_codes (h, data)
+     struct elf_link_hash_entry *h;
+     PTR data;
+{
+  unsigned long **valuep = (unsigned long **) data;
+  const char *name;
+  char *p;
+  unsigned long ha;
+  char *alc = NULL;
+
+  /* Ignore indirect symbols.  These are added by the versioning code.  */
+  if (h->dynindx == -1)
+    return true;
+
+  /* Hash only the unversioned part of a versioned name, since the
+     version string was not entered in the dynamic string table.  */
+  name = h->root.root.string;
+  p = strchr (name, ELF_VER_CHR);
+  if (p != NULL)
+    {
+      alc = bfd_malloc (p - name + 1);
+      /* Check the allocation before using it; dereferencing a NULL
+	 return here would crash on out-of-memory.  */
+      if (alc == NULL)
+	return false;
+      memcpy (alc, name, p - name);
+      alc[p - name] = '\0';
+      name = alc;
+    }
+
+  /* Compute the hash value.  */
+  ha = bfd_elf_hash (name);
+
+  /* Store the found hash value in the array given as the argument.  */
+  *(*valuep)++ = ha;
+
+  /* And store it in the struct so that we can put it in the hash table
+     later.  */
+  h->elf_hash_value = ha;
+
+  if (alc != NULL)
+    free (alc);
+
+  return true;
+}