+/* External entry points for sizing and building linker stubs. */
+
+/* Determine and set the size of the stub section for a final link.
+
+ The basic idea here is to examine all the relocations looking for
+ PC-relative calls to a target that is unreachable with a "bl"
+ instruction. */
+
boolean
elf32_hppa_size_stubs (output_bfd, stub_bfd, info, multi_subspace, group_size,
                       add_stub_section, layout_sections_again)
     bfd *output_bfd;
     bfd *stub_bfd;
     struct bfd_link_info *info;
     boolean multi_subspace;
     bfd_signed_vma group_size;
     asection * (*add_stub_section) PARAMS ((const char *, asection *));
     void (*layout_sections_again) PARAMS ((void));
{
  bfd *input_bfd;
  asection *section;
  asection **input_list, **list;
  Elf_Internal_Sym *local_syms, **all_local_syms;
  unsigned int bfd_indx, bfd_count;
  int top_id, top_index;
  struct elf32_hppa_link_hash_table *htab;
  bfd_size_type stub_group_size;
  boolean stubs_always_before_branch;
  boolean stub_changed = 0;
  boolean ret = 0;
  bfd_size_type amt;

  htab = hppa_link_hash_table (info);

  /* Stash our params away.  */
  htab->stub_bfd = stub_bfd;
  htab->multi_subspace = multi_subspace;
  htab->add_stub_section = add_stub_section;
  htab->layout_sections_again = layout_sections_again;

  /* A negative GROUP_SIZE requests that stubs be placed before the
     branches they serve; the magnitude is the group size either way.  */
  stubs_always_before_branch = group_size < 0;
  if (group_size < 0)
    stub_group_size = -group_size;
  else
    stub_group_size = group_size;
  if (stub_group_size == 1)
    {
      /* Default values.  A group size of 1 is the caller's way of
	 saying "pick a sensible default for the branch reach".  */
      stub_group_size = 7680000;
      if (htab->has_17bit_branch || htab->multi_subspace)
	stub_group_size = 240000;
      if (htab->has_12bit_branch)
	stub_group_size = 7500;
    }

  /* Count the number of input BFDs and find the top input section id.  */
  for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link_next)
    {
      bfd_count += 1;
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (top_id < section->id)
	    top_id = section->id;
	}
    }

  /* stub_group is indexed by input section id, hence top_id + 1 slots.  */
  amt = sizeof (struct map_stub) * (top_id + 1);
  htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
  if (htab->stub_group == NULL)
    return false;

  /* Make a list of input sections for each output section included in
     the link.

     We can't use output_bfd->section_count here to find the top output
     section index as some sections may have been removed, and
     _bfd_strip_section_from_output doesn't renumber the indices.  */
  for (section = output_bfd->sections, top_index = 0;
       section != NULL;
       section = section->next)
    {
      if (top_index < section->index)
	top_index = section->index;
    }

  amt = sizeof (asection *) * (top_index + 1);
  input_list = (asection **) bfd_malloc (amt);
  if (input_list == NULL)
    return false;

  /* For sections we aren't interested in, mark their entries with a
     value we can check later.  The do/while covers every slot from
     top_index down to and including index 0.  */
  list = input_list + top_index;
  do
    *list = bfd_abs_section_ptr;
  while (list-- != input_list);

  /* Only code sections get a (initially empty) list; everything else
     keeps the bfd_abs_section_ptr sentinel set above.  */
  for (section = output_bfd->sections;
       section != NULL;
       section = section->next)
    {
      if ((section->flags & SEC_CODE) != 0)
	input_list[section->index] = NULL;
    }

  /* Now actually build the lists.  */
  for (input_bfd = info->input_bfds;
       input_bfd != NULL;
       input_bfd = input_bfd->link_next)
    {
      for (section = input_bfd->sections;
	   section != NULL;
	   section = section->next)
	{
	  if (section->output_section != NULL
	      && section->output_section->owner == output_bfd
	      && section->output_section->index <= top_index)
	    {
	      list = input_list + section->output_section->index;
	      if (*list != bfd_abs_section_ptr)
		{
		  /* Steal the link_sec pointer for our list.  */
#define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
		  /* This happens to make the list in reverse order,
		     which is what we want.  */
		  PREV_SEC (section) = *list;
		  *list = section;
		}
	    }
	}
    }

  /* See whether we can group stub sections together.  Grouping stub
     sections may result in fewer stubs.  More importantly, we need to
     put all .init* and .fini* stubs at the beginning of the .init or
     .fini output sections respectively, because glibc splits the
     _init and _fini functions into multiple parts.  Putting a stub in
     the middle of a function is not a good idea.  */
  list = input_list + top_index;
  do
    {
      asection *tail = *list;
      /* Skip non-code output sections (still marked with the sentinel);
	 `continue' in a do/while jumps to the loop test below.  */
      if (tail == bfd_abs_section_ptr)
	continue;
      while (tail != NULL)
	{
	  asection *curr;
	  asection *prev;
	  bfd_size_type total;

	  curr = tail;
	  if (tail->_cooked_size)
	    total = tail->_cooked_size;
	  else
	    total = tail->_raw_size;

	  /* Walk backwards (the list is in reverse address order) while
	     the span from CURR to the end of TAIL stays within the
	     group size.  */
	  while ((prev = PREV_SEC (curr)) != NULL
		 && ((total += curr->output_offset - prev->output_offset)
		     < stub_group_size))
	    curr = prev;

	  /* OK, the size from the start of CURR to the end is less
	     than 240000 bytes and thus can be handled by one stub
	     section.  (or the tail section is itself larger than
	     240000 bytes, in which case we may be toast.)
	     We should really be keeping track of the total size of
	     stubs added here, as stubs contribute to the final output
	     section size.  That's a little tricky, and this way will
	     only break if stubs added total more than 22144 bytes, or
	     2768 long branch stubs.  It seems unlikely for more than
	     2768 different functions to be called, especially from
	     code only 240000 bytes long.  This limit used to be
	     250000, but c++ code tends to generate lots of little
	     functions, and sometimes violated the assumption.  */
	  do
	    {
	      prev = PREV_SEC (tail);
	      /* Set up this stub group.  */
	      htab->stub_group[tail->id].link_sec = curr;
	    }
	  while (tail != curr && (tail = prev) != NULL);

	  /* But wait, there's more!  Input sections up to 240000
	     bytes before the stub section can be handled by it too.  */
	  if (!stubs_always_before_branch)
	    {
	      total = 0;
	      while (prev != NULL
		     && ((total += tail->output_offset - prev->output_offset)
			 < stub_group_size))
		{
		  tail = prev;
		  prev = PREV_SEC (tail);
		  htab->stub_group[tail->id].link_sec = curr;
		}
	    }
	  tail = prev;
	}
    }
  while (list-- != input_list);
  free (input_list);
#undef PREV_SEC

  /* We want to read in symbol extension records only once.  To do this
     we need to read in the local symbols in parallel and save them for
     later use; so hold pointers to the local symbols in an array.  */
  amt = sizeof (Elf_Internal_Sym *) * bfd_count;
  all_local_syms = (Elf_Internal_Sym **) bfd_zmalloc (amt);
  if (all_local_syms == NULL)
    return false;

  /* Walk over all the input BFDs, swapping in local symbols.
     If we are creating a shared library, create hash entries for the
     export stubs.  */
  for (input_bfd = info->input_bfds, bfd_indx = 0;
       input_bfd != NULL;
       input_bfd = input_bfd->link_next, bfd_indx++)
    {
      Elf_Internal_Shdr *symtab_hdr;
      Elf_Internal_Shdr *shndx_hdr;
      Elf_Internal_Sym *isym;
      Elf32_External_Sym *ext_syms, *esym, *end_sy;
      Elf_External_Sym_Shndx *shndx_buf, *shndx;
      bfd_size_type sec_size;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* We need an array of the local symbols attached to the input bfd.
	 Unfortunately, we're going to have to read & swap them in.  */
      sec_size = symtab_hdr->sh_info;
      sec_size *= sizeof (Elf_Internal_Sym);
      local_syms = (Elf_Internal_Sym *) bfd_malloc (sec_size);
      if (local_syms == NULL)
	goto error_ret_free_local;

      /* Recorded here so the cleanup code at error_ret_free_local can
	 free everything allocated so far.  */
      all_local_syms[bfd_indx] = local_syms;
      sec_size = symtab_hdr->sh_info;
      sec_size *= sizeof (Elf32_External_Sym);
      ext_syms = (Elf32_External_Sym *) bfd_malloc (sec_size);
      if (ext_syms == NULL)
	goto error_ret_free_local;

      if (bfd_seek (input_bfd, symtab_hdr->sh_offset, SEEK_SET) != 0
	  || bfd_bread ((PTR) ext_syms, sec_size, input_bfd) != sec_size)
	{
	error_ret_free_ext_syms:
	  free (ext_syms);
	  goto error_ret_free_local;
	}

      /* Optional extended section-index table (for many sections).  */
      shndx_buf = NULL;
      shndx_hdr = &elf_tdata (input_bfd)->symtab_shndx_hdr;
      if (shndx_hdr->sh_size != 0)
	{
	  sec_size = symtab_hdr->sh_info;
	  sec_size *= sizeof (Elf_External_Sym_Shndx);
	  shndx_buf = (Elf_External_Sym_Shndx *) bfd_malloc (sec_size);
	  if (shndx_buf == NULL)
	    goto error_ret_free_ext_syms;

	  if (bfd_seek (input_bfd, shndx_hdr->sh_offset, SEEK_SET) != 0
	      || bfd_bread ((PTR) shndx_buf, sec_size, input_bfd) != sec_size)
	    {
	      free (shndx_buf);
	      goto error_ret_free_ext_syms;
	    }
	}

      /* Swap the local symbols in.  */
      for (esym = ext_syms, end_sy = esym + symtab_hdr->sh_info,
	     isym = local_syms, shndx = shndx_buf;
	   esym < end_sy;
	   esym++, isym++, shndx = (shndx ? shndx + 1 : NULL))
	bfd_elf32_swap_symbol_in (input_bfd, esym, shndx, isym);

      /* Now we can free the external symbols.  (free of a NULL
	 shndx_buf is a no-op.)  */
      free (shndx_buf);
      free (ext_syms);

      if (info->shared && htab->multi_subspace)
	{
	  struct elf_link_hash_entry **sym_hashes;
	  struct elf_link_hash_entry **end_hashes;
	  unsigned int symcount;

	  symcount = (symtab_hdr->sh_size / sizeof (Elf32_External_Sym)
		      - symtab_hdr->sh_info);
	  sym_hashes = elf_sym_hashes (input_bfd);
	  end_hashes = sym_hashes + symcount;

	  /* Look through the global syms for functions;  We need to
	     build export stubs for all globally visible functions.  */
	  for (; sym_hashes < end_hashes; sym_hashes++)
	    {
	      struct elf32_hppa_link_hash_entry *hash;

	      hash = (struct elf32_hppa_link_hash_entry *) *sym_hashes;

	      /* Follow indirect/warning links to the real symbol.  */
	      while (hash->elf.root.type == bfd_link_hash_indirect
		     || hash->elf.root.type == bfd_link_hash_warning)
		hash = ((struct elf32_hppa_link_hash_entry *)
			hash->elf.root.u.i.link);

	      /* At this point in the link, undefined syms have been
		 resolved, so we need to check that the symbol was
		 defined in this BFD.  */
	      if ((hash->elf.root.type == bfd_link_hash_defined
		   || hash->elf.root.type == bfd_link_hash_defweak)
		  && hash->elf.type == STT_FUNC
		  && hash->elf.root.u.def.section->output_section != NULL
		  && (hash->elf.root.u.def.section->output_section->owner
		      == output_bfd)
		  && hash->elf.root.u.def.section->owner == input_bfd
		  && (hash->elf.elf_link_hash_flags & ELF_LINK_HASH_DEF_REGULAR)
		  && !(hash->elf.elf_link_hash_flags & ELF_LINK_FORCED_LOCAL)
		  && ELF_ST_VISIBILITY (hash->elf.other) == STV_DEFAULT)
		{
		  asection *sec;
		  const char *stub_name;
		  struct elf32_hppa_stub_hash_entry *stub_entry;

		  sec = hash->elf.root.u.def.section;
		  stub_name = hash->elf.root.root.string;
		  stub_entry = hppa_stub_hash_lookup (&htab->stub_hash_table,
						      stub_name,
						      false, false);
		  if (stub_entry == NULL)
		    {
		      stub_entry = hppa_add_stub (stub_name, sec, htab);
		      if (!stub_entry)
			goto error_ret_free_local;

		      stub_entry->target_value = hash->elf.root.u.def.value;
		      stub_entry->target_section = hash->elf.root.u.def.section;
		      stub_entry->stub_type = hppa_stub_export;
		      stub_entry->h = hash;
		      stub_changed = 1;
		    }
		  else
		    {
		      (*_bfd_error_handler) (_("%s: duplicate export stub %s"),
					     bfd_archive_filename (input_bfd),
					     stub_name);
		    }
		}
	    }
	}
    }

  /* Iterate until no new stubs are added; each pass may move code and
     thus bring new branches out of range.  */
  while (1)
    {
      asection *stub_sec;

      for (input_bfd = info->input_bfds, bfd_indx = 0;
	   input_bfd != NULL;
	   input_bfd = input_bfd->link_next, bfd_indx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;

	  /* We'll need the symbol table in a second.  */
	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
	  if (symtab_hdr->sh_info == 0)
	    continue;

	  local_syms = all_local_syms[bfd_indx];

	  /* Walk over each section attached to the input bfd.  */
	  for (section = input_bfd->sections;
	       section != NULL;
	       section = section->next)
	    {
	      Elf_Internal_Shdr *input_rel_hdr;
	      Elf32_External_Rela *external_relocs, *erelaend, *erela;
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((section->flags & SEC_RELOC) == 0
		  || section->reloc_count == 0)
		continue;

	      /* If this section is a link-once section that will be
		 discarded, then don't create any stubs.  */
	      if (section->output_section == NULL
		  || section->output_section->owner != output_bfd)
		continue;

	      /* Allocate space for the external relocations.  */
	      amt = section->reloc_count;
	      amt *= sizeof (Elf32_External_Rela);
	      external_relocs = (Elf32_External_Rela *) bfd_malloc (amt);
	      if (external_relocs == NULL)
		{
		  goto error_ret_free_local;
		}

	      /* Likewise for the internal relocations.  */
	      amt = section->reloc_count;
	      amt *= sizeof (Elf_Internal_Rela);
	      internal_relocs = (Elf_Internal_Rela *) bfd_malloc (amt);
	      if (internal_relocs == NULL)
		{
		  free (external_relocs);
		  goto error_ret_free_local;
		}

	      /* Read in the external relocs.  */
	      input_rel_hdr = &elf_section_data (section)->rel_hdr;
	      if (bfd_seek (input_bfd, input_rel_hdr->sh_offset, SEEK_SET) != 0
		  || bfd_bread ((PTR) external_relocs,
				input_rel_hdr->sh_size,
				input_bfd) != input_rel_hdr->sh_size)
		{
		  free (external_relocs);
		error_ret_free_internal:
		  free (internal_relocs);
		  goto error_ret_free_local;
		}

	      /* Swap in the relocs.  */
	      erela = external_relocs;
	      erelaend = erela + section->reloc_count;
	      irela = internal_relocs;
	      for (; erela < erelaend; erela++, irela++)
		bfd_elf32_swap_reloca_in (input_bfd, erela, irela);

	      /* We're done with the external relocs, free them.  */
	      free (external_relocs);

	      /* Now examine each relocation.  */
	      irela = internal_relocs;
	      irelaend = irela + section->reloc_count;
	      for (; irela < irelaend; irela++)
		{
		  unsigned int r_type, r_indx;
		  enum elf32_hppa_stub_type stub_type;
		  struct elf32_hppa_stub_hash_entry *stub_entry;
		  asection *sym_sec;
		  bfd_vma sym_value;
		  bfd_vma destination;
		  struct elf32_hppa_link_hash_entry *hash;
		  char *stub_name;
		  const asection *id_sec;

		  r_type = ELF32_R_TYPE (irela->r_info);
		  r_indx = ELF32_R_SYM (irela->r_info);

		  if (r_type >= (unsigned int) R_PARISC_UNIMPLEMENTED)
		    {
		      bfd_set_error (bfd_error_bad_value);
		      goto error_ret_free_internal;
		    }

		  /* Only look for stubs on call instructions.  */
		  if (r_type != (unsigned int) R_PARISC_PCREL12F
		      && r_type != (unsigned int) R_PARISC_PCREL17F
		      && r_type != (unsigned int) R_PARISC_PCREL22F)
		    continue;

		  /* Now determine the call target, its name, value,
		     section.  */
		  sym_sec = NULL;
		  sym_value = 0;
		  destination = 0;
		  hash = NULL;
		  if (r_indx < symtab_hdr->sh_info)
		    {
		      /* It's a local symbol.  */
		      Elf_Internal_Sym *sym;
		      Elf_Internal_Shdr *hdr;

		      sym = local_syms + r_indx;
		      hdr = elf_elfsections (input_bfd)[sym->st_shndx];
		      sym_sec = hdr->bfd_section;
		      /* For a section symbol the value is implicit (0);
			 the addend alone locates the target.  */
		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
			sym_value = sym->st_value;
		      destination = (sym_value + irela->r_addend
				     + sym_sec->output_offset
				     + sym_sec->output_section->vma);
		    }
		  else
		    {
		      /* It's an external symbol.  */
		      int e_indx;

		      e_indx = r_indx - symtab_hdr->sh_info;
		      hash = ((struct elf32_hppa_link_hash_entry *)
			      elf_sym_hashes (input_bfd)[e_indx]);

		      while (hash->elf.root.type == bfd_link_hash_indirect
			     || hash->elf.root.type == bfd_link_hash_warning)
			hash = ((struct elf32_hppa_link_hash_entry *)
				hash->elf.root.u.i.link);

		      if (hash->elf.root.type == bfd_link_hash_defined
			  || hash->elf.root.type == bfd_link_hash_defweak)
			{
			  sym_sec = hash->elf.root.u.def.section;
			  sym_value = hash->elf.root.u.def.value;
			  if (sym_sec->output_section != NULL)
			    destination = (sym_value + irela->r_addend
					   + sym_sec->output_offset
					   + sym_sec->output_section->vma);
			}
		      else if (hash->elf.root.type == bfd_link_hash_undefweak)
			{
			  /* Only a shared link may need a stub for an
			     undefined weak symbol.  */
			  if (! info->shared)
			    continue;
			}
		      else if (hash->elf.root.type == bfd_link_hash_undefined)
			{
			  /* Undefined syms only get stubs in a shared
			     link where the symbol could be resolved at
			     run time (and isn't a millicode sym).  */
			  if (! (info->shared
				 && !info->no_undefined
				 && (ELF_ST_VISIBILITY (hash->elf.other)
				     == STV_DEFAULT)
				 && hash->elf.type != STT_PARISC_MILLI))
			    continue;
			}
		      else
			{
			  bfd_set_error (bfd_error_bad_value);
			  goto error_ret_free_internal;
			}
		    }

		  /* Determine what (if any) linker stub is needed.  */
		  stub_type = hppa_type_of_stub (section, irela, hash,
						 destination);
		  if (stub_type == hppa_stub_none)
		    continue;

		  /* Support for grouping stub sections.  */
		  id_sec = htab->stub_group[section->id].link_sec;

		  /* Get the name of this stub.  */
		  stub_name = hppa_stub_name (id_sec, sym_sec, hash, irela);
		  if (!stub_name)
		    goto error_ret_free_internal;

		  stub_entry = hppa_stub_hash_lookup (&htab->stub_hash_table,
						      stub_name,
						      false, false);
		  if (stub_entry != NULL)
		    {
		      /* The proper stub has already been created.  */
		      free (stub_name);
		      continue;
		    }

		  stub_entry = hppa_add_stub (stub_name, section, htab);
		  if (stub_entry == NULL)
		    {
		      free (stub_name);
		      goto error_ret_free_local;
		    }

		  stub_entry->target_value = sym_value;
		  stub_entry->target_section = sym_sec;
		  stub_entry->stub_type = stub_type;
		  if (info->shared)
		    {
		      if (stub_type == hppa_stub_import)
			stub_entry->stub_type = hppa_stub_import_shared;
		      else if (stub_type == hppa_stub_long_branch)
			stub_entry->stub_type = hppa_stub_long_branch_shared;
		    }
		  stub_entry->h = hash;
		  stub_changed = 1;
		}

	      /* We're done with the internal relocs, free them.  */
	      free (internal_relocs);
	    }
	}

      if (!stub_changed)
	break;

      /* OK, we've added some stubs.  Find out the new size of the
	 stub sections.  */
      for (stub_sec = htab->stub_bfd->sections;
	   stub_sec != NULL;
	   stub_sec = stub_sec->next)
	{
	  stub_sec->_raw_size = 0;
	  stub_sec->_cooked_size = 0;
	}

      bfd_hash_traverse (&htab->stub_hash_table, hppa_size_one_stub, htab);

      /* Ask the linker to do its stuff.  */
      (*htab->layout_sections_again) ();
      stub_changed = 0;
    }

  /* Success path falls through to the same cleanup as the error path;
     only RET differs.  */
  ret = 1;

 error_ret_free_local:
  while (bfd_count-- > 0)
    if (all_local_syms[bfd_count])
      free (all_local_syms[bfd_count]);
  free (all_local_syms);

  return ret;
}
+
+/* For a final link, this function is called after we have sized the
+ stubs to provide a value for __gp. */
+
+boolean
+elf32_hppa_set_gp (abfd, info)
+ bfd *abfd;
+ struct bfd_link_info *info;
+{
+ struct elf32_hppa_link_hash_table *htab;
+ struct elf_link_hash_entry *h;
+ asection *sec;
+ bfd_vma gp_val;
+
+ htab = hppa_link_hash_table (info);
+ h = elf_link_hash_lookup (&htab->elf, "$global$", false, false, false);
+
+ if (h != NULL
+ && (h->root.type == bfd_link_hash_defined
+ || h->root.type == bfd_link_hash_defweak))
+ {
+ gp_val = h->root.u.def.value;
+ sec = h->root.u.def.section;
+ }
+ else
+ {
+ /* Choose to point our LTP at, in this order, one of .plt, .got,
+ or .data, if these sections exist. In the case of choosing
+ .plt try to make the LTP ideal for addressing anywhere in the
+ .plt or .got with a 14 bit signed offset. Typically, the end
+ of the .plt is the start of the .got, so choose .plt + 0x2000
+ if either the .plt or .got is larger than 0x2000. If both
+ the .plt and .got are smaller than 0x2000, choose the end of
+ the .plt section. */
+
+ sec = htab->splt;
+ if (sec != NULL)
+ {
+ gp_val = sec->_raw_size;
+ if (gp_val > 0x2000
+ || (htab->sgot && htab->sgot->_raw_size > 0x2000))
+ {
+ gp_val = 0x2000;
+ }
+ }
+ else
+ {
+ gp_val = 0;
+ sec = htab->sgot;
+ if (sec != NULL)
+ {
+ /* We know we don't have a .plt. If .got is large,
+ offset our LTP. */
+ if (sec->_raw_size > 0x2000)
+ gp_val = 0x2000;
+ }
+ else
+ {
+ /* No .plt or .got. Who cares what the LTP is? */
+ sec = bfd_get_section_by_name (abfd, ".data");
+ }
+ }
+
+ if (h != NULL)
+ {
+ h->root.type = bfd_link_hash_defined;
+ h->root.u.def.value = gp_val;
+ if (sec != NULL)
+ h->root.u.def.section = sec;
+ else
+ h->root.u.def.section = bfd_abs_section_ptr;
+ }
+ }
+
+ if (sec != NULL && sec->output_section != NULL)
+ gp_val += sec->output_section->vma + sec->output_offset;
+
+ elf_gp (abfd) = gp_val;
+ return true;
+}
+
+/* Build all the stubs associated with the current output file. The
+ stubs are kept in a hash table attached to the main linker hash
+ table. We also set up the .plt entries for statically linked PIC
+ functions here. This function is called via hppaelf_finish in the
+ linker. */
+
+boolean
+elf32_hppa_build_stubs (info)
+ struct bfd_link_info *info;
+{
+ asection *stub_sec;
+ struct bfd_hash_table *table;
+ struct elf32_hppa_link_hash_table *htab;
+
+ htab = hppa_link_hash_table (info);
+
+ for (stub_sec = htab->stub_bfd->sections;
+ stub_sec != NULL;
+ stub_sec = stub_sec->next)
+ {
+ bfd_size_type size;
+
+ /* Allocate memory to hold the linker stubs. */
+ size = stub_sec->_raw_size;
+ stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
+ if (stub_sec->contents == NULL && size != 0)
+ return false;
+ stub_sec->_raw_size = 0;
+ }
+
+ /* Build the stubs as directed by the stub hash table. */
+ table = &htab->stub_hash_table;
+ bfd_hash_traverse (table, hppa_build_one_stub, info);
+
+ return true;
+}
+
+/* Perform a final link. */
+
+static boolean
+elf32_hppa_final_link (abfd, info)
+ bfd *abfd;
+ struct bfd_link_info *info;
+{
+ asection *s;
+
+ /* Invoke the regular ELF linker to do all the work. */
+ if (!bfd_elf32_bfd_final_link (abfd, info))
+ return false;
+
+ /* If we're producing a final executable, sort the contents of the
+ unwind section. Magic section names, but this is much safer than
+ having elf32_hppa_relocate_section remember where SEGREL32 relocs
+ occurred. Consider what happens if someone inept creates a
+ linker script that puts unwind information in .text. */
+ s = bfd_get_section_by_name (abfd, ".PARISC.unwind");
+ if (s != NULL)
+ {
+ bfd_size_type size;
+ char *contents;
+
+ size = s->_raw_size;
+ contents = bfd_malloc (size);
+ if (contents == NULL)
+ return false;
+
+ if (! bfd_get_section_contents (abfd, s, contents, (file_ptr) 0, size))
+ return false;
+
+ qsort (contents, (size_t) (size / 16), 16, hppa_unwind_entry_compare);
+
+ if (! bfd_set_section_contents (abfd, s, contents, (file_ptr) 0, size))
+ return false;
+ }
+ return true;
+}
+
+/* Record the lowest address for the data and text segments. */
+
+static void
+hppa_record_segment_addr (abfd, section, data)
+ bfd *abfd ATTRIBUTE_UNUSED;
+ asection *section;
+ PTR data;
+{
+ struct elf32_hppa_link_hash_table *htab;
+
+ htab = (struct elf32_hppa_link_hash_table *) data;
+
+ if ((section->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD))
+ {
+ bfd_vma value = section->vma - section->filepos;
+
+ if ((section->flags & SEC_READONLY) != 0)
+ {
+ if (value < htab->text_segment_base)
+ htab->text_segment_base = value;
+ }
+ else
+ {
+ if (value < htab->data_segment_base)
+ htab->data_segment_base = value;
+ }
+ }
+}
+
+/* Perform a relocation as part of a final link. */
+
/* Perform a relocation as part of a final link.

   Patches the 32-bit instruction word at REL->r_offset in CONTENTS
   with VALUE (+ addend), after redirecting out-of-range or
   cross-library calls through linker stubs.  Returns a bfd_reloc
   status code.  */

static bfd_reloc_status_type
final_link_relocate (input_section, contents, rel, value, htab, sym_sec, h)
     asection *input_section;
     bfd_byte *contents;
     const Elf_Internal_Rela *rel;
     bfd_vma value;
     struct elf32_hppa_link_hash_table *htab;
     asection *sym_sec;
     struct elf32_hppa_link_hash_entry *h;
{
  int insn;
  unsigned int r_type = ELF32_R_TYPE (rel->r_info);
  reloc_howto_type *howto = elf_hppa_howto_table + r_type;
  int r_format = howto->bitsize;
  enum hppa_reloc_field_selector_type_alt r_field;
  bfd *input_bfd = input_section->owner;
  bfd_vma offset = rel->r_offset;
  bfd_vma max_branch_offset = 0;
  bfd_byte *hit_data = contents + offset;
  bfd_signed_vma addend = rel->r_addend;
  bfd_vma location;
  struct elf32_hppa_stub_hash_entry *stub_entry = NULL;
  int val;

  if (r_type == R_PARISC_NONE)
    return bfd_reloc_ok;

  insn = bfd_get_32 (input_bfd, hit_data);

  /* Find out where we are and where we're going.  */
  location = (offset +
	      input_section->output_offset +
	      input_section->output_section->vma);

  /* First switch: adjust VALUE/ADDEND according to relocation class
     (stub redirection, pc-relative conversion, gp-relative, etc.).  */
  switch (r_type)
    {
    case R_PARISC_PCREL12F:
    case R_PARISC_PCREL17F:
    case R_PARISC_PCREL22F:
      /* If this is a call to a function defined in another dynamic
	 library, or if it is a call to a PIC function in the same
	 object, or if this is a shared link and it is a call to a
	 weak symbol which may or may not be in the same object, then
	 find the import stub in the stub hash.  */
      if (sym_sec == NULL
	  || sym_sec->output_section == NULL
	  || (h != NULL
	      && ((h->maybe_pic_call
		   && !(input_section->flags & SEC_HAS_GOT_REF))
		  || (h->elf.root.type == bfd_link_hash_defweak
		      && h->elf.dynindx != -1
		      && h->elf.plt.offset != (bfd_vma) -1))))
	{
	  stub_entry = hppa_get_stub_entry (input_section, sym_sec,
					    h, rel, htab);
	  if (stub_entry != NULL)
	    {
	      /* Redirect the call to the stub.  */
	      value = (stub_entry->stub_offset
		       + stub_entry->stub_sec->output_offset
		       + stub_entry->stub_sec->output_section->vma);
	      addend = 0;
	    }
	  else if (sym_sec == NULL && h != NULL
		   && h->elf.root.type == bfd_link_hash_undefweak)
	    {
	      /* It's OK if undefined weak.  Calls to undefined weak
		 symbols behave as if the "called" function
		 immediately returns.  We can thus call to a weak
		 function without first checking whether the function
		 is defined.  */
	      value = location;
	      addend = 8;
	    }
	  else
	    return bfd_reloc_undefined;
	}
      /* Fall thru.  */

    case R_PARISC_PCREL21L:
    case R_PARISC_PCREL17C:
    case R_PARISC_PCREL17R:
    case R_PARISC_PCREL14R:
    case R_PARISC_PCREL14F:
      /* Make it a pc relative offset.  The -8 accounts for the PA
	 architecture's delayed branching: the PC used by a branch is
	 the instruction address plus 8.  */
      value -= location;
      addend -= 8;
      break;

    case R_PARISC_DPREL21L:
    case R_PARISC_DPREL14R:
    case R_PARISC_DPREL14F:
      /* For all the DP relative relocations, we need to examine the symbol's
	 section.  If it's a code section, then "data pointer relative" makes
	 no sense.  In that case we don't adjust the "value", and for 21 bit
	 addil instructions, we change the source addend register from %dp to
	 %r0.  This situation commonly arises when a variable's "constness"
	 is declared differently from the way the variable is defined.  For
	 instance: "extern int foo" with foo defined as "const int foo".  */
      if (sym_sec == NULL)
	break;
      if ((sym_sec->flags & SEC_CODE) != 0)
	{
	  /* Match "addil ...,%dp" (opcode in bits 31..26, base register
	     27 == %dp in bits 25..21) and rewrite the base to %r0.  */
	  if ((insn & ((0x3f << 26) | (0x1f << 21)))
	      == (((int) OP_ADDIL << 26) | (27 << 21)))
	    {
	      insn &= ~ (0x1f << 21);
#if 1 /* debug them.  */
	      (*_bfd_error_handler)
		(_("%s(%s+0x%lx): fixing %s"),
		 bfd_archive_filename (input_bfd),
		 input_section->name,
		 (long) rel->r_offset,
		 howto->name);
#endif
	    }
	  /* Now try to make things easy for the dynamic linker.  */

	  break;
	}
      /* Fall thru.  */

    case R_PARISC_DLTIND21L:
    case R_PARISC_DLTIND14R:
    case R_PARISC_DLTIND14F:
      /* DLT-indirect (and code-section DP-relative) values are offsets
	 from the global pointer.  */
      value -= elf_gp (input_section->output_section->owner);
      break;

    case R_PARISC_SEGREL32:
      /* NOTE(review): sym_sec is dereferenced without a NULL check
	 here, unlike the DPREL cases above — presumably SEGREL32
	 relocs always have a defined section; confirm.  */
      if ((sym_sec->flags & SEC_CODE) != 0)
	value -= htab->text_segment_base;
      else
	value -= htab->data_segment_base;
      break;

    default:
      break;
    }

  /* Second switch: choose the PA field selector for the howto format,
     and for branches set up range checking / local stub redirection.  */
  switch (r_type)
    {
    case R_PARISC_DIR32:
    case R_PARISC_DIR14F:
    case R_PARISC_DIR17F:
    case R_PARISC_PCREL17C:
    case R_PARISC_PCREL14F:
    case R_PARISC_DPREL14F:
    case R_PARISC_PLABEL32:
    case R_PARISC_DLTIND14F:
    case R_PARISC_SEGBASE:
    case R_PARISC_SEGREL32:
      r_field = e_fsel;
      break;

    case R_PARISC_DIR21L:
    case R_PARISC_PCREL21L:
    case R_PARISC_DPREL21L:
    case R_PARISC_PLABEL21L:
    case R_PARISC_DLTIND21L:
      r_field = e_lrsel;
      break;

    case R_PARISC_DIR17R:
    case R_PARISC_PCREL17R:
    case R_PARISC_DIR14R:
    case R_PARISC_PCREL14R:
    case R_PARISC_DPREL14R:
    case R_PARISC_PLABEL14R:
    case R_PARISC_DLTIND14R:
      r_field = e_rrsel;
      break;

    case R_PARISC_PCREL12F:
    case R_PARISC_PCREL17F:
    case R_PARISC_PCREL22F:
      r_field = e_fsel;

      /* Branch displacement field is 12, 17 or 22 bits of signed
	 word offset, hence (1 << (bits-1)) << 2 bytes of reach.  */
      if (r_type == (unsigned int) R_PARISC_PCREL17F)
	{
	  max_branch_offset = (1 << (17-1)) << 2;
	}
      else if (r_type == (unsigned int) R_PARISC_PCREL12F)
	{
	  max_branch_offset = (1 << (12-1)) << 2;
	}
      else
	{
	  max_branch_offset = (1 << (22-1)) << 2;
	}

      /* sym_sec is NULL on undefined weak syms or when shared on
	 undefined syms.  We've already checked for a stub for the
	 shared undefined case.  */
      if (sym_sec == NULL)
	break;

      /* If the branch is out of reach, then redirect the
	 call to the local stub for this function.  (Unsigned trick:
	 value+addend is in range iff it lies in
	 [-max_branch_offset, max_branch_offset).)  */
      if (value + addend + max_branch_offset >= 2*max_branch_offset)
	{
	  stub_entry = hppa_get_stub_entry (input_section, sym_sec,
					    h, rel, htab);
	  if (stub_entry == NULL)
	    return bfd_reloc_undefined;

	  /* Munge up the value and addend so that we call the stub
	     rather than the procedure directly.  */
	  value = (stub_entry->stub_offset
		   + stub_entry->stub_sec->output_offset
		   + stub_entry->stub_sec->output_section->vma
		   - location);
	  addend = -8;
	}
      break;

      /* Something we don't know how to handle.  */
    default:
      return bfd_reloc_notsupported;
    }

  /* Make sure we can reach the stub.  */
  if (max_branch_offset != 0
      && value + addend + max_branch_offset >= 2*max_branch_offset)
    {
      (*_bfd_error_handler)
	(_("%s(%s+0x%lx): cannot reach %s, recompile with -ffunction-sections"),
	 bfd_archive_filename (input_bfd),
	 input_section->name,
	 (long) rel->r_offset,
	 stub_entry->root.string);
      bfd_set_error (bfd_error_bad_value);
      return bfd_reloc_notsupported;
    }

  /* Apply the field selector to get the final insn field value.  */
  val = hppa_field_adjust (value, addend, r_field);

  switch (r_type)
    {
    case R_PARISC_PCREL12F:
    case R_PARISC_PCREL17C:
    case R_PARISC_PCREL17F:
    case R_PARISC_PCREL17R:
    case R_PARISC_PCREL22F:
    case R_PARISC_DIR17F:
    case R_PARISC_DIR17R:
      /* This is a branch.  Divide the offset by four.
	 Note that we need to decide whether it's a branch or
	 otherwise by inspecting the reloc.  Inspecting insn won't
	 work as insn might be from a .word directive.  */
      val >>= 2;
      break;

    default:
      break;
    }

  insn = hppa_rebuild_insn (insn, val, r_format);

  /* Update the instruction word.  */
  bfd_put_32 (input_bfd, (bfd_vma) insn, hit_data);
  return bfd_reloc_ok;
}
+
+/* Relocate an HPPA ELF section. */
+
+static boolean
+elf32_hppa_relocate_section (output_bfd, info, input_bfd, input_section,
+ contents, relocs, local_syms, local_sections)
+ bfd *output_bfd;
+ struct bfd_link_info *info;
+ bfd *input_bfd;
+ asection *input_section;
+ bfd_byte *contents;
+ Elf_Internal_Rela *relocs;
+ Elf_Internal_Sym *local_syms;
+ asection **local_sections;
+{
+ bfd_vma *local_got_offsets;
+ struct elf32_hppa_link_hash_table *htab;
+ Elf_Internal_Shdr *symtab_hdr;
+ Elf_Internal_Rela *rel;
+ Elf_Internal_Rela *relend;
+
+ symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
+
+ htab = hppa_link_hash_table (info);
+ local_got_offsets = elf_local_got_offsets (input_bfd);
+
+ rel = relocs;
+ relend = relocs + input_section->reloc_count;
+ for (; rel < relend; rel++)
+ {
+ unsigned int r_type;
+ reloc_howto_type *howto;
+ unsigned int r_symndx;
+ struct elf32_hppa_link_hash_entry *h;
+ Elf_Internal_Sym *sym;
+ asection *sym_sec;
+ bfd_vma relocation;
+ bfd_reloc_status_type r;
+ const char *sym_name;
+ boolean plabel;
+ boolean warned_undef;
+
+ r_type = ELF32_R_TYPE (rel->r_info);
+ if (r_type >= (unsigned int) R_PARISC_UNIMPLEMENTED)
+ {
+ bfd_set_error (bfd_error_bad_value);
+ return false;
+ }
+ if (r_type == (unsigned int) R_PARISC_GNU_VTENTRY
+ || r_type == (unsigned int) R_PARISC_GNU_VTINHERIT)
+ continue;
+
+ r_symndx = ELF32_R_SYM (rel->r_info);
+
+ if (info->relocateable)
+ {
+ /* This is a relocatable link. We don't have to change
+ anything, unless the reloc is against a section symbol,
+ in which case we have to adjust according to where the
+ section symbol winds up in the output section. */
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ sym = local_syms + r_symndx;
+ if (ELF_ST_TYPE (sym->st_info) == STT_SECTION)
+ {
+ sym_sec = local_sections[r_symndx];
+ rel->r_addend += sym_sec->output_offset;
+ }
+ }
+ continue;
+ }
+
+ /* This is a final link. */
+ h = NULL;
+ sym = NULL;
+ sym_sec = NULL;
+ warned_undef = false;
+ if (r_symndx < symtab_hdr->sh_info)
+ {
+ /* This is a local symbol, h defaults to NULL. */
+ sym = local_syms + r_symndx;
+ sym_sec = local_sections[r_symndx];
+ relocation = _bfd_elf_rela_local_sym (output_bfd, sym, sym_sec, rel);
+ }
+ else
+ {
+ int indx;
+
+ /* It's a global; Find its entry in the link hash. */
+ indx = r_symndx - symtab_hdr->sh_info;
+ h = ((struct elf32_hppa_link_hash_entry *)
+ elf_sym_hashes (input_bfd)[indx]);
+ while (h->elf.root.type == bfd_link_hash_indirect
+ || h->elf.root.type == bfd_link_hash_warning)
+ h = (struct elf32_hppa_link_hash_entry *) h->elf.root.u.i.link;
+
+ relocation = 0;
+ if (h->elf.root.type == bfd_link_hash_defined
+ || h->elf.root.type == bfd_link_hash_defweak)
+ {
+ sym_sec = h->elf.root.u.def.section;
+ /* If sym_sec->output_section is NULL, then it's a
+ symbol defined in a shared library. */
+ if (sym_sec->output_section != NULL)
+ relocation = (h->elf.root.u.def.value
+ + sym_sec->output_offset
+ + sym_sec->output_section->vma);
+ }
+ else if (h->elf.root.type == bfd_link_hash_undefweak)
+ ;
+ else if (info->shared && !info->no_undefined
+ && ELF_ST_VISIBILITY (h->elf.other) == STV_DEFAULT
+ && h->elf.type != STT_PARISC_MILLI)
+ {
+ if (info->symbolic && !info->allow_shlib_undefined)
+ {
+ if (!((*info->callbacks->undefined_symbol)
+ (info, h->elf.root.root.string, input_bfd,
+ input_section, rel->r_offset, false)))
+ return false;
+ warned_undef = true;
+ }
+ }
+ else
+ {
+ if (!((*info->callbacks->undefined_symbol)
+ (info, h->elf.root.root.string, input_bfd,
+ input_section, rel->r_offset, true)))
+ return false;
+ warned_undef = true;
+ }
+ }
+
+ /* Do any required modifications to the relocation value, and
+ determine what types of dynamic info we need to output, if
+ any. */
+ plabel = 0;
+ switch (r_type)
+ {
+ case R_PARISC_DLTIND14F:
+ case R_PARISC_DLTIND14R:
+ case R_PARISC_DLTIND21L:
+ {
+ bfd_vma off;
+ boolean do_got = 0;
+
+ /* Relocation is to the entry for this symbol in the
+ global offset table. */
+ if (h != NULL)
+ {
+ boolean dyn;
+
+ off = h->elf.got.offset;
+ dyn = htab->elf.dynamic_sections_created;
+ if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info, &h->elf))
+ {
+ /* If we aren't going to call finish_dynamic_symbol,
+ then we need to handle initialisation of the .got
+ entry and create needed relocs here. Since the
+ offset must always be a multiple of 4, we use the
+ least significant bit to record whether we have
+ initialised it already. */
+ if ((off & 1) != 0)
+ off &= ~1;
+ else
+ {
+ h->elf.got.offset |= 1;
+ do_got = 1;
+ }
+ }
+ }
+ else
+ {
+ /* Local symbol case. */
+ if (local_got_offsets == NULL)
+ abort ();
+
+ off = local_got_offsets[r_symndx];
+
+ /* The offset must always be a multiple of 4. We use
+ the least significant bit to record whether we have
+ already generated the necessary reloc. */
+ if ((off & 1) != 0)
+ off &= ~1;
+ else
+ {
+ local_got_offsets[r_symndx] |= 1;
+ do_got = 1;
+ }
+ }
+
+ if (do_got)
+ {
+ if (info->shared)
+ {
+ /* Output a dynamic relocation for this GOT entry.
+ In this case it is relative to the base of the
+ object because the symbol index is zero. */
+ Elf_Internal_Rela outrel;
+ asection *srelgot = htab->srelgot;
+ Elf32_External_Rela *loc;
+
+ outrel.r_offset = (off
+ + htab->sgot->output_offset
+ + htab->sgot->output_section->vma);
+ outrel.r_info = ELF32_R_INFO (0, R_PARISC_DIR32);
+ outrel.r_addend = relocation;
+ loc = (Elf32_External_Rela *) srelgot->contents;
+ loc += srelgot->reloc_count++;
+ bfd_elf32_swap_reloca_out (output_bfd, &outrel, loc);
+ }
+ else
+ bfd_put_32 (output_bfd, relocation,
+ htab->sgot->contents + off);
+ }
+
+ if (off >= (bfd_vma) -2)
+ abort ();
+
+ /* Add the base of the GOT to the relocation value. */
+ relocation = (off
+ + htab->sgot->output_offset
+ + htab->sgot->output_section->vma);
+ }
+ break;
+
+ case R_PARISC_SEGREL32:
+ /* If this is the first SEGREL relocation, then initialize
+ the segment base values. */
+ if (htab->text_segment_base == (bfd_vma) -1)
+ bfd_map_over_sections (output_bfd, hppa_record_segment_addr, htab);
+ break;
+
+ case R_PARISC_PLABEL14R:
+ case R_PARISC_PLABEL21L:
+ case R_PARISC_PLABEL32:
+ if (htab->elf.dynamic_sections_created)
+ {
+ bfd_vma off;
+ boolean do_plt = 0;
+
+ /* If we have a global symbol with a PLT slot, then
+ redirect this relocation to it. */
+ if (h != NULL)
+ {
+ off = h->elf.plt.offset;
+ if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, info, &h->elf))
+ {
+ /* In a non-shared link, adjust_dynamic_symbol
+ isn't called for symbols forced local. We
+ need to write out the plt entry here. */
+ if ((off & 1) != 0)
+ off &= ~1;
+ else
+ {
+ h->elf.plt.offset |= 1;
+ do_plt = 1;
+ }
+ }
+ }
+ else
+ {
+ bfd_vma *local_plt_offsets;
+
+ if (local_got_offsets == NULL)
+ abort ();
+
+ local_plt_offsets = local_got_offsets + symtab_hdr->sh_info;
+ off = local_plt_offsets[r_symndx];
+
+ /* As for the local .got entry case, we use the least
+ significant bit to record whether we've already initialised
+ this local .plt entry. */
+ if ((off & 1) != 0)
+ off &= ~1;
+ else
+ {
+ local_plt_offsets[r_symndx] |= 1;
+ do_plt = 1;
+ }
+ }
+
+ if (do_plt)
+ {
+ if (info->shared)
+ {
+ /* Output a dynamic IPLT relocation for this
+ PLT entry. */
+ Elf_Internal_Rela outrel;
+ asection *srelplt = htab->srelplt;
+ Elf32_External_Rela *loc;
+
+ outrel.r_offset = (off
+ + htab->splt->output_offset
+ + htab->splt->output_section->vma);
+ outrel.r_info = ELF32_R_INFO (0, R_PARISC_IPLT);
+ outrel.r_addend = relocation;
+ loc = (Elf32_External_Rela *) srelplt->contents;
+ loc += srelplt->reloc_count++;
+ bfd_elf32_swap_reloca_out (output_bfd, &outrel, loc);
+ }
+ else
+ {
+ bfd_put_32 (output_bfd,
+ relocation,
+ htab->splt->contents + off);
+ bfd_put_32 (output_bfd,
+ elf_gp (htab->splt->output_section->owner),
+ htab->splt->contents + off + 4);
+ }
+ }
+
+ if (off >= (bfd_vma) -2)
+ abort ();
+
+ /* PLABELs contain function pointers. Relocation is to
+ the entry for the function in the .plt. The magic +2
+ offset signals to $$dyncall that the function pointer
+ is in the .plt and thus has a gp pointer too.
+ Exception: Undefined PLABELs should have a value of
+ zero. */
+ if (h == NULL
+ || (h->elf.root.type != bfd_link_hash_undefweak
+ && h->elf.root.type != bfd_link_hash_undefined))
+ {
+ relocation = (off
+ + htab->splt->output_offset
+ + htab->splt->output_section->vma
+ + 2);
+ }
+ plabel = 1;
+ }
+ /* Fall through and possibly emit a dynamic relocation. */
+
+ case R_PARISC_DIR17F:
+ case R_PARISC_DIR17R:
+ case R_PARISC_DIR14F:
+ case R_PARISC_DIR14R:
+ case R_PARISC_DIR21L:
+ case R_PARISC_DPREL14F:
+ case R_PARISC_DPREL14R:
+ case R_PARISC_DPREL21L:
+ case R_PARISC_DIR32:
+ /* r_symndx will be zero only for relocs against symbols
+ from removed linkonce sections, or sections discarded by
+ a linker script. */
+ if (r_symndx == 0
+ || (input_section->flags & SEC_ALLOC) == 0)
+ break;
+
+ /* The reloc types handled here and this conditional
+ expression must match the code in ..check_relocs and
+ allocate_dynrelocs. ie. We need exactly the same condition
+ as in ..check_relocs, with some extra conditions (dynindx
+ test in this case) to cater for relocs removed by
+ allocate_dynrelocs. If you squint, the non-shared test
+ here does indeed match the one in ..check_relocs, the
+ difference being that here we test DEF_DYNAMIC as well as
+ !DEF_REGULAR. All common syms end up with !DEF_REGULAR,
+ which is why we can't use just that test here.
+ Conversely, DEF_DYNAMIC can't be used in check_relocs as
+ there all files have not been loaded. */
+ if ((info->shared
+ && (IS_ABSOLUTE_RELOC (r_type)
+ || (h != NULL
+ && h->elf.dynindx != -1
+ && (!info->symbolic
+ || (h->elf.elf_link_hash_flags
+ & ELF_LINK_HASH_DEF_REGULAR) == 0))))
+ || (!info->shared
+ && h != NULL
+ && h->elf.dynindx != -1
+ && (h->elf.elf_link_hash_flags & ELF_LINK_NON_GOT_REF) == 0
+ && (((h->elf.elf_link_hash_flags
+ & ELF_LINK_HASH_DEF_DYNAMIC) != 0
+ && (h->elf.elf_link_hash_flags
+ & ELF_LINK_HASH_DEF_REGULAR) == 0)
+ || h->elf.root.type == bfd_link_hash_undefweak
+ || h->elf.root.type == bfd_link_hash_undefined)))
+ {
+ Elf_Internal_Rela outrel;
+ boolean skip;
+ asection *sreloc;
+ Elf32_External_Rela *loc;
+
+ /* When generating a shared object, these relocations
+ are copied into the output file to be resolved at run
+ time. */
+
+ outrel.r_addend = rel->r_addend;
+ outrel.r_offset =
+ _bfd_elf_section_offset (output_bfd, info, input_section,
+ rel->r_offset);
+ skip = (outrel.r_offset == (bfd_vma) -1);
+ outrel.r_offset += (input_section->output_offset
+ + input_section->output_section->vma);
+
+ if (skip)
+ {
+ memset (&outrel, 0, sizeof (outrel));
+ }
+ else if (h != NULL
+ && h->elf.dynindx != -1
+ && (plabel
+ || !IS_ABSOLUTE_RELOC (r_type)
+ || !info->shared
+ || !info->symbolic
+ || (h->elf.elf_link_hash_flags
+ & ELF_LINK_HASH_DEF_REGULAR) == 0))
+ {
+ outrel.r_info = ELF32_R_INFO (h->elf.dynindx, r_type);
+ }
+ else /* It's a local symbol, or one marked to become local. */
+ {
+ int indx = 0;
+
+ /* Add the absolute offset of the symbol. */
+ outrel.r_addend += relocation;
+
+ /* Global plabels need to be processed by the
+ dynamic linker so that functions have at most one
+ fptr. For this reason, we need to differentiate
+ between global and local plabels, which we do by
+ providing the function symbol for a global plabel
+ reloc, and no symbol for local plabels. */
+ if (! plabel
+ && sym_sec != NULL
+ && sym_sec->output_section != NULL
+ && ! bfd_is_abs_section (sym_sec))
+ {
+ indx = elf_section_data (sym_sec->output_section)->dynindx;
+ /* We are turning this relocation into one
+ against a section symbol, so subtract out the
+ output section's address but not the offset
+ of the input section in the output section. */
+ outrel.r_addend -= sym_sec->output_section->vma;
+ }
+
+ outrel.r_info = ELF32_R_INFO (indx, r_type);
+ }
+#if 0
+ /* EH info can cause unaligned DIR32 relocs.
+ Tweak the reloc type for the dynamic linker. */
+ if (r_type == R_PARISC_DIR32 && (outrel.r_offset & 3) != 0)
+ outrel.r_info = ELF32_R_INFO (ELF32_R_SYM (outrel.r_info),
+ R_PARISC_DIR32U);
+#endif
+ sreloc = elf_section_data (input_section)->sreloc;
+ if (sreloc == NULL)
+ abort ();
+
+ loc = (Elf32_External_Rela *) sreloc->contents;
+ loc += sreloc->reloc_count++;
+ bfd_elf32_swap_reloca_out (output_bfd, &outrel, loc);
+ }
+ break;