if (num_group != (unsigned) -1)
{
- unsigned int i;
+ unsigned int search_offset = elf_tdata (abfd)->group_search_offset;
+ unsigned int j;
- for (i = 0; i < num_group; i++)
+ for (j = 0; j < num_group; j++)
{
+ /* Begin search from previously found group. */
+ unsigned i = (j + search_offset) % num_group;
+
Elf_Internal_Shdr *shdr = elf_tdata (abfd)->group_sect_ptr[i];
Elf_Internal_Group *idx;
bfd_size_type n_elt;
if (shdr->bfd_section != NULL)
elf_next_in_group (shdr->bfd_section) = newsect;
- i = num_group - 1;
+ elf_tdata (abfd)->group_search_offset = i;
+ j = num_group - 1;
break;
}
}
asection **hdrpp;
bfd_boolean phdr_in_segment = TRUE;
bfd_boolean writable;
+ bfd_boolean executable;
int tls_count = 0;
asection *first_tls = NULL;
asection *first_mbind = NULL;
if (maxpagesize == 0)
maxpagesize = 1;
writable = FALSE;
+ executable = FALSE;
dynsec = bfd_get_section_by_name (abfd, ".dynamic");
if (dynsec != NULL
&& (dynsec->flags & SEC_LOAD) == 0)
the previous section, then we need a new segment. */
new_segment = TRUE;
}
+ else if ((abfd->flags & D_PAGED) != 0
+ && (((last_hdr->lma + last_size - 1) & -maxpagesize)
+ == (hdr->lma & -maxpagesize)))
+ {
+ /* If we are demand paged then we can't map two disk
+ pages onto the same memory page. */
+ new_segment = FALSE;
+ }
/* In the next test we have to be careful when last_hdr->lma is close
to the end of the address space. If the aligned address wraps
around to the start of the address space, then there are no more
pages left in memory and it is OK to assume that the current
section can be included in the current segment. */
- else if ((BFD_ALIGN (last_hdr->lma + last_size, maxpagesize) + maxpagesize
- > last_hdr->lma)
- && (BFD_ALIGN (last_hdr->lma + last_size, maxpagesize) + maxpagesize
- <= hdr->lma))
+ else if ((BFD_ALIGN (last_hdr->lma + last_size, maxpagesize)
+ + maxpagesize > last_hdr->lma)
+ && (BFD_ALIGN (last_hdr->lma + last_size, maxpagesize)
+ + maxpagesize <= hdr->lma))
{
/* If putting this section in this segment would force us to
skip a page in the segment, then we need a new segment. */
new_segment = TRUE;
}
else if ((last_hdr->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) == 0
- && (hdr->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != 0
- && ((abfd->flags & D_PAGED) == 0
- || (((last_hdr->lma + last_size - 1) & -maxpagesize)
- != (hdr->lma & -maxpagesize))))
+ && (hdr->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != 0)
{
/* We don't want to put a loaded section after a
nonloaded (ie. bss style) section in the same segment
as that will force the non-loaded section to be loaded.
- Consider .tbss sections as loaded for this purpose.
- However, like the writable/non-writable case below,
- if they are on the same page then they must be put
- in the same segment. */
+ Consider .tbss sections as loaded for this purpose. */
new_segment = TRUE;
}
else if ((abfd->flags & D_PAGED) == 0)
file, then there is no other reason for a new segment. */
new_segment = FALSE;
}
+ else if (info != NULL
+ && info->separate_code
+ && executable != ((hdr->flags & SEC_CODE) != 0))
+ {
+ new_segment = TRUE;
+ }
else if (! writable
- && (hdr->flags & SEC_READONLY) == 0
- && (((last_hdr->lma + last_size - 1) & -maxpagesize)
- != (hdr->lma & -maxpagesize)))
+ && (hdr->flags & SEC_READONLY) == 0)
{
/* We don't want to put a writable section in a read only
- segment, unless they are on the same page in memory
- anyhow. We already know that the last section does not
- bring us past the current section on the page, so the
- only case in which the new section is not on the same
- page as the previous section is when the previous section
- ends precisely on a page boundary. */
+ segment. */
new_segment = TRUE;
}
else
{
if ((hdr->flags & SEC_READONLY) == 0)
writable = TRUE;
+ if ((hdr->flags & SEC_CODE) != 0)
+ executable = TRUE;
last_hdr = hdr;
/* .tbss sections effectively have zero size. */
if ((hdr->flags & (SEC_THREAD_LOCAL | SEC_LOAD))
else
writable = FALSE;
+ if ((hdr->flags & SEC_CODE) == 0)
+ executable = FALSE;
+ else
+ executable = TRUE;
+
last_hdr = hdr;
/* .tbss sections effectively have zero size. */
if ((hdr->flags & (SEC_THREAD_LOCAL | SEC_LOAD)) != SEC_THREAD_LOCAL)
{
if (p->p_type == PT_GNU_RELRO)
{
- const Elf_Internal_Phdr *lp;
- struct elf_segment_map *lm;
+ bfd_vma start, end;
if (link_info != NULL)
{
/* During linking the range of the RELRO segment is passed
- in link_info. */
+ in link_info. Note that there may be padding between
+ relro_start and the first RELRO section. */
+ start = link_info->relro_start;
+ end = link_info->relro_end;
+ }
+ else if (m->count != 0)
+ {
+ if (!m->p_size_valid)
+ abort ();
+ start = m->sections[0]->vma;
+ end = start + m->p_size;
+ }
+ else
+ {
+ start = 0;
+ end = 0;
+ }
+
+ if (start < end)
+ {
+ struct elf_segment_map *lm;
+ const Elf_Internal_Phdr *lp;
+ unsigned int i;
+
+ /* Find a LOAD segment containing a section in the RELRO
+ segment. */
for (lm = elf_seg_map (abfd), lp = phdrs;
lm != NULL;
lm = lm->next, lp++)
{
if (lp->p_type == PT_LOAD
- && lp->p_vaddr < link_info->relro_end
+ && lp->p_memsz != 0
&& lm->count != 0
- && lm->sections[0]->vma >= link_info->relro_start)
+ && lm->sections[lm->count - 1]->vma >= start
+ && lm->sections[0]->vma < end)
break;
}
-
BFD_ASSERT (lm != NULL);
- }
- else
- {
- /* Otherwise we are copying an executable or shared
- library, but we need to use the same linker logic. */
- for (lp = phdrs; lp < phdrs + count; ++lp)
+
+ /* Find the section starting the RELRO segment. */
+ for (i = 0; i < lm->count; i++)
{
- if (lp->p_type == PT_LOAD
- && lp->p_paddr == p->p_paddr)
+ asection *s = lm->sections[i];
+ if (s->vma >= start
+ && s->vma < end
+ && s->size != 0)
break;
}
- }
+ BFD_ASSERT (i < lm->count);
+
+ p->p_vaddr = lm->sections[i]->vma;
+ p->p_paddr = lm->sections[i]->lma;
+ p->p_offset = lm->sections[i]->filepos;
+ p->p_memsz = end - p->p_vaddr;
+ p->p_filesz = p->p_memsz;
+
+ /* The RELRO segment typically ends a few bytes into
+ .got.plt but other layouts are possible. In cases
+ where the end does not match any loaded section (for
+ instance it lies in file padding), trim p_filesz back to
+ correspond to the end of loaded section contents. */
+ if (p->p_filesz > lp->p_vaddr + lp->p_filesz - p->p_vaddr)
+ p->p_filesz = lp->p_vaddr + lp->p_filesz - p->p_vaddr;
- if (lp < phdrs + count)
- {
- p->p_vaddr = lp->p_vaddr;
- p->p_paddr = lp->p_paddr;
- p->p_offset = lp->p_offset;
- if (link_info != NULL)
- p->p_filesz = link_info->relro_end - lp->p_vaddr;
- else if (m->p_size_valid)
- p->p_filesz = m->p_size;
- else
- abort ();
- p->p_memsz = p->p_filesz;
/* Preserve the alignment and flags if they are valid. The
gold linker generates RW/4 for the PT_GNU_RELRO section.
It is better for objcopy/strip to honor these attributes
changed or the programs updated. */
if (alloc > 1
&& tdata->phdr[0].p_type == PT_PHDR
- && ! bed->elf_backend_allow_non_load_phdr (abfd, tdata->phdr, alloc)
+ && (bed->elf_backend_allow_non_load_phdr == NULL
+ || !bed->elf_backend_allow_non_load_phdr (abfd, tdata->phdr,
+ alloc))
&& tdata->phdr[1].p_type == PT_LOAD
&& (tdata->phdr[1].p_vaddr > tdata->phdr[0].p_vaddr
|| (tdata->phdr[1].p_vaddr + tdata->phdr[1].p_memsz)
= _bfd_elf_strtab_offset (elf_shstrtab (abfd),
i_shdrp[count]->sh_name);
if (bed->elf_backend_section_processing)
- (*bed->elf_backend_section_processing) (abfd, i_shdrp[count]);
+ if (!(*bed->elf_backend_section_processing) (abfd, i_shdrp[count]))
+ return FALSE;
if (i_shdrp[count]->contents)
{
bfd_size_type amt = i_shdrp[count]->sh_size;
but the SHT_GROUP section is, then adjust its size. */
else if (s->output_section == discarded
&& isec->output_section != discarded)
- removed += 4;
+ {
+ struct bfd_elf_section_data *elf_sec = elf_section_data (s);
+ removed += 4;
+ if (elf_sec->rel.hdr != NULL
+ && (elf_sec->rel.hdr->sh_flags & SHF_GROUP) != 0)
+ removed += 4;
+ if (elf_sec->rela.hdr != NULL
+ && (elf_sec->rela.hdr->sh_flags & SHF_GROUP) != 0)
+ removed += 4;
+ }
s = elf_next_in_group (s);
if (s == first)
break;
if (discarded != NULL)
{
/* If we've been called for ld -r, then we need to
- adjust the input section size. This function may
- be called multiple times, so save the original
- size. */
+ adjust the input section size. */
if (isec->rawsize == 0)
isec->rawsize = isec->size;
isec->size = isec->rawsize - removed;
+ if (isec->size <= 4)
+ {
+ isec->size = 0;
+ isec->flags |= SEC_EXCLUDE;
+ }
}
else
{
/* Adjust the output section size when called from
objcopy. */
isec->output_section->size -= removed;
+ if (isec->output_section->size <= 4)
+ {
+ isec->output_section->size = 0;
+ isec->output_section->flags |= SEC_EXCLUDE;
+ }
}
}
}
else
return TRUE;
+ case NT_FREEBSD_PROCSTAT_PROC:
+ return elfcore_make_note_pseudosection (abfd, ".note.freebsdcore.proc",
+ note);
+
+ case NT_FREEBSD_PROCSTAT_FILES:
+ return elfcore_make_note_pseudosection (abfd, ".note.freebsdcore.files",
+ note);
+
+ case NT_FREEBSD_PROCSTAT_VMMAP:
+ return elfcore_make_note_pseudosection (abfd, ".note.freebsdcore.vmmap",
+ note);
+
case NT_FREEBSD_PROCSTAT_AUXV:
{
asection *sect = bfd_make_section_anyway_with_flags (abfd, ".auxv",
align is less than 4, we use 4 byte alignment. */
if (align < 4)
align = 4;
+ if (align != 4 && align != 8)
+ return FALSE;
p = buf;
while (p < buf + size)