/* BFD back-end for HP PA-RISC ELF files.
Copyright 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1999, 2000, 2001,
2002, 2003 Free Software Foundation, Inc.
Original code by
Center for Software Science
Department of Computer Science
University of Utah
Largely rewritten by Alan Modra <alan@linuxcare.com.au>
This file is part of BFD, the Binary File Descriptor library.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */
#include "bfd.h"
#include "sysdep.h"
#include "libbfd.h"
#include "elf-bfd.h"
#include "elf/hppa.h"
#include "libhppa.h"
#include "elf32-hppa.h"
#define ARCH_SIZE 32
#include "elf32-hppa.h"
#include "elf-hppa.h"
/* In order to gain some understanding of code in this file without
knowing all the intricate details of the linker, note the
following:
Functions named elf32_hppa_* are called by external routines, other
functions are only called locally. elf32_hppa_* functions appear
in this file more or less in the order in which they are called
from external routines, e.g. elf32_hppa_check_relocs is called
early in the link process, elf32_hppa_finish_dynamic_sections is
one of the last functions. */
/* We use two hash tables to hold information for linking PA ELF objects.
The first is the elf32_hppa_link_hash_table which is derived
from the standard ELF linker hash table. We use this as a place to
attach other hash tables and static information.
The second is the stub hash table which is derived from the
base BFD hash table. The stub hash table holds the information
necessary to build the linker stubs during a link.
There are a number of different stubs generated by the linker.
Long branch stub:
: ldil LR'X,%r1
: be,n RR'X(%sr4,%r1)
PIC long branch stub:
: b,l .+8,%r1
: addil LR'X - ($PIC_pcrel$0 - 4),%r1
: be,n RR'X - ($PIC_pcrel$0 - 8)(%sr4,%r1)
Import stub to call shared library routine from normal object file
(single sub-space version)
: addil LR'lt_ptr+ltoff,%dp ; get procedure entry point
: ldw RR'lt_ptr+ltoff(%r1),%r21
: bv %r0(%r21)
: ldw RR'lt_ptr+ltoff+4(%r1),%r19 ; get new dlt value.
Import stub to call shared library routine from shared library
(single sub-space version)
: addil LR'ltoff,%r19 ; get procedure entry point
: ldw RR'ltoff(%r1),%r21
: bv %r0(%r21)
: ldw RR'ltoff+4(%r1),%r19 ; get new dlt value.
Import stub to call shared library routine from normal object file
(multiple sub-space support)
: addil LR'lt_ptr+ltoff,%dp ; get procedure entry point
: ldw RR'lt_ptr+ltoff(%r1),%r21
: ldw RR'lt_ptr+ltoff+4(%r1),%r19 ; get new dlt value.
: ldsid (%r21),%r1
: mtsp %r1,%sr0
: be 0(%sr0,%r21) ; branch to target
: stw %rp,-24(%sp) ; save rp
Import stub to call shared library routine from shared library
(multiple sub-space support)
: addil LR'ltoff,%r19 ; get procedure entry point
: ldw RR'ltoff(%r1),%r21
: ldw RR'ltoff+4(%r1),%r19 ; get new dlt value.
: ldsid (%r21),%r1
: mtsp %r1,%sr0
: be 0(%sr0,%r21) ; branch to target
: stw %rp,-24(%sp) ; save rp
Export stub to return from shared lib routine (multiple sub-space support)
One of these is created for each exported procedure in a shared
library (and stored in the shared lib). Shared lib routines are
called via the first instruction in the export stub so that we can
do an inter-space return. Not required for single sub-space.
: bl,n X,%rp ; trap the return
: nop
: ldw -24(%sp),%rp ; restore the original rp
: ldsid (%rp),%r1
: mtsp %r1,%sr0
: be,n 0(%sr0,%rp) ; inter-space return. */
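/* For reference, the stub sizes implied by the sequences above (and
used by hppa_size_one_stub below) are: 8 bytes for a long branch stub,
12 for a PIC long branch stub, 16 for a single sub-space import stub,
28 for a multiple sub-space import stub, and 24 for an export stub. */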
#define PLT_ENTRY_SIZE 8
#define GOT_ENTRY_SIZE 4
#define ELF_DYNAMIC_INTERPRETER "/lib/ld.so.1"
static const bfd_byte plt_stub[] =
{
0x0e, 0x80, 0x10, 0x96, /* 1: ldw 0(%r20),%r22 */
0xea, 0xc0, 0xc0, 0x00, /* bv %r0(%r22) */
0x0e, 0x88, 0x10, 0x95, /* ldw 4(%r20),%r21 */
#define PLT_STUB_ENTRY (3*4)
0xea, 0x9f, 0x1f, 0xdd, /* b,l 1b,%r20 */
0xd6, 0x80, 0x1c, 0x1e, /* depi 0,31,2,%r20 */
0x00, 0xc0, 0xff, 0xee, /* 9: .word fixup_func */
0xde, 0xad, 0xbe, 0xef /* .word fixup_ltp */
};
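/* Roughly, the sequence above works as follows: a branch to the "b,l"
at offset PLT_STUB_ENTRY leaves the address of the fixup_func word in
%r20 (b,l stores the address 8 bytes past itself, and the depi clears
the two privilege-level bits), after which the code at label 1 loads
fixup_func into %r22 and fixup_ltp into %r21 and branches to the fixup
routine to resolve the call. */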
/* Section name for stubs is the associated section name plus this
string. */
#define STUB_SUFFIX ".stub"
/* We don't need to copy certain PC- or GP-relative dynamic relocs
into a shared object's dynamic section. All the relocs of the
limited class we are interested in are absolute. */
#ifndef RELATIVE_DYNRELOCS
#define RELATIVE_DYNRELOCS 0
#define IS_ABSOLUTE_RELOC(r_type) 1
#endif
enum elf32_hppa_stub_type {
hppa_stub_long_branch,
hppa_stub_long_branch_shared,
hppa_stub_import,
hppa_stub_import_shared,
hppa_stub_export,
hppa_stub_none
};
struct elf32_hppa_stub_hash_entry {
/* Base hash table entry structure. */
struct bfd_hash_entry root;
/* The stub section. */
asection *stub_sec;
/* Offset within stub_sec of the beginning of this stub. */
bfd_vma stub_offset;
/* Given the symbol's value and its section we can determine its final
value when building the stubs (so the stub knows where to jump). */
bfd_vma target_value;
asection *target_section;
enum elf32_hppa_stub_type stub_type;
/* The symbol table entry, if any, that this was derived from. */
struct elf32_hppa_link_hash_entry *h;
/* Where this stub is being called from, or, in the case of combined
stub sections, the first input section in the group. */
asection *id_sec;
};
struct elf32_hppa_link_hash_entry {
struct elf_link_hash_entry elf;
/* A pointer to the most recently used stub hash entry against this
symbol. */
struct elf32_hppa_stub_hash_entry *stub_cache;
/* Used to count relocations for delayed sizing of relocation
sections. */
struct elf32_hppa_dyn_reloc_entry {
/* Next relocation in the chain. */
struct elf32_hppa_dyn_reloc_entry *next;
/* The input section of the reloc. */
asection *sec;
/* Number of relocs copied in this section. */
bfd_size_type count;
#if RELATIVE_DYNRELOCS
/* Number of relative relocs copied for the input section. */
bfd_size_type relative_count;
#endif
} *dyn_relocs;
/* Set if the only reason we need a .plt entry is for a non-PIC to
PIC function call. */
unsigned int pic_call:1;
/* Set if this symbol is used by a plabel reloc. */
unsigned int plabel:1;
};
struct elf32_hppa_link_hash_table {
/* The main hash table. */
struct elf_link_hash_table elf;
/* The stub hash table. */
struct bfd_hash_table stub_hash_table;
/* Linker stub bfd. */
bfd *stub_bfd;
/* Linker call-backs. */
asection * (*add_stub_section) PARAMS ((const char *, asection *));
void (*layout_sections_again) PARAMS ((void));
/* Array to keep track of which stub sections have been created, and
information on stub grouping. */
struct map_stub {
/* This is the section to which stubs in the group will be
attached. */
asection *link_sec;
/* The stub section. */
asection *stub_sec;
} *stub_group;
/* Assorted information used by elf32_hppa_size_stubs. */
unsigned int bfd_count;
int top_index;
asection **input_list;
Elf_Internal_Sym **all_local_syms;
/* Short-cuts to get to dynamic linker sections. */
asection *sgot;
asection *srelgot;
asection *splt;
asection *srelplt;
asection *sdynbss;
asection *srelbss;
/* Used during a final link to store the base of the text and data
segments so that we can perform SEGREL relocations. */
bfd_vma text_segment_base;
bfd_vma data_segment_base;
/* Whether we support multiple sub-spaces for shared libs. */
unsigned int multi_subspace:1;
/* Flags set when various size branches are detected. Used to
select suitable defaults for the stub group size. */
unsigned int has_12bit_branch:1;
unsigned int has_17bit_branch:1;
unsigned int has_22bit_branch:1;
/* Set if we need a .plt stub to support lazy dynamic linking. */
unsigned int need_plt_stub:1;
/* Small local sym to section mapping cache. */
struct sym_sec_cache sym_sec;
};
/* Various hash macros and functions. */
#define hppa_link_hash_table(p) \
((struct elf32_hppa_link_hash_table *) ((p)->hash))
#define hppa_stub_hash_lookup(table, string, create, copy) \
((struct elf32_hppa_stub_hash_entry *) \
bfd_hash_lookup ((table), (string), (create), (copy)))
static struct bfd_hash_entry *stub_hash_newfunc
PARAMS ((struct bfd_hash_entry *, struct bfd_hash_table *, const char *));
static struct bfd_hash_entry *hppa_link_hash_newfunc
PARAMS ((struct bfd_hash_entry *, struct bfd_hash_table *, const char *));
static struct bfd_link_hash_table *elf32_hppa_link_hash_table_create
PARAMS ((bfd *));
static void elf32_hppa_link_hash_table_free
PARAMS ((struct bfd_link_hash_table *));
/* Stub handling functions. */
static char *hppa_stub_name
PARAMS ((const asection *, const asection *,
const struct elf32_hppa_link_hash_entry *,
const Elf_Internal_Rela *));
static struct elf32_hppa_stub_hash_entry *hppa_get_stub_entry
PARAMS ((const asection *, const asection *,
struct elf32_hppa_link_hash_entry *,
const Elf_Internal_Rela *,
struct elf32_hppa_link_hash_table *));
static struct elf32_hppa_stub_hash_entry *hppa_add_stub
PARAMS ((const char *, asection *, struct elf32_hppa_link_hash_table *));
static enum elf32_hppa_stub_type hppa_type_of_stub
PARAMS ((asection *, const Elf_Internal_Rela *,
struct elf32_hppa_link_hash_entry *, bfd_vma));
static bfd_boolean hppa_build_one_stub
PARAMS ((struct bfd_hash_entry *, PTR));
static bfd_boolean hppa_size_one_stub
PARAMS ((struct bfd_hash_entry *, PTR));
/* BFD and elf backend functions. */
static bfd_boolean elf32_hppa_object_p PARAMS ((bfd *));
static bfd_boolean elf32_hppa_add_symbol_hook
PARAMS ((bfd *, struct bfd_link_info *, const Elf_Internal_Sym *,
const char **, flagword *, asection **, bfd_vma *));
static bfd_boolean elf32_hppa_create_dynamic_sections
PARAMS ((bfd *, struct bfd_link_info *));
static void elf32_hppa_copy_indirect_symbol
PARAMS ((struct elf_backend_data *, struct elf_link_hash_entry *,
struct elf_link_hash_entry *));
static bfd_boolean elf32_hppa_check_relocs
PARAMS ((bfd *, struct bfd_link_info *,
asection *, const Elf_Internal_Rela *));
static asection *elf32_hppa_gc_mark_hook
PARAMS ((asection *, struct bfd_link_info *, Elf_Internal_Rela *,
struct elf_link_hash_entry *, Elf_Internal_Sym *));
static bfd_boolean elf32_hppa_gc_sweep_hook
PARAMS ((bfd *, struct bfd_link_info *,
asection *, const Elf_Internal_Rela *));
static void elf32_hppa_hide_symbol
PARAMS ((struct bfd_link_info *, struct elf_link_hash_entry *, bfd_boolean));
static bfd_boolean elf32_hppa_adjust_dynamic_symbol
PARAMS ((struct bfd_link_info *, struct elf_link_hash_entry *));
static bfd_boolean mark_PIC_calls
PARAMS ((struct elf_link_hash_entry *, PTR));
static bfd_boolean allocate_plt_static
PARAMS ((struct elf_link_hash_entry *, PTR));
static bfd_boolean allocate_dynrelocs
PARAMS ((struct elf_link_hash_entry *, PTR));
static bfd_boolean readonly_dynrelocs
PARAMS ((struct elf_link_hash_entry *, PTR));
static bfd_boolean clobber_millicode_symbols
PARAMS ((struct elf_link_hash_entry *, struct bfd_link_info *));
static bfd_boolean elf32_hppa_size_dynamic_sections
PARAMS ((bfd *, struct bfd_link_info *));
static void group_sections
PARAMS ((struct elf32_hppa_link_hash_table *, bfd_size_type, bfd_boolean));
static int get_local_syms
PARAMS ((bfd *, bfd *, struct bfd_link_info *));
static bfd_boolean elf32_hppa_final_link
PARAMS ((bfd *, struct bfd_link_info *));
static void hppa_record_segment_addr
PARAMS ((bfd *, asection *, PTR));
static bfd_reloc_status_type final_link_relocate
PARAMS ((asection *, bfd_byte *, const Elf_Internal_Rela *,
bfd_vma, struct elf32_hppa_link_hash_table *, asection *,
struct elf32_hppa_link_hash_entry *));
static bfd_boolean elf32_hppa_relocate_section
PARAMS ((bfd *, struct bfd_link_info *, bfd *, asection *,
bfd_byte *, Elf_Internal_Rela *, Elf_Internal_Sym *, asection **));
static bfd_boolean elf32_hppa_finish_dynamic_symbol
PARAMS ((bfd *, struct bfd_link_info *,
struct elf_link_hash_entry *, Elf_Internal_Sym *));
static enum elf_reloc_type_class elf32_hppa_reloc_type_class
PARAMS ((const Elf_Internal_Rela *));
static bfd_boolean elf32_hppa_finish_dynamic_sections
PARAMS ((bfd *, struct bfd_link_info *));
static void elf32_hppa_post_process_headers
PARAMS ((bfd *, struct bfd_link_info *));
static int elf32_hppa_elf_get_symbol_type
PARAMS ((Elf_Internal_Sym *, int));
/* Assorted hash table functions. */
/* Initialize an entry in the stub hash table. */
static struct bfd_hash_entry *
stub_hash_newfunc (entry, table, string)
struct bfd_hash_entry *entry;
struct bfd_hash_table *table;
const char *string;
{
/* Allocate the structure if it has not already been allocated by a
subclass. */
if (entry == NULL)
{
entry = bfd_hash_allocate (table,
sizeof (struct elf32_hppa_stub_hash_entry));
if (entry == NULL)
return entry;
}
/* Call the allocation method of the superclass. */
entry = bfd_hash_newfunc (entry, table, string);
if (entry != NULL)
{
struct elf32_hppa_stub_hash_entry *eh;
/* Initialize the local fields. */
eh = (struct elf32_hppa_stub_hash_entry *) entry;
eh->stub_sec = NULL;
eh->stub_offset = 0;
eh->target_value = 0;
eh->target_section = NULL;
eh->stub_type = hppa_stub_long_branch;
eh->h = NULL;
eh->id_sec = NULL;
}
return entry;
}
/* Initialize an entry in the link hash table. */
static struct bfd_hash_entry *
hppa_link_hash_newfunc (entry, table, string)
struct bfd_hash_entry *entry;
struct bfd_hash_table *table;
const char *string;
{
/* Allocate the structure if it has not already been allocated by a
subclass. */
if (entry == NULL)
{
entry = bfd_hash_allocate (table,
sizeof (struct elf32_hppa_link_hash_entry));
if (entry == NULL)
return entry;
}
/* Call the allocation method of the superclass. */
entry = _bfd_elf_link_hash_newfunc (entry, table, string);
if (entry != NULL)
{
struct elf32_hppa_link_hash_entry *eh;
/* Initialize the local fields. */
eh = (struct elf32_hppa_link_hash_entry *) entry;
eh->stub_cache = NULL;
eh->dyn_relocs = NULL;
eh->pic_call = 0;
eh->plabel = 0;
}
return entry;
}
/* Create the derived linker hash table. The PA ELF port uses the derived
hash table to keep information specific to the PA ELF linker (without
using static variables). */
static struct bfd_link_hash_table *
elf32_hppa_link_hash_table_create (abfd)
bfd *abfd;
{
struct elf32_hppa_link_hash_table *ret;
bfd_size_type amt = sizeof (*ret);
ret = (struct elf32_hppa_link_hash_table *) bfd_malloc (amt);
if (ret == NULL)
return NULL;
if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, hppa_link_hash_newfunc))
{
free (ret);
return NULL;
}
/* Init the stub hash table too. */
if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc))
{
free (ret);
return NULL;
}
ret->stub_bfd = NULL;
ret->add_stub_section = NULL;
ret->layout_sections_again = NULL;
ret->stub_group = NULL;
ret->sgot = NULL;
ret->srelgot = NULL;
ret->splt = NULL;
ret->srelplt = NULL;
ret->sdynbss = NULL;
ret->srelbss = NULL;
ret->text_segment_base = (bfd_vma) -1;
ret->data_segment_base = (bfd_vma) -1;
ret->multi_subspace = 0;
ret->has_12bit_branch = 0;
ret->has_17bit_branch = 0;
ret->has_22bit_branch = 0;
ret->need_plt_stub = 0;
ret->sym_sec.abfd = NULL;
return &ret->elf.root;
}
/* Free the derived linker hash table. */
static void
elf32_hppa_link_hash_table_free (hash)
struct bfd_link_hash_table *hash;
{
struct elf32_hppa_link_hash_table *ret
= (struct elf32_hppa_link_hash_table *) hash;
bfd_hash_table_free (&ret->stub_hash_table);
_bfd_generic_link_hash_table_free (hash);
}
/* Build a name for an entry in the stub hash table. */
static char *
hppa_stub_name (input_section, sym_sec, hash, rel)
const asection *input_section;
const asection *sym_sec;
const struct elf32_hppa_link_hash_entry *hash;
const Elf_Internal_Rela *rel;
{
char *stub_name;
bfd_size_type len;
if (hash)
{
len = 8 + 1 + strlen (hash->elf.root.root.string) + 1 + 8 + 1;
stub_name = bfd_malloc (len);
if (stub_name != NULL)
{
sprintf (stub_name, "%08x_%s+%x",
input_section->id & 0xffffffff,
hash->elf.root.root.string,
(int) rel->r_addend & 0xffffffff);
}
}
else
{
len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1;
stub_name = bfd_malloc (len);
if (stub_name != NULL)
{
sprintf (stub_name, "%08x_%x:%x+%x",
input_section->id & 0xffffffff,
sym_sec->id & 0xffffffff,
(int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
(int) rel->r_addend & 0xffffffff);
}
}
return stub_name;
}
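/* For illustration (the values here are made up), a stub name for a
call to a global symbol looks like "0000002a_printf+0", i.e.
<input section id>_<symbol name>+<addend>, while a stub name for a
local symbol looks like "0000002a_0000001c:7+0", i.e.
<input section id>_<symbol section id>:<symbol index>+<addend>. */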
/* Look up an entry in the stub hash. Stub entries are cached because
creating the stub name takes a bit of time. */
static struct elf32_hppa_stub_hash_entry *
hppa_get_stub_entry (input_section, sym_sec, hash, rel, htab)
const asection *input_section;
const asection *sym_sec;
struct elf32_hppa_link_hash_entry *hash;
const Elf_Internal_Rela *rel;
struct elf32_hppa_link_hash_table *htab;
{
struct elf32_hppa_stub_hash_entry *stub_entry;
const asection *id_sec;
/* If this input section is part of a group of sections sharing one
stub section, then use the id of the first section in the group.
Stub names need to include a section id, as there may well be
more than one stub used to reach say, printf, and we need to
distinguish between them. */
id_sec = htab->stub_group[input_section->id].link_sec;
if (hash != NULL && hash->stub_cache != NULL
&& hash->stub_cache->h == hash
&& hash->stub_cache->id_sec == id_sec)
{
stub_entry = hash->stub_cache;
}
else
{
char *stub_name;
stub_name = hppa_stub_name (id_sec, sym_sec, hash, rel);
if (stub_name == NULL)
return NULL;
stub_entry = hppa_stub_hash_lookup (&htab->stub_hash_table,
stub_name, FALSE, FALSE);
if (hash != NULL)
hash->stub_cache = stub_entry;
free (stub_name);
}
return stub_entry;
}
/* Add a new stub entry to the stub hash. Not all fields of the new
stub entry are initialised. */
static struct elf32_hppa_stub_hash_entry *
hppa_add_stub (stub_name, section, htab)
const char *stub_name;
asection *section;
struct elf32_hppa_link_hash_table *htab;
{
asection *link_sec;
asection *stub_sec;
struct elf32_hppa_stub_hash_entry *stub_entry;
link_sec = htab->stub_group[section->id].link_sec;
stub_sec = htab->stub_group[section->id].stub_sec;
if (stub_sec == NULL)
{
stub_sec = htab->stub_group[link_sec->id].stub_sec;
if (stub_sec == NULL)
{
size_t namelen;
bfd_size_type len;
char *s_name;
namelen = strlen (link_sec->name);
len = namelen + sizeof (STUB_SUFFIX);
s_name = bfd_alloc (htab->stub_bfd, len);
if (s_name == NULL)
return NULL;
memcpy (s_name, link_sec->name, namelen);
memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
stub_sec = (*htab->add_stub_section) (s_name, link_sec);
if (stub_sec == NULL)
return NULL;
htab->stub_group[link_sec->id].stub_sec = stub_sec;
}
htab->stub_group[section->id].stub_sec = stub_sec;
}
/* Enter this entry into the linker stub hash table. */
stub_entry = hppa_stub_hash_lookup (&htab->stub_hash_table, stub_name,
TRUE, FALSE);
if (stub_entry == NULL)
{
(*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
bfd_archive_filename (section->owner),
stub_name);
return NULL;
}
stub_entry->stub_sec = stub_sec;
stub_entry->stub_offset = 0;
stub_entry->id_sec = link_sec;
return stub_entry;
}
/* Determine the type of stub needed, if any, for a call. */
static enum elf32_hppa_stub_type
hppa_type_of_stub (input_sec, rel, hash, destination)
asection *input_sec;
const Elf_Internal_Rela *rel;
struct elf32_hppa_link_hash_entry *hash;
bfd_vma destination;
{
bfd_vma location;
bfd_vma branch_offset;
bfd_vma max_branch_offset;
unsigned int r_type;
if (hash != NULL
&& hash->elf.plt.offset != (bfd_vma) -1
&& (hash->elf.dynindx != -1 || hash->pic_call)
&& !hash->plabel)
{
/* We need an import stub. Decide between hppa_stub_import
and hppa_stub_import_shared later. */
return hppa_stub_import;
}
/* Determine where the call point is. */
location = (input_sec->output_offset
+ input_sec->output_section->vma
+ rel->r_offset);
branch_offset = destination - location - 8;
r_type = ELF32_R_TYPE (rel->r_info);
/* Determine if a long branch stub is needed. parisc branch offsets
are relative to the second instruction past the branch, ie. +8
bytes on from the branch instruction location. The offset is
signed and counts in units of 4 bytes. */
if (r_type == (unsigned int) R_PARISC_PCREL17F)
{
max_branch_offset = (1 << (17-1)) << 2;
}
else if (r_type == (unsigned int) R_PARISC_PCREL12F)
{
max_branch_offset = (1 << (12-1)) << 2;
}
else /* R_PARISC_PCREL22F. */
{
max_branch_offset = (1 << (22-1)) << 2;
}
if (branch_offset + max_branch_offset >= 2*max_branch_offset)
return hppa_stub_long_branch;
return hppa_stub_none;
}
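/* The range check above relies on unsigned wrap-around: branch_offset
is the displacement from the point 8 bytes past the branch, and adding
max_branch_offset maps the reachable range [-max, max) onto
[0, 2*max), so anything at or beyond 2*max_branch_offset (including
negative displacements that wrapped) needs a long branch stub. For
R_PARISC_PCREL17F, for example, max_branch_offset is
(1 << 16) << 2 = 0x40000, i.e. a reach of roughly +/-256 KiB. */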
/* Build one linker stub as defined by the stub hash table entry GEN_ENTRY.
IN_ARG contains the link info pointer. */
#define LDIL_R1 0x20200000 /* ldil LR'XXX,%r1 */
#define BE_SR4_R1 0xe0202002 /* be,n RR'XXX(%sr4,%r1) */
#define BL_R1 0xe8200000 /* b,l .+8,%r1 */
#define ADDIL_R1 0x28200000 /* addil LR'XXX,%r1,%r1 */
#define DEPI_R1 0xd4201c1e /* depi 0,31,2,%r1 */
#define ADDIL_DP 0x2b600000 /* addil LR'XXX,%dp,%r1 */
#define LDW_R1_R21 0x48350000 /* ldw RR'XXX(%sr0,%r1),%r21 */
#define BV_R0_R21 0xeaa0c000 /* bv %r0(%r21) */
#define LDW_R1_R19 0x48330000 /* ldw RR'XXX(%sr0,%r1),%r19 */
#define ADDIL_R19 0x2a600000 /* addil LR'XXX,%r19,%r1 */
#define LDW_R1_DP 0x483b0000 /* ldw RR'XXX(%sr0,%r1),%dp */
#define LDSID_R21_R1 0x02a010a1 /* ldsid (%sr0,%r21),%r1 */
#define MTSP_R1 0x00011820 /* mtsp %r1,%sr0 */
#define BE_SR0_R21 0xe2a00000 /* be 0(%sr0,%r21) */
#define STW_RP 0x6bc23fd1 /* stw %rp,-24(%sr0,%sp) */
#define BL22_RP 0xe800a002 /* b,l,n XXX,%rp */
#define BL_RP 0xe8400002 /* b,l,n XXX,%rp */
#define NOP 0x08000240 /* nop */
#define LDW_RP 0x4bc23fd1 /* ldw -24(%sr0,%sp),%rp */
#define LDSID_RP_R1 0x004010a1 /* ldsid (%sr0,%rp),%r1 */
#define BE_SR0_RP 0xe0400002 /* be,n 0(%sr0,%rp) */
#ifndef R19_STUBS
#define R19_STUBS 1
#endif
#if R19_STUBS
#define LDW_R1_DLT LDW_R1_R19
#else
#define LDW_R1_DLT LDW_R1_DP
#endif
static bfd_boolean
hppa_build_one_stub (gen_entry, in_arg)
struct bfd_hash_entry *gen_entry;
PTR in_arg;
{
struct elf32_hppa_stub_hash_entry *stub_entry;
struct bfd_link_info *info;
struct elf32_hppa_link_hash_table *htab;
asection *stub_sec;
bfd *stub_bfd;
bfd_byte *loc;
bfd_vma sym_value;
bfd_vma insn;
bfd_vma off;
int val;
int size;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_hppa_stub_hash_entry *) gen_entry;
info = (struct bfd_link_info *) in_arg;
htab = hppa_link_hash_table (info);
stub_sec = stub_entry->stub_sec;
/* Make a note of the offset within the stubs for this entry. */
stub_entry->stub_offset = stub_sec->_raw_size;
loc = stub_sec->contents + stub_entry->stub_offset;
stub_bfd = stub_sec->owner;
switch (stub_entry->stub_type)
{
case hppa_stub_long_branch:
/* Create the long branch. A long branch is formed with "ldil"
loading the upper bits of the target address into a register,
then branching with "be" which adds in the lower bits.
The "be" has its delay slot nullified. */
sym_value = (stub_entry->target_value
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
val = hppa_field_adjust (sym_value, (bfd_signed_vma) 0, e_lrsel);
insn = hppa_rebuild_insn ((int) LDIL_R1, val, 21);
bfd_put_32 (stub_bfd, insn, loc);
val = hppa_field_adjust (sym_value, (bfd_signed_vma) 0, e_rrsel) >> 2;
insn = hppa_rebuild_insn ((int) BE_SR4_R1, val, 17);
bfd_put_32 (stub_bfd, insn, loc + 4);
size = 8;
break;
case hppa_stub_long_branch_shared:
/* Branches are relative. This is where we are going to. */
sym_value = (stub_entry->target_value
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
/* And this is where we are coming from, more or less. */
sym_value -= (stub_entry->stub_offset
+ stub_sec->output_offset
+ stub_sec->output_section->vma);
bfd_put_32 (stub_bfd, (bfd_vma) BL_R1, loc);
val = hppa_field_adjust (sym_value, (bfd_signed_vma) -8, e_lrsel);
insn = hppa_rebuild_insn ((int) ADDIL_R1, val, 21);
bfd_put_32 (stub_bfd, insn, loc + 4);
val = hppa_field_adjust (sym_value, (bfd_signed_vma) -8, e_rrsel) >> 2;
insn = hppa_rebuild_insn ((int) BE_SR4_R1, val, 17);
bfd_put_32 (stub_bfd, insn, loc + 8);
size = 12;
break;
case hppa_stub_import:
case hppa_stub_import_shared:
off = stub_entry->h->elf.plt.offset;
if (off >= (bfd_vma) -2)
abort ();
off &= ~ (bfd_vma) 1;
sym_value = (off
+ htab->splt->output_offset
+ htab->splt->output_section->vma
- elf_gp (htab->splt->output_section->owner));
insn = ADDIL_DP;
#if R19_STUBS
if (stub_entry->stub_type == hppa_stub_import_shared)
insn = ADDIL_R19;
#endif
val = hppa_field_adjust (sym_value, (bfd_signed_vma) 0, e_lrsel),
insn = hppa_rebuild_insn ((int) insn, val, 21);
bfd_put_32 (stub_bfd, insn, loc);
/* It is critical to use lrsel/rrsel here because we are using
two different offsets (+0 and +4) from sym_value. If we use
lsel/rsel then with unfortunate sym_values we will round
sym_value+4 up to the next 2k block leading to a mis-match
between the lsel and rsel value. */
val = hppa_field_adjust (sym_value, (bfd_signed_vma) 0, e_rrsel);
insn = hppa_rebuild_insn ((int) LDW_R1_R21, val, 14);
bfd_put_32 (stub_bfd, insn, loc + 4);
if (htab->multi_subspace)
{
val = hppa_field_adjust (sym_value, (bfd_signed_vma) 4, e_rrsel);
insn = hppa_rebuild_insn ((int) LDW_R1_DLT, val, 14);
bfd_put_32 (stub_bfd, insn, loc + 8);
bfd_put_32 (stub_bfd, (bfd_vma) LDSID_R21_R1, loc + 12);
bfd_put_32 (stub_bfd, (bfd_vma) MTSP_R1, loc + 16);
bfd_put_32 (stub_bfd, (bfd_vma) BE_SR0_R21, loc + 20);
bfd_put_32 (stub_bfd, (bfd_vma) STW_RP, loc + 24);
size = 28;
}
else
{
bfd_put_32 (stub_bfd, (bfd_vma) BV_R0_R21, loc + 8);
val = hppa_field_adjust (sym_value, (bfd_signed_vma) 4, e_rrsel);
insn = hppa_rebuild_insn ((int) LDW_R1_DLT, val, 14);
bfd_put_32 (stub_bfd, insn, loc + 12);
size = 16;
}
if (!info->shared
&& stub_entry->h != NULL
&& stub_entry->h->pic_call)
{
/* Build the .plt entry needed to call a PIC function from
statically linked code. We don't need any relocs. */
bfd *dynobj;
struct elf32_hppa_link_hash_entry *eh;
bfd_vma value;
dynobj = htab->elf.dynobj;
eh = (struct elf32_hppa_link_hash_entry *) stub_entry->h;
if (eh->elf.root.type != bfd_link_hash_defined
&& eh->elf.root.type != bfd_link_hash_defweak)
abort ();
value = (eh->elf.root.u.def.value
+ eh->elf.root.u.def.section->output_offset
+ eh->elf.root.u.def.section->output_section->vma);
/* Fill in the entry in the procedure linkage table.
The format of a plt entry is
<funcaddr>
<__gp>. */
bfd_put_32 (htab->splt->owner, value,
htab->splt->contents + off);
value = elf_gp (htab->splt->output_section->owner);
bfd_put_32 (htab->splt->owner, value,
htab->splt->contents + off + 4);
}
break;
case hppa_stub_export:
/* Branches are relative. This is where we are going to. */
sym_value = (stub_entry->target_value
+ stub_entry->target_section->output_offset
+ stub_entry->target_section->output_section->vma);
/* And this is where we are coming from. */
sym_value -= (stub_entry->stub_offset
+ stub_sec->output_offset
+ stub_sec->output_section->vma);
if (sym_value - 8 + (1 << (17 + 1)) >= (1 << (17 + 2))
&& (!htab->has_22bit_branch
|| sym_value - 8 + (1 << (22 + 1)) >= (1 << (22 + 2))))
{
(*_bfd_error_handler)
(_("%s(%s+0x%lx): cannot reach %s, recompile with -ffunction-sections"),
bfd_archive_filename (stub_entry->target_section->owner),
stub_sec->name,
(long) stub_entry->stub_offset,
stub_entry->root.string);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
val = hppa_field_adjust (sym_value, (bfd_signed_vma) -8, e_fsel) >> 2;
if (!htab->has_22bit_branch)
insn = hppa_rebuild_insn ((int) BL_RP, val, 17);
else
insn = hppa_rebuild_insn ((int) BL22_RP, val, 22);
bfd_put_32 (stub_bfd, insn, loc);
bfd_put_32 (stub_bfd, (bfd_vma) NOP, loc + 4);
bfd_put_32 (stub_bfd, (bfd_vma) LDW_RP, loc + 8);
bfd_put_32 (stub_bfd, (bfd_vma) LDSID_RP_R1, loc + 12);
bfd_put_32 (stub_bfd, (bfd_vma) MTSP_R1, loc + 16);
bfd_put_32 (stub_bfd, (bfd_vma) BE_SR0_RP, loc + 20);
/* Point the function symbol at the stub. */
stub_entry->h->elf.root.u.def.section = stub_sec;
stub_entry->h->elf.root.u.def.value = stub_sec->_raw_size;
size = 24;
break;
default:
BFD_FAIL ();
return FALSE;
}
stub_sec->_raw_size += size;
return TRUE;
}
#undef LDIL_R1
#undef BE_SR4_R1
#undef BL_R1
#undef ADDIL_R1
#undef DEPI_R1
#undef ADDIL_DP
#undef LDW_R1_R21
#undef LDW_R1_DLT
#undef LDW_R1_R19
#undef ADDIL_R19
#undef LDW_R1_DP
#undef LDSID_R21_R1
#undef MTSP_R1
#undef BE_SR0_R21
#undef STW_RP
#undef BV_R0_R21
#undef BL_RP
#undef NOP
#undef LDW_RP
#undef LDSID_RP_R1
#undef BE_SR0_RP
/* As above, but don't actually build the stub. Just bump offset so
we know stub section sizes. */
static bfd_boolean
hppa_size_one_stub (gen_entry, in_arg)
struct bfd_hash_entry *gen_entry;
PTR in_arg;
{
struct elf32_hppa_stub_hash_entry *stub_entry;
struct elf32_hppa_link_hash_table *htab;
int size;
/* Massage our args to the form they really have. */
stub_entry = (struct elf32_hppa_stub_hash_entry *) gen_entry;
htab = (struct elf32_hppa_link_hash_table *) in_arg;
if (stub_entry->stub_type == hppa_stub_long_branch)
size = 8;
else if (stub_entry->stub_type == hppa_stub_long_branch_shared)
size = 12;
else if (stub_entry->stub_type == hppa_stub_export)
size = 24;
else /* hppa_stub_import or hppa_stub_import_shared. */
{
if (htab->multi_subspace)
size = 28;
else
size = 16;
}
stub_entry->stub_sec->_raw_size += size;
return TRUE;
}
/* Return nonzero if ABFD represents an HPPA ELF32 file.
Additionally we set the default architecture and machine. */
static bfd_boolean
elf32_hppa_object_p (abfd)
bfd *abfd;
{
Elf_Internal_Ehdr * i_ehdrp;
unsigned int flags;
i_ehdrp = elf_elfheader (abfd);
if (strcmp (bfd_get_target (abfd), "elf32-hppa-linux") == 0)
{
if (i_ehdrp->e_ident[EI_OSABI] != ELFOSABI_LINUX)
return FALSE;
}
else
{
if (i_ehdrp->e_ident[EI_OSABI] != ELFOSABI_HPUX)
return FALSE;
}
flags = i_ehdrp->e_flags;
switch (flags & (EF_PARISC_ARCH | EF_PARISC_WIDE))
{
case EFA_PARISC_1_0:
return bfd_default_set_arch_mach (abfd, bfd_arch_hppa, 10);
case EFA_PARISC_1_1:
return bfd_default_set_arch_mach (abfd, bfd_arch_hppa, 11);
case EFA_PARISC_2_0:
return bfd_default_set_arch_mach (abfd, bfd_arch_hppa, 20);
case EFA_PARISC_2_0 | EF_PARISC_WIDE:
return bfd_default_set_arch_mach (abfd, bfd_arch_hppa, 25);
}
return TRUE;
}
/* Undo the generic ELF code's subtraction of section->vma from the
value of each external symbol. */
static bfd_boolean
elf32_hppa_add_symbol_hook (abfd, info, sym, namep, flagsp, secp, valp)
bfd *abfd ATTRIBUTE_UNUSED;
struct bfd_link_info *info ATTRIBUTE_UNUSED;
const Elf_Internal_Sym *sym ATTRIBUTE_UNUSED;
const char **namep ATTRIBUTE_UNUSED;
flagword *flagsp ATTRIBUTE_UNUSED;
asection **secp;
bfd_vma *valp;
{
*valp += (*secp)->vma;
return TRUE;
}
/* Create the .plt and .got sections, and set up our hash table
short-cuts to various dynamic sections. */
static bfd_boolean
elf32_hppa_create_dynamic_sections (abfd, info)
bfd *abfd;
struct bfd_link_info *info;
{
struct elf32_hppa_link_hash_table *htab;
/* Don't try to create the .plt and .got twice. */
htab = hppa_link_hash_table (info);
if (htab->splt != NULL)
return TRUE;
/* Call the generic code to do most of the work. */
if (! _bfd_elf_create_dynamic_sections (abfd, info))
return FALSE;
htab->splt = bfd_get_section_by_name (abfd, ".plt");
htab->srelplt = bfd_get_section_by_name (abfd, ".rela.plt");
htab->sgot = bfd_get_section_by_name (abfd, ".got");
htab->srelgot = bfd_make_section (abfd, ".rela.got");
if (htab->srelgot == NULL
|| ! bfd_set_section_flags (abfd, htab->srelgot,
(SEC_ALLOC
| SEC_LOAD
| SEC_HAS_CONTENTS
| SEC_IN_MEMORY
| SEC_LINKER_CREATED
| SEC_READONLY))
|| ! bfd_set_section_alignment (abfd, htab->srelgot, 2))
return FALSE;
htab->sdynbss = bfd_get_section_by_name (abfd, ".dynbss");
htab->srelbss = bfd_get_section_by_name (abfd, ".rela.bss");
return TRUE;
}
/* Copy the extra info we tack onto an elf_link_hash_entry. */
static void
elf32_hppa_copy_indirect_symbol (bed, dir, ind)
struct elf_backend_data *bed;
struct elf_link_hash_entry *dir, *ind;
{
struct elf32_hppa_link_hash_entry *edir, *eind;
edir = (struct elf32_hppa_link_hash_entry *) dir;
eind = (struct elf32_hppa_link_hash_entry *) ind;
if (eind->dyn_relocs != NULL)
{
if (edir->dyn_relocs != NULL)
{
struct elf32_hppa_dyn_reloc_entry **pp;
struct elf32_hppa_dyn_reloc_entry *p;
if (ind->root.type == bfd_link_hash_indirect)
abort ();
/* Add reloc counts against the weak sym to the strong sym
list. Merge any entries against the same section. */
for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
{
struct elf32_hppa_dyn_reloc_entry *q;
for (q = edir->dyn_relocs; q != NULL; q = q->next)
if (q->sec == p->sec)
{
#if RELATIVE_DYNRELOCS
q->relative_count += p->relative_count;
#endif
q->count += p->count;
*pp = p->next;
break;
}
if (q == NULL)
pp = &p->next;
}
*pp = edir->dyn_relocs;
}
edir->dyn_relocs = eind->dyn_relocs;
eind->dyn_relocs = NULL;
}
_bfd_elf_link_hash_copy_indirect (bed, dir, ind);
}
/* Look through the relocs for a section during the first phase, and
calculate needed space in the global offset table, procedure linkage
table, and dynamic reloc sections. At this point we haven't
necessarily read all the input files. */
static bfd_boolean
elf32_hppa_check_relocs (abfd, info, sec, relocs)
bfd *abfd;
struct bfd_link_info *info;
asection *sec;
const Elf_Internal_Rela *relocs;
{
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
const Elf_Internal_Rela *rel;
const Elf_Internal_Rela *rel_end;
struct elf32_hppa_link_hash_table *htab;
asection *sreloc;
asection *stubreloc;
if (info->relocateable)
return TRUE;
htab = hppa_link_hash_table (info);
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (abfd);
sreloc = NULL;
stubreloc = NULL;
rel_end = relocs + sec->reloc_count;
for (rel = relocs; rel < rel_end; rel++)
{
enum {
NEED_GOT = 1,
NEED_PLT = 2,
NEED_DYNREL = 4,
PLT_PLABEL = 8
};
unsigned int r_symndx, r_type;
struct elf32_hppa_link_hash_entry *h;
int need_entry;
r_symndx = ELF32_R_SYM (rel->r_info);
if (r_symndx < symtab_hdr->sh_info)
h = NULL;
else
h = ((struct elf32_hppa_link_hash_entry *)
sym_hashes[r_symndx - symtab_hdr->sh_info]);
r_type = ELF32_R_TYPE (rel->r_info);
switch (r_type)
{
case R_PARISC_DLTIND14F:
case R_PARISC_DLTIND14R:
case R_PARISC_DLTIND21L:
/* This symbol requires a global offset table entry. */
need_entry = NEED_GOT;
/* Mark this section as containing PIC code. */
sec->flags |= SEC_HAS_GOT_REF;
break;
case R_PARISC_PLABEL14R: /* "Official" procedure labels. */
case R_PARISC_PLABEL21L:
case R_PARISC_PLABEL32:
/* If the addend is non-zero, we break badly. */
if (rel->r_addend != 0)
abort ();
/* If we are creating a shared library, then we need to
create a PLT entry for all PLABELs, because PLABELs with
local symbols may be passed via a pointer to another
object. Additionally, output a dynamic relocation
pointing to the PLT entry.
For executables, the original 32-bit ABI allowed two
different styles of PLABELs (function pointers): For
global functions, the PLABEL word points into the .plt
two bytes past a (function address, gp) pair, and for
local functions the PLABEL points directly at the
function. The magic +2 for the first type allows us to
differentiate between the two. As you can imagine, this
is a real pain when it comes to generating code to call
functions indirectly or to compare function pointers.
We avoid the mess by always pointing a PLABEL into the
.plt, even for local functions. */
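/* As a rough sketch of the "+2" convention: a consumer such as the
$$dyncall millicode tests bit 1 of a plabel value P; if it is set, the
entry point is loaded from the word at (P & ~3) and the new gp from
the word at (P & ~3) + 4, otherwise P itself is the entry point.
Because this backend always points plabels into the .plt, only the
first form is produced here. */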
need_entry = PLT_PLABEL | NEED_PLT | NEED_DYNREL;
break;
case R_PARISC_PCREL12F:
htab->has_12bit_branch = 1;
goto branch_common;
case R_PARISC_PCREL17C:
case R_PARISC_PCREL17F:
htab->has_17bit_branch = 1;
goto branch_common;
case R_PARISC_PCREL22F:
htab->has_22bit_branch = 1;
branch_common:
/* Function calls might need to go through the .plt, and
might require long branch stubs. */
if (h == NULL)
{
/* We know local syms won't need a .plt entry, and if
they need a long branch stub we can't guarantee that
we can reach the stub. So just flag an error later
if we're doing a shared link and find we need a long
branch stub. */
continue;
}
else
{
/* Global symbols will need a .plt entry if they remain
global, and in most cases won't need a long branch
stub. Unfortunately, we have to cater for the case
where a symbol is forced local by versioning, or due
to symbolic linking, and we lose the .plt entry. */
need_entry = NEED_PLT;
if (h->elf.type == STT_PARISC_MILLI)
need_entry = 0;
}
break;
case R_PARISC_SEGBASE: /* Used to set segment base. */
case R_PARISC_SEGREL32: /* Relative reloc, used for unwind. */
case R_PARISC_PCREL14F: /* PC relative load/store. */
case R_PARISC_PCREL14R:
case R_PARISC_PCREL17R: /* External branches. */
case R_PARISC_PCREL21L: /* As above, and for load/store too. */
/* We don't need to propagate the relocation if linking a
shared object since these are section relative. */
continue;
case R_PARISC_DPREL14F: /* Used for gp rel data load/store. */
case R_PARISC_DPREL14R:
case R_PARISC_DPREL21L:
if (info->shared)
{
(*_bfd_error_handler)
(_("%s: relocation %s can not be used when making a shared object; recompile with -fPIC"),
bfd_archive_filename (abfd),
elf_hppa_howto_table[r_type].name);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
/* Fall through. */
case R_PARISC_DIR17F: /* Used for external branches. */
case R_PARISC_DIR17R:
case R_PARISC_DIR14F: /* Used for load/store from absolute locn. */
case R_PARISC_DIR14R:
case R_PARISC_DIR21L: /* As above, and for ext branches too. */
#if 0
/* Help debug shared library creation. Any of the above
relocs can be used in shared libs, but they may cause
pages to become unshared. */
if (info->shared)
{
(*_bfd_error_handler)
(_("%s: relocation %s should not be used when making a shared object; recompile with -fPIC"),
bfd_archive_filename (abfd),
elf_hppa_howto_table[r_type].name);
}
/* Fall through. */
#endif
case R_PARISC_DIR32: /* .word relocs. */
/* We may want to output a dynamic relocation later. */
need_entry = NEED_DYNREL;
break;
/* This relocation describes the C++ object vtable hierarchy.
Reconstruct it for later use during GC. */
case R_PARISC_GNU_VTINHERIT:
if (!_bfd_elf32_gc_record_vtinherit (abfd, sec,
&h->elf, rel->r_offset))
return FALSE;
continue;
/* This relocation describes which C++ vtable entries are actually
used. Record for later use during GC. */
case R_PARISC_GNU_VTENTRY:
if (!_bfd_elf32_gc_record_vtentry (abfd, sec,
&h->elf, rel->r_addend))
return FALSE;
continue;
default:
continue;
}
/* Now carry out our orders. */
if (need_entry & NEED_GOT)
{
/* Allocate space for a GOT entry, as well as a dynamic
relocation for this entry. */
if (htab->sgot == NULL)
{
if (htab->elf.dynobj == NULL)
htab->elf.dynobj = abfd;
if (!elf32_hppa_create_dynamic_sections (htab->elf.dynobj, info))
return FALSE;
}
if (h != NULL)
{
h->elf.got.refcount += 1;
}
else
{
bfd_signed_vma *local_got_refcounts;
/* This is a global offset table entry for a local symbol. */
local_got_refcounts = elf_local_got_refcounts (abfd);
if (local_got_refcounts == NULL)
{
bfd_size_type size;
/* Allocate space for local got offsets and local
plt offsets. Done this way to save polluting
elf_obj_tdata with another target specific
pointer. */
size = symtab_hdr->sh_info;
size *= 2 * sizeof (bfd_signed_vma);
local_got_refcounts = ((bfd_signed_vma *)
bfd_zalloc (abfd, size));
if (local_got_refcounts == NULL)
return FALSE;
elf_local_got_refcounts (abfd) = local_got_refcounts;
}
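/* The block allocated above holds 2 * sh_info counters: entries
[0, sh_info) are the local GOT refcounts indexed by symbol number, and
entries [sh_info, 2 * sh_info) are the local PLT (plabel) refcounts
used in the PLT_PLABEL case below. */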
local_got_refcounts[r_symndx] += 1;
}
}
if (need_entry & NEED_PLT)
{
/* If we are creating a shared library, and this is a reloc
against a weak symbol or a global symbol in a dynamic
object, then we will be creating an import stub and a
.plt entry for the symbol. Similarly, on a normal link
to symbols defined in a dynamic object we'll need the
import stub and a .plt entry. We don't know yet whether
the symbol is defined or not, so make an entry anyway and
clean up later in adjust_dynamic_symbol. */
if ((sec->flags & SEC_ALLOC) != 0)
{
if (h != NULL)
{
h->elf.elf_link_hash_flags |= ELF_LINK_HASH_NEEDS_PLT;
h->elf.plt.refcount += 1;
/* If this .plt entry is for a plabel, mark it so
that adjust_dynamic_symbol will keep the entry
even if it appears to be local. */
if (need_entry & PLT_PLABEL)
h->plabel = 1;
}
else if (need_entry & PLT_PLABEL)
{
bfd_signed_vma *local_got_refcounts;
bfd_signed_vma *local_plt_refcounts;
local_got_refcounts = elf_local_got_refcounts (abfd);
if (local_got_refcounts == NULL)
{
bfd_size_type size;
/* Allocate space for local got offsets and local
plt offsets. */
size = symtab_hdr->sh_info;
size *= 2 * sizeof (bfd_signed_vma);
local_got_refcounts = ((bfd_signed_vma *)
bfd_zalloc (abfd, size));
if (local_got_refcounts == NULL)
return FALSE;
elf_local_got_refcounts (abfd) = local_got_refcounts;
}
local_plt_refcounts = (local_got_refcounts
+ symtab_hdr->sh_info);
local_plt_refcounts[r_symndx] += 1;
}
}
}
if (need_entry & NEED_DYNREL)
{
/* Flag this symbol as having a non-got, non-plt reference
so that we generate copy relocs if it turns out to be
dynamic. */
if (h != NULL && !info->shared)
h->elf.elf_link_hash_flags |= ELF_LINK_NON_GOT_REF;
/* If we are creating a shared library then we need to copy
the reloc into the shared library. However, if we are
linking with -Bsymbolic, we need only copy absolute
relocs or relocs against symbols that are not defined in
an object we are including in the link. PC- or DP- or
DLT-relative relocs against any local sym or global sym
with DEF_REGULAR set, can be discarded. At this point we
have not seen all the input files, so it is possible that
DEF_REGULAR is not set now but will be set later (it is
never cleared). We account for that possibility below by
storing information in the dyn_relocs field of the
hash table entry.
A similar situation to the -Bsymbolic case occurs when
creating shared libraries and symbol visibility changes
render the symbol local.
As it turns out, all the relocs we will be creating here
are absolute, so we cannot remove them on -Bsymbolic
links or visibility changes anyway. A STUB_REL reloc
is absolute too, as in that case it is the reloc in the
stub we will be creating, rather than copying the PCREL
reloc in the branch.
If on the other hand, we are creating an executable, we
may need to keep relocations for symbols satisfied by a
dynamic library if we manage to avoid copy relocs for the
symbol. */
if ((info->shared
&& (sec->flags & SEC_ALLOC) != 0
&& (IS_ABSOLUTE_RELOC (r_type)
|| (h != NULL
&& (!info->symbolic
|| h->elf.root.type == bfd_link_hash_defweak
|| (h->elf.elf_link_hash_flags
& ELF_LINK_HASH_DEF_REGULAR) == 0))))
|| (!info->shared
&& (sec->flags & SEC_ALLOC) != 0
&& h != NULL
&& (h->elf.root.type == bfd_link_hash_defweak
|| (h->elf.elf_link_hash_flags
& ELF_LINK_HASH_DEF_REGULAR) == 0)))
{
struct elf32_hppa_dyn_reloc_entry *p;
struct elf32_hppa_dyn_reloc_entry **head;
/* Create a reloc section in dynobj and make room for
this reloc. */
if (sreloc == NULL)
{
char *name;
bfd *dynobj;
name = (bfd_elf_string_from_elf_section
(abfd,
elf_elfheader (abfd)->e_shstrndx,
elf_section_data (sec)->rel_hdr.sh_name));
if (name == NULL)
{
(*_bfd_error_handler)
(_("Could not find relocation section for %s"),
sec->name);
bfd_set_error (bfd_error_bad_value);
return FALSE;
}
if (htab->elf.dynobj == NULL)
htab->elf.dynobj = abfd;
dynobj = htab->elf.dynobj;
sreloc = bfd_get_section_by_name (dynobj, name);
if (sreloc == NULL)
{
flagword flags;
sreloc = bfd_make_section (dynobj, name);
flags = (SEC_HAS_CONTENTS | SEC_READONLY
| SEC_IN_MEMORY | SEC_LINKER_CREATED);
if ((sec->flags & SEC_ALLOC) != 0)
flags |= SEC_ALLOC | SEC_LOAD;
if (sreloc == NULL
|| !bfd_set_section_flags (dynobj, sreloc, flags)
|| !bfd_set_section_alignment (dynobj, sreloc, 2))
return FALSE;
}
elf_section_data (sec)->sreloc = sreloc;
}
/* If this is a global symbol, we count the number of
relocations we need for this symbol. */
if (h != NULL)
{
head = &h->dyn_relocs;
}
else
{
/* Track dynamic relocs needed for local syms too.
We really need local syms available to do this
easily. Oh well. */
asection *s;
s = bfd_section_from_r_symndx (abfd, &htab->sym_sec,
sec, r_symndx);
if (s == NULL)
return FALSE;
head = ((struct elf32_hppa_dyn_reloc_entry **)
&elf_section_data (s)->local_dynrel);
}
p = *head;
if (p == NULL || p->sec != sec)
{
p = ((struct elf32_hppa_dyn_reloc_entry *)
bfd_alloc (htab->elf.dynobj,
(bfd_size_type) sizeof *p));
if (p == NULL)
return FALSE;
p->next = *head;
*head = p;
p->sec = sec;
p->count = 0;
#if RELATIVE_DYNRELOCS
p->relative_count = 0;
#endif
}
p->count += 1;
#if RELATIVE_DYNRELOCS
if (!IS_ABSOLUTE_RELOC (r_type))
p->relative_count += 1;
#endif
}
}
}
return TRUE;
}
/* Return the section that should be marked against garbage collection
for a given relocation. */
static asection *
elf32_hppa_gc_mark_hook (sec, info, rel, h, sym)
asection *sec;
struct bfd_link_info *info ATTRIBUTE_UNUSED;
Elf_Internal_Rela *rel;
struct elf_link_hash_entry *h;
Elf_Internal_Sym *sym;
{
if (h != NULL)
{
switch ((unsigned int) ELF32_R_TYPE (rel->r_info))
{
case R_PARISC_GNU_VTINHERIT:
case R_PARISC_GNU_VTENTRY:
break;
default:
switch (h->root.type)
{
case bfd_link_hash_defined:
case bfd_link_hash_defweak:
return h->root.u.def.section;
case bfd_link_hash_common:
return h->root.u.c.p->section;
default:
break;
}
}
}
else
return bfd_section_from_elf_index (sec->owner, sym->st_shndx);
return NULL;
}
/* Update the got and plt entry reference counts for the section being
removed. */
static bfd_boolean
elf32_hppa_gc_sweep_hook (abfd, info, sec, relocs)
bfd *abfd;
struct bfd_link_info *info ATTRIBUTE_UNUSED;
asection *sec;
const Elf_Internal_Rela *relocs;
{
Elf_Internal_Shdr *symtab_hdr;
struct elf_link_hash_entry **sym_hashes;
bfd_signed_vma *local_got_refcounts;
bfd_signed_vma *local_plt_refcounts;
const Elf_Internal_Rela *rel, *relend;
elf_section_data (sec)->local_dynrel = NULL;
symtab_hdr = &elf_tdata (abfd)->symtab_hdr;
sym_hashes = elf_sym_hashes (abfd);
local_got_refcounts = elf_local_got_refcounts (abfd);
local_plt_refcounts = local_got_refcounts;
if (local_plt_refcounts != NULL)
local_plt_refcounts += symtab_hdr->sh_info;
relend = relocs + sec->reloc_count;
for (rel = relocs; rel < relend; rel++)
{
unsigned long r_symndx;
unsigned int r_type;
struct elf_link_hash_entry *h = NULL;
r_symndx = ELF32_R_SYM (rel->r_info);
if (r_symndx >= symtab_hdr->sh_info)
{
struct <