138 lines
5.7 KiB
Diff
138 lines
5.7 KiB
Diff
From ec9b25736eeff0027ad771fc35b6b97d34064da7 Mon Sep 17 00:00:00 2001
|
|
From: Lv Ying <lvying6@huawei.com>
|
|
Date: Mon, 3 Jun 2024 00:46:30 -0400
|
|
Subject: [PATCH 1/2] elf: the hugepage feature of dynamic library stays
|
|
compatible with llvm lld
|
|
|
|
GNU LD places RELRO together with data and bss in the same PT_LOAD (PT_GNU_RELRO(.data.rel.ro .bss.rel.ro) .data .bss),
|
|
so RELRO filesz == RELRO memsz, which prevents _dl_protect_relro from turning the data section behind RELRO read-only.
|
|
LLVM LLD puts RELRO in a separate PT_LOAD (PT_GNU_RELRO(.data.rel.ro .bss.rel.ro)); PT_LOAD(.data .bss), and
|
|
by default RELRO filesz < RELRO memsz, so LLVM cannot stay compatible with GNU LD's RELRO filesz == RELRO memsz
|
|
|
|
The hugepage feature for dynamic libraries is consistent with the default glibc dynamic library loading process:
|
|
mmap the file first (mmap the ELF file using PT_LOAD's offset), then anonymously map the part of memsz beyond filesz.
|
|
The hugepage feature mmaps the RELRO file part (GNU LD) in 4KB pages, then tries to map the data and bss parts using hugepages.
|
|
So RELRO filesz < RELRO memsz would cause a wrong relro len calculation.
|
|
|
|
Signed-off-by: Lv Ying <lvying6@huawei.com>
|
|
---
|
|
elf/dl-load.c | 5 +++
|
|
elf/dl-map-segments-hugepage.h | 66 ++++++++++++++++++++++++++++++++--
|
|
2 files changed, 67 insertions(+), 3 deletions(-)
|
|
|
|
diff --git a/elf/dl-load.c b/elf/dl-load.c
|
|
index 0c883cb0..5c63e520 100644
|
|
--- a/elf/dl-load.c
|
|
+++ b/elf/dl-load.c
|
|
@@ -1137,6 +1137,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
|
|
#ifdef HUGEPAGE_SHARED_LIB
|
|
bool use_hugepage = false;
|
|
char hp_bitmap[l->l_phnum];
|
|
+ const ElfW(Phdr) *relro_ph = NULL;
|
|
#endif
|
|
|
|
/* The struct is initialized to zero so this is not necessary:
|
|
@@ -1259,6 +1260,9 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
|
|
case PT_GNU_RELRO:
|
|
l->l_relro_addr = ph->p_vaddr;
|
|
l->l_relro_size = ph->p_memsz;
|
|
+#ifdef HUGEPAGE_SHARED_LIB
|
|
+ relro_ph = ph;
|
|
+#endif
|
|
break;
|
|
}
|
|
|
|
@@ -1301,6 +1305,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
|
|
if ((GLRO(dl_hugepage_mask) & DL_HUGEPAGE_LIB_LARGE_IN_FLAG) ||
|
|
((GLRO(dl_hugepage_mask) & DL_HUGEPAGE_PROBE_FLAG) && use_hugepage))
|
|
{
|
|
+ _llvm_ld_relro_hp_fallback(phdr, relro_ph, l, hp_bitmap);
|
|
errstring = _dl_map_segments_largein (l, fd, header, type, loadcmds, nloadcmds,
|
|
maplength, hp_bitmap);
|
|
if (__glibc_unlikely (errstring != NULL))
|
|
diff --git a/elf/dl-map-segments-hugepage.h b/elf/dl-map-segments-hugepage.h
|
|
index 218e93a0..0e043731 100644
|
|
--- a/elf/dl-map-segments-hugepage.h
|
|
+++ b/elf/dl-map-segments-hugepage.h
|
|
@@ -88,14 +88,73 @@ unmap_reserved_area:
|
|
return MAP_FAILED;
|
|
}
|
|
|
|
+/*
|
|
+ * With the traditional -z noseparate-code, GNU ld defaults to a RX/R/RW program header layout,
|
|
+ * With -z separate-code (default on Linux/x86 from binutils 2.31 onwards), GNU ld defaults to a R/RX/R/RW program header layout
|
|
+ *
|
|
+ * ld.lld defaults to R/RX/RW(RELRO)/RW(non-RELRO), with --rosegment, ld.lld uses RX/RW(RELRO)/RW(non-RELRO)
|
|
+ * LLVM LLD puts RELRO in a separate PT_LOAD (PT_GNU_RELRO(.data.rel.ro .bss.rel.ro)); PT_LOAD(.data .bss)
|
|
+ * LLVM LLD by default has RELRO filesz < RELRO memsz, so LLVM cannot stay compatible with GNU LD's RELRO filesz == RELRO memsz
|
|
+ *
|
|
+ * The hugepage feature for dynamic libraries is consistent with the default glibc dynamic library loading process: mmap the file part first, then map the anonymous part
|
|
+ * As GNU LD RELRO filesz == RELRO memsz, treat RELRO as file part,
|
|
+ * but RELRO filesz < RELRO memsz would cause a wrong relro len calculation,
|
|
+ * so just mmap the LLVM LLD RELRO PT_LOAD with normal pages to avoid the wrong GNU-LD-style relro len calculation
|
|
+ */
|
|
+static __always_inline void
|
|
+_llvm_ld_relro_hp_fallback(const ElfW(Phdr) *phdr, const ElfW(Phdr) *relro_ph,
|
|
+ struct link_map *l, char *hp_bitmap)
|
|
+{
|
|
+ const ElfW(Phdr) *ph;
|
|
+ char *hp_loadcmd;
|
|
+ int i = 0;
|
|
+
|
|
+ // all the PT_LOADs do not contain RELRO, do nothing
|
|
+ if (relro_ph == NULL)
|
|
+ return;
|
|
+
|
|
+ for (ph = phdr, hp_loadcmd = hp_bitmap; ph < &phdr[l->l_phnum]; ++ph)
|
|
+ if (ph->p_type == PT_LOAD)
|
|
+ {
|
|
+ // relro_ph->p_vaddr + relro_ph->p_memsz > ph->p_vaddr + ph->p_memsz
|
|
+ // --> llvm lld adds no padding bits, but _dl_protect_relro will report an error
|
|
+ if (ph->p_vaddr <= relro_ph->p_vaddr && relro_ph->p_vaddr < ph->p_vaddr + ph->p_memsz &&
|
|
+ relro_ph->p_filesz < relro_ph->p_memsz)
|
|
+ {
|
|
+ if (__glibc_unlikely (GLRO(dl_debug_mask) & DL_DEBUG_FILES))
|
|
+ _dl_debug_printf("PT_LOAD[%d] contains LLVM LD layout style RELRO, this segment use normal page\n", i);
|
|
+
|
|
+ *hp_loadcmd = 0;
|
|
+ break;
|
|
+ }
|
|
+ else
|
|
+ {
|
|
+ hp_loadcmd++;
|
|
+ i++;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+/*
|
|
+ * | other sections | RELRO | non-RELRO |
|
|
+ * If RELRO is not at the beginning of the RW segment, the beginning position
|
|
+ * will also be mapped using 4KB pages, so count those beginning other sections into the relro len
|
|
+ * RELRO is by default at the beginning position for both GNU LD and ld.lld
|
|
+ * relro_len is GLRO(dl_pagesize) aligned
|
|
+ */
|
|
static __always_inline size_t
|
|
_get_relro_len(struct link_map *l, const struct loadcmd *c)
|
|
{
|
|
size_t relro_len = 0;
|
|
- if (c->mapstart == ALIGN_DOWN (l->l_relro_addr, GLRO(dl_pagesize)))
|
|
+
|
|
+ if (l->l_relro_size)
|
|
{
|
|
- relro_len = ALIGN_DOWN(l->l_relro_addr + l->l_relro_size, GLRO(dl_pagesize)) -
|
|
- ALIGN_DOWN(l->l_relro_addr, GLRO(dl_pagesize));
|
|
+ ElfW(Addr) relro_start = ALIGN_DOWN(l->l_relro_addr, GLRO(dl_pagesize));
|
|
+ ElfW(Addr) relro_end = ALIGN_DOWN((l->l_relro_addr + l->l_relro_size), GLRO(dl_pagesize));
|
|
+ if (c->mapstart <= relro_start && relro_start < c->allocend)
|
|
+ relro_len = relro_end - c->mapstart;
|
|
}
|
|
return relro_len;
|
|
}
|
|
--
|
|
2.33.0
|
|
|
|
|