Narrow bounds on kernel malloc allocations. #2261

Open

wants to merge 9 commits into base: dev
62 changes: 49 additions & 13 deletions sys/kern/kern_malloc.c
@@ -110,7 +110,7 @@ dtrace_malloc_probe_func_t __read_mostly dtrace_malloc_probe;
 #define MALLOC_DEBUG 1
 #endif
 
-#if defined(KASAN) || defined(DEBUG_REDZONE)
+#if defined(KASAN) || defined(DEBUG_REDZONE) || defined(__CHERI_PURE_CAPABILITY__)
 #define DEBUG_REDZONE_ARG_DEF , unsigned long osize
 #define DEBUG_REDZONE_ARG , osize
 #else
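
The `DEBUG_REDZONE_ARG_DEF`/`DEBUG_REDZONE_ARG` pair threads the caller's original request size into helpers such as malloc_large() only in builds that consume it; extending the guard is what makes `osize` available to the purecap CHERI bounds code below. A toy, self-contained sketch of the same pattern (`helper()` and the rounding step are hypothetical stand-ins, not kernel code):

```c
#include <stddef.h>
#include <stdio.h>

/* Same pattern as the kernel macros above; build with e.g.
 * -DDEBUG_REDZONE to grow the parameter list. */
#if defined(KASAN) || defined(DEBUG_REDZONE) || defined(__CHERI_PURE_CAPABILITY__)
#define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
#define	DEBUG_REDZONE_ARG	, osize
#else
#define	DEBUG_REDZONE_ARG_DEF
#define	DEBUG_REDZONE_ARG
#endif

/* Hypothetical stand-in for malloc_large(): the osize parameter
 * exists only in builds that define one of the guards. */
static void
helper(size_t size DEBUG_REDZONE_ARG_DEF)
{
#if defined(KASAN) || defined(DEBUG_REDZONE) || defined(__CHERI_PURE_CAPABILITY__)
	printf("rounded size %zu, original request %lu\n", size, osize);
#else
	printf("rounded size %zu\n", size);
#endif
}

int
main(void)
{
	size_t size = 100;
	unsigned long osize = size;	/* saved before any rounding */

	(void)osize;
	size = (size + 15) & ~(size_t)15;	/* stand-in for zone rounding */
	helper(size DEBUG_REDZONE_ARG);
	return (0);
}
```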
@@ -680,7 +680,7 @@ void *
         int indx;
         caddr_t va;
         uma_zone_t zone;
-#if defined(DEBUG_REDZONE) || defined(KASAN)
+#if defined(DEBUG_REDZONE) || defined(KASAN) || defined(__CHERI_PURE_CAPABILITY__)
         unsigned long osize = size;
 #endif

Review comment (Member), on the `osize` declaration:

> I admit a temptation to slap a __diagused on at least the local variables and make them unconditional.
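
A minimal sketch of that suggestion, not part of this PR: FreeBSD's `__diagused` annotation from sys/cdefs.h marks locals that some kernel configs only read in diagnostic code.

```c
	/*
	 * Hypothetical alternative to the #if ladder: always declare the
	 * local and mark it __diagused so configs that compile out every
	 * reader (KASAN, DEBUG_REDZONE, CHERI asserts) stay warning-free.
	 */
	unsigned long osize __diagused = size;
```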

@@ -696,6 +696,14 @@ void *
                 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
                     DEBUG_REDZONE_ARG));
 
+        /*
+         * XXX-AM: Imply M_ZERO to ensure that non-representable padding
+         * space is zero-initialized.
+         */
+        if (size != CHERI_REPRESENTABLE_LENGTH(size)) {
+                flags |= M_ZERO;
+        }
+
         if (size & KMEM_ZMASK)
                 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
         indx = kmemsize[size >> KMEM_ZSHIFT];
@@ -722,10 +730,13 @@ void *
         kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
 #endif
 #ifdef __CHERI_PURE_CAPABILITY__
-        KASSERT(cheri_getlen(va) <= CHERI_REPRESENTABLE_LENGTH(size),
-            ("Invalid bounds: expected %zx found %zx",
-            (size_t)CHERI_REPRESENTABLE_LENGTH(size),
-            (size_t)cheri_getlen(va)));
+        /* Intentionally inexact bounds allow for non-representable sizes */
+        va = cheri_setbounds(va, osize);
+        KASSERT(cheri_gettag(va),
+            ("Invalid malloc: %#p requested size %zx", va, osize));
+        KASSERT(cheri_getlen(va) == CHERI_REPRESENTABLE_LENGTH(osize),
+            ("Invalid malloc: %#p expected length %zx", va,
+            (size_t)CHERI_REPRESENTABLE_LENGTH(osize)));
 #endif
         return ((void *) va);
 }
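
Context for the new cheri_setbounds() call: CHERI capabilities can only encode bounds at certain granularities, so for a non-representable `osize` the returned capability may grant access up to CHERI_REPRESENTABLE_LENGTH(osize) bytes, which is exactly why the hunk above forces M_ZERO. A standalone purecap sketch of that rounding, using cheri_representable_length() from cheriintrin.h (the userspace spelling of the kernel macro); the printed values depend on the architecture:

```c
#include <cheriintrin.h>
#include <stddef.h>
#include <stdio.h>

int
main(void)
{
	/* An arbitrary size large enough to exercise bounds compression. */
	size_t req = 0x20001;
	size_t rep = cheri_representable_length(req);

	/*
	 * A capability bounded with cheri_bounds_set(ptr, req) may grant
	 * access up to rep bytes; the kernel zeroes that padding so it
	 * cannot leak another allocation's stale data.
	 */
	printf("requested %#zx -> representable %#zx (padding %#zx)\n",
	    req, rep, rep - req);
	return (0);
}
```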
@@ -765,7 +776,7 @@ malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
         caddr_t va;
         int domain;
         int indx;
-#if defined(KASAN) || defined(DEBUG_REDZONE)
+#if defined(KASAN) || defined(DEBUG_REDZONE) || defined(__CHERI_PURE_CAPABILITY__)
         unsigned long osize = size;
 #endif
 
@@ -781,6 +792,11 @@ malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
                 return (malloc_large(size, mtp, DOMAINSET_RR(), flags
                     DEBUG_REDZONE_ARG));
 
+        /* XXX-AM: see malloc() */
+        if (size != CHERI_REPRESENTABLE_LENGTH(size)) {
+                flags |= M_ZERO;
+        }
+
         vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
         do {
                 va = malloc_domain(&size, &indx, mtp, domain, flags);
@@ -803,6 +819,15 @@ malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
                 kmsan_mark(va, size, KMSAN_STATE_UNINIT);
                 kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
         }
 #endif
+#ifdef __CHERI_PURE_CAPABILITY__
+        /* Intentionally inexact bounds allow for non-representable sizes */
+        va = cheri_setbounds(va, osize);
+        KASSERT(cheri_gettag(va),
+            ("Invalid malloc: %#p requested size %zx", va, osize));
+        KASSERT(cheri_getlen(va) == CHERI_REPRESENTABLE_LENGTH(osize),
+            ("Invalid malloc: %#p expected length %zx", va,
+            (size_t)CHERI_REPRESENTABLE_LENGTH(osize)));
+#endif
         return (va);
 }
@@ -821,7 +846,7 @@ void *
 malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
     int flags)
 {
-#if defined(DEBUG_REDZONE) || defined(KASAN)
+#if defined(DEBUG_REDZONE) || defined(KASAN) || defined(__CHERI_PURE_CAPABILITY__)
         unsigned long osize = size;
 #endif
 #ifdef MALLOC_DEBUG
@@ -996,11 +1021,10 @@ _free(void *addr, struct malloc_type *mtp, bool dozero)
         case __predict_true(SLAB_COOKIE_SLAB_PTR):
                 size = zone->uz_size;
 #ifdef __CHERI_PURE_CAPABILITY__
-                if (__predict_false(cheri_getlen(addr) !=
-                    CHERI_REPRESENTABLE_LENGTH(size)))
-                        panic("Invalid bounds: expected %zx found %zx",
-                            (size_t)CHERI_REPRESENTABLE_LENGTH(size),
-                            cheri_getlen(addr));
+                addr = uma_zgrow_bounds(zone, addr);
+                KASSERT(cheri_getlen(addr) == size,
+                    ("vtozoneslab disagrees with uma_zgrow_bounds: %zx != %zx",
+                    cheri_getlen(addr), size));
 #endif
 #if defined(INVARIANTS) && !defined(KASAN)
                 free_save_type(addr, mtp, size);
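
The uma_zgrow_bounds() call replaces the old consistency panic: free() now receives a capability narrowed to the request size and must recover one covering the whole zone item before the memory can be recycled. Capability monotonicity forbids widening the caller's capability directly, so the allocator rederives it from a wider capability it already owns. An illustrative sketch of that rederivation, in userspace cheriintrin.h spellings (not the actual uma_zgrow_bounds() implementation):

```c
#include <cheriintrin.h>
#include <stddef.h>

/*
 * Illustrative only: rebuild a capability covering a whole item of
 * item_size bytes.  "parent" is a wider capability the allocator owns
 * (e.g. for the whole slab); "narrow" is the caller's bounded pointer,
 * assumed here to point at the start of the item.
 */
static void *
grow_bounds(void *parent, void *narrow, size_t item_size)
{
	void *wide;

	/* Address from the narrow capability, authority from the parent. */
	wide = cheri_address_set(parent, cheri_address_get(narrow));
	return (cheri_bounds_set_exact(wide, item_size));
}
```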
@@ -1074,6 +1098,10 @@ realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
 #endif
         unsigned long alloc;
         void *newaddr;
+#ifdef __CHERI_PURE_CAPABILITY__
+        uma_keg_t keg;
+        int i;
+#endif
 
         KASSERT(mtp->ks_version == M_VERSION,
             ("realloc: bad malloc type version"));
@@ -1112,7 +1140,15 @@ realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
         /* Get the size of the original block */
         switch (GET_SLAB_COOKIE(slab)) {
         case __predict_true(SLAB_COOKIE_SLAB_PTR):
+#ifdef __CHERI_PURE_CAPABILITY__
+                alloc = zone->uz_size;
+                keg = zone->uz_keg;
+                i = slab_item_index(slab, keg, addr);
+                addr = slab_item(slab, keg, i);
+                addr = cheri_setboundsexact(addr, alloc);
+#else
                 alloc = zone->uz_size;
+#endif
                 break;
         case SLAB_COOKIE_MALLOC_LARGE:
                 alloc = malloc_large_size(slab);
21 changes: 4 additions & 17 deletions sys/vm/uma_int.h
@@ -413,24 +413,11 @@ slab_tohashslab(uma_slab_t slab)
 static inline void *
 slab_data(uma_slab_t slab, uma_keg_t keg)
 {
-        void *data;
 
-        if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0) {
-                data = (void *)cheri_kern_setboundsexact(
-                    (uintptr_t)slab - keg->uk_pgoff, keg->uk_pgoff);
-#ifdef __CHERI_PURE_CAPABILITY__
-                KASSERT(cheri_gettag(data),
-                    ("Unrepresentable slab uk_pgoff %x", keg->uk_pgoff));
-#endif
-        } else {
-                data = slab_tohashslab(slab)->uhs_data;
-#ifdef __CHERI_PURE_CAPABILITY__
-                KASSERT(cheri_getlen(data) == keg->uk_ppera * PAGE_SIZE,
-                    ("Unexpected offpage slab capability: %#p, "
-                    "expected %d pages", data, keg->uk_ppera));
-#endif
-        }
-        return (data);
+        if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) == 0)
+                return ((void *)((uintptr_t)slab - keg->uk_pgoff));
+        else
+                return (slab_tohashslab(slab)->uhs_data);
 }

 static inline void *
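
The revert to plain pointer arithmetic is sound in purecap kernels because uintptr_t is capability-sized there: subtraction changes only the address while preserving the tag and the (wider) bounds, and after this PR the narrowing to the allocation size happens in kern_malloc.c rather than inside UMA. A small userspace illustration of that property (assuming a CHERI toolchain and cheriintrin.h):

```c
#include <cheriintrin.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	char *base = malloc(64);

	/* Arithmetic through uintptr_t moves the address but keeps the
	 * capability's tag and bounds intact. */
	char *p = (char *)((uintptr_t)(base + 32) - 16);

	printf("tag %d length %#zx offset %td\n",
	    (int)cheri_tag_get(p), cheri_length_get(p),
	    (ptrdiff_t)(p - base));
	free(base);
	return (0);
}
```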