```diff
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
 	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
 
-/* gap between mmap and stack */
-#define MIN_GAP (128*1024*1024UL)
-#define MAX_GAP ((STACK_TOP)/6*5)
-#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))
-
-static int mmap_is_legacy(struct rlimit *rlim_stack)
-{
-	if (current->personality & ADDR_COMPAT_LAYOUT)
-		return 1;
-
-	if (rlim_stack->rlim_cur == RLIM_INFINITY)
-		return 1;
-
-	return sysctl_legacy_va_layout;
-}
-
-static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
-{
-	unsigned long gap = rlim_stack->rlim_cur;
-	unsigned long pad = stack_guard_gap;
-
-	/* Account for stack randomization if necessary */
-	if (current->flags & PF_RANDOMIZE)
-		pad += (STACK_RND_MASK << PAGE_SHIFT);
-
-	/* Values close to RLIM_INFINITY can overflow. */
-	if (gap + pad > gap)
-		gap += pad;
-
-	if (gap < MIN_GAP)
-		gap = MIN_GAP;
-	else if (gap > MAX_GAP)
-		gap = MAX_GAP;
-
-	return PAGE_ALIGN(STACK_TOP - gap - rnd);
-}
-
 /*
  * We need to ensure that shared mappings are correctly aligned to
  * avoid aliasing issues with VIPT caches. We need to ensure that
```
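The removed mmap_base() derives the top-down mmap base from the stack rlimit: the rlimit becomes the gap below STACK_TOP, padded by the stack guard gap and (when PF_RANDOMIZE is set) the maximum stack randomization, then clamped between MIN_GAP and MAX_GAP before the mmap random offset is subtracted. A minimal user-space sketch of that arithmetic is below; STACK_TOP, PAGE_SIZE, and the sample rlimit/guard-gap/randomization values are illustrative assumptions, not the kernel's actual constants.

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define STACK_TOP	0xbf000000UL		/* assumed 32-bit user stack top */
#define MIN_GAP		(128 * 1024 * 1024UL)	/* as in the removed code */
#define MAX_GAP		((STACK_TOP) / 6 * 5)

/* Same clamping arithmetic as the removed mmap_base(), with the task state
 * (rlimit, guard gap, stack randomization) passed in explicitly. */
static unsigned long demo_mmap_base(unsigned long rnd, unsigned long rlim_cur,
				    unsigned long guard_gap, unsigned long stack_rnd)
{
	unsigned long gap = rlim_cur;
	unsigned long pad = guard_gap + stack_rnd;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

int main(void)
{
	/* 8 MiB stack rlimit, 1 MiB guard gap, 2 MiB stack randomization, no mmap rnd */
	printf("mmap_base = %#lx\n",
	       demo_mmap_base(0, 8UL << 20, 1UL << 20, 2UL << 20));
	return 0;
}
```

With an 8 MiB stack rlimit the padded gap stays below the 128 MiB MIN_GAP, so the base ends up exactly MIN_GAP below STACK_TOP.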
```diff
 	}
 
 	return addr;
-}
-
-unsigned long arch_mmap_rnd(void)
-{
-	unsigned long rnd;
-
-	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
-
-	return rnd << PAGE_SHIFT;
-}
-
-void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
-{
-	unsigned long random_factor = 0UL;
-
-	if (current->flags & PF_RANDOMIZE)
-		random_factor = arch_mmap_rnd();
-
-	if (mmap_is_legacy(rlim_stack)) {
-		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-		mm->get_unmapped_area = arch_get_unmapped_area;
-	} else {
-		mm->mmap_base = mmap_base(random_factor, rlim_stack);
-		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
-	}
 }
 
 /*
```
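The removed arch_mmap_rnd() keeps only mmap_rnd_bits of entropy from get_random_long() and shifts the result by PAGE_SHIFT, so the offset it returns is always page-aligned and bounded by 1 << mmap_rnd_bits pages. The stand-alone sketch below illustrates the same mask-and-shift; PAGE_SHIFT and MMAP_RND_BITS here are assumed values, and random() is only a user-space stand-in for get_random_long().

```c
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT	12
#define MMAP_RND_BITS	8	/* assumed; not necessarily the arch default */

/* Mask-and-shift as in the removed arch_mmap_rnd(). */
static unsigned long demo_mmap_rnd(void)
{
	unsigned long rnd;

	/* Keep only MMAP_RND_BITS of entropy, then convert pages to bytes. */
	rnd = (unsigned long)random() & ((1UL << MMAP_RND_BITS) - 1);

	return rnd << PAGE_SHIFT;
}

int main(void)
{
	srandom(1);
	for (int i = 0; i < 4; i++)
		printf("offset = %#lx\n", demo_mmap_rnd());
	return 0;
}
```

Whether that offset is added to TASK_UNMAPPED_BASE (legacy layout) or fed into mmap_base() (top-down layout) is what the removed arch_pick_mmap_layout() decided via mmap_is_legacy().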