@@ -14,6 +14,7 @@
 #include <linux/smp.h>
 #include <linux/cpu.h>
 #include <linux/sched.h>
+#include <linux/sched/mm.h>
 #include <linux/irq.h>
 #include <asm/cpuinfo.h>
 #include <asm/mmu_context.h>
@@ -113,7 +114,7 @@
          * All kernel threads share the same mm context; grab a
          * reference and switch to it.
          */
-        atomic_inc(&mm->mm_count);
+        mmgrab(mm);
         current->active_mm = mm;
         cpumask_set_cpu(cpu, mm_cpumask(mm));
 
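The hunk above, together with the new <linux/sched/mm.h> include, replaces the open-coded reference bump with the mmgrab() helper. The two are equivalent here: when helpers of this kind were introduced, mmgrab() was a thin inline wrapper around the same atomic increment, taking a reference on the mm_struct itself (mm_count) without pinning the address space (mm_users). A simplified sketch of its definition; the exact form varies across kernel versions:

  static inline void mmgrab(struct mm_struct *mm)
  {
          /* Pin the mm_struct allocation (mm_count), not the
           * address space (mm_users). */
          atomic_inc(&mm->mm_count);
  }

Using the named helper documents intent and gives a single place to change the refcounting scheme later.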
@@ -133,8 +134,6 @@
         set_cpu_online(cpu, true);
 
         local_irq_enable();
-
-        preempt_disable();
         /*
          * OK, it's off to the idle thread for us
          */
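Dropping the explicit preempt_disable() before handing off to the idle loop presumably reflects the tree-wide cleanup in which secondary CPUs start their idle task with preemption already disabled by generic code, making the call here redundant; the commit message is the authoritative rationale.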
@@ -218,30 +217,101 @@
         local_flush_tlb_all();
 }
 
+static inline void ipi_flush_tlb_mm(void *info)
+{
+        struct mm_struct *mm = (struct mm_struct *)info;
+
+        local_flush_tlb_mm(mm);
+}
+
+static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
+{
+        unsigned int cpuid;
+
+        if (cpumask_empty(cmask))
+                return;
+
+        cpuid = get_cpu();
+
+        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+                /* local cpu is the only cpu present in cpumask */
+                local_flush_tlb_mm(mm);
+        } else {
+                on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
+        }
+        put_cpu();
+}
+
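smp_flush_tlb_mm() avoids a cross-CPU interrupt whenever it can. cpumask_any_but() returns nr_cpu_ids (or greater) when no CPU other than the current one is set in the mask, so in that case the flush runs purely locally. The get_cpu()/put_cpu() pair disables preemption around the check, so the task cannot migrate to another CPU between choosing the fast path and performing the flush. On the slow path, on_each_cpu_mask() with wait=1 runs the handler on every CPU in the mask, including the local one, and returns only once all of them have completed.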
+struct flush_tlb_data {
+        unsigned long addr1;
+        unsigned long addr2;
+};
+
+static inline void ipi_flush_tlb_page(void *info)
+{
+        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+        local_flush_tlb_page(NULL, fd->addr1);
+}
+
+static inline void ipi_flush_tlb_range(void *info)
+{
+        struct flush_tlb_data *fd = (struct flush_tlb_data *)info;
+
+        local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
+}
+
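IPI handlers receive a single void * argument, so the page and range variants bundle their one or two addresses into a flush_tlb_data struct. Passing NULL as the vma argument of the local_flush_tlb_* calls works because, presumably as on several other architectures, the OpenRISC local flush primitives key off the address alone and never dereference the vma.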
+static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
+                                unsigned long end)
+{
+        unsigned int cpuid;
+
+        if (cpumask_empty(cmask))
+                return;
+
+        cpuid = get_cpu();
+
+        if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+                /* local cpu is the only cpu present in cpumask */
+                if ((end - start) <= PAGE_SIZE)
+                        local_flush_tlb_page(NULL, start);
+                else
+                        local_flush_tlb_range(NULL, start, end);
+        } else {
+                struct flush_tlb_data fd;
+
+                fd.addr1 = start;
+                fd.addr2 = end;
+
+                if ((end - start) <= PAGE_SIZE)
+                        on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
+                else
+                        on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
+        }
+        put_cpu();
+}
+
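smp_flush_tlb_range() mirrors the local-only fast path of the mm variant and adds a size check: a range spanning at most one page is flushed with the cheaper single-page primitive on both paths. Handing the on-stack fd to on_each_cpu_mask() is safe only because the final argument (wait=1) makes the call synchronous, so the stack frame outlives every remote handler that dereferences it.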
 void flush_tlb_all(void)
 {
         on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
-/*
- * FIXME: implement proper functionality instead of flush_tlb_all.
- * *But*, as things currently stands, the local_tlb_flush_* functions will
- * all boil down to local_tlb_flush_all anyway.
- */
 void flush_tlb_mm(struct mm_struct *mm)
 {
-        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+        smp_flush_tlb_mm(mm_cpumask(mm), mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+        smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
                 unsigned long start, unsigned long end)
 {
-        on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+        const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
+                                          : cpu_online_mask;
+        smp_flush_tlb_range(cmask, start, end);
 }
 
 /* Instruction cache invalidate - performed on each cpu */
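These rewrites resolve the FIXME removed above: instead of broadcasting a full TLB flush to every CPU, each operation is now scoped to the CPUs recorded in the mm's mm_cpumask() and, where possible, to the affected address range. flush_tlb_page() reuses the range path for a single page, and flush_tlb_range() accepts a NULL vma by falling back to cpu_online_mask, presumably for callers that flush a range without an associated vma.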