hc
2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/drivers/clocksource/mmio.c
@@ -6,11 +6,30 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/hashtable.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/device.h>
 
-struct clocksource_mmio {
-	void __iomem *reg;
-	struct clocksource clksrc;
+struct clocksource_user_mapping {
+	struct mm_struct *mm;
+	struct clocksource_user_mmio *ucs;
+	void *regs;
+	struct hlist_node link;
+	atomic_t refs;
 };
+
+static struct class *user_mmio_class;
+static dev_t user_mmio_devt;
+
+static DEFINE_SPINLOCK(user_clksrcs_lock);
+static unsigned int user_clksrcs_count;
+static LIST_HEAD(user_clksrcs);
 
 static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
 {
@@ -38,6 +57,53 @@
 	return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }
 
+static inline struct clocksource_user_mmio *
+to_mmio_ucs(struct clocksource *c)
+{
+	return container_of(c, struct clocksource_user_mmio, mmio.clksrc);
+}
+
+u64 clocksource_dual_mmio_readl_up(struct clocksource *c)
+{
+	struct clocksource_user_mmio *ucs = to_mmio_ucs(c);
+	u32 upper, old_upper, lower;
+
+	upper = readl_relaxed(ucs->reg_upper);
+	do {
+		old_upper = upper;
+		lower = readl_relaxed(ucs->mmio.reg);
+		upper = readl_relaxed(ucs->reg_upper);
+	} while (upper != old_upper);
+
+	return (((u64)upper) << ucs->bits_lower) | lower;
+}
+
+u64 clocksource_dual_mmio_readw_up(struct clocksource *c)
+{
+	struct clocksource_user_mmio *ucs = to_mmio_ucs(c);
+	u16 upper, old_upper, lower;
+
+	upper = readw_relaxed(ucs->reg_upper);
+	do {
+		old_upper = upper;
+		lower = readw_relaxed(ucs->mmio.reg);
+		upper = readw_relaxed(ucs->reg_upper);
+	} while (upper != old_upper);
+
+	return (((u64)upper) << ucs->bits_lower) | lower;
+}
+
+static void mmio_base_init(const char *name, int rating, unsigned int bits,
+			   u64 (*read)(struct clocksource *),
+			   struct clocksource *cs)
+{
+	cs->name = name;
+	cs->rating = rating;
+	cs->read = read;
+	cs->mask = CLOCKSOURCE_MASK(bits);
+	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+}
+
 /**
  * clocksource_mmio_init - Initialize a simple mmio based clocksource
  * @base: Virtual address of the clock readout register
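
Note: the clocksource_dual_mmio_read*_up() helpers above implement the
usual split-counter protocol for a 64-bit counter exposed as two MMIO
words: sample the upper word, then the lower, then the upper again, and
retry until the two upper samples agree, so a lower-word rollover between
the reads cannot produce a torn value. A minimal userspace mirror of the
same retry loop, assuming reg_lower/reg_upper point into the pages this
driver exports and bits_lower comes from the CLKSRC_USER_MMIO_MAP ioctl
(an illustrative sketch, not part of the patch; all names are ours):

    #include <stdint.h>

    /* reg_lower/reg_upper would be obtained from the mapping that the
     * driver below sets up on behalf of the caller. */
    static uint64_t ucs_read_dual(const volatile uint32_t *reg_lower,
                                  const volatile uint32_t *reg_upper,
                                  unsigned int bits_lower)
    {
            uint32_t upper, old_upper, lower;

            upper = *reg_upper;
            do {
                    /* Retry if the upper word moved under us. */
                    old_upper = upper;
                    lower = *reg_lower;
                    upper = *reg_upper;
            } while (upper != old_upper);

            return ((uint64_t)upper << bits_lower) | lower;
    }
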
@@ -52,6 +118,7 @@
 	u64 (*read)(struct clocksource *))
 {
 	struct clocksource_mmio *cs;
+	int err;
 
 	if (bits > 64 || bits < 16)
 		return -EINVAL;
....@@ -61,12 +128,428 @@
61128 return -ENOMEM;
62129
63130 cs->reg = base;
64
- cs->clksrc.name = name;
65
- cs->clksrc.rating = rating;
66
- cs->clksrc.read = read;
67
- cs->clksrc.mask = CLOCKSOURCE_MASK(bits);
68
- cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
131
+ mmio_base_init(name, rating, bits, read, &cs->clksrc);
69132
70
- return clocksource_register_hz(&cs->clksrc, hz);
133
+ err = clocksource_register_hz(&cs->clksrc, hz);
134
+ if (err < 0) {
135
+ kfree(cs);
136
+ return err;
137
+ }
138
+
139
+ return err;
71140 }
72
-EXPORT_SYMBOL_GPL(clocksource_mmio_init);
141
+
+static void mmio_ucs_vmopen(struct vm_area_struct *vma)
+{
+	struct clocksource_user_mapping *mapping, *clone;
+	struct clocksource_user_mmio *ucs;
+	unsigned long h_key;
+
+	mapping = vma->vm_private_data;
+
+	if (mapping->mm == vma->vm_mm) {
+		atomic_inc(&mapping->refs);
+	} else if (mapping->mm) {
+		/*
+		 * We must be duplicating the original mm upon fork(),
+		 * clone the parent ucs mapping struct then rehash it
+		 * on the child mm key. If we cannot get memory for
+		 * this, mitigate the issue for users by preventing a
+		 * stale parent mm from being matched later on by a
+		 * process which reused its mm_struct (h_key is based
+		 * on this struct address).
+		 */
+		clone = kmalloc(sizeof(*mapping), GFP_KERNEL);
+		if (clone == NULL) {
+			pr_alert("out-of-memory for UCS mapping!\n");
+			atomic_inc(&mapping->refs);
+			mapping->mm = NULL;
+			return;
+		}
+		ucs = mapping->ucs;
+		clone->mm = vma->vm_mm;
+		clone->ucs = ucs;
+		clone->regs = mapping->regs;
+		atomic_set(&clone->refs, 1);
+		vma->vm_private_data = clone;
+		h_key = (unsigned long)vma->vm_mm / sizeof(*vma->vm_mm);
+		spin_lock(&ucs->lock);
+		hash_add(ucs->mappings, &clone->link, h_key);
+		spin_unlock(&ucs->lock);
+	}
+}
+
+static void mmio_ucs_vmclose(struct vm_area_struct *vma)
+{
+	struct clocksource_user_mapping *mapping;
+
+	mapping = vma->vm_private_data;
+
+	if (atomic_dec_and_test(&mapping->refs)) {
+		spin_lock(&mapping->ucs->lock);
+		hash_del(&mapping->link);
+		spin_unlock(&mapping->ucs->lock);
+		kfree(mapping);
+	}
+}
+
+static const struct vm_operations_struct mmio_ucs_vmops = {
+	.open = mmio_ucs_vmopen,
+	.close = mmio_ucs_vmclose,
+};
+
+static int mmio_ucs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long addr, upper_pfn, lower_pfn;
+	struct clocksource_user_mapping *mapping, *tmp;
+	struct clocksource_user_mmio *ucs;
+	unsigned int bits_upper;
+	unsigned long h_key;
+	pgprot_t prot;
+	size_t pages;
+	int err;
+
+	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	if (pages > 2)
+		return -EINVAL;
+
+	vma->vm_private_data = NULL;
+
+	ucs = file->private_data;
+	upper_pfn = ucs->phys_upper >> PAGE_SHIFT;
+	lower_pfn = ucs->phys_lower >> PAGE_SHIFT;
+	bits_upper = fls64(ucs->mmio.clksrc.mask) - ucs->bits_lower;
+	if (pages == 2 && (!bits_upper || upper_pfn == lower_pfn))
+		return -EINVAL;
+
+	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	mapping->mm = vma->vm_mm;
+	mapping->ucs = ucs;
+	mapping->regs = (void *)vma->vm_start;
+	atomic_set(&mapping->refs, 1);
+
+	vma->vm_private_data = mapping;
+	vma->vm_ops = &mmio_ucs_vmops;
+	prot = pgprot_noncached(vma->vm_page_prot);
+	addr = vma->vm_start;
+
+	err = remap_pfn_range(vma, addr, lower_pfn, PAGE_SIZE, prot);
+	if (err < 0)
+		goto fail;
+
+	if (pages > 1) {
+		addr += PAGE_SIZE;
+		err = remap_pfn_range(vma, addr, upper_pfn, PAGE_SIZE, prot);
+		if (err < 0)
+			goto fail;
+	}
+
+	h_key = (unsigned long)vma->vm_mm / sizeof(*vma->vm_mm);
+
+	spin_lock(&ucs->lock);
+	hash_for_each_possible(ucs->mappings, tmp, link, h_key) {
+		if (tmp->mm == vma->vm_mm) {
+			spin_unlock(&ucs->lock);
+			err = -EBUSY;
+			goto fail;
+		}
+	}
+	hash_add(ucs->mappings, &mapping->link, h_key);
+	spin_unlock(&ucs->lock);
+
+	return 0;
+fail:
+	kfree(mapping);
+
+	return err;
+}
+
+static long
+mmio_ucs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct clocksource_user_mapping *mapping;
+	struct clksrc_user_mmio_info __user *u;
+	unsigned long upper_pfn, lower_pfn;
+	struct clksrc_user_mmio_info info;
+	struct clocksource_user_mmio *ucs;
+	unsigned int bits_upper;
+	void __user *map_base;
+	unsigned long h_key;
+	size_t size;
+
+	u = (struct clksrc_user_mmio_info __user *)arg;
+
+	switch (cmd) {
+	case CLKSRC_USER_MMIO_MAP:
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	h_key = (unsigned long)current->mm / sizeof(*current->mm);
+
+	ucs = file->private_data;
+	upper_pfn = ucs->phys_upper >> PAGE_SHIFT;
+	lower_pfn = ucs->phys_lower >> PAGE_SHIFT;
+	bits_upper = fls64(ucs->mmio.clksrc.mask) - ucs->bits_lower;
+	size = PAGE_SIZE;
+	if (bits_upper && upper_pfn != lower_pfn)
+		size += PAGE_SIZE;
+
+	do {
+		spin_lock(&ucs->lock);
+		hash_for_each_possible(ucs->mappings, mapping, link, h_key) {
+			if (mapping->mm == current->mm) {
+				spin_unlock(&ucs->lock);
+				map_base = mapping->regs;
+				goto found;
+			}
+		}
+		spin_unlock(&ucs->lock);
+
+		map_base = (void *)
+			vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
+	} while (IS_ERR(map_base) && PTR_ERR(map_base) == -EBUSY);
+
+	if (IS_ERR(map_base))
+		return PTR_ERR(map_base);
+
+found:
+	info.type = ucs->type;
+	info.reg_lower = map_base + offset_in_page(ucs->phys_lower);
+	info.mask_lower = ucs->mask_lower;
+	info.bits_lower = ucs->bits_lower;
+	info.reg_upper = NULL;
+	if (ucs->phys_upper)
+		info.reg_upper = map_base + (size - PAGE_SIZE)
+			+ offset_in_page(ucs->phys_upper);
+	info.mask_upper = ucs->mask_upper;
+
+	return copy_to_user(u, &info, sizeof(*u)) ? -EFAULT : 0;
+}
+
+static int mmio_ucs_open(struct inode *inode, struct file *file)
+{
+	struct clocksource_user_mmio *ucs;
+
+	if (file->f_mode & FMODE_WRITE)
+		return -EINVAL;
+
+	ucs = container_of(inode->i_cdev, typeof(*ucs), cdev);
+	file->private_data = ucs;
+
+	return 0;
+}
+
+static const struct file_operations mmio_ucs_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = mmio_ucs_ioctl,
+	.open = mmio_ucs_open,
+	.mmap = mmio_ucs_mmap,
+};
+
+static int __init
+ucs_create_cdev(struct class *class, struct clocksource_user_mmio *ucs)
+{
+	int err;
+
+	ucs->dev = device_create(class, NULL,
+				 MKDEV(MAJOR(user_mmio_devt), ucs->id),
+				 ucs, "ucs/%d", ucs->id);
+	if (IS_ERR(ucs->dev))
+		return PTR_ERR(ucs->dev);
+
+	spin_lock_init(&ucs->lock);
+	hash_init(ucs->mappings);
+
+	cdev_init(&ucs->cdev, &mmio_ucs_fops);
+	ucs->cdev.kobj.parent = &ucs->dev->kobj;
+
+	err = cdev_add(&ucs->cdev, ucs->dev->devt, 1);
+	if (err < 0)
+		goto err_device_destroy;
+
+	return 0;
+
+err_device_destroy:
+	device_destroy(class, MKDEV(MAJOR(user_mmio_devt), ucs->id));
+	return err;
+}
+
+static unsigned long default_revmap(void *virt)
+{
+	struct vm_struct *vm;
+
+	vm = find_vm_area(virt);
+	if (!vm)
+		return 0;
+
+	return vm->phys_addr + (virt - vm->addr);
+}
+
392
+
393
+int __init clocksource_user_mmio_init(struct clocksource_user_mmio *ucs,
394
+ const struct clocksource_mmio_regs *regs,
395
+ unsigned long hz)
396
+{
397
+ static u64 (*user_types[CLKSRC_MMIO_TYPE_NR])(struct clocksource *) = {
398
+ [CLKSRC_MMIO_L_UP] = clocksource_mmio_readl_up,
399
+ [CLKSRC_MMIO_L_DOWN] = clocksource_mmio_readl_down,
400
+ [CLKSRC_DMMIO_L_UP] = clocksource_dual_mmio_readl_up,
401
+ [CLKSRC_MMIO_W_UP] = clocksource_mmio_readw_up,
402
+ [CLKSRC_MMIO_W_DOWN] = clocksource_mmio_readw_down,
403
+ [CLKSRC_DMMIO_W_UP] = clocksource_dual_mmio_readw_up,
404
+ };
405
+ const char *name = ucs->mmio.clksrc.name;
406
+ unsigned long phys_upper = 0, phys_lower;
407
+ enum clksrc_user_mmio_type type;
408
+ unsigned long (*revmap)(void *);
409
+ int err;
410
+
411
+ if (regs->bits_lower > 32 || regs->bits_lower < 16 ||
412
+ regs->bits_upper > 32)
413
+ return -EINVAL;
414
+
415
+ for (type = 0; type < ARRAY_SIZE(user_types); type++)
416
+ if (ucs->mmio.clksrc.read == user_types[type])
417
+ break;
418
+
419
+ if (type == ARRAY_SIZE(user_types))
420
+ return -EINVAL;
421
+
422
+ if (!(ucs->mmio.clksrc.flags & CLOCK_SOURCE_IS_CONTINUOUS))
423
+ return -EINVAL;
424
+
425
+ revmap = regs->revmap;
426
+ if (!revmap)
427
+ revmap = default_revmap;
428
+
429
+ phys_lower = revmap(regs->reg_lower);
430
+ if (!phys_lower)
431
+ return -EINVAL;
432
+
433
+ if (regs->bits_upper) {
434
+ phys_upper = revmap(regs->reg_upper);
435
+ if (!phys_upper)
436
+ return -EINVAL;
437
+ }
438
+
439
+ ucs->mmio.reg = regs->reg_lower;
440
+ ucs->type = type;
441
+ ucs->bits_lower = regs->bits_lower;
442
+ ucs->reg_upper = regs->reg_upper;
443
+ ucs->mask_lower = CLOCKSOURCE_MASK(regs->bits_lower);
444
+ ucs->mask_upper = CLOCKSOURCE_MASK(regs->bits_upper);
445
+ ucs->phys_lower = phys_lower;
446
+ ucs->phys_upper = phys_upper;
447
+ spin_lock_init(&ucs->lock);
448
+
449
+ err = clocksource_register_hz(&ucs->mmio.clksrc, hz);
450
+ if (err < 0)
451
+ return err;
452
+
453
+ spin_lock(&user_clksrcs_lock);
454
+
455
+ ucs->id = user_clksrcs_count++;
456
+ if (ucs->id < CLKSRC_USER_MMIO_MAX)
457
+ list_add_tail(&ucs->link, &user_clksrcs);
458
+
459
+ spin_unlock(&user_clksrcs_lock);
460
+
461
+ if (ucs->id >= CLKSRC_USER_MMIO_MAX) {
462
+ pr_warn("%s: Too many clocksources\n", name);
463
+ err = -EAGAIN;
464
+ goto fail;
465
+ }
466
+
467
+ ucs->mmio.clksrc.vdso_type = CLOCKSOURCE_VDSO_MMIO + ucs->id;
468
+
469
+ if (user_mmio_class) {
470
+ err = ucs_create_cdev(user_mmio_class, ucs);
471
+ if (err < 0) {
472
+ pr_warn("%s: Failed to add character device\n", name);
473
+ goto fail;
474
+ }
475
+ }
476
+
477
+ return 0;
478
+
479
+fail:
480
+ clocksource_unregister(&ucs->mmio.clksrc);
481
+
482
+ return err;
483
+}
484
+
+int __init clocksource_user_single_mmio_init(
+	void __iomem *base, const char *name,
+	unsigned long hz, int rating, unsigned int bits,
+	u64 (*read)(struct clocksource *))
+{
+	struct clocksource_user_mmio *ucs;
+	struct clocksource_mmio_regs regs;
+	int ret;
+
+	ucs = kzalloc(sizeof(*ucs), GFP_KERNEL);
+	if (!ucs)
+		return -ENOMEM;
+
+	mmio_base_init(name, rating, bits, read, &ucs->mmio.clksrc);
+	regs.reg_lower = base;
+	regs.reg_upper = NULL;
+	regs.bits_lower = bits;
+	regs.bits_upper = 0;
+	regs.revmap = NULL;
+
+	ret = clocksource_user_mmio_init(ucs, &regs, hz);
+	if (ret)
+		kfree(ucs);
+
+	return ret;
+}
+
+static int __init mmio_clksrc_chr_dev_init(void)
+{
+	struct clocksource_user_mmio *ucs;
+	struct class *class;
+	int err;
+
+	class = class_create(THIS_MODULE, "mmio_ucs");
+	if (IS_ERR(class)) {
+		pr_err("couldn't create user mmio clocksources class\n");
+		return PTR_ERR(class);
+	}
+
+	err = alloc_chrdev_region(&user_mmio_devt, 0, CLKSRC_USER_MMIO_MAX,
+				  "mmio_ucs");
+	if (err < 0) {
+		pr_err("failed to allocate user mmio clocksources character devices region\n");
+		goto err_class_destroy;
+	}
+
+	/*
+	 * Calling list_for_each_entry is safe here: clocksources are always
+	 * added to the list tail, never removed.
+	 */
+	spin_lock(&user_clksrcs_lock);
+	list_for_each_entry(ucs, &user_clksrcs, link) {
+		spin_unlock(&user_clksrcs_lock);
+
+		err = ucs_create_cdev(class, ucs);
+		if (err < 0)
+			pr_err("%s: Failed to add character device\n",
+			       ucs->mmio.clksrc.name);
+
+		spin_lock(&user_clksrcs_lock);
+	}
+	user_mmio_class = class;
+	spin_unlock(&user_clksrcs_lock);
+
+	return 0;
+
+err_class_destroy:
+	class_destroy(class);
+	return err;
+}
+device_initcall(mmio_clksrc_chr_dev_init);
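
Note: a timer driver opts into this machinery through
clocksource_user_mmio_init(), or through the single-register convenience
wrapper clocksource_user_single_mmio_init() added above. The base address
must come from ioremap() so that the default revmap handler (built on
find_vm_area()) can translate it back to a physical address for mmap;
drivers with unusual mappings can pass their own revmap callback in
struct clocksource_mmio_regs. A sketch for a hypothetical SoC timer with
a free-running 32-bit up-counter (EXAMPLE_* names, the base address and
the 24 MHz rate are all illustrative, not from this patch):

    #include <linux/clocksource.h>
    #include <linux/io.h>
    #include <linux/sizes.h>

    #define EXAMPLE_TIMER_PHYS  0x10001000UL  /* hypothetical register block */
    #define EXAMPLE_CNT_OFF     0x08          /* hypothetical counter offset */

    static int __init example_timer_clocksource_init(void)
    {
            void __iomem *base;

            /* ioremap() so default_revmap() can recover the phys address. */
            base = ioremap(EXAMPLE_TIMER_PHYS, SZ_4K);
            if (!base)
                    return -ENOMEM;

            /* 24 MHz, rating 300, 32 valid bits, counting up; a matching
             * /dev/ucs/<id> node appears once the class is created. */
            return clocksource_user_single_mmio_init(base + EXAMPLE_CNT_OFF,
                                                     "example-timer", 24000000,
                                                     300, 32,
                                                     clocksource_mmio_readl_up);
    }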
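
Note: from userspace, the intended sequence is to open the device node
read-only (mmio_ucs_open() rejects FMODE_WRITE), then issue
CLKSRC_USER_MMIO_MAP, which mmaps the counter page(s) into the caller and
returns the adjusted register pointers and masks. A sketch, assuming the
uapi header location for struct clksrc_user_mmio_info and the ioctl
number (not shown in this patch), and the /dev/ucs/0 path produced by the
"ucs/%d" device_create() name under standard udev:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/clocksource.h>  /* assumed home of the uapi ioctl bits */

    int main(void)
    {
            struct clksrc_user_mmio_info info;
            int fd;

            fd = open("/dev/ucs/0", O_RDONLY);
            if (fd < 0)
                    return 1;

            /* Maps one or two pages on our behalf, then fills in
             * reg_lower/reg_upper plus the masks and bit widths. */
            if (ioctl(fd, CLKSRC_USER_MMIO_MAP, &info) < 0)
                    return 1;

            /* Single-word counter: one masked read is enough. */
            printf("counter: %llu\n",
                   (unsigned long long)(*(volatile uint32_t *)info.reg_lower
                                        & info.mask_lower));
            return 0;
    }

For the dual-word CLKSRC_DMMIO_* types, info.reg_upper is non-NULL and
the retry loop sketched after the second hunk applies.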