forked from ~ljy/RK356X_SDK_RELEASE

Author: hc
Date:   2023-12-11
Commit: 072de836f53be56a70cecf70b43ae43b7ce17376
File:   kernel/drivers/hwtracing/intel_th/msu.c
@@ -17,40 +17,63 @@
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/io.h>
+#include <linux/workqueue.h>
 #include <linux/dma-mapping.h>

 #ifdef CONFIG_X86
 #include <asm/set_memory.h>
 #endif

+#include <linux/intel_th.h>
 #include "intel_th.h"
 #include "msu.h"

 #define msc_dev(x) (&(x)->thdev->dev)

-/**
- * struct msc_block - multiblock mode block descriptor
- * @bdesc: pointer to hardware descriptor (beginning of the block)
- * @addr: physical address of the block
+/*
+ * Lockout state transitions:
+ *   READY -> INUSE -+-> LOCKED -+-> READY -> etc.
+ *                   \-----------/
+ * WIN_READY: window can be used by HW
+ * WIN_INUSE: window is in use
+ * WIN_LOCKED: window is filled up and is being processed by the buffer
+ *             handling code
+ *
+ * All state transitions happen automatically, except for the LOCKED->READY,
+ * which needs to be signalled by the buffer code by calling
+ * intel_th_msc_window_unlock().
+ *
+ * When the interrupt handler has to switch to the next window, it checks
+ * whether it's READY, and if it is, it performs the switch and tracing
+ * continues. If it's LOCKED, it stops the trace.
  */
-struct msc_block {
-        struct msc_block_desc *bdesc;
-        dma_addr_t addr;
+enum lockout_state {
+        WIN_READY = 0,
+        WIN_INUSE,
+        WIN_LOCKED
 };

 /**
  * struct msc_window - multiblock mode window descriptor
  * @entry: window list linkage (msc::win_list)
  * @pgoff: page offset into the buffer that this window starts at
+ * @lockout: lockout state, see comment above
+ * @lo_lock: lockout state serialization
  * @nr_blocks: number of blocks (pages) in this window
- * @block: array of block descriptors
+ * @nr_segs: number of segments in this window (<= @nr_blocks)
+ * @_sgt: array of block descriptors
+ * @sgt: array of block descriptors
  */
 struct msc_window {
        struct list_head entry;
        unsigned long pgoff;
+       enum lockout_state lockout;
+       spinlock_t lo_lock;
        unsigned int nr_blocks;
+       unsigned int nr_segs;
        struct msc *msc;
-       struct msc_block block[0];
+       struct sg_table _sgt;
+       struct sg_table *sgt;
 };

 /**
@@ -72,8 +95,8 @@
        struct msc_window *start_win;
        struct msc_window *win;
        unsigned long offset;
-       int start_block;
-       int block;
+       struct scatterlist *start_block;
+       struct scatterlist *block;
        unsigned int block_off;
        unsigned int wrap_count;
        unsigned int eof;
@@ -83,8 +106,11 @@
  * struct msc - MSC device representation
  * @reg_base: register window base address
  * @thdev: intel_th_device pointer
+ * @mbuf: MSU buffer, if assigned
+ * @mbuf_priv: MSU buffer's private data, if @mbuf
  * @win_list: list of windows in multiblock mode
  * @single_sgt: single mode buffer
+ * @cur_win: current window
  * @nr_pages: total number of pages allocated for this buffer
  * @single_sz: amount of data in single mode
  * @single_wrap: single mode wrap occurred
@@ -102,15 +128,24 @@
  */
 struct msc {
        void __iomem *reg_base;
+       void __iomem *msu_base;
        struct intel_th_device *thdev;

+       const struct msu_buffer *mbuf;
+       void *mbuf_priv;
+
+       struct work_struct work;
        struct list_head win_list;
        struct sg_table single_sgt;
+       struct msc_window *cur_win;
+       struct msc_window *switch_on_unlock;
        unsigned long nr_pages;
        unsigned long single_sz;
        unsigned int single_wrap : 1;
        void *base;
        dma_addr_t base_addr;
+       u32 orig_addr;
+       u32 orig_sz;

        /* <0: no buffer, 0: no users, >0: active users */
        atomic_t user_count;
@@ -120,13 +155,112 @@

        struct list_head iter_list;

+       bool stop_on_full;
+
        /* config */
        unsigned int enabled : 1,
-                    wrap : 1;
+                    wrap : 1,
+                    do_irq : 1,
+                    multi_is_broken : 1;
        unsigned int mode;
        unsigned int burst_len;
        unsigned int index;
 };
+
+static LIST_HEAD(msu_buffer_list);
+static DEFINE_MUTEX(msu_buffer_mutex);
+
+/**
+ * struct msu_buffer_entry - internal MSU buffer bookkeeping
+ * @entry: link to msu_buffer_list
+ * @mbuf: MSU buffer object
+ * @owner: module that provides this MSU buffer
+ */
+struct msu_buffer_entry {
+        struct list_head entry;
+        const struct msu_buffer *mbuf;
+        struct module *owner;
+};
+
+static struct msu_buffer_entry *__msu_buffer_entry_find(const char *name)
+{
+        struct msu_buffer_entry *mbe;
+
+        lockdep_assert_held(&msu_buffer_mutex);
+
+        list_for_each_entry(mbe, &msu_buffer_list, entry) {
+                if (!strcmp(mbe->mbuf->name, name))
+                        return mbe;
+        }
+
+        return NULL;
+}
+
+static const struct msu_buffer *
+msu_buffer_get(const char *name)
+{
+        struct msu_buffer_entry *mbe;
+
+        mutex_lock(&msu_buffer_mutex);
+        mbe = __msu_buffer_entry_find(name);
+        if (mbe && !try_module_get(mbe->owner))
+                mbe = NULL;
+        mutex_unlock(&msu_buffer_mutex);
+
+        return mbe ? mbe->mbuf : NULL;
+}
+
+static void msu_buffer_put(const struct msu_buffer *mbuf)
+{
+        struct msu_buffer_entry *mbe;
+
+        mutex_lock(&msu_buffer_mutex);
+        mbe = __msu_buffer_entry_find(mbuf->name);
+        if (mbe)
+                module_put(mbe->owner);
+        mutex_unlock(&msu_buffer_mutex);
+}
+
+int intel_th_msu_buffer_register(const struct msu_buffer *mbuf,
+                                 struct module *owner)
+{
+        struct msu_buffer_entry *mbe;
+        int ret = 0;
+
+        mbe = kzalloc(sizeof(*mbe), GFP_KERNEL);
+        if (!mbe)
+                return -ENOMEM;
+
+        mutex_lock(&msu_buffer_mutex);
+        if (__msu_buffer_entry_find(mbuf->name)) {
+                ret = -EEXIST;
+                kfree(mbe);
+                goto unlock;
+        }
+
+        mbe->mbuf = mbuf;
+        mbe->owner = owner;
+        list_add_tail(&mbe->entry, &msu_buffer_list);
+unlock:
+        mutex_unlock(&msu_buffer_mutex);

+        return ret;
+}
+EXPORT_SYMBOL_GPL(intel_th_msu_buffer_register);
+
+void intel_th_msu_buffer_unregister(const struct msu_buffer *mbuf)
+{
+        struct msu_buffer_entry *mbe;
+
+        mutex_lock(&msu_buffer_mutex);
+        mbe = __msu_buffer_entry_find(mbuf->name);
+        if (mbe) {
+                list_del(&mbe->entry);
+                kfree(mbe);
+        }
+        mutex_unlock(&msu_buffer_mutex);
+}
+EXPORT_SYMBOL_GPL(intel_th_msu_buffer_unregister);

 static inline bool msc_block_is_empty(struct msc_block_desc *bdesc)
 {
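
The block above is the whole registry that external MSU buffer "sinks" register against. As a minimal sketch (not part of this commit) of what such an out-of-tree sink module could look like: the callback names and shapes below are inferred from the call sites later in this diff (mbuf->assign/unassign/alloc_window/free_window/ready) and are assumed to match the struct msu_buffer declared by the new <linux/intel_th.h> include.

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/intel_th.h>

/* Whatever assign() returns is handed back to every other callback as priv. */
static void *sketch_assign(struct device *dev, int *mode)
{
        /* *mode selects the MSC mode the sink wants to run in */
        return kzalloc(sizeof(long), GFP_KERNEL);
}

static void sketch_unassign(void *priv)
{
        kfree(priv);
}

/* Fill *sgt with DMA-capable segments and return their count; <= 0 fails. */
static int sketch_alloc_window(void *priv, struct sg_table **sgt, size_t size)
{
        return -ENOMEM; /* a real sink allocates its window memory here */
}

static void sketch_free_window(void *priv, struct sg_table *sgt)
{
}

/* Called when a window has been written; see the interrupt path below. */
static int sketch_ready(void *priv, struct sg_table *sgt, size_t bytes)
{
        return 0;
}

static const struct msu_buffer sketch_mbuf = {
        .name         = "sketch",
        .assign       = sketch_assign,
        .unassign     = sketch_unassign,
        .alloc_window = sketch_alloc_window,
        .free_window  = sketch_free_window,
        .ready        = sketch_ready,
};

static int __init sketch_init(void)
{
        return intel_th_msu_buffer_register(&sketch_mbuf, THIS_MODULE);
}
module_init(sketch_init);

static void __exit sketch_exit(void)
{
        intel_th_msu_buffer_unregister(&sketch_mbuf);
}
module_exit(sketch_exit);

MODULE_LICENSE("GPL");

A sink registered this way is selected by writing its name to the MSC's mode attribute; mode_store() further down resolves the name through msu_buffer_get(), which takes a reference on the owning module.
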
@@ -141,72 +275,25 @@
        return false;
 }

-/**
- * msc_oldest_window() - locate the window with oldest data
- * @msc: MSC device
- *
- * This should only be used in multiblock mode. Caller should hold the
- * msc::user_count reference.
- *
- * Return: the oldest window with valid data
- */
-static struct msc_window *msc_oldest_window(struct msc *msc)
+static inline struct scatterlist *msc_win_base_sg(struct msc_window *win)
 {
-        struct msc_window *win;
-        u32 reg = ioread32(msc->reg_base + REG_MSU_MSC0NWSA);
-        unsigned long win_addr = (unsigned long)reg << PAGE_SHIFT;
-        unsigned int found = 0;
-
-        if (list_empty(&msc->win_list))
-                return NULL;
-
-        /*
-         * we might need a radix tree for this, depending on how
-         * many windows a typical user would allocate; ideally it's
-         * something like 2, in which case we're good
-         */
-        list_for_each_entry(win, &msc->win_list, entry) {
-                if (win->block[0].addr == win_addr)
-                        found++;
-
-                /* skip the empty ones */
-                if (msc_block_is_empty(win->block[0].bdesc))
-                        continue;
-
-                if (found)
-                        return win;
-        }
-
-        return list_entry(msc->win_list.next, struct msc_window, entry);
+        return win->sgt->sgl;
 }

-/**
- * msc_win_oldest_block() - locate the oldest block in a given window
- * @win: window to look at
- *
- * Return: index of the block with the oldest data
- */
-static unsigned int msc_win_oldest_block(struct msc_window *win)
+static inline struct msc_block_desc *msc_win_base(struct msc_window *win)
 {
-        unsigned int blk;
-        struct msc_block_desc *bdesc = win->block[0].bdesc;
+        return sg_virt(msc_win_base_sg(win));
+}

-        /* without wrapping, first block is the oldest */
-        if (!msc_block_wrapped(bdesc))
-                return 0;
+static inline dma_addr_t msc_win_base_dma(struct msc_window *win)
+{
+        return sg_dma_address(msc_win_base_sg(win));
+}

-        /*
-         * with wrapping, last written block contains both the newest and the
-         * oldest data for this window.
-         */
-        for (blk = 0; blk < win->nr_blocks; blk++) {
-                bdesc = win->block[blk].bdesc;
-
-                if (msc_block_last_written(bdesc))
-                        return blk;
-        }
-
-        return 0;
+static inline unsigned long
+msc_win_base_pfn(struct msc_window *win)
+{
+        return PFN_DOWN(msc_win_base_dma(win));
 }

 /**
@@ -228,22 +315,126 @@
 static struct msc_window *msc_next_window(struct msc_window *win)
 {
        if (msc_is_last_win(win))
-                return list_entry(win->msc->win_list.next, struct msc_window,
-                                  entry);
+                return list_first_entry(&win->msc->win_list, struct msc_window,
+                                        entry);

-        return list_entry(win->entry.next, struct msc_window, entry);
+        return list_next_entry(win, entry);
+}
+
+static size_t msc_win_total_sz(struct msc_window *win)
+{
+        struct scatterlist *sg;
+        unsigned int blk;
+        size_t size = 0;
+
+        for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+                struct msc_block_desc *bdesc = sg_virt(sg);
+
+                if (msc_block_wrapped(bdesc))
+                        return (size_t)win->nr_blocks << PAGE_SHIFT;
+
+                size += msc_total_sz(bdesc);
+                if (msc_block_last_written(bdesc))
+                        break;
+        }
+
+        return size;
+}
+
+/**
+ * msc_find_window() - find a window matching a given sg_table
+ * @msc: MSC device
+ * @sgt: SG table of the window
+ * @nonempty: skip over empty windows
+ *
+ * Return: MSC window structure pointer or NULL if the window
+ * could not be found.
+ */
+static struct msc_window *
+msc_find_window(struct msc *msc, struct sg_table *sgt, bool nonempty)
+{
+        struct msc_window *win;
+        unsigned int found = 0;
+
+        if (list_empty(&msc->win_list))
+                return NULL;
+
+        /*
+         * we might need a radix tree for this, depending on how
+         * many windows a typical user would allocate; ideally it's
+         * something like 2, in which case we're good
+         */
+        list_for_each_entry(win, &msc->win_list, entry) {
+                if (win->sgt == sgt)
+                        found++;
+
+                /* skip the empty ones */
+                if (nonempty && msc_block_is_empty(msc_win_base(win)))
+                        continue;
+
+                if (found)
+                        return win;
+        }
+
+        return NULL;
+}
+
+/**
+ * msc_oldest_window() - locate the window with oldest data
+ * @msc: MSC device
+ *
+ * This should only be used in multiblock mode. Caller should hold the
+ * msc::user_count reference.
+ *
+ * Return: the oldest window with valid data
+ */
+static struct msc_window *msc_oldest_window(struct msc *msc)
+{
+        struct msc_window *win;
+
+        if (list_empty(&msc->win_list))
+                return NULL;
+
+        win = msc_find_window(msc, msc_next_window(msc->cur_win)->sgt, true);
+        if (win)
+                return win;
+
+        return list_first_entry(&msc->win_list, struct msc_window, entry);
+}
+
+/**
+ * msc_win_oldest_sg() - locate the oldest block in a given window
+ * @win: window to look at
+ *
+ * Return: scatterlist entry of the block with the oldest data
+ */
+static struct scatterlist *msc_win_oldest_sg(struct msc_window *win)
+{
+        unsigned int blk;
+        struct scatterlist *sg;
+        struct msc_block_desc *bdesc = msc_win_base(win);
+
+        /* without wrapping, first block is the oldest */
+        if (!msc_block_wrapped(bdesc))
+                return msc_win_base_sg(win);
+
+        /*
+         * with wrapping, last written block contains both the newest and the
+         * oldest data for this window.
+         */
+        for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+                struct msc_block_desc *bdesc = sg_virt(sg);
+
+                if (msc_block_last_written(bdesc))
+                        return sg;
+        }
+
+        return msc_win_base_sg(win);
 }

 static struct msc_block_desc *msc_iter_bdesc(struct msc_iter *iter)
 {
-        return iter->win->block[iter->block].bdesc;
-}
-
-static void msc_iter_init(struct msc_iter *iter)
-{
-        memset(iter, 0, sizeof(*iter));
-        iter->start_block = -1;
-        iter->block = -1;
+        return sg_virt(iter->block);
 }

 static struct msc_iter *msc_iter_install(struct msc *msc)
@@ -268,7 +459,6 @@
                goto unlock;
        }

-        msc_iter_init(iter);
        iter->msc = msc;

        list_add_tail(&iter->entry, &msc->iter_list);
@@ -289,10 +479,10 @@

 static void msc_iter_block_start(struct msc_iter *iter)
 {
-        if (iter->start_block != -1)
+        if (iter->start_block)
                return;

-        iter->start_block = msc_win_oldest_block(iter->win);
+        iter->start_block = msc_win_oldest_sg(iter->win);
        iter->block = iter->start_block;
        iter->wrap_count = 0;

@@ -316,7 +506,7 @@
                return -EINVAL;

        iter->win = iter->start_win;
-        iter->start_block = -1;
+        iter->start_block = NULL;

        msc_iter_block_start(iter);

@@ -326,7 +516,7 @@
 static int msc_iter_win_advance(struct msc_iter *iter)
 {
        iter->win = msc_next_window(iter->win);
-        iter->start_block = -1;
+        iter->start_block = NULL;

        if (iter->win == iter->start_win) {
                iter->eof++;
@@ -356,8 +546,10 @@
                return msc_iter_win_advance(iter);

        /* block advance */
-        if (++iter->block == iter->win->nr_blocks)
-                iter->block = 0;
+        if (sg_is_last(iter->block))
+                iter->block = msc_win_base_sg(iter->win);
+        else
+                iter->block = sg_next(iter->block);

        /* no wrapping, sanity check in case there is no last written block */
        if (!iter->wrap_count && iter->block == iter->start_block)
@@ -462,20 +654,102 @@
 static void msc_buffer_clear_hw_header(struct msc *msc)
 {
        struct msc_window *win;
+       struct scatterlist *sg;

        list_for_each_entry(win, &msc->win_list, entry) {
                unsigned int blk;
                size_t hw_sz = sizeof(struct msc_block_desc) -
                               offsetof(struct msc_block_desc, hw_tag);

-                for (blk = 0; blk < win->nr_blocks; blk++) {
-                        struct msc_block_desc *bdesc = win->block[blk].bdesc;
+                for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+                        struct msc_block_desc *bdesc = sg_virt(sg);

                        memset(&bdesc->hw_tag, 0, hw_sz);
                }
        }
 }

+static int intel_th_msu_init(struct msc *msc)
+{
+        u32 mintctl, msusts;
+
+        if (!msc->do_irq)
+                return 0;
+
+        if (!msc->mbuf)
+                return 0;
+
+        mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
+        mintctl |= msc->index ? M1BLIE : M0BLIE;
+        iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
+        if (mintctl != ioread32(msc->msu_base + REG_MSU_MINTCTL)) {
+                dev_info(msc_dev(msc), "MINTCTL ignores writes: no usable interrupts\n");
+                msc->do_irq = 0;
+                return 0;
+        }
+
+        msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
+        iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
+
+        return 0;
+}
+
+static void intel_th_msu_deinit(struct msc *msc)
+{
+        u32 mintctl;
+
+        if (!msc->do_irq)
+                return;
+
+        mintctl = ioread32(msc->msu_base + REG_MSU_MINTCTL);
+        mintctl &= msc->index ? ~M1BLIE : ~M0BLIE;
+        iowrite32(mintctl, msc->msu_base + REG_MSU_MINTCTL);
+}
+
+static int msc_win_set_lockout(struct msc_window *win,
+                               enum lockout_state expect,
+                               enum lockout_state new)
+{
+        enum lockout_state old;
+        unsigned long flags;
+        int ret = 0;
+
+        if (!win->msc->mbuf)
+                return 0;
+
+        spin_lock_irqsave(&win->lo_lock, flags);
+        old = win->lockout;
+
+        if (old != expect) {
+                ret = -EINVAL;
+                goto unlock;
+        }
+
+        win->lockout = new;
+
+        if (old == expect && new == WIN_LOCKED)
+                atomic_inc(&win->msc->user_count);
+        else if (old == expect && old == WIN_LOCKED)
+                atomic_dec(&win->msc->user_count);
+
+unlock:
+        spin_unlock_irqrestore(&win->lo_lock, flags);
+
+        if (ret) {
+                if (expect == WIN_READY && old == WIN_LOCKED)
+                        return -EBUSY;
+
+                /* from intel_th_msc_window_unlock(), don't warn if not locked */
+                if (expect == WIN_LOCKED && old == new)
+                        return 0;
+
+                dev_warn_ratelimited(msc_dev(win->msc),
+                                     "expected lockout state %d, got %d\n",
+                                     expect, old);
+        }
+
+        return ret;
+}
 /**
  * msc_configure() - set up MSC hardware
  * @msc: the MSC device to configure
@@ -493,8 +767,15 @@
        if (msc->mode > MSC_MODE_MULTI)
                return -EINVAL;

-        if (msc->mode == MSC_MODE_MULTI)
+        if (msc->mode == MSC_MODE_MULTI) {
+                if (msc_win_set_lockout(msc->cur_win, WIN_READY, WIN_INUSE))
+                        return -EBUSY;
+
                msc_buffer_clear_hw_header(msc);
+        }
+
+        msc->orig_addr = ioread32(msc->reg_base + REG_MSU_MSC0BAR);
+        msc->orig_sz = ioread32(msc->reg_base + REG_MSU_MSC0SIZE);

        reg = msc->base_addr >> PAGE_SHIFT;
        iowrite32(reg, msc->reg_base + REG_MSU_MSC0BAR);
@@ -516,10 +797,14 @@

        iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);

+        intel_th_msu_init(msc);
+
        msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
        intel_th_trace_enable(msc->thdev);
        msc->enabled = 1;

+        if (msc->mbuf && msc->mbuf->activate)
+                msc->mbuf->activate(msc->mbuf_priv);

        return 0;
 }
@@ -533,23 +818,21 @@
  */
 static void msc_disable(struct msc *msc)
 {
-        unsigned long count;
+        struct msc_window *win = msc->cur_win;
        u32 reg;

        lockdep_assert_held(&msc->buf_mutex);

+        if (msc->mode == MSC_MODE_MULTI)
+                msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
+
+        if (msc->mbuf && msc->mbuf->deactivate)
+                msc->mbuf->deactivate(msc->mbuf_priv);
+        intel_th_msu_deinit(msc);
        intel_th_trace_disable(msc->thdev);

-        for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
-             count && !(reg & MSCSTS_PLE); count--) {
-                reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
-                cpu_relax();
-        }
-
-        if (!count)
-                dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
-
        if (msc->mode == MSC_MODE_SINGLE) {
+                reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
                msc->single_wrap = !!(reg & MSCSTS_WRAPSTAT);

                reg = ioread32(msc->reg_base + REG_MSU_MSC0MWP);
@@ -561,16 +844,25 @@
        reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
        reg &= ~MSC_EN;
        iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
+
+        if (msc->mbuf && msc->mbuf->ready)
+                msc->mbuf->ready(msc->mbuf_priv, win->sgt,
+                                 msc_win_total_sz(win));
+
        msc->enabled = 0;

-        iowrite32(0, msc->reg_base + REG_MSU_MSC0BAR);
-        iowrite32(0, msc->reg_base + REG_MSU_MSC0SIZE);
+        iowrite32(msc->orig_addr, msc->reg_base + REG_MSU_MSC0BAR);
+        iowrite32(msc->orig_sz, msc->reg_base + REG_MSU_MSC0SIZE);

        dev_dbg(msc_dev(msc), "MSCnNWSA: %08x\n",
                ioread32(msc->reg_base + REG_MSU_MSC0NWSA));

        reg = ioread32(msc->reg_base + REG_MSU_MSC0STS);
        dev_dbg(msc_dev(msc), "MSCnSTS: %08x\n", reg);
+
+        reg = ioread32(msc->reg_base + REG_MSU_MSUSTS);
+        reg &= msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+        iowrite32(reg, msc->reg_base + REG_MSU_MSUSTS);
 }

 static int intel_th_msc_activate(struct intel_th_device *thdev)
@@ -698,6 +990,79 @@
        return virt_to_page(msc->base + (pgoff << PAGE_SHIFT));
 }

+static int __msc_buffer_win_alloc(struct msc_window *win,
+                                  unsigned int nr_segs)
+{
+        struct scatterlist *sg_ptr;
+        void *block;
+        int i, ret;
+
+        ret = sg_alloc_table(win->sgt, nr_segs, GFP_KERNEL);
+        if (ret)
+                return -ENOMEM;
+
+        for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
+                block = dma_alloc_coherent(msc_dev(win->msc)->parent->parent,
+                                           PAGE_SIZE, &sg_dma_address(sg_ptr),
+                                           GFP_KERNEL);
+                if (!block)
+                        goto err_nomem;
+
+                sg_set_buf(sg_ptr, block, PAGE_SIZE);
+        }
+
+        return nr_segs;
+
+err_nomem:
+        for_each_sg(win->sgt->sgl, sg_ptr, i, ret)
+                dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+                                  sg_virt(sg_ptr), sg_dma_address(sg_ptr));
+
+        sg_free_table(win->sgt);
+
+        return -ENOMEM;
+}
+
+#ifdef CONFIG_X86
+static void msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs)
+{
+        struct scatterlist *sg_ptr;
+        int i;
+
+        for_each_sg(win->sgt->sgl, sg_ptr, nr_segs, i) {
+                /* Set the page as uncached */
+                set_memory_uc((unsigned long)sg_virt(sg_ptr),
+                              PFN_DOWN(sg_ptr->length));
+        }
+}
+
+static void msc_buffer_set_wb(struct msc_window *win)
+{
+        struct scatterlist *sg_ptr;
+        int i;
+
+        for_each_sg(win->sgt->sgl, sg_ptr, win->nr_segs, i) {
+                /* Reset the page to write-back */
+                set_memory_wb((unsigned long)sg_virt(sg_ptr),
+                              PFN_DOWN(sg_ptr->length));
+        }
+}
+#else /* !X86 */
+static inline void
+msc_buffer_set_uc(struct msc_window *win, unsigned int nr_segs) {}
+static inline void msc_buffer_set_wb(struct msc_window *win) {}
+#endif /* CONFIG_X86 */
+
+static struct page *msc_sg_page(struct scatterlist *sg)
+{
+        void *addr = sg_virt(sg);
+
+        if (is_vmalloc_addr(addr))
+                return vmalloc_to_page(addr);
+
+        return sg_page(sg);
+}
+
 /**
  * msc_buffer_win_alloc() - alloc a window for a multiblock mode
  * @msc: MSC device
@@ -711,44 +1076,46 @@
 static int msc_buffer_win_alloc(struct msc *msc, unsigned int nr_blocks)
 {
        struct msc_window *win;
-        unsigned long size = PAGE_SIZE;
-        int i, ret = -ENOMEM;
+        int ret = -ENOMEM;

        if (!nr_blocks)
                return 0;

-        win = kzalloc(offsetof(struct msc_window, block[nr_blocks]),
-                      GFP_KERNEL);
+        win = kzalloc(sizeof(*win), GFP_KERNEL);
        if (!win)
                return -ENOMEM;

+        win->msc = msc;
+        win->sgt = &win->_sgt;
+        win->lockout = WIN_READY;
+        spin_lock_init(&win->lo_lock);
+
        if (!list_empty(&msc->win_list)) {
-                struct msc_window *prev = list_entry(msc->win_list.prev,
-                                                     struct msc_window, entry);
+                struct msc_window *prev = list_last_entry(&msc->win_list,
+                                                          struct msc_window,
+                                                          entry);

                win->pgoff = prev->pgoff + prev->nr_blocks;
        }

-        for (i = 0; i < nr_blocks; i++) {
-                win->block[i].bdesc =
-                        dma_alloc_coherent(msc_dev(msc)->parent->parent, size,
-                                           &win->block[i].addr, GFP_KERNEL);
+        if (msc->mbuf && msc->mbuf->alloc_window)
+                ret = msc->mbuf->alloc_window(msc->mbuf_priv, &win->sgt,
+                                              nr_blocks << PAGE_SHIFT);
+        else
+                ret = __msc_buffer_win_alloc(win, nr_blocks);

-                if (!win->block[i].bdesc)
-                        goto err_nomem;
+        if (ret <= 0)
+                goto err_nomem;

-#ifdef CONFIG_X86
-                /* Set the page as uncached */
-                set_memory_uc((unsigned long)win->block[i].bdesc, 1);
-#endif
-        }
+        msc_buffer_set_uc(win, ret);

-        win->msc = msc;
+        win->nr_segs = ret;
        win->nr_blocks = nr_blocks;

        if (list_empty(&msc->win_list)) {
-                msc->base = win->block[0].bdesc;
-                msc->base_addr = win->block[0].addr;
+                msc->base = msc_win_base(win);
+                msc->base_addr = msc_win_base_dma(win);
+                msc->cur_win = win;
        }

        list_add_tail(&win->entry, &msc->win_list);
@@ -757,17 +1124,24 @@
        return 0;

 err_nomem:
-        for (i--; i >= 0; i--) {
-#ifdef CONFIG_X86
-                /* Reset the page to write-back before releasing */
-                set_memory_wb((unsigned long)win->block[i].bdesc, 1);
-#endif
-                dma_free_coherent(msc_dev(msc)->parent->parent, size,
-                                  win->block[i].bdesc, win->block[i].addr);
-        }
        kfree(win);

        return ret;
+}
+
+static void __msc_buffer_win_free(struct msc *msc, struct msc_window *win)
+{
+        struct scatterlist *sg;
+        int i;
+
+        for_each_sg(win->sgt->sgl, sg, win->nr_segs, i) {
+                struct page *page = msc_sg_page(sg);
+
+                page->mapping = NULL;
+                dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
+                                  sg_virt(sg), sg_dma_address(sg));
+        }
+        sg_free_table(win->sgt);
 }

 /**
@@ -780,8 +1154,6 @@
  */
 static void msc_buffer_win_free(struct msc *msc, struct msc_window *win)
 {
-        int i;
-
        msc->nr_pages -= win->nr_blocks;

        list_del(&win->entry);
@@ -790,17 +1162,12 @@
                msc->base_addr = 0;
        }

-        for (i = 0; i < win->nr_blocks; i++) {
-                struct page *page = virt_to_page(win->block[i].bdesc);
+        msc_buffer_set_wb(win);

-                page->mapping = NULL;
-#ifdef CONFIG_X86
-                /* Reset the page to write-back before releasing */
-                set_memory_wb((unsigned long)win->block[i].bdesc, 1);
-#endif
-                dma_free_coherent(msc_dev(win->msc)->parent->parent, PAGE_SIZE,
-                                  win->block[i].bdesc, win->block[i].addr);
-        }
+        if (msc->mbuf && msc->mbuf->free_window)
+                msc->mbuf->free_window(msc->mbuf_priv, win->sgt);
+        else
+                __msc_buffer_win_free(msc, win);

        kfree(win);
 }
@@ -818,6 +1185,7 @@

        /* call with msc::mutex locked */
        list_for_each_entry(win, &msc->win_list, entry) {
+                struct scatterlist *sg;
                unsigned int blk;
                u32 sw_tag = 0;

@@ -827,35 +1195,34 @@
                 */
                if (msc_is_last_win(win)) {
                        sw_tag |= MSC_SW_TAG_LASTWIN;
-                        next_win = list_entry(msc->win_list.next,
-                                              struct msc_window, entry);
+                        next_win = list_first_entry(&msc->win_list,
+                                                    struct msc_window, entry);
                } else {
-                        next_win = list_entry(win->entry.next,
-                                              struct msc_window, entry);
+                        next_win = list_next_entry(win, entry);
                }

-                for (blk = 0; blk < win->nr_blocks; blk++) {
-                        struct msc_block_desc *bdesc = win->block[blk].bdesc;
+                for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+                        struct msc_block_desc *bdesc = sg_virt(sg);

                        memset(bdesc, 0, sizeof(*bdesc));

-                        bdesc->next_win = next_win->block[0].addr >> PAGE_SHIFT;
+                        bdesc->next_win = msc_win_base_pfn(next_win);

                        /*
                         * Similarly to last window, last block should point
                         * to the first one.
                         */
-                        if (blk == win->nr_blocks - 1) {
+                        if (blk == win->nr_segs - 1) {
                                sw_tag |= MSC_SW_TAG_LASTBLK;
-                                bdesc->next_blk =
-                                        win->block[0].addr >> PAGE_SHIFT;
+                                bdesc->next_blk = msc_win_base_pfn(win);
                        } else {
-                                bdesc->next_blk =
-                                        win->block[blk + 1].addr >> PAGE_SHIFT;
+                                dma_addr_t addr = sg_dma_address(sg_next(sg));
+
+                                bdesc->next_blk = PFN_DOWN(addr);
                        }

                        bdesc->sw_tag = sw_tag;
-                        bdesc->block_sz = PAGE_SIZE / 64;
+                        bdesc->block_sz = sg->length / 64;
                }
        }

@@ -1014,6 +1381,8 @@
 static struct page *msc_buffer_get_page(struct msc *msc, unsigned long pgoff)
 {
        struct msc_window *win;
+        struct scatterlist *sg;
+        unsigned int blk;

        if (msc->mode == MSC_MODE_SINGLE)
                return msc_buffer_contig_get_page(msc, pgoff);
@@ -1026,7 +1395,18 @@

 found:
        pgoff -= win->pgoff;
-        return virt_to_page(win->block[pgoff].bdesc);
+
+        for_each_sg(win->sgt->sgl, sg, win->nr_segs, blk) {
+                struct page *page = msc_sg_page(sg);
+                size_t pgsz = PFN_DOWN(sg->length);
+
+                if (pgoff < pgsz)
+                        return page + pgoff;
+
+                pgoff -= pgsz;
+        }
+
+        return NULL;
 }

 /**
@@ -1279,11 +1659,27 @@
        .owner = THIS_MODULE,
 };

+static void intel_th_msc_wait_empty(struct intel_th_device *thdev)
+{
+        struct msc *msc = dev_get_drvdata(&thdev->dev);
+        unsigned long count;
+        u32 reg;
+
+        for (reg = 0, count = MSC_PLE_WAITLOOP_DEPTH;
+             count && !(reg & MSCSTS_PLE); count--) {
+                reg = __raw_readl(msc->reg_base + REG_MSU_MSC0STS);
+                cpu_relax();
+        }
+
+        if (!count)
+                dev_dbg(msc_dev(msc), "timeout waiting for MSC0 PLE\n");
+}
+
 static int intel_th_msc_init(struct msc *msc)
 {
        atomic_set(&msc->user_count, -1);

-        msc->mode = MSC_MODE_MULTI;
+        msc->mode = msc->multi_is_broken ? MSC_MODE_SINGLE : MSC_MODE_MULTI;
        mutex_init(&msc->buf_mutex);
        INIT_LIST_HEAD(&msc->win_list);
        INIT_LIST_HEAD(&msc->iter_list);
@@ -1293,6 +1689,110 @@
                        __ffs(MSC_LEN);

        return 0;
+}
+
+static int msc_win_switch(struct msc *msc)
+{
+        struct msc_window *first;
+
+        if (list_empty(&msc->win_list))
+                return -EINVAL;
+
+        first = list_first_entry(&msc->win_list, struct msc_window, entry);
+
+        if (msc_is_last_win(msc->cur_win))
+                msc->cur_win = first;
+        else
+                msc->cur_win = list_next_entry(msc->cur_win, entry);
+
+        msc->base = msc_win_base(msc->cur_win);
+        msc->base_addr = msc_win_base_dma(msc->cur_win);
+
+        intel_th_trace_switch(msc->thdev);
+
+        return 0;
+}
+
+/**
+ * intel_th_msc_window_unlock - put the window back in rotation
+ * @dev: MSC device to which this relates
+ * @sgt: buffer's sg_table for the window, does nothing if NULL
+ */
+void intel_th_msc_window_unlock(struct device *dev, struct sg_table *sgt)
+{
+        struct msc *msc = dev_get_drvdata(dev);
+        struct msc_window *win;
+
+        if (!sgt)
+                return;
+
+        win = msc_find_window(msc, sgt, false);
+        if (!win)
+                return;
+
+        msc_win_set_lockout(win, WIN_LOCKED, WIN_READY);
+        if (msc->switch_on_unlock == win) {
+                msc->switch_on_unlock = NULL;
+                msc_win_switch(msc);
+        }
+}
+EXPORT_SYMBOL_GPL(intel_th_msc_window_unlock);
+
+static void msc_work(struct work_struct *work)
+{
+        struct msc *msc = container_of(work, struct msc, work);
+
+        intel_th_msc_deactivate(msc->thdev);
+}
+
+static irqreturn_t intel_th_msc_interrupt(struct intel_th_device *thdev)
+{
+        struct msc *msc = dev_get_drvdata(&thdev->dev);
+        u32 msusts = ioread32(msc->msu_base + REG_MSU_MSUSTS);
+        u32 mask = msc->index ? MSUSTS_MSC1BLAST : MSUSTS_MSC0BLAST;
+        struct msc_window *win, *next_win;
+
+        if (!msc->do_irq || !msc->mbuf)
+                return IRQ_NONE;
+
+        msusts &= mask;
+
+        if (!msusts)
+                return msc->enabled ? IRQ_HANDLED : IRQ_NONE;
+
+        iowrite32(msusts, msc->msu_base + REG_MSU_MSUSTS);
+
+        if (!msc->enabled)
+                return IRQ_NONE;
+
+        /* grab the window before we do the switch */
+        win = msc->cur_win;
+        if (!win)
+                return IRQ_HANDLED;
+        next_win = msc_next_window(win);
+        if (!next_win)
+                return IRQ_HANDLED;
+
+        /* next window: if READY, proceed, if LOCKED, stop the trace */
+        if (msc_win_set_lockout(next_win, WIN_READY, WIN_INUSE)) {
+                if (msc->stop_on_full)
+                        schedule_work(&msc->work);
+                else
+                        msc->switch_on_unlock = next_win;
+
+                return IRQ_HANDLED;
+        }
+
+        /* current window: INUSE -> LOCKED */
+        msc_win_set_lockout(win, WIN_INUSE, WIN_LOCKED);
+
+        msc_win_switch(msc);
+
+        if (msc->mbuf && msc->mbuf->ready)
+                msc->mbuf->ready(msc->mbuf_priv, win->sgt,
+                                 msc_win_total_sz(win));
+
+        return IRQ_HANDLED;
 }

 static const char * const msc_mode[] = {
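
One point worth spelling out about the interrupt path above: msc->mbuf->ready() is called with the just-filled window already LOCKED, so the sink is expected to hand it back with intel_th_msc_window_unlock() once it is done, typically from deferred context. A sketch (again not part of this commit, reusing the hypothetical "sketch" sink from earlier):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/scatterlist.h>
#include <linux/intel_th.h>

struct sketch_sink {
        struct device *dev;             /* saved from assign() */
        struct sg_table *sgt;           /* window handed over by ready() */
        struct work_struct work;
};

static void sketch_sink_work(struct work_struct *work)
{
        struct sketch_sink *sink = container_of(work, struct sketch_sink, work);

        /* ... consume the trace data described by sink->sgt ... */

        /* LOCKED -> READY: put the window back in rotation */
        intel_th_msc_window_unlock(sink->dev, sink->sgt);
}

static int sketch_sink_ready(void *priv, struct sg_table *sgt, size_t bytes)
{
        struct sketch_sink *sink = priv;

        sink->sgt = sgt;
        schedule_work(&sink->work);

        return 0;
}
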
@@ -1329,21 +1829,43 @@

 static DEVICE_ATTR_RW(wrap);

+static void msc_buffer_unassign(struct msc *msc)
+{
+        lockdep_assert_held(&msc->buf_mutex);
+
+        if (!msc->mbuf)
+                return;
+
+        msc->mbuf->unassign(msc->mbuf_priv);
+        msu_buffer_put(msc->mbuf);
+        msc->mbuf_priv = NULL;
+        msc->mbuf = NULL;
+}
+
 static ssize_t
 mode_show(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct msc *msc = dev_get_drvdata(dev);
+        const char *mode = msc_mode[msc->mode];
+        ssize_t ret;

-        return scnprintf(buf, PAGE_SIZE, "%s\n", msc_mode[msc->mode]);
+        mutex_lock(&msc->buf_mutex);
+        if (msc->mbuf)
+                mode = msc->mbuf->name;
+        ret = scnprintf(buf, PAGE_SIZE, "%s\n", mode);
+        mutex_unlock(&msc->buf_mutex);
+
+        return ret;
 }

 static ssize_t
 mode_store(struct device *dev, struct device_attribute *attr, const char *buf,
            size_t size)
 {
+        const struct msu_buffer *mbuf = NULL;
        struct msc *msc = dev_get_drvdata(dev);
        size_t len = size;
-        char *cp;
+        char *cp, *mode;
        int i, ret;

        if (!capable(CAP_SYS_RAWIO))
@@ -1353,17 +1875,67 @@
        if (cp)
                len = cp - buf;

-        for (i = 0; i < ARRAY_SIZE(msc_mode); i++)
-                if (!strncmp(msc_mode[i], buf, len))
-                        goto found;
+        mode = kstrndup(buf, len, GFP_KERNEL);
+        if (!mode)
+                return -ENOMEM;
+
+        i = match_string(msc_mode, ARRAY_SIZE(msc_mode), mode);
+        if (i >= 0) {
+                kfree(mode);
+                goto found;
+        }
+
+        /* Buffer sinks only work with a usable IRQ */
+        if (!msc->do_irq) {
+                kfree(mode);
+                return -EINVAL;
+        }
+
+        mbuf = msu_buffer_get(mode);
+        kfree(mode);
+        if (mbuf)
+                goto found;

        return -EINVAL;

 found:
+        if (i == MSC_MODE_MULTI && msc->multi_is_broken)
+                return -EOPNOTSUPP;
+
        mutex_lock(&msc->buf_mutex);
+        ret = 0;
+
+        /* Same buffer: do nothing */
+        if (mbuf && mbuf == msc->mbuf) {
+                /* put the extra reference we just got */
+                msu_buffer_put(mbuf);
+                goto unlock;
+        }
+
        ret = msc_buffer_unlocked_free_unless_used(msc);
-        if (!ret)
-                msc->mode = i;
+        if (ret)
+                goto unlock;
+
+        if (mbuf) {
+                void *mbuf_priv = mbuf->assign(dev, &i);
+
+                if (!mbuf_priv) {
+                        ret = -ENOMEM;
+                        goto unlock;
+                }
+
+                msc_buffer_unassign(msc);
+                msc->mbuf_priv = mbuf_priv;
+                msc->mbuf = mbuf;
+        } else {
+                msc_buffer_unassign(msc);
+        }
+
+        msc->mode = i;
+
+unlock:
+        if (ret && mbuf)
+                msu_buffer_put(mbuf);
        mutex_unlock(&msc->buf_mutex);

        return ret ? ret : size;
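
With this change the mode attribute accepts either one of the fixed msc_mode strings or the name of a registered buffer sink; writing a sink's name (for example the hypothetical "sketch" sink above) takes a module reference via msu_buffer_get() and assigns the sink to this MSC, while msc_buffer_unassign() drops any previously assigned one.
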
@@ -1469,10 +2041,67 @@

 static DEVICE_ATTR_RW(nr_pages);

+static ssize_t
+win_switch_store(struct device *dev, struct device_attribute *attr,
+                 const char *buf, size_t size)
+{
+        struct msc *msc = dev_get_drvdata(dev);
+        unsigned long val;
+        int ret;
+
+        ret = kstrtoul(buf, 10, &val);
+        if (ret)
+                return ret;
+
+        if (val != 1)
+                return -EINVAL;
+
+        ret = -EINVAL;
+        mutex_lock(&msc->buf_mutex);
+        /*
+         * A window switch can only happen in the "multi" mode.
+         * If an external buffer is engaged, it has full control
+         * over window switching.
+         */
+        if (msc->mode == MSC_MODE_MULTI && !msc->mbuf)
+                ret = msc_win_switch(msc);
+        mutex_unlock(&msc->buf_mutex);
+
+        return ret ? ret : size;
+}
+
+static DEVICE_ATTR_WO(win_switch);
+
+static ssize_t stop_on_full_show(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+        struct msc *msc = dev_get_drvdata(dev);
+
+        return sprintf(buf, "%d\n", msc->stop_on_full);
+}
+
+static ssize_t stop_on_full_store(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t size)
+{
+        struct msc *msc = dev_get_drvdata(dev);
+        int ret;
+
+        ret = kstrtobool(buf, &msc->stop_on_full);
+        if (ret)
+                return ret;
+
+        return size;
+}
+
+static DEVICE_ATTR_RW(stop_on_full);
+
 static struct attribute *msc_output_attrs[] = {
        &dev_attr_wrap.attr,
        &dev_attr_mode.attr,
        &dev_attr_nr_pages.attr,
+        &dev_attr_win_switch.attr,
+        &dev_attr_stop_on_full.attr,
        NULL,
 };

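
Together with the interrupt handler earlier in this patch: writing 1 to the new win_switch attribute forces a window switch in multi mode as long as no external sink is in control, and stop_on_full selects what happens when the next window is still LOCKED at switch time, namely stopping the trace via the deferred msc_work() or parking the switch in switch_on_unlock until the sink calls intel_th_msc_window_unlock().
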
@@ -1500,11 +2129,20 @@
        if (!msc)
                return -ENOMEM;

+        res = intel_th_device_get_resource(thdev, IORESOURCE_IRQ, 1);
+        if (!res)
+                msc->do_irq = 1;
+
+        if (INTEL_TH_CAP(to_intel_th(thdev), multi_is_broken))
+                msc->multi_is_broken = 1;
+
        msc->index = thdev->id;

        msc->thdev = thdev;
        msc->reg_base = base + msc->index * 0x100;
+        msc->msu_base = base;

+        INIT_WORK(&msc->work, msc_work);
        err = intel_th_msc_init(msc);
        if (err)
                return err;
@@ -1533,6 +2171,8 @@
 static struct intel_th_driver intel_th_msc_driver = {
        .probe = intel_th_msc_probe,
        .remove = intel_th_msc_remove,
+        .irq = intel_th_msc_interrupt,
+        .wait_empty = intel_th_msc_wait_empty,
        .activate = intel_th_msc_activate,
        .deactivate = intel_th_msc_deactivate,
        .fops = &intel_th_msc_fops,