2024-02-20 102a0743326a03cd1a1202ceda21e175b7d3575c
kernel/sound/firewire/amdtp-stream.c
....@@ -1,14 +1,15 @@
1
+// SPDX-License-Identifier: GPL-2.0-only
12 /*
23 * Audio and Music Data Transmission Protocol (IEC 61883-6) streams
34 * with Common Isochronous Packet (IEC 61883-1) headers
45 *
56 * Copyright (c) Clemens Ladisch <clemens@ladisch.de>
6
- * Licensed under the terms of the GNU General Public License, version 2.
77 */
88
99 #include <linux/device.h>
1010 #include <linux/err.h>
1111 #include <linux/firewire.h>
12
+#include <linux/firewire-constants.h>
1213 #include <linux/module.h>
1314 #include <linux/slab.h>
1415 #include <sound/pcm.h>
....@@ -18,6 +19,8 @@
1819 #define TICKS_PER_CYCLE 3072
1920 #define CYCLES_PER_SECOND 8000
2021 #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
22
+
23
+#define OHCI_MAX_SECOND 8
2124
2225 /* Always support Linux tracing subsystem. */
2326 #define CREATE_TRACE_POINTS
....@@ -52,14 +55,16 @@
5255 #define CIP_FMT_AM 0x10
5356 #define AMDTP_FDF_NO_DATA 0xff
5457
55
-/* TODO: make these configurable */
56
-#define INTERRUPT_INTERVAL 16
57
-#define QUEUE_LENGTH 48
58
+// For iso header, tstamp and 2 CIP headers.
59
+#define IR_CTX_HEADER_SIZE_CIP 16
60
+// For iso header and tstamp.
61
+#define IR_CTX_HEADER_SIZE_NO_CIP 8
62
+#define HEADER_TSTAMP_MASK 0x0000ffff
5863
59
-#define IN_PACKET_HEADER_SIZE 4
60
-#define OUT_PACKET_HEADER_SIZE 0
64
+#define IT_PKT_HEADER_SIZE_CIP 8 // For 2 CIP headers.
65
+#define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing.
6166
62
-static void pcm_period_tasklet(unsigned long data);
67
+static void pcm_period_work(struct work_struct *work);
6368
6469 /**
6570 * amdtp_stream_init - initialize an AMDTP stream structure
....@@ -68,16 +73,16 @@
6873 * @dir: the direction of stream
6974 * @flags: the packet transmission method to use
7075 * @fmt: the value of fmt field in CIP header
71
- * @process_data_blocks: callback handler to process data blocks
76
+ * @process_ctx_payloads: callback handler to process payloads of isoc context
7277 * @protocol_size: the size to allocate newly for protocol
7378 */
7479 int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
7580 enum amdtp_stream_direction dir, enum cip_flags flags,
7681 unsigned int fmt,
77
- amdtp_stream_process_data_blocks_t process_data_blocks,
82
+ amdtp_stream_process_ctx_payloads_t process_ctx_payloads,
7883 unsigned int protocol_size)
7984 {
80
- if (process_data_blocks == NULL)
85
+ if (process_ctx_payloads == NULL)
8186 return -EINVAL;
8287
8388 s->protocol = kzalloc(protocol_size, GFP_KERNEL);
....@@ -89,14 +94,17 @@
8994 s->flags = flags;
9095 s->context = ERR_PTR(-1);
9196 mutex_init(&s->mutex);
92
- tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s);
97
+ INIT_WORK(&s->period_work, pcm_period_work);
9398 s->packet_index = 0;
9499
95100 init_waitqueue_head(&s->callback_wait);
96101 s->callbacked = false;
97102
98103 s->fmt = fmt;
99
- s->process_data_blocks = process_data_blocks;
104
+ s->process_ctx_payloads = process_ctx_payloads;
105
+
106
+ if (dir == AMDTP_OUT_STREAM)
107
+ s->ctx_data.rx.syt_override = -1;
100108
101109 return 0;
102110 }
....@@ -140,6 +148,28 @@
140148 };
141149 EXPORT_SYMBOL(amdtp_rate_table);
142150
151
+static int apply_constraint_to_size(struct snd_pcm_hw_params *params,
152
+ struct snd_pcm_hw_rule *rule)
153
+{
154
+ struct snd_interval *s = hw_param_interval(params, rule->var);
155
+ const struct snd_interval *r =
156
+ hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE);
157
+ struct snd_interval t = {0};
158
+ unsigned int step = 0;
159
+ int i;
160
+
161
+ for (i = 0; i < CIP_SFC_COUNT; ++i) {
162
+ if (snd_interval_test(r, amdtp_rate_table[i]))
163
+ step = max(step, amdtp_syt_intervals[i]);
164
+ }
165
+
166
+ t.min = roundup(s->min, step);
167
+ t.max = rounddown(s->max, step);
168
+ t.integer = 1;
169
+
170
+ return snd_interval_refine(s, &t);
171
+}
172
+
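
For illustration, a minimal user-space sketch of the refinement this rule performs (the step of 32 assumes rates up to 176.4/192 kHz are still allowed, whose SYT_INTERVAL is 32; the 100..1000 frame range is hypothetical):

#include <stdio.h>

static unsigned int roundup_to(unsigned int x, unsigned int step)
{
	return ((x + step - 1) / step) * step;
}

int main(void)
{
	unsigned int step = 32;			/* largest allowed SYT_INTERVAL */
	unsigned int min = 100, max = 1000;	/* requested size interval */

	/* Same refinement as apply_constraint_to_size(): shrink the interval
	 * inward so period/buffer sizes are multiples of the step. */
	printf("refined: %u..%u frames\n",
	       roundup_to(min, step), (max / step) * step);	/* 128..992 */
	return 0;
}
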
143173 /**
144174 * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream
145175 * @s: the AMDTP stream, which must be initialized.
....@@ -149,6 +179,8 @@
149179 struct snd_pcm_runtime *runtime)
150180 {
151181 struct snd_pcm_hardware *hw = &runtime->hw;
182
+ unsigned int ctx_header_size;
183
+ unsigned int maximum_usec_per_period;
152184 int err;
153185
154186 hw->info = SNDRV_PCM_INFO_BATCH |
....@@ -169,19 +201,36 @@
169201 hw->period_bytes_max = hw->period_bytes_min * 2048;
170202 hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min;
171203
172
- /*
173
- * Currently firewire-lib processes 16 packets in one software
174
- * interrupt callback. This equals to 2msec but actually the
175
- * interval of the interrupts has a jitter.
176
- * Additionally, even if adding a constraint to fit period size to
177
- * 2msec, actual calculated frames per period doesn't equal to 2msec,
178
- * depending on sampling rate.
179
- * Anyway, the interval to call snd_pcm_period_elapsed() cannot 2msec.
180
- * Here let us use 5msec for safe period interrupt.
181
- */
204
+ // The Linux driver for the 1394 OHCI controller voluntarily flushes an
205
+ // isoc context once the total size of accumulated context headers reaches
206
+ // PAGE_SIZE. This kicks off work for the isoc context and brings the
207
+ // callback in the middle of scheduled interrupts.
208
+ // Although AMDTP streams in the same domain use the same events per
209
+ // IRQ, use the largest context header size among the IT/IR contexts.
210
+ // Here, the context header size of the IR context is used for both
211
+ // contexts.
212
+ if (!(s->flags & CIP_NO_HEADER))
213
+ ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
214
+ else
215
+ ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
216
+ maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE /
217
+ CYCLES_PER_SECOND / ctx_header_size;
218
+
219
+ // In IEC 61883-6, one isoc packet can transfer events up to the value
220
+ // of the syt interval. This comes from the interval of the isoc cycle. As
221
+ // the 1394 OHCI controller can generate a hardware IRQ per isoc packet,
222
+ // the interval is 125 usec.
223
+ // However, there are two modes of transmission in IEC 61883-6; blocking
224
+ // and non-blocking. In blocking mode, the sequence of isoc packets
225
+ // includes 'empty' or 'NODATA' packets which include no event. In
226
+ // non-blocking mode, the number of events per packet is variable up to
227
+ // the syt interval.
228
+ // Due to the above protocol design, the minimum PCM frames per
229
+ // interrupt should be double the syt interval, thus it is
230
+ // 250 usec.
182231 err = snd_pcm_hw_constraint_minmax(runtime,
183232 SNDRV_PCM_HW_PARAM_PERIOD_TIME,
184
- 5000, UINT_MAX);
233
+ 250, maximum_usec_per_period);
185234 if (err < 0)
186235 goto end;
187236
....@@ -194,16 +243,19 @@
194243 * number equals to SYT_INTERVAL. So the number is 8, 16 or 32,
195244 * depending on its sampling rate. For accurate period interrupt, it's
196245 * preferable to align period/buffer sizes to current SYT_INTERVAL.
197
- *
198
- * TODO: These constraints can be improved with proper rules.
199
- * Currently apply LCM of SYT_INTERVALs.
200246 */
201
- err = snd_pcm_hw_constraint_step(runtime, 0,
202
- SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32);
247
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
248
+ apply_constraint_to_size, NULL,
249
+ SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
250
+ SNDRV_PCM_HW_PARAM_RATE, -1);
203251 if (err < 0)
204252 goto end;
205
- err = snd_pcm_hw_constraint_step(runtime, 0,
206
- SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);
253
+ err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
254
+ apply_constraint_to_size, NULL,
255
+ SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
256
+ SNDRV_PCM_HW_PARAM_RATE, -1);
257
+ if (err < 0)
258
+ goto end;
207259 end:
208260 return err;
209261 }
....@@ -234,11 +286,18 @@
234286 s->data_block_quadlets = data_block_quadlets;
235287 s->syt_interval = amdtp_syt_intervals[sfc];
236288
237
- /* default buffering in the device */
238
- s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
239
- if (s->flags & CIP_BLOCKING)
240
- /* additional buffering needed to adjust for no-data packets */
241
- s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate;
289
+ // default buffering in the device.
290
+ if (s->direction == AMDTP_OUT_STREAM) {
291
+ s->ctx_data.rx.transfer_delay =
292
+ TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE;
293
+
294
+ if (s->flags & CIP_BLOCKING) {
295
+ // additional buffering needed to adjust for no-data
296
+ // packets.
297
+ s->ctx_data.rx.transfer_delay +=
298
+ TICKS_PER_SECOND * s->syt_interval / rate;
299
+ }
300
+ }
242301
243302 return 0;
244303 }
....@@ -254,15 +313,15 @@
254313 unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s)
255314 {
256315 unsigned int multiplier = 1;
257
- unsigned int header_size = 0;
316
+ unsigned int cip_header_size = 0;
258317
259318 if (s->flags & CIP_JUMBO_PAYLOAD)
260319 multiplier = 5;
261320 if (!(s->flags & CIP_NO_HEADER))
262
- header_size = 8;
321
+ cip_header_size = sizeof(__be32) * 2;
263322
264
- return header_size +
265
- s->syt_interval * s->data_block_quadlets * 4 * multiplier;
323
+ return cip_header_size +
324
+ s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier;
266325 }
267326 EXPORT_SYMBOL(amdtp_stream_get_max_payload);
268327
....@@ -274,31 +333,32 @@
274333 */
275334 void amdtp_stream_pcm_prepare(struct amdtp_stream *s)
276335 {
277
- tasklet_kill(&s->period_tasklet);
336
+ cancel_work_sync(&s->period_work);
278337 s->pcm_buffer_pointer = 0;
279338 s->pcm_period_pointer = 0;
280339 }
281340 EXPORT_SYMBOL(amdtp_stream_pcm_prepare);
282341
283
-static unsigned int calculate_data_blocks(struct amdtp_stream *s,
284
- unsigned int syt)
342
+static unsigned int calculate_data_blocks(unsigned int *data_block_state,
343
+ bool is_blocking, bool is_no_info,
344
+ unsigned int syt_interval, enum cip_sfc sfc)
285345 {
286
- unsigned int phase, data_blocks;
346
+ unsigned int data_blocks;
287347
288348 /* Blocking mode. */
289
- if (s->flags & CIP_BLOCKING) {
349
+ if (is_blocking) {
290350 /* This module generates an empty packet for 'no data'. */
291
- if (syt == CIP_SYT_NO_INFO)
351
+ if (is_no_info)
292352 data_blocks = 0;
293353 else
294
- data_blocks = s->syt_interval;
354
+ data_blocks = syt_interval;
295355 /* Non-blocking mode. */
296356 } else {
297
- if (!cip_sfc_is_base_44100(s->sfc)) {
298
- /* Sample_rate / 8000 is an integer, and precomputed. */
299
- data_blocks = s->data_block_state;
357
+ if (!cip_sfc_is_base_44100(sfc)) {
358
+ // Sample_rate / 8000 is an integer, and precomputed.
359
+ data_blocks = *data_block_state;
300360 } else {
301
- phase = s->data_block_state;
361
+ unsigned int phase = *data_block_state;
302362
303363 /*
304364 * This calculates the number of data blocks per packet so that
....@@ -308,30 +368,30 @@
308368 * as possible in the sequence (to prevent underruns of the
309369 * device's buffer).
310370 */
311
- if (s->sfc == CIP_SFC_44100)
371
+ if (sfc == CIP_SFC_44100)
312372 /* 6 6 5 6 5 6 5 ... */
313373 data_blocks = 5 + ((phase & 1) ^
314374 (phase == 0 || phase >= 40));
315375 else
316376 /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */
317
- data_blocks = 11 * (s->sfc >> 1) + (phase == 0);
318
- if (++phase >= (80 >> (s->sfc >> 1)))
377
+ data_blocks = 11 * (sfc >> 1) + (phase == 0);
378
+ if (++phase >= (80 >> (sfc >> 1)))
319379 phase = 0;
320
- s->data_block_state = phase;
380
+ *data_block_state = phase;
321381 }
322382 }
323383
324384 return data_blocks;
325385 }
326386
327
-static unsigned int calculate_syt(struct amdtp_stream *s,
328
- unsigned int cycle)
387
+static unsigned int calculate_syt_offset(unsigned int *last_syt_offset,
388
+ unsigned int *syt_offset_state, enum cip_sfc sfc)
329389 {
330
- unsigned int syt_offset, phase, index, syt;
390
+ unsigned int syt_offset;
331391
332
- if (s->last_syt_offset < TICKS_PER_CYCLE) {
333
- if (!cip_sfc_is_base_44100(s->sfc))
334
- syt_offset = s->last_syt_offset + s->syt_offset_state;
392
+ if (*last_syt_offset < TICKS_PER_CYCLE) {
393
+ if (!cip_sfc_is_base_44100(sfc))
394
+ syt_offset = *last_syt_offset + *syt_offset_state;
335395 else {
336396 /*
337397 * The time, in ticks, of the n'th SYT_INTERVAL sample is:
....@@ -343,28 +403,24 @@
343403 * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ...
344404 * This code generates _exactly_ the same sequence.
345405 */
346
- phase = s->syt_offset_state;
347
- index = phase % 13;
348
- syt_offset = s->last_syt_offset;
406
+ unsigned int phase = *syt_offset_state;
407
+ unsigned int index = phase % 13;
408
+
409
+ syt_offset = *last_syt_offset;
349410 syt_offset += 1386 + ((index && !(index & 3)) ||
350411 phase == 146);
351412 if (++phase >= 147)
352413 phase = 0;
353
- s->syt_offset_state = phase;
414
+ *syt_offset_state = phase;
354415 }
355416 } else
356
- syt_offset = s->last_syt_offset - TICKS_PER_CYCLE;
357
- s->last_syt_offset = syt_offset;
417
+ syt_offset = *last_syt_offset - TICKS_PER_CYCLE;
418
+ *last_syt_offset = syt_offset;
358419
359
- if (syt_offset < TICKS_PER_CYCLE) {
360
- syt_offset += s->transfer_delay;
361
- syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12;
362
- syt += syt_offset % TICKS_PER_CYCLE;
420
+ if (syt_offset >= TICKS_PER_CYCLE)
421
+ syt_offset = CIP_SYT_NO_INFO;
363422
364
- return syt & CIP_SYT_MASK;
365
- } else {
366
- return CIP_SYT_NO_INFO;
367
- }
423
+ return syt_offset;
368424 }
369425
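
The 147-phase pattern can be checked standalone: 8 samples at 44.1 kHz span 8 * 24576000 / 44100 = 4458 + 34/147 ticks, so the offset must advance by exactly 147 * 1386 + 34 ticks per full pattern:

#include <stdio.h>

int main(void)
{
	unsigned int phase, sum = 0;

	for (phase = 0; phase < 147; ++phase) {
		unsigned int index = phase % 13;

		sum += 1386 + ((index && !(index & 3)) || phase == 146);
	}
	printf("ticks per 147 cycles: %u\n", sum);	/* 203776 */
	return 0;
}
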
370426 static void update_pcm_pointers(struct amdtp_stream *s,
....@@ -381,155 +437,112 @@
381437 s->pcm_period_pointer += frames;
382438 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
383439 s->pcm_period_pointer -= pcm->runtime->period_size;
384
- tasklet_hi_schedule(&s->period_tasklet);
440
+ queue_work(system_highpri_wq, &s->period_work);
385441 }
386442 }
387443
388
-static void pcm_period_tasklet(unsigned long data)
444
+static void pcm_period_work(struct work_struct *work)
389445 {
390
- struct amdtp_stream *s = (void *)data;
446
+ struct amdtp_stream *s = container_of(work, struct amdtp_stream,
447
+ period_work);
391448 struct snd_pcm_substream *pcm = READ_ONCE(s->pcm);
392449
393450 if (pcm)
394451 snd_pcm_period_elapsed(pcm);
395452 }
396453
397
-static int queue_packet(struct amdtp_stream *s, unsigned int header_length,
398
- unsigned int payload_length)
454
+static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params,
455
+ bool sched_irq)
399456 {
400
- struct fw_iso_packet p = {0};
401
- int err = 0;
457
+ int err;
402458
403
- if (IS_ERR(s->context))
404
- goto end;
459
+ params->interrupt = sched_irq;
460
+ params->tag = s->tag;
461
+ params->sy = 0;
405462
406
- p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL);
407
- p.tag = s->tag;
408
- p.header_length = header_length;
409
- if (payload_length > 0)
410
- p.payload_length = payload_length;
411
- else
412
- p.skip = true;
413
- err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer,
463
+ err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer,
414464 s->buffer.packets[s->packet_index].offset);
415465 if (err < 0) {
416466 dev_err(&s->unit->device, "queueing error: %d\n", err);
417467 goto end;
418468 }
419469
420
- if (++s->packet_index >= QUEUE_LENGTH)
470
+ if (++s->packet_index >= s->queue_size)
421471 s->packet_index = 0;
422472 end:
423473 return err;
424474 }
425475
426476 static inline int queue_out_packet(struct amdtp_stream *s,
427
- unsigned int payload_length)
477
+ struct fw_iso_packet *params, bool sched_irq)
428478 {
429
- return queue_packet(s, OUT_PACKET_HEADER_SIZE, payload_length);
479
+ params->skip =
480
+ !!(params->header_length == 0 && params->payload_length == 0);
481
+ return queue_packet(s, params, sched_irq);
430482 }
431483
432
-static inline int queue_in_packet(struct amdtp_stream *s)
484
+static inline int queue_in_packet(struct amdtp_stream *s,
485
+ struct fw_iso_packet *params)
433486 {
434
- return queue_packet(s, IN_PACKET_HEADER_SIZE, s->max_payload_length);
487
+ // Queue one packet for IR context.
488
+ params->header_length = s->ctx_data.tx.ctx_header_size;
489
+ params->payload_length = s->ctx_data.tx.max_ctx_payload_length;
490
+ params->skip = false;
491
+ return queue_packet(s, params, false);
435492 }
436493
437
-static int handle_out_packet(struct amdtp_stream *s,
438
- unsigned int payload_length, unsigned int cycle,
439
- unsigned int index)
494
+static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2],
495
+ unsigned int data_block_counter, unsigned int syt)
440496 {
441
- __be32 *buffer;
442
- unsigned int syt;
443
- unsigned int data_blocks;
444
- unsigned int pcm_frames;
445
- struct snd_pcm_substream *pcm;
446
-
447
- buffer = s->buffer.packets[s->packet_index].buffer;
448
- syt = calculate_syt(s, cycle);
449
- data_blocks = calculate_data_blocks(s, syt);
450
- pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);
451
-
452
- if (s->flags & CIP_DBC_IS_END_EVENT)
453
- s->data_block_counter =
454
- (s->data_block_counter + data_blocks) & 0xff;
455
-
456
- buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
497
+ cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) |
457498 (s->data_block_quadlets << CIP_DBS_SHIFT) |
458499 ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) |
459
- s->data_block_counter);
460
- buffer[1] = cpu_to_be32(CIP_EOH |
461
- ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
462
- ((s->fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
463
- (syt & CIP_SYT_MASK));
464
-
465
- if (!(s->flags & CIP_DBC_IS_END_EVENT))
466
- s->data_block_counter =
467
- (s->data_block_counter + data_blocks) & 0xff;
468
- payload_length = 8 + data_blocks * 4 * s->data_block_quadlets;
469
-
470
- trace_out_packet(s, cycle, buffer, payload_length, index);
471
-
472
- if (queue_out_packet(s, payload_length) < 0)
473
- return -EIO;
474
-
475
- pcm = READ_ONCE(s->pcm);
476
- if (pcm && pcm_frames > 0)
477
- update_pcm_pointers(s, pcm, pcm_frames);
478
-
479
- /* No need to return the number of handled data blocks. */
480
- return 0;
500
+ data_block_counter);
501
+ cip_header[1] = cpu_to_be32(CIP_EOH |
502
+ ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) |
503
+ ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) |
504
+ (syt & CIP_SYT_MASK));
481505 }
482506
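
A standalone sketch of the same two quadlets with sample field values (the shifts mirror this file's CIP_DBS_SHIFT/CIP_FMT_SHIFT/CIP_FDF_SHIFT and CIP_EOH; the field values themselves are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int sid = 1, dbs = 2, dbc = 0x45;	/* sample values */
	unsigned int fmt = 0x10, fdf = 0x02, syt = 0x71e8;

	/* quadlet 0: SID | DBS | DBC; quadlet 1: EOH | FMT | FDF | SYT */
	unsigned int q0 = (sid << 24) | (dbs << 16) | dbc;
	unsigned int q1 = 0x80000000u | (fmt << 24) | (fdf << 16) | syt;

	printf("CIP header: %08x %08x\n", q0, q1);	/* 01020045 900271e8 */
	return 0;
}
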
483
-static int handle_out_packet_without_header(struct amdtp_stream *s,
484
- unsigned int payload_length, unsigned int cycle,
485
- unsigned int index)
507
+static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle,
508
+ struct fw_iso_packet *params,
509
+ unsigned int data_blocks,
510
+ unsigned int data_block_counter,
511
+ unsigned int syt, unsigned int index)
486512 {
487
- __be32 *buffer;
488
- unsigned int syt;
489
- unsigned int data_blocks;
490
- unsigned int pcm_frames;
491
- struct snd_pcm_substream *pcm;
513
+ unsigned int payload_length;
514
+ __be32 *cip_header;
492515
493
- buffer = s->buffer.packets[s->packet_index].buffer;
494
- syt = calculate_syt(s, cycle);
495
- data_blocks = calculate_data_blocks(s, syt);
496
- pcm_frames = s->process_data_blocks(s, buffer, data_blocks, &syt);
497
- s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;
516
+ payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets;
517
+ params->payload_length = payload_length;
498518
499
- payload_length = data_blocks * 4 * s->data_block_quadlets;
519
+ if (!(s->flags & CIP_NO_HEADER)) {
520
+ cip_header = (__be32 *)params->header;
521
+ generate_cip_header(s, cip_header, data_block_counter, syt);
522
+ params->header_length = 2 * sizeof(__be32);
523
+ payload_length += params->header_length;
524
+ } else {
525
+ cip_header = NULL;
526
+ }
500527
501
- trace_out_packet_without_header(s, cycle, payload_length, data_blocks,
502
- index);
503
-
504
- if (queue_out_packet(s, payload_length) < 0)
505
- return -EIO;
506
-
507
- pcm = READ_ONCE(s->pcm);
508
- if (pcm && pcm_frames > 0)
509
- update_pcm_pointers(s, pcm, pcm_frames);
510
-
511
- /* No need to return the number of handled data blocks. */
512
- return 0;
528
+ trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks,
529
+ data_block_counter, s->packet_index, index);
513530 }
514531
515
-static int handle_in_packet(struct amdtp_stream *s,
516
- unsigned int payload_length, unsigned int cycle,
517
- unsigned int index)
532
+static int check_cip_header(struct amdtp_stream *s, const __be32 *buf,
533
+ unsigned int payload_length,
534
+ unsigned int *data_blocks,
535
+ unsigned int *data_block_counter, unsigned int *syt)
518536 {
519
- __be32 *buffer;
520537 u32 cip_header[2];
521
- unsigned int sph, fmt, fdf, syt;
522
- unsigned int data_block_quadlets, data_block_counter, dbc_interval;
523
- unsigned int data_blocks;
524
- struct snd_pcm_substream *pcm;
525
- unsigned int pcm_frames;
538
+ unsigned int sph;
539
+ unsigned int fmt;
540
+ unsigned int fdf;
541
+ unsigned int dbc;
526542 bool lost;
527543
528
- buffer = s->buffer.packets[s->packet_index].buffer;
529
- cip_header[0] = be32_to_cpu(buffer[0]);
530
- cip_header[1] = be32_to_cpu(buffer[1]);
531
-
532
- trace_in_packet(s, cycle, cip_header, payload_length, index);
544
+ cip_header[0] = be32_to_cpu(buf[0]);
545
+ cip_header[1] = be32_to_cpu(buf[1]);
533546
534547 /*
535548 * This module supports 'Two-quadlet CIP header with SYT field'.
....@@ -541,9 +554,7 @@
541554 dev_info_ratelimited(&s->unit->device,
542555 "Invalid CIP header for AMDTP: %08X:%08X\n",
543556 cip_header[0], cip_header[1]);
544
- data_blocks = 0;
545
- pcm_frames = 0;
546
- goto end;
557
+ return -EAGAIN;
547558 }
548559
549560 /* Check valid protocol or not. */
....@@ -553,19 +564,17 @@
553564 dev_info_ratelimited(&s->unit->device,
554565 "Detect unexpected protocol: %08x %08x\n",
555566 cip_header[0], cip_header[1]);
556
- data_blocks = 0;
557
- pcm_frames = 0;
558
- goto end;
567
+ return -EAGAIN;
559568 }
560569
561570 /* Calculate data blocks */
562571 fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT;
563
- if (payload_length < 12 ||
572
+ if (payload_length < sizeof(__be32) * 2 ||
564573 (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) {
565
- data_blocks = 0;
574
+ *data_blocks = 0;
566575 } else {
567
- data_block_quadlets =
568
- (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
576
+ unsigned int data_block_quadlets =
577
+ (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT;
569578 /* avoid division by zero */
570579 if (data_block_quadlets == 0) {
571580 dev_err(&s->unit->device,
....@@ -576,111 +585,241 @@
576585 if (s->flags & CIP_WRONG_DBS)
577586 data_block_quadlets = s->data_block_quadlets;
578587
579
- data_blocks = (payload_length / 4 - 2) /
588
+ *data_blocks = (payload_length / sizeof(__be32) - 2) /
580589 data_block_quadlets;
581590 }
582591
583592 /* Check data block counter continuity */
584
- data_block_counter = cip_header[0] & CIP_DBC_MASK;
585
- if (data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
586
- s->data_block_counter != UINT_MAX)
587
- data_block_counter = s->data_block_counter;
593
+ dbc = cip_header[0] & CIP_DBC_MASK;
594
+ if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) &&
595
+ *data_block_counter != UINT_MAX)
596
+ dbc = *data_block_counter;
588597
589
- if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
590
- data_block_counter == s->tx_first_dbc) ||
591
- s->data_block_counter == UINT_MAX) {
598
+ if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) ||
599
+ *data_block_counter == UINT_MAX) {
592600 lost = false;
593601 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
594
- lost = data_block_counter != s->data_block_counter;
602
+ lost = dbc != *data_block_counter;
595603 } else {
596
- if (data_blocks > 0 && s->tx_dbc_interval > 0)
597
- dbc_interval = s->tx_dbc_interval;
598
- else
599
- dbc_interval = data_blocks;
604
+ unsigned int dbc_interval;
600605
601
- lost = data_block_counter !=
602
- ((s->data_block_counter + dbc_interval) & 0xff);
606
+ if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0)
607
+ dbc_interval = s->ctx_data.tx.dbc_interval;
608
+ else
609
+ dbc_interval = *data_blocks;
610
+
611
+ lost = dbc != ((*data_block_counter + dbc_interval) & 0xff);
603612 }
604613
605614 if (lost) {
606615 dev_err(&s->unit->device,
607616 "Detect discontinuity of CIP: %02X %02X\n",
608
- s->data_block_counter, data_block_counter);
617
+ *data_block_counter, dbc);
609618 return -EIO;
610619 }
611620
612
- syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK;
613
- pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt);
621
+ *data_block_counter = dbc;
614622
615
- if (s->flags & CIP_DBC_IS_END_EVENT)
616
- s->data_block_counter = data_block_counter;
623
+ *syt = cip_header[1] & CIP_SYT_MASK;
624
+
625
+ return 0;
626
+}
627
+
628
+static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle,
629
+ const __be32 *ctx_header,
630
+ unsigned int *payload_length,
631
+ unsigned int *data_blocks,
632
+ unsigned int *data_block_counter,
633
+ unsigned int *syt, unsigned int packet_index, unsigned int index)
634
+{
635
+ const __be32 *cip_header;
636
+ unsigned int cip_header_size;
637
+ int err;
638
+
639
+ *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT;
640
+
641
+ if (!(s->flags & CIP_NO_HEADER))
642
+ cip_header_size = 8;
617643 else
618
- s->data_block_counter =
619
- (data_block_counter + data_blocks) & 0xff;
620
-end:
621
- if (queue_in_packet(s) < 0)
644
+ cip_header_size = 0;
645
+
646
+ if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) {
647
+ dev_err(&s->unit->device,
648
+ "Detect jumbo payload: %04x %04x\n",
649
+ *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length);
622650 return -EIO;
651
+ }
623652
624
- pcm = READ_ONCE(s->pcm);
625
- if (pcm && pcm_frames > 0)
626
- update_pcm_pointers(s, pcm, pcm_frames);
653
+ if (cip_header_size > 0) {
654
+ cip_header = ctx_header + 2;
655
+ err = check_cip_header(s, cip_header, *payload_length,
656
+ data_blocks, data_block_counter, syt);
657
+ if (err < 0)
658
+ return err;
659
+ } else {
660
+ cip_header = NULL;
661
+ err = 0;
662
+ *data_blocks = *payload_length / sizeof(__be32) /
663
+ s->data_block_quadlets;
664
+ *syt = 0;
627665
628
- return 0;
666
+ if (*data_block_counter == UINT_MAX)
667
+ *data_block_counter = 0;
668
+ }
669
+
670
+ trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks,
671
+ *data_block_counter, packet_index, index);
672
+
673
+ return err;
629674 }
630675
631
-static int handle_in_packet_without_header(struct amdtp_stream *s,
632
- unsigned int payload_length, unsigned int cycle,
633
- unsigned int index)
676
+// In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On
677
+// the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
678
+// it. Thus, via Linux firewire subsystem, we can get the 3 bits for second.
679
+static inline u32 compute_cycle_count(__be32 ctx_header_tstamp)
634680 {
635
- __be32 *buffer;
636
- unsigned int payload_quadlets;
637
- unsigned int data_blocks;
638
- struct snd_pcm_substream *pcm;
639
- unsigned int pcm_frames;
640
-
641
- buffer = s->buffer.packets[s->packet_index].buffer;
642
- payload_quadlets = payload_length / 4;
643
- data_blocks = payload_quadlets / s->data_block_quadlets;
644
-
645
- trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks,
646
- index);
647
-
648
- pcm_frames = s->process_data_blocks(s, buffer, data_blocks, NULL);
649
- s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff;
650
-
651
- if (queue_in_packet(s) < 0)
652
- return -EIO;
653
-
654
- pcm = READ_ONCE(s->pcm);
655
- if (pcm && pcm_frames > 0)
656
- update_pcm_pointers(s, pcm, pcm_frames);
657
-
658
- return 0;
659
-}
660
-
661
-/*
662
- * In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On
663
- * the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent
664
- * it. Thus, via Linux firewire subsystem, we can get the 3 bits for second.
665
- */
666
-static inline u32 compute_cycle_count(u32 tstamp)
667
-{
681
+ u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK;
668682 return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff);
669683 }
670684
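
For example (hypothetical timestamp value), the low 16 bits of the context-header quadlet decode as 3-bit seconds plus a 13-bit cycle:

#include <stdio.h>

int main(void)
{
	unsigned int tstamp = 0x642f;	/* sample value & HEADER_TSTAMP_MASK */
	unsigned int cycle = ((tstamp >> 13) & 0x07) * 8000 +
			     (tstamp & 0x1fff);

	printf("cycle count: %u\n", cycle);	/* 3 * 8000 + 1071 = 25071 */
	return 0;
}
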
671685 static inline u32 increment_cycle_count(u32 cycle, unsigned int addend)
672686 {
673687 cycle += addend;
674
- if (cycle >= 8 * CYCLES_PER_SECOND)
675
- cycle -= 8 * CYCLES_PER_SECOND;
688
+ if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND)
689
+ cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND;
676690 return cycle;
677691 }
678692
679
-static inline u32 decrement_cycle_count(u32 cycle, unsigned int subtrahend)
693
+// Align to actual cycle count for the packet which is going to be scheduled.
694
+// This module queues the same number of isochronous cycles as the size of the
695
+// queue to skip isochronous cycles, therefore it's OK to just increment the
696
+// cycle by the size of the queue for the scheduled cycle.
697
+static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp,
698
+ unsigned int queue_size)
680699 {
681
- if (cycle < subtrahend)
682
- cycle += 8 * CYCLES_PER_SECOND;
683
- return cycle - subtrahend;
700
+ u32 cycle = compute_cycle_count(ctx_header_tstamp);
701
+ return increment_cycle_count(cycle, queue_size);
702
+}
703
+
704
+static int generate_device_pkt_descs(struct amdtp_stream *s,
705
+ struct pkt_desc *descs,
706
+ const __be32 *ctx_header,
707
+ unsigned int packets)
708
+{
709
+ unsigned int dbc = s->data_block_counter;
710
+ unsigned int packet_index = s->packet_index;
711
+ unsigned int queue_size = s->queue_size;
712
+ int i;
713
+ int err;
714
+
715
+ for (i = 0; i < packets; ++i) {
716
+ struct pkt_desc *desc = descs + i;
717
+ unsigned int cycle;
718
+ unsigned int payload_length;
719
+ unsigned int data_blocks;
720
+ unsigned int syt;
721
+
722
+ cycle = compute_cycle_count(ctx_header[1]);
723
+
724
+ err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length,
725
+ &data_blocks, &dbc, &syt, packet_index, i);
726
+ if (err < 0)
727
+ return err;
728
+
729
+ desc->cycle = cycle;
730
+ desc->syt = syt;
731
+ desc->data_blocks = data_blocks;
732
+ desc->data_block_counter = dbc;
733
+ desc->ctx_payload = s->buffer.packets[packet_index].buffer;
734
+
735
+ if (!(s->flags & CIP_DBC_IS_END_EVENT))
736
+ dbc = (dbc + desc->data_blocks) & 0xff;
737
+
738
+ ctx_header +=
739
+ s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header);
740
+
741
+ packet_index = (packet_index + 1) % queue_size;
742
+ }
743
+
744
+ s->data_block_counter = dbc;
745
+
746
+ return 0;
747
+}
748
+
749
+static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle,
750
+ unsigned int transfer_delay)
751
+{
752
+ unsigned int syt;
753
+
754
+ syt_offset += transfer_delay;
755
+ syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) |
756
+ (syt_offset % TICKS_PER_CYCLE);
757
+ return syt & CIP_SYT_MASK;
758
+}
759
+
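
Worked through with sample numbers (the delay of 8704 ticks corresponds to the default TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE; the other values are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned int syt_offset = 1000, cycle = 100;	/* hypothetical */
	unsigned int transfer_delay = 8704;		/* 0x2e00 - 3072 ticks */
	unsigned int syt;

	syt_offset += transfer_delay;	/* 9704 ticks */
	syt = ((cycle + syt_offset / 3072) << 12) | (syt_offset % 3072);
	/* (103 << 12) | 488; low 4 bits of cycle 103 = 7, offset 488 = 0x1e8 */
	printf("syt: 0x%04x\n", syt & 0x0000ffff);	/* 0x71e8 */
	return 0;
}
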
760
+static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs,
761
+ const __be32 *ctx_header, unsigned int packets,
762
+ const struct seq_desc *seq_descs,
763
+ unsigned int seq_size)
764
+{
765
+ unsigned int dbc = s->data_block_counter;
766
+ unsigned int seq_index = s->ctx_data.rx.seq_index;
767
+ int i;
768
+
769
+ for (i = 0; i < packets; ++i) {
770
+ struct pkt_desc *desc = descs + i;
771
+ unsigned int index = (s->packet_index + i) % s->queue_size;
772
+ const struct seq_desc *seq = seq_descs + seq_index;
773
+ unsigned int syt;
774
+
775
+ desc->cycle = compute_it_cycle(*ctx_header, s->queue_size);
776
+
777
+ syt = seq->syt_offset;
778
+ if (syt != CIP_SYT_NO_INFO) {
779
+ syt = compute_syt(syt, desc->cycle,
780
+ s->ctx_data.rx.transfer_delay);
781
+ }
782
+ desc->syt = syt;
783
+ desc->data_blocks = seq->data_blocks;
784
+
785
+ if (s->flags & CIP_DBC_IS_END_EVENT)
786
+ dbc = (dbc + desc->data_blocks) & 0xff;
787
+
788
+ desc->data_block_counter = dbc;
789
+
790
+ if (!(s->flags & CIP_DBC_IS_END_EVENT))
791
+ dbc = (dbc + desc->data_blocks) & 0xff;
792
+
793
+ desc->ctx_payload = s->buffer.packets[index].buffer;
794
+
795
+ seq_index = (seq_index + 1) % seq_size;
796
+
797
+ ++ctx_header;
798
+ }
799
+
800
+ s->data_block_counter = dbc;
801
+ s->ctx_data.rx.seq_index = seq_index;
802
+}
803
+
804
+static inline void cancel_stream(struct amdtp_stream *s)
805
+{
806
+ s->packet_index = -1;
807
+ if (in_interrupt())
808
+ amdtp_stream_pcm_abort(s);
809
+ WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
810
+}
811
+
812
+static void process_ctx_payloads(struct amdtp_stream *s,
813
+ const struct pkt_desc *descs,
814
+ unsigned int packets)
815
+{
816
+ struct snd_pcm_substream *pcm;
817
+ unsigned int pcm_frames;
818
+
819
+ pcm = READ_ONCE(s->pcm);
820
+ pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm);
821
+ if (pcm)
822
+ update_pcm_pointers(s, pcm, pcm_frames);
684823 }
685824
686825 static void out_stream_callback(struct fw_iso_context *context, u32 tstamp,
....@@ -688,29 +827,57 @@
688827 void *private_data)
689828 {
690829 struct amdtp_stream *s = private_data;
691
- unsigned int i, packets = header_length / 4;
692
- u32 cycle;
830
+ const struct amdtp_domain *d = s->domain;
831
+ const __be32 *ctx_header = header;
832
+ unsigned int events_per_period = s->ctx_data.rx.events_per_period;
833
+ unsigned int event_count = s->ctx_data.rx.event_count;
834
+ unsigned int packets;
835
+ int i;
693836
694837 if (s->packet_index < 0)
695838 return;
696839
697
- cycle = compute_cycle_count(tstamp);
840
+ // Calculate the number of packets in buffer and check XRUN.
841
+ packets = header_length / sizeof(*ctx_header);
698842
699
- /* Align to actual cycle count for the last packet. */
700
- cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets);
843
+ generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs,
844
+ d->seq_size);
845
+
846
+ process_ctx_payloads(s, s->pkt_descs, packets);
701847
702848 for (i = 0; i < packets; ++i) {
703
- cycle = increment_cycle_count(cycle, 1);
704
- if (s->handle_packet(s, 0, cycle, i) < 0) {
705
- s->packet_index = -1;
706
- if (in_interrupt())
707
- amdtp_stream_pcm_abort(s);
708
- WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
849
+ const struct pkt_desc *desc = s->pkt_descs + i;
850
+ unsigned int syt;
851
+ struct {
852
+ struct fw_iso_packet params;
853
+ __be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)];
854
+ } template = { {0}, {0} };
855
+ bool sched_irq = false;
856
+
857
+ if (s->ctx_data.rx.syt_override < 0)
858
+ syt = desc->syt;
859
+ else
860
+ syt = s->ctx_data.rx.syt_override;
861
+
862
+ build_it_pkt_header(s, desc->cycle, &template.params,
863
+ desc->data_blocks, desc->data_block_counter,
864
+ syt, i);
865
+
866
+ if (s == s->domain->irq_target) {
867
+ event_count += desc->data_blocks;
868
+ if (event_count >= events_per_period) {
869
+ event_count -= events_per_period;
870
+ sched_irq = true;
871
+ }
872
+ }
873
+
874
+ if (queue_out_packet(s, &template.params, sched_irq) < 0) {
875
+ cancel_stream(s);
709876 return;
710877 }
711878 }
712879
713
- fw_iso_context_queue_flush(s->context);
880
+ s->ctx_data.rx.event_count = event_count;
714881 }
715882
716883 static void in_stream_callback(struct fw_iso_context *context, u32 tstamp,
....@@ -718,62 +885,125 @@
718885 void *private_data)
719886 {
720887 struct amdtp_stream *s = private_data;
721
- unsigned int i, packets;
722
- unsigned int payload_length, max_payload_length;
723
- __be32 *headers = header;
724
- u32 cycle;
888
+ __be32 *ctx_header = header;
889
+ unsigned int packets;
890
+ int i;
891
+ int err;
725892
726893 if (s->packet_index < 0)
727894 return;
728895
729
- /* The number of packets in buffer */
730
- packets = header_length / IN_PACKET_HEADER_SIZE;
896
+ // Calculate the number of packets in buffer and check XRUN.
897
+ packets = header_length / s->ctx_data.tx.ctx_header_size;
731898
732
- cycle = compute_cycle_count(tstamp);
733
-
734
- /* Align to actual cycle count for the last packet. */
735
- cycle = decrement_cycle_count(cycle, packets);
736
-
737
- /* For buffer-over-run prevention. */
738
- max_payload_length = s->max_payload_length;
739
-
740
- for (i = 0; i < packets; i++) {
741
- cycle = increment_cycle_count(cycle, 1);
742
-
743
- /* The number of bytes in this packet */
744
- payload_length =
745
- (be32_to_cpu(headers[i]) >> ISO_DATA_LENGTH_SHIFT);
746
- if (payload_length > max_payload_length) {
747
- dev_err(&s->unit->device,
748
- "Detect jumbo payload: %04x %04x\n",
749
- payload_length, max_payload_length);
750
- break;
899
+ err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets);
900
+ if (err < 0) {
901
+ if (err != -EAGAIN) {
902
+ cancel_stream(s);
903
+ return;
751904 }
752
-
753
- if (s->handle_packet(s, payload_length, cycle, i) < 0)
754
- break;
905
+ } else {
906
+ process_ctx_payloads(s, s->pkt_descs, packets);
755907 }
756908
757
- /* Queueing error or detecting invalid payload. */
758
- if (i < packets) {
759
- s->packet_index = -1;
760
- if (in_interrupt())
761
- amdtp_stream_pcm_abort(s);
762
- WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN);
763
- return;
764
- }
909
+ for (i = 0; i < packets; ++i) {
910
+ struct fw_iso_packet params = {0};
765911
766
- fw_iso_context_queue_flush(s->context);
912
+ if (queue_in_packet(s, &params) < 0) {
913
+ cancel_stream(s);
914
+ return;
915
+ }
916
+ }
767917 }
768918
769
-/* this is executed one time */
919
+static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets)
920
+{
921
+ struct amdtp_stream *irq_target = d->irq_target;
922
+ unsigned int seq_tail = d->seq_tail;
923
+ unsigned int seq_size = d->seq_size;
924
+ unsigned int min_avail;
925
+ struct amdtp_stream *s;
926
+
927
+ min_avail = d->seq_size;
928
+ list_for_each_entry(s, &d->streams, list) {
929
+ unsigned int seq_index;
930
+ unsigned int avail;
931
+
932
+ if (s->direction == AMDTP_IN_STREAM)
933
+ continue;
934
+
935
+ seq_index = s->ctx_data.rx.seq_index;
936
+ avail = d->seq_tail;
937
+ if (seq_index > avail)
938
+ avail += d->seq_size;
939
+ avail -= seq_index;
940
+
941
+ if (avail < min_avail)
942
+ min_avail = avail;
943
+ }
944
+
945
+ while (min_avail < packets) {
946
+ struct seq_desc *desc = d->seq_descs + seq_tail;
947
+
948
+ desc->syt_offset = calculate_syt_offset(&d->last_syt_offset,
949
+ &d->syt_offset_state, irq_target->sfc);
950
+ desc->data_blocks = calculate_data_blocks(&d->data_block_state,
951
+ !!(irq_target->flags & CIP_BLOCKING),
952
+ desc->syt_offset == CIP_SYT_NO_INFO,
953
+ irq_target->syt_interval, irq_target->sfc);
954
+
955
+ ++seq_tail;
956
+ seq_tail %= seq_size;
957
+
958
+ ++min_avail;
959
+ }
960
+
961
+ d->seq_tail = seq_tail;
962
+}
963
+
964
+static void irq_target_callback(struct fw_iso_context *context, u32 tstamp,
965
+ size_t header_length, void *header,
966
+ void *private_data)
967
+{
968
+ struct amdtp_stream *irq_target = private_data;
969
+ struct amdtp_domain *d = irq_target->domain;
970
+ unsigned int packets = header_length / sizeof(__be32);
971
+ struct amdtp_stream *s;
972
+
973
+ // Record enough entries with extra 3 cycles at least.
974
+ pool_ideal_seq_descs(d, packets + 3);
975
+
976
+ out_stream_callback(context, tstamp, header_length, header, irq_target);
977
+ if (amdtp_streaming_error(irq_target))
978
+ goto error;
979
+
980
+ list_for_each_entry(s, &d->streams, list) {
981
+ if (s != irq_target && amdtp_stream_running(s)) {
982
+ fw_iso_context_flush_completions(s->context);
983
+ if (amdtp_streaming_error(s))
984
+ goto error;
985
+ }
986
+ }
987
+
988
+ return;
989
+error:
990
+ if (amdtp_stream_running(irq_target))
991
+ cancel_stream(irq_target);
992
+
993
+ list_for_each_entry(s, &d->streams, list) {
994
+ if (amdtp_stream_running(s))
995
+ cancel_stream(s);
996
+ }
997
+}
998
+
999
+// this is executed one time.
7701000 static void amdtp_stream_first_callback(struct fw_iso_context *context,
7711001 u32 tstamp, size_t header_length,
7721002 void *header, void *private_data)
7731003 {
7741004 struct amdtp_stream *s = private_data;
1005
+ const __be32 *ctx_header = header;
7751006 u32 cycle;
776
- unsigned int packets;
7771007
7781008 /*
7791009 * For in-stream, first packet has come.
....@@ -782,24 +1012,17 @@
7821012 s->callbacked = true;
7831013 wake_up(&s->callback_wait);
7841014
785
- cycle = compute_cycle_count(tstamp);
786
-
7871015 if (s->direction == AMDTP_IN_STREAM) {
788
- packets = header_length / IN_PACKET_HEADER_SIZE;
789
- cycle = decrement_cycle_count(cycle, packets);
1016
+ cycle = compute_cycle_count(ctx_header[1]);
1017
+
7901018 context->callback.sc = in_stream_callback;
791
- if (s->flags & CIP_NO_HEADER)
792
- s->handle_packet = handle_in_packet_without_header;
793
- else
794
- s->handle_packet = handle_in_packet;
7951019 } else {
796
- packets = header_length / 4;
797
- cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets);
798
- context->callback.sc = out_stream_callback;
799
- if (s->flags & CIP_NO_HEADER)
800
- s->handle_packet = handle_out_packet_without_header;
1020
+ cycle = compute_it_cycle(*ctx_header, s->queue_size);
1021
+
1022
+ if (s == s->domain->irq_target)
1023
+ context->callback.sc = irq_target_callback;
8011024 else
802
- s->handle_packet = handle_out_packet;
1025
+ context->callback.sc = out_stream_callback;
8031026 }
8041027
8051028 s->start_cycle = cycle;
....@@ -812,26 +1035,22 @@
8121035 * @s: the AMDTP stream to start
8131036 * @channel: the isochronous channel on the bus
8141037 * @speed: firewire speed code
1038
+ * @start_cycle: the isochronous cycle to start the context. Start immediately
1039
+ * if negative value is given.
1040
+ * @queue_size: The number of packets in the queue.
1041
+ * @idle_irq_interval: the interval to queue packet during initial state.
8151042 *
8161043 * The stream cannot be started until it has been configured with
8171044 * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI
8181045 * device can be started.
8191046 */
820
-int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed)
1047
+static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed,
1048
+ int start_cycle, unsigned int queue_size,
1049
+ unsigned int idle_irq_interval)
8211050 {
822
- static const struct {
823
- unsigned int data_block;
824
- unsigned int syt_offset;
825
- } initial_state[] = {
826
- [CIP_SFC_32000] = { 4, 3072 },
827
- [CIP_SFC_48000] = { 6, 1024 },
828
- [CIP_SFC_96000] = { 12, 1024 },
829
- [CIP_SFC_192000] = { 24, 1024 },
830
- [CIP_SFC_44100] = { 0, 67 },
831
- [CIP_SFC_88200] = { 0, 67 },
832
- [CIP_SFC_176400] = { 0, 67 },
833
- };
834
- unsigned int header_size;
1051
+ bool is_irq_target = (s == s->domain->irq_target);
1052
+ unsigned int ctx_header_size;
1053
+ unsigned int max_ctx_payload_size;
8351054 enum dma_data_direction dir;
8361055 int type, tag, err;
8371056
....@@ -843,32 +1062,47 @@
8431062 goto err_unlock;
8441063 }
8451064
846
- if (s->direction == AMDTP_IN_STREAM)
847
- s->data_block_counter = UINT_MAX;
848
- else
849
- s->data_block_counter = 0;
850
- s->data_block_state = initial_state[s->sfc].data_block;
851
- s->syt_offset_state = initial_state[s->sfc].syt_offset;
852
- s->last_syt_offset = TICKS_PER_CYCLE;
1065
+ if (s->direction == AMDTP_IN_STREAM) {
1066
+ // NOTE: IT context should be used for constant IRQ.
1067
+ if (is_irq_target) {
1068
+ err = -EINVAL;
1069
+ goto err_unlock;
1070
+ }
8531071
854
- /* initialize packet buffer */
1072
+ s->data_block_counter = UINT_MAX;
1073
+ } else {
1074
+ s->data_block_counter = 0;
1075
+ }
1076
+
1077
+ // initialize packet buffer.
1078
+ max_ctx_payload_size = amdtp_stream_get_max_payload(s);
8551079 if (s->direction == AMDTP_IN_STREAM) {
8561080 dir = DMA_FROM_DEVICE;
8571081 type = FW_ISO_CONTEXT_RECEIVE;
858
- header_size = IN_PACKET_HEADER_SIZE;
1082
+ if (!(s->flags & CIP_NO_HEADER)) {
1083
+ max_ctx_payload_size -= 8;
1084
+ ctx_header_size = IR_CTX_HEADER_SIZE_CIP;
1085
+ } else {
1086
+ ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP;
1087
+ }
8591088 } else {
8601089 dir = DMA_TO_DEVICE;
8611090 type = FW_ISO_CONTEXT_TRANSMIT;
862
- header_size = OUT_PACKET_HEADER_SIZE;
1091
+ ctx_header_size = 0; // No effect for IT context.
1092
+
1093
+ if (!(s->flags & CIP_NO_HEADER))
1094
+ max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP;
8631095 }
864
- err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH,
865
- amdtp_stream_get_max_payload(s), dir);
1096
+
1097
+ err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size,
1098
+ max_ctx_payload_size, dir);
8661099 if (err < 0)
8671100 goto err_unlock;
1101
+ s->queue_size = queue_size;
8681102
8691103 s->context = fw_iso_context_create(fw_parent_device(s->unit)->card,
870
- type, channel, speed, header_size,
871
- amdtp_stream_first_callback, s);
1104
+ type, channel, speed, ctx_header_size,
1105
+ amdtp_stream_first_callback, s);
8721106 if (IS_ERR(s->context)) {
8731107 err = PTR_ERR(s->context);
8741108 if (err == -EBUSY)
....@@ -879,22 +1113,44 @@
8791113
8801114 amdtp_stream_update(s);
8811115
882
- if (s->direction == AMDTP_IN_STREAM)
883
- s->max_payload_length = amdtp_stream_get_max_payload(s);
1116
+ if (s->direction == AMDTP_IN_STREAM) {
1117
+ s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size;
1118
+ s->ctx_data.tx.ctx_header_size = ctx_header_size;
1119
+ }
8841120
8851121 if (s->flags & CIP_NO_HEADER)
8861122 s->tag = TAG_NO_CIP_HEADER;
8871123 else
8881124 s->tag = TAG_CIP;
8891125
1126
+ s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs),
1127
+ GFP_KERNEL);
1128
+ if (!s->pkt_descs) {
1129
+ err = -ENOMEM;
1130
+ goto err_context;
1131
+ }
1132
+
8901133 s->packet_index = 0;
8911134 do {
892
- if (s->direction == AMDTP_IN_STREAM)
893
- err = queue_in_packet(s);
894
- else
895
- err = queue_out_packet(s, 0);
1135
+ struct fw_iso_packet params;
1136
+
1137
+ if (s->direction == AMDTP_IN_STREAM) {
1138
+ err = queue_in_packet(s, &params);
1139
+ } else {
1140
+ bool sched_irq = false;
1141
+
1142
+ params.header_length = 0;
1143
+ params.payload_length = 0;
1144
+
1145
+ if (is_irq_target) {
1146
+ sched_irq = !((s->packet_index + 1) %
1147
+ idle_irq_interval);
1148
+ }
1149
+
1150
+ err = queue_out_packet(s, &params, sched_irq);
1151
+ }
8961152 if (err < 0)
897
- goto err_context;
1153
+ goto err_pkt_descs;
8981154 } while (s->packet_index > 0);
8991155
9001156 /* NOTE: TAG1 matches CIP. This just affects in stream. */
....@@ -903,14 +1159,15 @@
9031159 tag |= FW_ISO_CONTEXT_MATCH_TAG0;
9041160
9051161 s->callbacked = false;
906
- err = fw_iso_context_start(s->context, -1, 0, tag);
1162
+ err = fw_iso_context_start(s->context, start_cycle, 0, tag);
9071163 if (err < 0)
908
- goto err_context;
1164
+ goto err_pkt_descs;
9091165
9101166 mutex_unlock(&s->mutex);
9111167
9121168 return 0;
913
-
1169
+err_pkt_descs:
1170
+ kfree(s->pkt_descs);
9141171 err_context:
9151172 fw_iso_context_destroy(s->context);
9161173 s->context = ERR_PTR(-1);
....@@ -921,57 +1178,71 @@
9211178
9221179 return err;
9231180 }
924
-EXPORT_SYMBOL(amdtp_stream_start);
9251181
9261182 /**
927
- * amdtp_stream_pcm_pointer - get the PCM buffer position
1183
+ * amdtp_domain_stream_pcm_pointer - get the PCM buffer position
1184
+ * @d: the AMDTP domain.
9281185 * @s: the AMDTP stream that transports the PCM data
9291186 *
9301187 * Returns the current buffer position, in frames.
9311188 */
932
-unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s)
1189
+unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d,
1190
+ struct amdtp_stream *s)
9331191 {
934
- /*
935
- * This function is called in software IRQ context of period_tasklet or
936
- * process context.
937
- *
938
- * When the software IRQ context was scheduled by software IRQ context
939
- * of IR/IT contexts, queued packets were already handled. Therefore,
940
- * no need to flush the queue in buffer anymore.
941
- *
942
- * When the process context reach here, some packets will be already
943
- * queued in the buffer. These packets should be handled immediately
944
- * to keep better granularity of PCM pointer.
945
- *
946
- * Later, the process context will sometimes schedules software IRQ
947
- * context of the period_tasklet. Then, no need to flush the queue by
948
- * the same reason as described for IR/IT contexts.
949
- */
950
- if (!in_interrupt() && amdtp_stream_running(s))
951
- fw_iso_context_flush_completions(s->context);
1192
+ struct amdtp_stream *irq_target = d->irq_target;
1193
+
1194
+ if (irq_target && amdtp_stream_running(irq_target)) {
1195
+ // This function is called in software IRQ context of
1196
+ // period_work or process context.
1197
+ //
1198
+ // When the software IRQ context was scheduled by software IRQ
1199
+ // context of IT contexts, queued packets were already handled.
1200
+ // Therefore, no need to flush the queue in buffer furthermore.
1201
+ //
1202
+ // When the process context reach here, some packets will be
1203
+ // already queued in the buffer. These packets should be handled
1204
+ // immediately to keep better granularity of PCM pointer.
1205
+ //
1206
+ // Later, the process context will sometimes schedules software
1207
+ // IRQ context of the period_work. Then, no need to flush the
1208
+ // queue by the same reason as described in the above
1209
+ if (current_work() != &s->period_work) {
1210
+ // Queued packet should be processed without any kernel
1211
+ // preemption to keep latency against bus cycle.
1212
+ preempt_disable();
1213
+ fw_iso_context_flush_completions(irq_target->context);
1214
+ preempt_enable();
1215
+ }
1216
+ }
9521217
9531218 return READ_ONCE(s->pcm_buffer_pointer);
9541219 }
955
-EXPORT_SYMBOL(amdtp_stream_pcm_pointer);
1220
+EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer);
9561221
9571222 /**
958
- * amdtp_stream_pcm_ack - acknowledge queued PCM frames
1223
+ * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames
1224
+ * @d: the AMDTP domain.
9591225 * @s: the AMDTP stream that transfers the PCM frames
9601226 *
9611227 * Returns zero always.
9621228 */
963
-int amdtp_stream_pcm_ack(struct amdtp_stream *s)
1229
+int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s)
9641230 {
965
- /*
966
- * Process isochronous packets for recent isochronous cycle to handle
967
- * queued PCM frames.
968
- */
969
- if (amdtp_stream_running(s))
970
- fw_iso_context_flush_completions(s->context);
1231
+ struct amdtp_stream *irq_target = d->irq_target;
1232
+
1233
+ // Process isochronous packets for recent isochronous cycle to handle
1234
+ // queued PCM frames.
1235
+ if (irq_target && amdtp_stream_running(irq_target)) {
1236
+ // Queued packet should be processed without any kernel
1237
+ // preemption to keep latency against bus cycle.
1238
+ preempt_disable();
1239
+ fw_iso_context_flush_completions(irq_target->context);
1240
+ preempt_enable();
1241
+ }
9711242
9721243 return 0;
9731244 }
974
-EXPORT_SYMBOL(amdtp_stream_pcm_ack);
1245
+EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack);
9751246
9761247 /**
9771248 * amdtp_stream_update - update the stream after a bus reset
....@@ -992,7 +1263,7 @@
9921263 * All PCM and MIDI devices of the stream must be stopped before the stream
9931264 * itself can be stopped.
9941265 */
995
-void amdtp_stream_stop(struct amdtp_stream *s)
1266
+static void amdtp_stream_stop(struct amdtp_stream *s)
9961267 {
9971268 mutex_lock(&s->mutex);
9981269
....@@ -1001,17 +1272,17 @@
10011272 return;
10021273 }
10031274
1004
- tasklet_kill(&s->period_tasklet);
1275
+ cancel_work_sync(&s->period_work);
10051276 fw_iso_context_stop(s->context);
10061277 fw_iso_context_destroy(s->context);
10071278 s->context = ERR_PTR(-1);
10081279 iso_packets_buffer_destroy(&s->buffer, s->unit);
1280
+ kfree(s->pkt_descs);
10091281
10101282 s->callbacked = false;
10111283
10121284 mutex_unlock(&s->mutex);
10131285 }
1014
-EXPORT_SYMBOL(amdtp_stream_stop);
10151286
10161287 /**
10171288 * amdtp_stream_pcm_abort - abort the running PCM device
....@@ -1029,3 +1300,237 @@
10291300 snd_pcm_stop_xrun(pcm);
10301301 }
10311302 EXPORT_SYMBOL(amdtp_stream_pcm_abort);
1303
+
1304
+/**
1305
+ * amdtp_domain_init - initialize an AMDTP domain structure
1306
+ * @d: the AMDTP domain to initialize.
1307
+ */
1308
+int amdtp_domain_init(struct amdtp_domain *d)
1309
+{
1310
+ INIT_LIST_HEAD(&d->streams);
1311
+
1312
+ d->events_per_period = 0;
1313
+
1314
+ d->seq_descs = NULL;
1315
+
1316
+ return 0;
1317
+}
1318
+EXPORT_SYMBOL_GPL(amdtp_domain_init);
1319
+
1320
+/**
1321
+ * amdtp_domain_destroy - destroy an AMDTP domain structure
1322
+ * @d: the AMDTP domain to destroy.
1323
+ */
1324
+void amdtp_domain_destroy(struct amdtp_domain *d)
1325
+{
1326
+ // At present nothing to do.
1327
+ return;
1328
+}
1329
+EXPORT_SYMBOL_GPL(amdtp_domain_destroy);
1330
+
1331
+/**
1332
+ * amdtp_domain_add_stream - register isoc context into the domain.
1333
+ * @d: the AMDTP domain.
1334
+ * @s: the AMDTP stream.
1335
+ * @channel: the isochronous channel on the bus.
1336
+ * @speed: firewire speed code.
1337
+ */
1338
+int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s,
1339
+ int channel, int speed)
1340
+{
1341
+ struct amdtp_stream *tmp;
1342
+
1343
+ list_for_each_entry(tmp, &d->streams, list) {
1344
+ if (s == tmp)
1345
+ return -EBUSY;
1346
+ }
1347
+
1348
+ list_add(&s->list, &d->streams);
1349
+
1350
+ s->channel = channel;
1351
+ s->speed = speed;
1352
+ s->domain = d;
1353
+
1354
+ return 0;
1355
+}
1356
+EXPORT_SYMBOL_GPL(amdtp_domain_add_stream);
1357
+
1358
+static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle)
1359
+{
1360
+ int generation;
1361
+ int rcode;
1362
+ __be32 reg;
1363
+ u32 data;
1364
+
1365
+ // This is a request to local 1394 OHCI controller and expected to
1366
+ // complete without any event waiting.
1367
+ generation = fw_card->generation;
1368
+ smp_rmb(); // node_id vs. generation.
1369
+ rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST,
1370
+ fw_card->node_id, generation, SCODE_100,
1371
+ CSR_REGISTER_BASE + CSR_CYCLE_TIME,
1372
+ &reg, sizeof(reg));
1373
+ if (rcode != RCODE_COMPLETE)
1374
+ return -EIO;
1375
+
1376
+ data = be32_to_cpu(reg);
1377
+ *cur_cycle = data >> 12;
1378
+
1379
+ return 0;
1380
+}
1381
+
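
The CYCLE_TIME register packs sec into bits [31:25], cycle into [24:12] and offset into [11:0], so the ">> 12" above keeps sec and cycle. A decode sketch with sample values:

#include <stdio.h>

int main(void)
{
	unsigned int reg = (65u << 25) | (4000u << 12) | 123u;	/* sample */
	unsigned int cur_cycle = reg >> 12;

	printf("sec=%u cycle=%u\n",
	       (cur_cycle >> 13) & 0x7f, cur_cycle & 0x1fff);	/* 65, 4000 */
	return 0;
}
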
1382
+/**
1383
+ * amdtp_domain_start - start sending packets for isoc context in the domain.
1384
+ * @d: the AMDTP domain.
1385
+ * @ir_delay_cycle: the cycle delay to start all IR contexts.
1386
+ */
1387
+int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle)
1388
+{
1389
+ static const struct {
1390
+ unsigned int data_block;
1391
+ unsigned int syt_offset;
1392
+ } *entry, initial_state[] = {
1393
+ [CIP_SFC_32000] = { 4, 3072 },
1394
+ [CIP_SFC_48000] = { 6, 1024 },
1395
+ [CIP_SFC_96000] = { 12, 1024 },
1396
+ [CIP_SFC_192000] = { 24, 1024 },
1397
+ [CIP_SFC_44100] = { 0, 67 },
1398
+ [CIP_SFC_88200] = { 0, 67 },
1399
+ [CIP_SFC_176400] = { 0, 67 },
1400
+ };
1401
+ unsigned int events_per_buffer = d->events_per_buffer;
1402
+ unsigned int events_per_period = d->events_per_period;
1403
+ unsigned int idle_irq_interval;
1404
+ unsigned int queue_size;
1405
+ struct amdtp_stream *s;
1406
+ int cycle;
1407
+ bool found = false;
1408
+ int err;
1409
+
1410
+ // Select an IT context as IRQ target.
1411
+ list_for_each_entry(s, &d->streams, list) {
1412
+ if (s->direction == AMDTP_OUT_STREAM) {
1413
+ found = true;
1414
+ break;
1415
+ }
1416
+ }
1417
+ if (!found)
1418
+ return -ENXIO;
1419
+ d->irq_target = s;
1420
+
1421
+ // This is the case where AMDTP streams in the domain run just for a MIDI
1422
+ // substream. Use the number of events equivalent to 10 msec as the
1423
+ // interval of hardware IRQ.
1424
+ if (events_per_period == 0)
1425
+ events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100;
1426
+ if (events_per_buffer == 0)
1427
+ events_per_buffer = events_per_period * 3;
1428
+
1429
+ queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer,
1430
+ amdtp_rate_table[d->irq_target->sfc]);
1431
+
1432
+ d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL);
1433
+ if (!d->seq_descs)
1434
+ return -ENOMEM;
1435
+ d->seq_size = queue_size;
1436
+ d->seq_tail = 0;
1437
+
1438
+ entry = &initial_state[s->sfc];
1439
+ d->data_block_state = entry->data_block;
1440
+ d->syt_offset_state = entry->syt_offset;
1441
+ d->last_syt_offset = TICKS_PER_CYCLE;
1442
+
1443
+ if (ir_delay_cycle > 0) {
1444
+ struct fw_card *fw_card = fw_parent_device(s->unit)->card;
1445
+
1446
+ err = get_current_cycle_time(fw_card, &cycle);
1447
+ if (err < 0)
1448
+ goto error;
1449
+
1450
+ // No need to care about overflow in the cycle field because of its
1451
+ // sufficient width.
1452
+ cycle += ir_delay_cycle;
1453
+
1454
+ // Round up to sec field.
1455
+ if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) {
1456
+ unsigned int sec;
1457
+
1458
+ // The sec field can overflow.
1459
+ sec = (cycle & 0xffffe000) >> 13;
1460
+ cycle = (++sec << 13) |
1461
+ ((cycle & 0x00001fff) / CYCLES_PER_SECOND);
1462
+ }
1463
+
1464
+ // In the OHCI 1394 specification, the lower 2 bits are available for
1465
+ // the sec field.
1466
+ cycle &= 0x00007fff;
1467
+ } else {
1468
+ cycle = -1;
1469
+ }
1470
+
1471
+ list_for_each_entry(s, &d->streams, list) {
1472
+ int cycle_match;
1473
+
1474
+ if (s->direction == AMDTP_IN_STREAM) {
1475
+ cycle_match = cycle;
1476
+ } else {
1477
+ // IT context starts immediately.
1478
+ cycle_match = -1;
1479
+ s->ctx_data.rx.seq_index = 0;
1480
+ }
1481
+
1482
+ if (s != d->irq_target) {
1483
+ err = amdtp_stream_start(s, s->channel, s->speed,
1484
+ cycle_match, queue_size, 0);
1485
+ if (err < 0)
1486
+ goto error;
1487
+ }
1488
+ }
1489
+
1490
+ s = d->irq_target;
1491
+ s->ctx_data.rx.events_per_period = events_per_period;
1492
+ s->ctx_data.rx.event_count = 0;
1493
+ s->ctx_data.rx.seq_index = 0;
1494
+
1495
+ idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period,
1496
+ amdtp_rate_table[d->irq_target->sfc]);
1497
+ err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size,
1498
+ idle_irq_interval);
1499
+ if (err < 0)
1500
+ goto error;
1501
+
1502
+ return 0;
1503
+error:
1504
+ list_for_each_entry(s, &d->streams, list)
1505
+ amdtp_stream_stop(s);
1506
+ kfree(d->seq_descs);
1507
+ d->seq_descs = NULL;
1508
+ return err;
1509
+}
1510
+EXPORT_SYMBOL_GPL(amdtp_domain_start);
1511
+
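
A worked example of the sizing above (hypothetical figures: 48 kHz with a 10 msec period and the default 3-period buffer):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int rate = 48000, events_per_period = 480;	/* 10 msec */
	unsigned int events_per_buffer = events_per_period * 3;
	unsigned int queue_size =
		DIV_ROUND_UP(8000 * events_per_buffer, rate);
	unsigned int idle_irq_interval =
		DIV_ROUND_UP(8000 * events_per_period, rate);

	/* 240 packets queued; one idle-state IRQ every 80 cycles (10 msec) */
	printf("queue_size=%u idle_irq_interval=%u\n",
	       queue_size, idle_irq_interval);
	return 0;
}
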
1512
+/**
1513
+ * amdtp_domain_stop - stop sending packets for isoc context in the same domain.
1514
+ * @d: the AMDTP domain to which the isoc contexts belong.
1515
+ */
1516
+void amdtp_domain_stop(struct amdtp_domain *d)
1517
+{
1518
+ struct amdtp_stream *s, *next;
1519
+
1520
+ if (d->irq_target)
1521
+ amdtp_stream_stop(d->irq_target);
1522
+
1523
+ list_for_each_entry_safe(s, next, &d->streams, list) {
1524
+ list_del(&s->list);
1525
+
1526
+ if (s != d->irq_target)
1527
+ amdtp_stream_stop(s);
1528
+ }
1529
+
1530
+ d->events_per_period = 0;
1531
+ d->irq_target = NULL;
1532
+
1533
+ kfree(d->seq_descs);
1534
+ d->seq_descs = NULL;
1535
+}
1536
+EXPORT_SYMBOL_GPL(amdtp_domain_stop);