.. | .. |
---|
| 1 | +// SPDX-License-Identifier: GPL-2.0-only |
---|
1 | 2 | /* |
---|
2 | 3 | * Audio and Music Data Transmission Protocol (IEC 61883-6) streams |
---|
3 | 4 | * with Common Isochronous Packet (IEC 61883-1) headers |
---|
4 | 5 | * |
---|
5 | 6 | * Copyright (c) Clemens Ladisch <clemens@ladisch.de> |
---|
6 | | - * Licensed under the terms of the GNU General Public License, version 2. |
---|
7 | 7 | */ |
---|
8 | 8 | |
---|
9 | 9 | #include <linux/device.h> |
---|
10 | 10 | #include <linux/err.h> |
---|
11 | 11 | #include <linux/firewire.h> |
---|
| 12 | +#include <linux/firewire-constants.h> |
---|
12 | 13 | #include <linux/module.h> |
---|
13 | 14 | #include <linux/slab.h> |
---|
14 | 15 | #include <sound/pcm.h> |
---|
.. | .. |
---|
18 | 19 | #define TICKS_PER_CYCLE 3072 |
---|
19 | 20 | #define CYCLES_PER_SECOND 8000 |
---|
20 | 21 | #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND) |
---|
| 22 | + |
---|
| 23 | +#define OHCI_MAX_SECOND 8 |
---|
21 | 24 | |
---|
22 | 25 | /* Always support Linux tracing subsystem. */ |
---|
23 | 26 | #define CREATE_TRACE_POINTS |
---|
.. | .. |
---|
52 | 55 | #define CIP_FMT_AM 0x10 |
---|
53 | 56 | #define AMDTP_FDF_NO_DATA 0xff |
---|
54 | 57 | |
---|
55 | | -/* TODO: make these configurable */ |
---|
56 | | -#define INTERRUPT_INTERVAL 16 |
---|
57 | | -#define QUEUE_LENGTH 48 |
---|
| 58 | +// For iso header, tstamp and 2 CIP header. |
---|
| 59 | +#define IR_CTX_HEADER_SIZE_CIP 16 |
---|
| 60 | +// For iso header and tstamp. |
---|
| 61 | +#define IR_CTX_HEADER_SIZE_NO_CIP 8 |
---|
| 62 | +#define HEADER_TSTAMP_MASK 0x0000ffff |
---|
58 | 63 | |
---|
59 | | -#define IN_PACKET_HEADER_SIZE 4 |
---|
60 | | -#define OUT_PACKET_HEADER_SIZE 0 |
---|
| 64 | +#define IT_PKT_HEADER_SIZE_CIP 8 // For 2 CIP header. |
---|
| 65 | +#define IT_PKT_HEADER_SIZE_NO_CIP 0 // Nothing. |
---|
61 | 66 | |
---|
62 | | -static void pcm_period_tasklet(unsigned long data); |
---|
| 67 | +static void pcm_period_work(struct work_struct *work); |
---|
63 | 68 | |
---|
64 | 69 | /** |
---|
65 | 70 | * amdtp_stream_init - initialize an AMDTP stream structure |
---|
.. | .. |
---|
68 | 73 | * @dir: the direction of stream |
---|
69 | 74 | * @flags: the packet transmission method to use |
---|
70 | 75 | * @fmt: the value of fmt field in CIP header |
---|
71 | | - * @process_data_blocks: callback handler to process data blocks |
---|
| 76 | + * @process_ctx_payloads: callback handler to process payloads of isoc context |
---|
72 | 77 | * @protocol_size: the size to allocate newly for protocol |
---|
73 | 78 | */ |
---|
74 | 79 | int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, |
---|
75 | 80 | enum amdtp_stream_direction dir, enum cip_flags flags, |
---|
76 | 81 | unsigned int fmt, |
---|
77 | | - amdtp_stream_process_data_blocks_t process_data_blocks, |
---|
| 82 | + amdtp_stream_process_ctx_payloads_t process_ctx_payloads, |
---|
78 | 83 | unsigned int protocol_size) |
---|
79 | 84 | { |
---|
80 | | - if (process_data_blocks == NULL) |
---|
| 85 | + if (process_ctx_payloads == NULL) |
---|
81 | 86 | return -EINVAL; |
---|
82 | 87 | |
---|
83 | 88 | s->protocol = kzalloc(protocol_size, GFP_KERNEL); |
---|
.. | .. |
---|
89 | 94 | s->flags = flags; |
---|
90 | 95 | s->context = ERR_PTR(-1); |
---|
91 | 96 | mutex_init(&s->mutex); |
---|
92 | | - tasklet_init(&s->period_tasklet, pcm_period_tasklet, (unsigned long)s); |
---|
| 97 | + INIT_WORK(&s->period_work, pcm_period_work); |
---|
93 | 98 | s->packet_index = 0; |
---|
94 | 99 | |
---|
95 | 100 | init_waitqueue_head(&s->callback_wait); |
---|
96 | 101 | s->callbacked = false; |
---|
97 | 102 | |
---|
98 | 103 | s->fmt = fmt; |
---|
99 | | - s->process_data_blocks = process_data_blocks; |
---|
| 104 | + s->process_ctx_payloads = process_ctx_payloads; |
---|
| 105 | + |
---|
| 106 | + if (dir == AMDTP_OUT_STREAM) |
---|
| 107 | + s->ctx_data.rx.syt_override = -1; |
---|
100 | 108 | |
---|
101 | 109 | return 0; |
---|
102 | 110 | } |
---|
.. | .. |
---|
140 | 148 | }; |
---|
141 | 149 | EXPORT_SYMBOL(amdtp_rate_table); |
---|
142 | 150 | |
---|
| 151 | +static int apply_constraint_to_size(struct snd_pcm_hw_params *params, |
---|
| 152 | + struct snd_pcm_hw_rule *rule) |
---|
| 153 | +{ |
---|
| 154 | + struct snd_interval *s = hw_param_interval(params, rule->var); |
---|
| 155 | + const struct snd_interval *r = |
---|
| 156 | + hw_param_interval_c(params, SNDRV_PCM_HW_PARAM_RATE); |
---|
| 157 | + struct snd_interval t = {0}; |
---|
| 158 | + unsigned int step = 0; |
---|
| 159 | + int i; |
---|
| 160 | + |
---|
| 161 | + for (i = 0; i < CIP_SFC_COUNT; ++i) { |
---|
| 162 | + if (snd_interval_test(r, amdtp_rate_table[i])) |
---|
| 163 | + step = max(step, amdtp_syt_intervals[i]); |
---|
| 164 | + } |
---|
| 165 | + |
---|
| 166 | + t.min = roundup(s->min, step); |
---|
| 167 | + t.max = rounddown(s->max, step); |
---|
| 168 | + t.integer = 1; |
---|
| 169 | + |
---|
| 170 | + return snd_interval_refine(s, &t); |
---|
| 171 | +} |
---|
| 172 | + |
---|
143 | 173 | /** |
---|
144 | 174 | * amdtp_stream_add_pcm_hw_constraints - add hw constraints for PCM substream |
---|
145 | 175 | * @s: the AMDTP stream, which must be initialized. |
---|
.. | .. |
---|
149 | 179 | struct snd_pcm_runtime *runtime) |
---|
150 | 180 | { |
---|
151 | 181 | struct snd_pcm_hardware *hw = &runtime->hw; |
---|
| 182 | + unsigned int ctx_header_size; |
---|
| 183 | + unsigned int maximum_usec_per_period; |
---|
152 | 184 | int err; |
---|
153 | 185 | |
---|
154 | 186 | hw->info = SNDRV_PCM_INFO_BATCH | |
---|
.. | .. |
---|
169 | 201 | hw->period_bytes_max = hw->period_bytes_min * 2048; |
---|
170 | 202 | hw->buffer_bytes_max = hw->period_bytes_max * hw->periods_min; |
---|
171 | 203 | |
---|
172 | | - /* |
---|
173 | | - * Currently firewire-lib processes 16 packets in one software |
---|
174 | | - * interrupt callback. This equals to 2msec but actually the |
---|
175 | | - * interval of the interrupts has a jitter. |
---|
176 | | - * Additionally, even if adding a constraint to fit period size to |
---|
177 | | - * 2msec, actual calculated frames per period doesn't equal to 2msec, |
---|
178 | | - * depending on sampling rate. |
---|
179 | | - * Anyway, the interval to call snd_pcm_period_elapsed() cannot 2msec. |
---|
180 | | - * Here let us use 5msec for safe period interrupt. |
---|
181 | | - */ |
---|
| 204 | + // Linux driver for 1394 OHCI controller voluntarily flushes isoc |
---|
| 205 | + // context when total size of accumulated context header reaches |
---|
| 206 | + // PAGE_SIZE. This kicks work for the isoc context and brings |
---|
| 207 | + // callback in the middle of scheduled interrupts. |
---|
| 208 | + // Although AMDTP streams in the same domain use the same events per |
---|
| 209 | + // IRQ, use the largest size of context header between IT/IR contexts. |
---|
| 210 | + // Here, use the value of context header in IR context is for both |
---|
| 211 | + // contexts. |
---|
| 212 | + if (!(s->flags & CIP_NO_HEADER)) |
---|
| 213 | + ctx_header_size = IR_CTX_HEADER_SIZE_CIP; |
---|
| 214 | + else |
---|
| 215 | + ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP; |
---|
| 216 | + maximum_usec_per_period = USEC_PER_SEC * PAGE_SIZE / |
---|
| 217 | + CYCLES_PER_SECOND / ctx_header_size; |
---|
| 218 | + |
---|
| 219 | + // In IEC 61883-6, one isoc packet can transfer events up to the value |
---|
| 220 | + // of syt interval. This comes from the interval of isoc cycle. As 1394 |
---|
| 221 | + // OHCI controller can generate hardware IRQ per isoc packet, the |
---|
| 222 | + // interval is 125 usec. |
---|
| 223 | + // However, there are two ways of transmission in IEC 61883-6; blocking |
---|
| 224 | + // and non-blocking modes. In blocking mode, the sequence of isoc packet |
---|
| 225 | + // includes 'empty' or 'NODATA' packets which include no event. In |
---|
| 226 | + // non-blocking mode, the number of events per packet is variable up to |
---|
| 227 | + // the syt interval. |
---|
| 228 | + // Due to the above protocol design, the minimum PCM frames per |
---|
| 229 | + // interrupt should be double of the value of syt interval, thus it is |
---|
| 230 | + // 250 usec. |
---|
182 | 231 | err = snd_pcm_hw_constraint_minmax(runtime, |
---|
183 | 232 | SNDRV_PCM_HW_PARAM_PERIOD_TIME, |
---|
184 | | - 5000, UINT_MAX); |
---|
| 233 | + 250, maximum_usec_per_period); |
---|
185 | 234 | if (err < 0) |
---|
186 | 235 | goto end; |
---|
187 | 236 | |
---|
.. | .. |
---|
194 | 243 | * number equals to SYT_INTERVAL. So the number is 8, 16 or 32, |
---|
195 | 244 | * depending on its sampling rate. For accurate period interrupt, it's |
---|
196 | 245 | * preferrable to align period/buffer sizes to current SYT_INTERVAL. |
---|
197 | | - * |
---|
198 | | - * TODO: These constraints can be improved with proper rules. |
---|
199 | | - * Currently apply LCM of SYT_INTERVALs. |
---|
200 | 246 | */ |
---|
201 | | - err = snd_pcm_hw_constraint_step(runtime, 0, |
---|
202 | | - SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 32); |
---|
| 247 | + err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE, |
---|
| 248 | + apply_constraint_to_size, NULL, |
---|
| 249 | + SNDRV_PCM_HW_PARAM_PERIOD_SIZE, |
---|
| 250 | + SNDRV_PCM_HW_PARAM_RATE, -1); |
---|
203 | 251 | if (err < 0) |
---|
204 | 252 | goto end; |
---|
205 | | - err = snd_pcm_hw_constraint_step(runtime, 0, |
---|
206 | | - SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32); |
---|
| 253 | + err = snd_pcm_hw_rule_add(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE, |
---|
| 254 | + apply_constraint_to_size, NULL, |
---|
| 255 | + SNDRV_PCM_HW_PARAM_BUFFER_SIZE, |
---|
| 256 | + SNDRV_PCM_HW_PARAM_RATE, -1); |
---|
| 257 | + if (err < 0) |
---|
| 258 | + goto end; |
---|
207 | 259 | end: |
---|
208 | 260 | return err; |
---|
209 | 261 | } |
---|
.. | .. |
---|
234 | 286 | s->data_block_quadlets = data_block_quadlets; |
---|
235 | 287 | s->syt_interval = amdtp_syt_intervals[sfc]; |
---|
236 | 288 | |
---|
237 | | - /* default buffering in the device */ |
---|
238 | | - s->transfer_delay = TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE; |
---|
239 | | - if (s->flags & CIP_BLOCKING) |
---|
240 | | - /* additional buffering needed to adjust for no-data packets */ |
---|
241 | | - s->transfer_delay += TICKS_PER_SECOND * s->syt_interval / rate; |
---|
| 289 | + // default buffering in the device. |
---|
| 290 | + if (s->direction == AMDTP_OUT_STREAM) { |
---|
| 291 | + s->ctx_data.rx.transfer_delay = |
---|
| 292 | + TRANSFER_DELAY_TICKS - TICKS_PER_CYCLE; |
---|
| 293 | + |
---|
| 294 | + if (s->flags & CIP_BLOCKING) { |
---|
| 295 | + // additional buffering needed to adjust for no-data |
---|
| 296 | + // packets. |
---|
| 297 | + s->ctx_data.rx.transfer_delay += |
---|
| 298 | + TICKS_PER_SECOND * s->syt_interval / rate; |
---|
| 299 | + } |
---|
| 300 | + } |
---|
242 | 301 | |
---|
243 | 302 | return 0; |
---|
244 | 303 | } |
---|
.. | .. |
---|
254 | 313 | unsigned int amdtp_stream_get_max_payload(struct amdtp_stream *s) |
---|
255 | 314 | { |
---|
256 | 315 | unsigned int multiplier = 1; |
---|
257 | | - unsigned int header_size = 0; |
---|
| 316 | + unsigned int cip_header_size = 0; |
---|
258 | 317 | |
---|
259 | 318 | if (s->flags & CIP_JUMBO_PAYLOAD) |
---|
260 | 319 | multiplier = 5; |
---|
261 | 320 | if (!(s->flags & CIP_NO_HEADER)) |
---|
262 | | - header_size = 8; |
---|
| 321 | + cip_header_size = sizeof(__be32) * 2; |
---|
263 | 322 | |
---|
264 | | - return header_size + |
---|
265 | | - s->syt_interval * s->data_block_quadlets * 4 * multiplier; |
---|
| 323 | + return cip_header_size + |
---|
| 324 | + s->syt_interval * s->data_block_quadlets * sizeof(__be32) * multiplier; |
---|
266 | 325 | } |
---|
267 | 326 | EXPORT_SYMBOL(amdtp_stream_get_max_payload); |
---|
268 | 327 | |
---|
.. | .. |
---|
274 | 333 | */ |
---|
275 | 334 | void amdtp_stream_pcm_prepare(struct amdtp_stream *s) |
---|
276 | 335 | { |
---|
277 | | - tasklet_kill(&s->period_tasklet); |
---|
| 336 | + cancel_work_sync(&s->period_work); |
---|
278 | 337 | s->pcm_buffer_pointer = 0; |
---|
279 | 338 | s->pcm_period_pointer = 0; |
---|
280 | 339 | } |
---|
281 | 340 | EXPORT_SYMBOL(amdtp_stream_pcm_prepare); |
---|
282 | 341 | |
---|
283 | | -static unsigned int calculate_data_blocks(struct amdtp_stream *s, |
---|
284 | | - unsigned int syt) |
---|
| 342 | +static unsigned int calculate_data_blocks(unsigned int *data_block_state, |
---|
| 343 | + bool is_blocking, bool is_no_info, |
---|
| 344 | + unsigned int syt_interval, enum cip_sfc sfc) |
---|
285 | 345 | { |
---|
286 | | - unsigned int phase, data_blocks; |
---|
| 346 | + unsigned int data_blocks; |
---|
287 | 347 | |
---|
288 | 348 | /* Blocking mode. */ |
---|
289 | | - if (s->flags & CIP_BLOCKING) { |
---|
| 349 | + if (is_blocking) { |
---|
290 | 350 | /* This module generate empty packet for 'no data'. */ |
---|
291 | | - if (syt == CIP_SYT_NO_INFO) |
---|
| 351 | + if (is_no_info) |
---|
292 | 352 | data_blocks = 0; |
---|
293 | 353 | else |
---|
294 | | - data_blocks = s->syt_interval; |
---|
| 354 | + data_blocks = syt_interval; |
---|
295 | 355 | /* Non-blocking mode. */ |
---|
296 | 356 | } else { |
---|
297 | | - if (!cip_sfc_is_base_44100(s->sfc)) { |
---|
298 | | - /* Sample_rate / 8000 is an integer, and precomputed. */ |
---|
299 | | - data_blocks = s->data_block_state; |
---|
| 357 | + if (!cip_sfc_is_base_44100(sfc)) { |
---|
| 358 | + // Sample_rate / 8000 is an integer, and precomputed. |
---|
| 359 | + data_blocks = *data_block_state; |
---|
300 | 360 | } else { |
---|
301 | | - phase = s->data_block_state; |
---|
| 361 | + unsigned int phase = *data_block_state; |
---|
302 | 362 | |
---|
303 | 363 | /* |
---|
304 | 364 | * This calculates the number of data blocks per packet so that |
---|
.. | .. |
---|
308 | 368 | * as possible in the sequence (to prevent underruns of the |
---|
309 | 369 | * device's buffer). |
---|
310 | 370 | */ |
---|
311 | | - if (s->sfc == CIP_SFC_44100) |
---|
| 371 | + if (sfc == CIP_SFC_44100) |
---|
312 | 372 | /* 6 6 5 6 5 6 5 ... */ |
---|
313 | 373 | data_blocks = 5 + ((phase & 1) ^ |
---|
314 | 374 | (phase == 0 || phase >= 40)); |
---|
315 | 375 | else |
---|
316 | 376 | /* 12 11 11 11 11 ... or 23 22 22 22 22 ... */ |
---|
317 | | - data_blocks = 11 * (s->sfc >> 1) + (phase == 0); |
---|
318 | | - if (++phase >= (80 >> (s->sfc >> 1))) |
---|
| 377 | + data_blocks = 11 * (sfc >> 1) + (phase == 0); |
---|
| 378 | + if (++phase >= (80 >> (sfc >> 1))) |
---|
319 | 379 | phase = 0; |
---|
320 | | - s->data_block_state = phase; |
---|
| 380 | + *data_block_state = phase; |
---|
321 | 381 | } |
---|
322 | 382 | } |
---|
323 | 383 | |
---|
324 | 384 | return data_blocks; |
---|
325 | 385 | } |
---|
326 | 386 | |
---|
327 | | -static unsigned int calculate_syt(struct amdtp_stream *s, |
---|
328 | | - unsigned int cycle) |
---|
| 387 | +static unsigned int calculate_syt_offset(unsigned int *last_syt_offset, |
---|
| 388 | + unsigned int *syt_offset_state, enum cip_sfc sfc) |
---|
329 | 389 | { |
---|
330 | | - unsigned int syt_offset, phase, index, syt; |
---|
| 390 | + unsigned int syt_offset; |
---|
331 | 391 | |
---|
332 | | - if (s->last_syt_offset < TICKS_PER_CYCLE) { |
---|
333 | | - if (!cip_sfc_is_base_44100(s->sfc)) |
---|
334 | | - syt_offset = s->last_syt_offset + s->syt_offset_state; |
---|
| 392 | + if (*last_syt_offset < TICKS_PER_CYCLE) { |
---|
| 393 | + if (!cip_sfc_is_base_44100(sfc)) |
---|
| 394 | + syt_offset = *last_syt_offset + *syt_offset_state; |
---|
335 | 395 | else { |
---|
336 | 396 | /* |
---|
337 | 397 | * The time, in ticks, of the n'th SYT_INTERVAL sample is: |
---|
.. | .. |
---|
343 | 403 | * 1386 1386 1387 1386 1386 1386 1387 1386 1386 1386 1387 ... |
---|
344 | 404 | * This code generates _exactly_ the same sequence. |
---|
345 | 405 | */ |
---|
346 | | - phase = s->syt_offset_state; |
---|
347 | | - index = phase % 13; |
---|
348 | | - syt_offset = s->last_syt_offset; |
---|
| 406 | + unsigned int phase = *syt_offset_state; |
---|
| 407 | + unsigned int index = phase % 13; |
---|
| 408 | + |
---|
| 409 | + syt_offset = *last_syt_offset; |
---|
349 | 410 | syt_offset += 1386 + ((index && !(index & 3)) || |
---|
350 | 411 | phase == 146); |
---|
351 | 412 | if (++phase >= 147) |
---|
352 | 413 | phase = 0; |
---|
353 | | - s->syt_offset_state = phase; |
---|
| 414 | + *syt_offset_state = phase; |
---|
354 | 415 | } |
---|
355 | 416 | } else |
---|
356 | | - syt_offset = s->last_syt_offset - TICKS_PER_CYCLE; |
---|
357 | | - s->last_syt_offset = syt_offset; |
---|
| 417 | + syt_offset = *last_syt_offset - TICKS_PER_CYCLE; |
---|
| 418 | + *last_syt_offset = syt_offset; |
---|
358 | 419 | |
---|
359 | | - if (syt_offset < TICKS_PER_CYCLE) { |
---|
360 | | - syt_offset += s->transfer_delay; |
---|
361 | | - syt = (cycle + syt_offset / TICKS_PER_CYCLE) << 12; |
---|
362 | | - syt += syt_offset % TICKS_PER_CYCLE; |
---|
| 420 | + if (syt_offset >= TICKS_PER_CYCLE) |
---|
| 421 | + syt_offset = CIP_SYT_NO_INFO; |
---|
363 | 422 | |
---|
364 | | - return syt & CIP_SYT_MASK; |
---|
365 | | - } else { |
---|
366 | | - return CIP_SYT_NO_INFO; |
---|
367 | | - } |
---|
| 423 | + return syt_offset; |
---|
368 | 424 | } |
---|
369 | 425 | |
---|
370 | 426 | static void update_pcm_pointers(struct amdtp_stream *s, |
---|
.. | .. |
---|
381 | 437 | s->pcm_period_pointer += frames; |
---|
382 | 438 | if (s->pcm_period_pointer >= pcm->runtime->period_size) { |
---|
383 | 439 | s->pcm_period_pointer -= pcm->runtime->period_size; |
---|
384 | | - tasklet_hi_schedule(&s->period_tasklet); |
---|
| 440 | + queue_work(system_highpri_wq, &s->period_work); |
---|
385 | 441 | } |
---|
386 | 442 | } |
---|
387 | 443 | |
---|
388 | | -static void pcm_period_tasklet(unsigned long data) |
---|
| 444 | +static void pcm_period_work(struct work_struct *work) |
---|
389 | 445 | { |
---|
390 | | - struct amdtp_stream *s = (void *)data; |
---|
| 446 | + struct amdtp_stream *s = container_of(work, struct amdtp_stream, |
---|
| 447 | + period_work); |
---|
391 | 448 | struct snd_pcm_substream *pcm = READ_ONCE(s->pcm); |
---|
392 | 449 | |
---|
393 | 450 | if (pcm) |
---|
394 | 451 | snd_pcm_period_elapsed(pcm); |
---|
395 | 452 | } |
---|
396 | 453 | |
---|
397 | | -static int queue_packet(struct amdtp_stream *s, unsigned int header_length, |
---|
398 | | - unsigned int payload_length) |
---|
| 454 | +static int queue_packet(struct amdtp_stream *s, struct fw_iso_packet *params, |
---|
| 455 | + bool sched_irq) |
---|
399 | 456 | { |
---|
400 | | - struct fw_iso_packet p = {0}; |
---|
401 | | - int err = 0; |
---|
| 457 | + int err; |
---|
402 | 458 | |
---|
403 | | - if (IS_ERR(s->context)) |
---|
404 | | - goto end; |
---|
| 459 | + params->interrupt = sched_irq; |
---|
| 460 | + params->tag = s->tag; |
---|
| 461 | + params->sy = 0; |
---|
405 | 462 | |
---|
406 | | - p.interrupt = IS_ALIGNED(s->packet_index + 1, INTERRUPT_INTERVAL); |
---|
407 | | - p.tag = s->tag; |
---|
408 | | - p.header_length = header_length; |
---|
409 | | - if (payload_length > 0) |
---|
410 | | - p.payload_length = payload_length; |
---|
411 | | - else |
---|
412 | | - p.skip = true; |
---|
413 | | - err = fw_iso_context_queue(s->context, &p, &s->buffer.iso_buffer, |
---|
| 463 | + err = fw_iso_context_queue(s->context, params, &s->buffer.iso_buffer, |
---|
414 | 464 | s->buffer.packets[s->packet_index].offset); |
---|
415 | 465 | if (err < 0) { |
---|
416 | 466 | dev_err(&s->unit->device, "queueing error: %d\n", err); |
---|
417 | 467 | goto end; |
---|
418 | 468 | } |
---|
419 | 469 | |
---|
420 | | - if (++s->packet_index >= QUEUE_LENGTH) |
---|
| 470 | + if (++s->packet_index >= s->queue_size) |
---|
421 | 471 | s->packet_index = 0; |
---|
422 | 472 | end: |
---|
423 | 473 | return err; |
---|
424 | 474 | } |
---|
425 | 475 | |
---|
426 | 476 | static inline int queue_out_packet(struct amdtp_stream *s, |
---|
427 | | - unsigned int payload_length) |
---|
| 477 | + struct fw_iso_packet *params, bool sched_irq) |
---|
428 | 478 | { |
---|
429 | | - return queue_packet(s, OUT_PACKET_HEADER_SIZE, payload_length); |
---|
| 479 | + params->skip = |
---|
| 480 | + !!(params->header_length == 0 && params->payload_length == 0); |
---|
| 481 | + return queue_packet(s, params, sched_irq); |
---|
430 | 482 | } |
---|
431 | 483 | |
---|
432 | | -static inline int queue_in_packet(struct amdtp_stream *s) |
---|
| 484 | +static inline int queue_in_packet(struct amdtp_stream *s, |
---|
| 485 | + struct fw_iso_packet *params) |
---|
433 | 486 | { |
---|
434 | | - return queue_packet(s, IN_PACKET_HEADER_SIZE, s->max_payload_length); |
---|
| 487 | + // Queue one packet for IR context. |
---|
| 488 | + params->header_length = s->ctx_data.tx.ctx_header_size; |
---|
| 489 | + params->payload_length = s->ctx_data.tx.max_ctx_payload_length; |
---|
| 490 | + params->skip = false; |
---|
| 491 | + return queue_packet(s, params, false); |
---|
435 | 492 | } |
---|
436 | 493 | |
---|
437 | | -static int handle_out_packet(struct amdtp_stream *s, |
---|
438 | | - unsigned int payload_length, unsigned int cycle, |
---|
439 | | - unsigned int index) |
---|
| 494 | +static void generate_cip_header(struct amdtp_stream *s, __be32 cip_header[2], |
---|
| 495 | + unsigned int data_block_counter, unsigned int syt) |
---|
440 | 496 | { |
---|
441 | | - __be32 *buffer; |
---|
442 | | - unsigned int syt; |
---|
443 | | - unsigned int data_blocks; |
---|
444 | | - unsigned int pcm_frames; |
---|
445 | | - struct snd_pcm_substream *pcm; |
---|
446 | | - |
---|
447 | | - buffer = s->buffer.packets[s->packet_index].buffer; |
---|
448 | | - syt = calculate_syt(s, cycle); |
---|
449 | | - data_blocks = calculate_data_blocks(s, syt); |
---|
450 | | - pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt); |
---|
451 | | - |
---|
452 | | - if (s->flags & CIP_DBC_IS_END_EVENT) |
---|
453 | | - s->data_block_counter = |
---|
454 | | - (s->data_block_counter + data_blocks) & 0xff; |
---|
455 | | - |
---|
456 | | - buffer[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) | |
---|
| 497 | + cip_header[0] = cpu_to_be32(READ_ONCE(s->source_node_id_field) | |
---|
457 | 498 | (s->data_block_quadlets << CIP_DBS_SHIFT) | |
---|
458 | 499 | ((s->sph << CIP_SPH_SHIFT) & CIP_SPH_MASK) | |
---|
459 | | - s->data_block_counter); |
---|
460 | | - buffer[1] = cpu_to_be32(CIP_EOH | |
---|
461 | | - ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) | |
---|
462 | | - ((s->fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) | |
---|
463 | | - (syt & CIP_SYT_MASK)); |
---|
464 | | - |
---|
465 | | - if (!(s->flags & CIP_DBC_IS_END_EVENT)) |
---|
466 | | - s->data_block_counter = |
---|
467 | | - (s->data_block_counter + data_blocks) & 0xff; |
---|
468 | | - payload_length = 8 + data_blocks * 4 * s->data_block_quadlets; |
---|
469 | | - |
---|
470 | | - trace_out_packet(s, cycle, buffer, payload_length, index); |
---|
471 | | - |
---|
472 | | - if (queue_out_packet(s, payload_length) < 0) |
---|
473 | | - return -EIO; |
---|
474 | | - |
---|
475 | | - pcm = READ_ONCE(s->pcm); |
---|
476 | | - if (pcm && pcm_frames > 0) |
---|
477 | | - update_pcm_pointers(s, pcm, pcm_frames); |
---|
478 | | - |
---|
479 | | - /* No need to return the number of handled data blocks. */ |
---|
480 | | - return 0; |
---|
| 500 | + data_block_counter); |
---|
| 501 | + cip_header[1] = cpu_to_be32(CIP_EOH | |
---|
| 502 | + ((s->fmt << CIP_FMT_SHIFT) & CIP_FMT_MASK) | |
---|
| 503 | + ((s->ctx_data.rx.fdf << CIP_FDF_SHIFT) & CIP_FDF_MASK) | |
---|
| 504 | + (syt & CIP_SYT_MASK)); |
---|
481 | 505 | } |
---|
482 | 506 | |
---|
483 | | -static int handle_out_packet_without_header(struct amdtp_stream *s, |
---|
484 | | - unsigned int payload_length, unsigned int cycle, |
---|
485 | | - unsigned int index) |
---|
| 507 | +static void build_it_pkt_header(struct amdtp_stream *s, unsigned int cycle, |
---|
| 508 | + struct fw_iso_packet *params, |
---|
| 509 | + unsigned int data_blocks, |
---|
| 510 | + unsigned int data_block_counter, |
---|
| 511 | + unsigned int syt, unsigned int index) |
---|
486 | 512 | { |
---|
487 | | - __be32 *buffer; |
---|
488 | | - unsigned int syt; |
---|
489 | | - unsigned int data_blocks; |
---|
490 | | - unsigned int pcm_frames; |
---|
491 | | - struct snd_pcm_substream *pcm; |
---|
| 513 | + unsigned int payload_length; |
---|
| 514 | + __be32 *cip_header; |
---|
492 | 515 | |
---|
493 | | - buffer = s->buffer.packets[s->packet_index].buffer; |
---|
494 | | - syt = calculate_syt(s, cycle); |
---|
495 | | - data_blocks = calculate_data_blocks(s, syt); |
---|
496 | | - pcm_frames = s->process_data_blocks(s, buffer, data_blocks, &syt); |
---|
497 | | - s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff; |
---|
| 516 | + payload_length = data_blocks * sizeof(__be32) * s->data_block_quadlets; |
---|
| 517 | + params->payload_length = payload_length; |
---|
498 | 518 | |
---|
499 | | - payload_length = data_blocks * 4 * s->data_block_quadlets; |
---|
| 519 | + if (!(s->flags & CIP_NO_HEADER)) { |
---|
| 520 | + cip_header = (__be32 *)params->header; |
---|
| 521 | + generate_cip_header(s, cip_header, data_block_counter, syt); |
---|
| 522 | + params->header_length = 2 * sizeof(__be32); |
---|
| 523 | + payload_length += params->header_length; |
---|
| 524 | + } else { |
---|
| 525 | + cip_header = NULL; |
---|
| 526 | + } |
---|
500 | 527 | |
---|
501 | | - trace_out_packet_without_header(s, cycle, payload_length, data_blocks, |
---|
502 | | - index); |
---|
503 | | - |
---|
504 | | - if (queue_out_packet(s, payload_length) < 0) |
---|
505 | | - return -EIO; |
---|
506 | | - |
---|
507 | | - pcm = READ_ONCE(s->pcm); |
---|
508 | | - if (pcm && pcm_frames > 0) |
---|
509 | | - update_pcm_pointers(s, pcm, pcm_frames); |
---|
510 | | - |
---|
511 | | - /* No need to return the number of handled data blocks. */ |
---|
512 | | - return 0; |
---|
| 528 | + trace_amdtp_packet(s, cycle, cip_header, payload_length, data_blocks, |
---|
| 529 | + data_block_counter, s->packet_index, index); |
---|
513 | 530 | } |
---|
514 | 531 | |
---|
515 | | -static int handle_in_packet(struct amdtp_stream *s, |
---|
516 | | - unsigned int payload_length, unsigned int cycle, |
---|
517 | | - unsigned int index) |
---|
| 532 | +static int check_cip_header(struct amdtp_stream *s, const __be32 *buf, |
---|
| 533 | + unsigned int payload_length, |
---|
| 534 | + unsigned int *data_blocks, |
---|
| 535 | + unsigned int *data_block_counter, unsigned int *syt) |
---|
518 | 536 | { |
---|
519 | | - __be32 *buffer; |
---|
520 | 537 | u32 cip_header[2]; |
---|
521 | | - unsigned int sph, fmt, fdf, syt; |
---|
522 | | - unsigned int data_block_quadlets, data_block_counter, dbc_interval; |
---|
523 | | - unsigned int data_blocks; |
---|
524 | | - struct snd_pcm_substream *pcm; |
---|
525 | | - unsigned int pcm_frames; |
---|
| 538 | + unsigned int sph; |
---|
| 539 | + unsigned int fmt; |
---|
| 540 | + unsigned int fdf; |
---|
| 541 | + unsigned int dbc; |
---|
526 | 542 | bool lost; |
---|
527 | 543 | |
---|
528 | | - buffer = s->buffer.packets[s->packet_index].buffer; |
---|
529 | | - cip_header[0] = be32_to_cpu(buffer[0]); |
---|
530 | | - cip_header[1] = be32_to_cpu(buffer[1]); |
---|
531 | | - |
---|
532 | | - trace_in_packet(s, cycle, cip_header, payload_length, index); |
---|
| 544 | + cip_header[0] = be32_to_cpu(buf[0]); |
---|
| 545 | + cip_header[1] = be32_to_cpu(buf[1]); |
---|
533 | 546 | |
---|
534 | 547 | /* |
---|
535 | 548 | * This module supports 'Two-quadlet CIP header with SYT field'. |
---|
.. | .. |
---|
541 | 554 | dev_info_ratelimited(&s->unit->device, |
---|
542 | 555 | "Invalid CIP header for AMDTP: %08X:%08X\n", |
---|
543 | 556 | cip_header[0], cip_header[1]); |
---|
544 | | - data_blocks = 0; |
---|
545 | | - pcm_frames = 0; |
---|
546 | | - goto end; |
---|
| 557 | + return -EAGAIN; |
---|
547 | 558 | } |
---|
548 | 559 | |
---|
549 | 560 | /* Check valid protocol or not. */ |
---|
.. | .. |
---|
553 | 564 | dev_info_ratelimited(&s->unit->device, |
---|
554 | 565 | "Detect unexpected protocol: %08x %08x\n", |
---|
555 | 566 | cip_header[0], cip_header[1]); |
---|
556 | | - data_blocks = 0; |
---|
557 | | - pcm_frames = 0; |
---|
558 | | - goto end; |
---|
| 567 | + return -EAGAIN; |
---|
559 | 568 | } |
---|
560 | 569 | |
---|
561 | 570 | /* Calculate data blocks */ |
---|
562 | 571 | fdf = (cip_header[1] & CIP_FDF_MASK) >> CIP_FDF_SHIFT; |
---|
563 | | - if (payload_length < 12 || |
---|
| 572 | + if (payload_length < sizeof(__be32) * 2 || |
---|
564 | 573 | (fmt == CIP_FMT_AM && fdf == AMDTP_FDF_NO_DATA)) { |
---|
565 | | - data_blocks = 0; |
---|
| 574 | + *data_blocks = 0; |
---|
566 | 575 | } else { |
---|
567 | | - data_block_quadlets = |
---|
568 | | - (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT; |
---|
| 576 | + unsigned int data_block_quadlets = |
---|
| 577 | + (cip_header[0] & CIP_DBS_MASK) >> CIP_DBS_SHIFT; |
---|
569 | 578 | /* avoid division by zero */ |
---|
570 | 579 | if (data_block_quadlets == 0) { |
---|
571 | 580 | dev_err(&s->unit->device, |
---|
.. | .. |
---|
576 | 585 | if (s->flags & CIP_WRONG_DBS) |
---|
577 | 586 | data_block_quadlets = s->data_block_quadlets; |
---|
578 | 587 | |
---|
579 | | - data_blocks = (payload_length / 4 - 2) / |
---|
| 588 | + *data_blocks = (payload_length / sizeof(__be32) - 2) / |
---|
580 | 589 | data_block_quadlets; |
---|
581 | 590 | } |
---|
582 | 591 | |
---|
583 | 592 | /* Check data block counter continuity */ |
---|
584 | | - data_block_counter = cip_header[0] & CIP_DBC_MASK; |
---|
585 | | - if (data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) && |
---|
586 | | - s->data_block_counter != UINT_MAX) |
---|
587 | | - data_block_counter = s->data_block_counter; |
---|
| 593 | + dbc = cip_header[0] & CIP_DBC_MASK; |
---|
| 594 | + if (*data_blocks == 0 && (s->flags & CIP_EMPTY_HAS_WRONG_DBC) && |
---|
| 595 | + *data_block_counter != UINT_MAX) |
---|
| 596 | + dbc = *data_block_counter; |
---|
588 | 597 | |
---|
589 | | - if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && |
---|
590 | | - data_block_counter == s->tx_first_dbc) || |
---|
591 | | - s->data_block_counter == UINT_MAX) { |
---|
| 598 | + if ((dbc == 0x00 && (s->flags & CIP_SKIP_DBC_ZERO_CHECK)) || |
---|
| 599 | + *data_block_counter == UINT_MAX) { |
---|
592 | 600 | lost = false; |
---|
593 | 601 | } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { |
---|
594 | | - lost = data_block_counter != s->data_block_counter; |
---|
| 602 | + lost = dbc != *data_block_counter; |
---|
595 | 603 | } else { |
---|
596 | | - if (data_blocks > 0 && s->tx_dbc_interval > 0) |
---|
597 | | - dbc_interval = s->tx_dbc_interval; |
---|
598 | | - else |
---|
599 | | - dbc_interval = data_blocks; |
---|
| 604 | + unsigned int dbc_interval; |
---|
600 | 605 | |
---|
601 | | - lost = data_block_counter != |
---|
602 | | - ((s->data_block_counter + dbc_interval) & 0xff); |
---|
| 606 | + if (*data_blocks > 0 && s->ctx_data.tx.dbc_interval > 0) |
---|
| 607 | + dbc_interval = s->ctx_data.tx.dbc_interval; |
---|
| 608 | + else |
---|
| 609 | + dbc_interval = *data_blocks; |
---|
| 610 | + |
---|
| 611 | + lost = dbc != ((*data_block_counter + dbc_interval) & 0xff); |
---|
603 | 612 | } |
---|
604 | 613 | |
---|
605 | 614 | if (lost) { |
---|
606 | 615 | dev_err(&s->unit->device, |
---|
607 | 616 | "Detect discontinuity of CIP: %02X %02X\n", |
---|
608 | | - s->data_block_counter, data_block_counter); |
---|
| 617 | + *data_block_counter, dbc); |
---|
609 | 618 | return -EIO; |
---|
610 | 619 | } |
---|
611 | 620 | |
---|
612 | | - syt = be32_to_cpu(buffer[1]) & CIP_SYT_MASK; |
---|
613 | | - pcm_frames = s->process_data_blocks(s, buffer + 2, data_blocks, &syt); |
---|
| 621 | + *data_block_counter = dbc; |
---|
614 | 622 | |
---|
615 | | - if (s->flags & CIP_DBC_IS_END_EVENT) |
---|
616 | | - s->data_block_counter = data_block_counter; |
---|
| 623 | + *syt = cip_header[1] & CIP_SYT_MASK; |
---|
| 624 | + |
---|
| 625 | + return 0; |
---|
| 626 | +} |
---|
| 627 | + |
---|
| 628 | +static int parse_ir_ctx_header(struct amdtp_stream *s, unsigned int cycle, |
---|
| 629 | + const __be32 *ctx_header, |
---|
| 630 | + unsigned int *payload_length, |
---|
| 631 | + unsigned int *data_blocks, |
---|
| 632 | + unsigned int *data_block_counter, |
---|
| 633 | + unsigned int *syt, unsigned int packet_index, unsigned int index) |
---|
| 634 | +{ |
---|
| 635 | + const __be32 *cip_header; |
---|
| 636 | + unsigned int cip_header_size; |
---|
| 637 | + int err; |
---|
| 638 | + |
---|
| 639 | + *payload_length = be32_to_cpu(ctx_header[0]) >> ISO_DATA_LENGTH_SHIFT; |
---|
| 640 | + |
---|
| 641 | + if (!(s->flags & CIP_NO_HEADER)) |
---|
| 642 | + cip_header_size = 8; |
---|
617 | 643 | else |
---|
618 | | - s->data_block_counter = |
---|
619 | | - (data_block_counter + data_blocks) & 0xff; |
---|
620 | | -end: |
---|
621 | | - if (queue_in_packet(s) < 0) |
---|
| 644 | + cip_header_size = 0; |
---|
| 645 | + |
---|
| 646 | + if (*payload_length > cip_header_size + s->ctx_data.tx.max_ctx_payload_length) { |
---|
| 647 | + dev_err(&s->unit->device, |
---|
| 648 | + "Detect jumbo payload: %04x %04x\n", |
---|
| 649 | + *payload_length, cip_header_size + s->ctx_data.tx.max_ctx_payload_length); |
---|
622 | 650 | return -EIO; |
---|
| 651 | + } |
---|
623 | 652 | |
---|
624 | | - pcm = READ_ONCE(s->pcm); |
---|
625 | | - if (pcm && pcm_frames > 0) |
---|
626 | | - update_pcm_pointers(s, pcm, pcm_frames); |
---|
| 653 | + if (cip_header_size > 0) { |
---|
| 654 | + cip_header = ctx_header + 2; |
---|
| 655 | + err = check_cip_header(s, cip_header, *payload_length, |
---|
| 656 | + data_blocks, data_block_counter, syt); |
---|
| 657 | + if (err < 0) |
---|
| 658 | + return err; |
---|
| 659 | + } else { |
---|
| 660 | + cip_header = NULL; |
---|
| 661 | + err = 0; |
---|
| 662 | + *data_blocks = *payload_length / sizeof(__be32) / |
---|
| 663 | + s->data_block_quadlets; |
---|
| 664 | + *syt = 0; |
---|
627 | 665 | |
---|
628 | | - return 0; |
---|
| 666 | + if (*data_block_counter == UINT_MAX) |
---|
| 667 | + *data_block_counter = 0; |
---|
| 668 | + } |
---|
| 669 | + |
---|
| 670 | + trace_amdtp_packet(s, cycle, cip_header, *payload_length, *data_blocks, |
---|
| 671 | + *data_block_counter, packet_index, index); |
---|
| 672 | + |
---|
| 673 | + return err; |
---|
629 | 674 | } |
---|
630 | 675 | |
---|
631 | | -static int handle_in_packet_without_header(struct amdtp_stream *s, |
---|
632 | | - unsigned int payload_length, unsigned int cycle, |
---|
633 | | - unsigned int index) |
---|
| 676 | +// In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On |
---|
| 677 | +// the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent |
---|
| 678 | +// it. Thus, via Linux firewire subsystem, we can get the 3 bits for second. |
---|
| 679 | +static inline u32 compute_cycle_count(__be32 ctx_header_tstamp) |
---|
634 | 680 | { |
---|
635 | | - __be32 *buffer; |
---|
636 | | - unsigned int payload_quadlets; |
---|
637 | | - unsigned int data_blocks; |
---|
638 | | - struct snd_pcm_substream *pcm; |
---|
639 | | - unsigned int pcm_frames; |
---|
640 | | - |
---|
641 | | - buffer = s->buffer.packets[s->packet_index].buffer; |
---|
642 | | - payload_quadlets = payload_length / 4; |
---|
643 | | - data_blocks = payload_quadlets / s->data_block_quadlets; |
---|
644 | | - |
---|
645 | | - trace_in_packet_without_header(s, cycle, payload_quadlets, data_blocks, |
---|
646 | | - index); |
---|
647 | | - |
---|
648 | | - pcm_frames = s->process_data_blocks(s, buffer, data_blocks, NULL); |
---|
649 | | - s->data_block_counter = (s->data_block_counter + data_blocks) & 0xff; |
---|
650 | | - |
---|
651 | | - if (queue_in_packet(s) < 0) |
---|
652 | | - return -EIO; |
---|
653 | | - |
---|
654 | | - pcm = READ_ONCE(s->pcm); |
---|
655 | | - if (pcm && pcm_frames > 0) |
---|
656 | | - update_pcm_pointers(s, pcm, pcm_frames); |
---|
657 | | - |
---|
658 | | - return 0; |
---|
659 | | -} |
---|
660 | | - |
---|
661 | | -/* |
---|
662 | | - * In CYCLE_TIMER register of IEEE 1394, 7 bits are used to represent second. On |
---|
663 | | - * the other hand, in DMA descriptors of 1394 OHCI, 3 bits are used to represent |
---|
664 | | - * it. Thus, via Linux firewire subsystem, we can get the 3 bits for second. |
---|
665 | | - */ |
---|
666 | | -static inline u32 compute_cycle_count(u32 tstamp) |
---|
667 | | -{ |
---|
| 681 | + u32 tstamp = be32_to_cpu(ctx_header_tstamp) & HEADER_TSTAMP_MASK; |
---|
668 | 682 | return (((tstamp >> 13) & 0x07) * 8000) + (tstamp & 0x1fff); |
---|
669 | 683 | } |
---|
670 | 684 | |
---|
671 | 685 | static inline u32 increment_cycle_count(u32 cycle, unsigned int addend) |
---|
672 | 686 | { |
---|
673 | 687 | cycle += addend; |
---|
674 | | - if (cycle >= 8 * CYCLES_PER_SECOND) |
---|
675 | | - cycle -= 8 * CYCLES_PER_SECOND; |
---|
| 688 | + if (cycle >= OHCI_MAX_SECOND * CYCLES_PER_SECOND) |
---|
| 689 | + cycle -= OHCI_MAX_SECOND * CYCLES_PER_SECOND; |
---|
676 | 690 | return cycle; |
---|
677 | 691 | } |
---|
678 | 692 | |
---|
679 | | -static inline u32 decrement_cycle_count(u32 cycle, unsigned int subtrahend) |
---|
| 693 | +// Align to actual cycle count for the packet which is going to be scheduled. |
---|
| 694 | +// This module queued the same number of isochronous cycle as the size of queue |
---|
| 695 | +// to kip isochronous cycle, therefore it's OK to just increment the cycle by |
---|
| 696 | +// the size of queue for scheduled cycle. |
---|
| 697 | +static inline u32 compute_it_cycle(const __be32 ctx_header_tstamp, |
---|
| 698 | + unsigned int queue_size) |
---|
680 | 699 | { |
---|
681 | | - if (cycle < subtrahend) |
---|
682 | | - cycle += 8 * CYCLES_PER_SECOND; |
---|
683 | | - return cycle - subtrahend; |
---|
| 700 | + u32 cycle = compute_cycle_count(ctx_header_tstamp); |
---|
| 701 | + return increment_cycle_count(cycle, queue_size); |
---|
| 702 | +} |
---|
| 703 | + |
---|
| 704 | +static int generate_device_pkt_descs(struct amdtp_stream *s, |
---|
| 705 | + struct pkt_desc *descs, |
---|
| 706 | + const __be32 *ctx_header, |
---|
| 707 | + unsigned int packets) |
---|
| 708 | +{ |
---|
| 709 | + unsigned int dbc = s->data_block_counter; |
---|
| 710 | + unsigned int packet_index = s->packet_index; |
---|
| 711 | + unsigned int queue_size = s->queue_size; |
---|
| 712 | + int i; |
---|
| 713 | + int err; |
---|
| 714 | + |
---|
| 715 | + for (i = 0; i < packets; ++i) { |
---|
| 716 | + struct pkt_desc *desc = descs + i; |
---|
| 717 | + unsigned int cycle; |
---|
| 718 | + unsigned int payload_length; |
---|
| 719 | + unsigned int data_blocks; |
---|
| 720 | + unsigned int syt; |
---|
| 721 | + |
---|
| 722 | + cycle = compute_cycle_count(ctx_header[1]); |
---|
| 723 | + |
---|
| 724 | + err = parse_ir_ctx_header(s, cycle, ctx_header, &payload_length, |
---|
| 725 | + &data_blocks, &dbc, &syt, packet_index, i); |
---|
| 726 | + if (err < 0) |
---|
| 727 | + return err; |
---|
| 728 | + |
---|
| 729 | + desc->cycle = cycle; |
---|
| 730 | + desc->syt = syt; |
---|
| 731 | + desc->data_blocks = data_blocks; |
---|
| 732 | + desc->data_block_counter = dbc; |
---|
| 733 | + desc->ctx_payload = s->buffer.packets[packet_index].buffer; |
---|
| 734 | + |
---|
| 735 | + if (!(s->flags & CIP_DBC_IS_END_EVENT)) |
---|
| 736 | + dbc = (dbc + desc->data_blocks) & 0xff; |
---|
| 737 | + |
---|
| 738 | + ctx_header += |
---|
| 739 | + s->ctx_data.tx.ctx_header_size / sizeof(*ctx_header); |
---|
| 740 | + |
---|
| 741 | + packet_index = (packet_index + 1) % queue_size; |
---|
| 742 | + } |
---|
| 743 | + |
---|
| 744 | + s->data_block_counter = dbc; |
---|
| 745 | + |
---|
| 746 | + return 0; |
---|
| 747 | +} |
---|
| 748 | + |
---|
| 749 | +static unsigned int compute_syt(unsigned int syt_offset, unsigned int cycle, |
---|
| 750 | + unsigned int transfer_delay) |
---|
| 751 | +{ |
---|
| 752 | + unsigned int syt; |
---|
| 753 | + |
---|
| 754 | + syt_offset += transfer_delay; |
---|
| 755 | + syt = ((cycle + syt_offset / TICKS_PER_CYCLE) << 12) | |
---|
| 756 | + (syt_offset % TICKS_PER_CYCLE); |
---|
| 757 | + return syt & CIP_SYT_MASK; |
---|
| 758 | +} |
---|
| 759 | + |
---|
| 760 | +static void generate_pkt_descs(struct amdtp_stream *s, struct pkt_desc *descs, |
---|
| 761 | + const __be32 *ctx_header, unsigned int packets, |
---|
| 762 | + const struct seq_desc *seq_descs, |
---|
| 763 | + unsigned int seq_size) |
---|
| 764 | +{ |
---|
| 765 | + unsigned int dbc = s->data_block_counter; |
---|
| 766 | + unsigned int seq_index = s->ctx_data.rx.seq_index; |
---|
| 767 | + int i; |
---|
| 768 | + |
---|
| 769 | + for (i = 0; i < packets; ++i) { |
---|
| 770 | + struct pkt_desc *desc = descs + i; |
---|
| 771 | + unsigned int index = (s->packet_index + i) % s->queue_size; |
---|
| 772 | + const struct seq_desc *seq = seq_descs + seq_index; |
---|
| 773 | + unsigned int syt; |
---|
| 774 | + |
---|
| 775 | + desc->cycle = compute_it_cycle(*ctx_header, s->queue_size); |
---|
| 776 | + |
---|
| 777 | + syt = seq->syt_offset; |
---|
| 778 | + if (syt != CIP_SYT_NO_INFO) { |
---|
| 779 | + syt = compute_syt(syt, desc->cycle, |
---|
| 780 | + s->ctx_data.rx.transfer_delay); |
---|
| 781 | + } |
---|
| 782 | + desc->syt = syt; |
---|
| 783 | + desc->data_blocks = seq->data_blocks; |
---|
| 784 | + |
---|
| 785 | + if (s->flags & CIP_DBC_IS_END_EVENT) |
---|
| 786 | + dbc = (dbc + desc->data_blocks) & 0xff; |
---|
| 787 | + |
---|
| 788 | + desc->data_block_counter = dbc; |
---|
| 789 | + |
---|
| 790 | + if (!(s->flags & CIP_DBC_IS_END_EVENT)) |
---|
| 791 | + dbc = (dbc + desc->data_blocks) & 0xff; |
---|
| 792 | + |
---|
| 793 | + desc->ctx_payload = s->buffer.packets[index].buffer; |
---|
| 794 | + |
---|
| 795 | + seq_index = (seq_index + 1) % seq_size; |
---|
| 796 | + |
---|
| 797 | + ++ctx_header; |
---|
| 798 | + } |
---|
| 799 | + |
---|
| 800 | + s->data_block_counter = dbc; |
---|
| 801 | + s->ctx_data.rx.seq_index = seq_index; |
---|
| 802 | +} |
---|
| 803 | + |
---|
| 804 | +static inline void cancel_stream(struct amdtp_stream *s) |
---|
| 805 | +{ |
---|
| 806 | + s->packet_index = -1; |
---|
| 807 | + if (in_interrupt()) |
---|
| 808 | + amdtp_stream_pcm_abort(s); |
---|
| 809 | + WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); |
---|
| 810 | +} |
---|
| 811 | + |
---|
| 812 | +static void process_ctx_payloads(struct amdtp_stream *s, |
---|
| 813 | + const struct pkt_desc *descs, |
---|
| 814 | + unsigned int packets) |
---|
| 815 | +{ |
---|
| 816 | + struct snd_pcm_substream *pcm; |
---|
| 817 | + unsigned int pcm_frames; |
---|
| 818 | + |
---|
| 819 | + pcm = READ_ONCE(s->pcm); |
---|
| 820 | + pcm_frames = s->process_ctx_payloads(s, descs, packets, pcm); |
---|
| 821 | + if (pcm) |
---|
| 822 | + update_pcm_pointers(s, pcm, pcm_frames); |
---|
684 | 823 | } |
---|
685 | 824 | |
---|
686 | 825 | static void out_stream_callback(struct fw_iso_context *context, u32 tstamp, |
---|
.. | .. |
---|
688 | 827 | void *private_data) |
---|
689 | 828 | { |
---|
690 | 829 | struct amdtp_stream *s = private_data; |
---|
691 | | - unsigned int i, packets = header_length / 4; |
---|
692 | | - u32 cycle; |
---|
| 830 | + const struct amdtp_domain *d = s->domain; |
---|
| 831 | + const __be32 *ctx_header = header; |
---|
| 832 | + unsigned int events_per_period = s->ctx_data.rx.events_per_period; |
---|
| 833 | + unsigned int event_count = s->ctx_data.rx.event_count; |
---|
| 834 | + unsigned int packets; |
---|
| 835 | + int i; |
---|
693 | 836 | |
---|
694 | 837 | if (s->packet_index < 0) |
---|
695 | 838 | return; |
---|
696 | 839 | |
---|
697 | | - cycle = compute_cycle_count(tstamp); |
---|
| 840 | + // Calculate the number of packets in buffer and check XRUN. |
---|
| 841 | + packets = header_length / sizeof(*ctx_header); |
---|
698 | 842 | |
---|
699 | | - /* Align to actual cycle count for the last packet. */ |
---|
700 | | - cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets); |
---|
| 843 | + generate_pkt_descs(s, s->pkt_descs, ctx_header, packets, d->seq_descs, |
---|
| 844 | + d->seq_size); |
---|
| 845 | + |
---|
| 846 | + process_ctx_payloads(s, s->pkt_descs, packets); |
---|
701 | 847 | |
---|
702 | 848 | for (i = 0; i < packets; ++i) { |
---|
703 | | - cycle = increment_cycle_count(cycle, 1); |
---|
704 | | - if (s->handle_packet(s, 0, cycle, i) < 0) { |
---|
705 | | - s->packet_index = -1; |
---|
706 | | - if (in_interrupt()) |
---|
707 | | - amdtp_stream_pcm_abort(s); |
---|
708 | | - WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); |
---|
| 849 | + const struct pkt_desc *desc = s->pkt_descs + i; |
---|
| 850 | + unsigned int syt; |
---|
| 851 | + struct { |
---|
| 852 | + struct fw_iso_packet params; |
---|
| 853 | + __be32 header[IT_PKT_HEADER_SIZE_CIP / sizeof(__be32)]; |
---|
| 854 | + } template = { {0}, {0} }; |
---|
| 855 | + bool sched_irq = false; |
---|
| 856 | + |
---|
| 857 | + if (s->ctx_data.rx.syt_override < 0) |
---|
| 858 | + syt = desc->syt; |
---|
| 859 | + else |
---|
| 860 | + syt = s->ctx_data.rx.syt_override; |
---|
| 861 | + |
---|
| 862 | + build_it_pkt_header(s, desc->cycle, &template.params, |
---|
| 863 | + desc->data_blocks, desc->data_block_counter, |
---|
| 864 | + syt, i); |
---|
| 865 | + |
---|
| 866 | + if (s == s->domain->irq_target) { |
---|
| 867 | + event_count += desc->data_blocks; |
---|
| 868 | + if (event_count >= events_per_period) { |
---|
| 869 | + event_count -= events_per_period; |
---|
| 870 | + sched_irq = true; |
---|
| 871 | + } |
---|
| 872 | + } |
---|
| 873 | + |
---|
| 874 | + if (queue_out_packet(s, &template.params, sched_irq) < 0) { |
---|
| 875 | + cancel_stream(s); |
---|
709 | 876 | return; |
---|
710 | 877 | } |
---|
711 | 878 | } |
---|
712 | 879 | |
---|
713 | | - fw_iso_context_queue_flush(s->context); |
---|
| 880 | + s->ctx_data.rx.event_count = event_count; |
---|
714 | 881 | } |
---|
715 | 882 | |
---|
716 | 883 | static void in_stream_callback(struct fw_iso_context *context, u32 tstamp, |
---|
.. | .. |
---|
718 | 885 | void *private_data) |
---|
719 | 886 | { |
---|
720 | 887 | struct amdtp_stream *s = private_data; |
---|
721 | | - unsigned int i, packets; |
---|
722 | | - unsigned int payload_length, max_payload_length; |
---|
723 | | - __be32 *headers = header; |
---|
724 | | - u32 cycle; |
---|
| 888 | + __be32 *ctx_header = header; |
---|
| 889 | + unsigned int packets; |
---|
| 890 | + int i; |
---|
| 891 | + int err; |
---|
725 | 892 | |
---|
726 | 893 | if (s->packet_index < 0) |
---|
727 | 894 | return; |
---|
728 | 895 | |
---|
729 | | - /* The number of packets in buffer */ |
---|
730 | | - packets = header_length / IN_PACKET_HEADER_SIZE; |
---|
| 896 | + // Calculate the number of packets in buffer and check XRUN. |
---|
| 897 | + packets = header_length / s->ctx_data.tx.ctx_header_size; |
---|
731 | 898 | |
---|
732 | | - cycle = compute_cycle_count(tstamp); |
---|
733 | | - |
---|
734 | | - /* Align to actual cycle count for the last packet. */ |
---|
735 | | - cycle = decrement_cycle_count(cycle, packets); |
---|
736 | | - |
---|
737 | | - /* For buffer-over-run prevention. */ |
---|
738 | | - max_payload_length = s->max_payload_length; |
---|
739 | | - |
---|
740 | | - for (i = 0; i < packets; i++) { |
---|
741 | | - cycle = increment_cycle_count(cycle, 1); |
---|
742 | | - |
---|
743 | | - /* The number of bytes in this packet */ |
---|
744 | | - payload_length = |
---|
745 | | - (be32_to_cpu(headers[i]) >> ISO_DATA_LENGTH_SHIFT); |
---|
746 | | - if (payload_length > max_payload_length) { |
---|
747 | | - dev_err(&s->unit->device, |
---|
748 | | - "Detect jumbo payload: %04x %04x\n", |
---|
749 | | - payload_length, max_payload_length); |
---|
750 | | - break; |
---|
| 899 | + err = generate_device_pkt_descs(s, s->pkt_descs, ctx_header, packets); |
---|
| 900 | + if (err < 0) { |
---|
| 901 | + if (err != -EAGAIN) { |
---|
| 902 | + cancel_stream(s); |
---|
| 903 | + return; |
---|
751 | 904 | } |
---|
752 | | - |
---|
753 | | - if (s->handle_packet(s, payload_length, cycle, i) < 0) |
---|
754 | | - break; |
---|
| 905 | + } else { |
---|
| 906 | + process_ctx_payloads(s, s->pkt_descs, packets); |
---|
755 | 907 | } |
---|
756 | 908 | |
---|
757 | | - /* Queueing error or detecting invalid payload. */ |
---|
758 | | - if (i < packets) { |
---|
759 | | - s->packet_index = -1; |
---|
760 | | - if (in_interrupt()) |
---|
761 | | - amdtp_stream_pcm_abort(s); |
---|
762 | | - WRITE_ONCE(s->pcm_buffer_pointer, SNDRV_PCM_POS_XRUN); |
---|
763 | | - return; |
---|
764 | | - } |
---|
| 909 | + for (i = 0; i < packets; ++i) { |
---|
| 910 | + struct fw_iso_packet params = {0}; |
---|
765 | 911 | |
---|
766 | | - fw_iso_context_queue_flush(s->context); |
---|
| 912 | + if (queue_in_packet(s, ¶ms) < 0) { |
---|
| 913 | + cancel_stream(s); |
---|
| 914 | + return; |
---|
| 915 | + } |
---|
| 916 | + } |
---|
767 | 917 | } |
---|
768 | 918 | |
---|
769 | | -/* this is executed one time */ |
---|
| 919 | +static void pool_ideal_seq_descs(struct amdtp_domain *d, unsigned int packets) |
---|
| 920 | +{ |
---|
| 921 | + struct amdtp_stream *irq_target = d->irq_target; |
---|
| 922 | + unsigned int seq_tail = d->seq_tail; |
---|
| 923 | + unsigned int seq_size = d->seq_size; |
---|
| 924 | + unsigned int min_avail; |
---|
| 925 | + struct amdtp_stream *s; |
---|
| 926 | + |
---|
| 927 | + min_avail = d->seq_size; |
---|
| 928 | + list_for_each_entry(s, &d->streams, list) { |
---|
| 929 | + unsigned int seq_index; |
---|
| 930 | + unsigned int avail; |
---|
| 931 | + |
---|
| 932 | + if (s->direction == AMDTP_IN_STREAM) |
---|
| 933 | + continue; |
---|
| 934 | + |
---|
| 935 | + seq_index = s->ctx_data.rx.seq_index; |
---|
| 936 | + avail = d->seq_tail; |
---|
| 937 | + if (seq_index > avail) |
---|
| 938 | + avail += d->seq_size; |
---|
| 939 | + avail -= seq_index; |
---|
| 940 | + |
---|
| 941 | + if (avail < min_avail) |
---|
| 942 | + min_avail = avail; |
---|
| 943 | + } |
---|
| 944 | + |
---|
| 945 | + while (min_avail < packets) { |
---|
| 946 | + struct seq_desc *desc = d->seq_descs + seq_tail; |
---|
| 947 | + |
---|
| 948 | + desc->syt_offset = calculate_syt_offset(&d->last_syt_offset, |
---|
| 949 | + &d->syt_offset_state, irq_target->sfc); |
---|
| 950 | + desc->data_blocks = calculate_data_blocks(&d->data_block_state, |
---|
| 951 | + !!(irq_target->flags & CIP_BLOCKING), |
---|
| 952 | + desc->syt_offset == CIP_SYT_NO_INFO, |
---|
| 953 | + irq_target->syt_interval, irq_target->sfc); |
---|
| 954 | + |
---|
| 955 | + ++seq_tail; |
---|
| 956 | + seq_tail %= seq_size; |
---|
| 957 | + |
---|
| 958 | + ++min_avail; |
---|
| 959 | + } |
---|
| 960 | + |
---|
| 961 | + d->seq_tail = seq_tail; |
---|
| 962 | +} |
---|
| 963 | + |
---|
| 964 | +static void irq_target_callback(struct fw_iso_context *context, u32 tstamp, |
---|
| 965 | + size_t header_length, void *header, |
---|
| 966 | + void *private_data) |
---|
| 967 | +{ |
---|
| 968 | + struct amdtp_stream *irq_target = private_data; |
---|
| 969 | + struct amdtp_domain *d = irq_target->domain; |
---|
| 970 | + unsigned int packets = header_length / sizeof(__be32); |
---|
| 971 | + struct amdtp_stream *s; |
---|
| 972 | + |
---|
| 973 | + // Record enough entries with extra 3 cycles at least. |
---|
| 974 | + pool_ideal_seq_descs(d, packets + 3); |
---|
| 975 | + |
---|
| 976 | + out_stream_callback(context, tstamp, header_length, header, irq_target); |
---|
| 977 | + if (amdtp_streaming_error(irq_target)) |
---|
| 978 | + goto error; |
---|
| 979 | + |
---|
| 980 | + list_for_each_entry(s, &d->streams, list) { |
---|
| 981 | + if (s != irq_target && amdtp_stream_running(s)) { |
---|
| 982 | + fw_iso_context_flush_completions(s->context); |
---|
| 983 | + if (amdtp_streaming_error(s)) |
---|
| 984 | + goto error; |
---|
| 985 | + } |
---|
| 986 | + } |
---|
| 987 | + |
---|
| 988 | + return; |
---|
| 989 | +error: |
---|
| 990 | + if (amdtp_stream_running(irq_target)) |
---|
| 991 | + cancel_stream(irq_target); |
---|
| 992 | + |
---|
| 993 | + list_for_each_entry(s, &d->streams, list) { |
---|
| 994 | + if (amdtp_stream_running(s)) |
---|
| 995 | + cancel_stream(s); |
---|
| 996 | + } |
---|
| 997 | +} |
---|
| 998 | + |
---|
| 999 | +// this is executed one time. |
---|
770 | 1000 | static void amdtp_stream_first_callback(struct fw_iso_context *context, |
---|
771 | 1001 | u32 tstamp, size_t header_length, |
---|
772 | 1002 | void *header, void *private_data) |
---|
773 | 1003 | { |
---|
774 | 1004 | struct amdtp_stream *s = private_data; |
---|
| 1005 | + const __be32 *ctx_header = header; |
---|
775 | 1006 | u32 cycle; |
---|
776 | | - unsigned int packets; |
---|
777 | 1007 | |
---|
778 | 1008 | /* |
---|
779 | 1009 | * For in-stream, first packet has come. |
---|
.. | .. |
---|
782 | 1012 | s->callbacked = true; |
---|
783 | 1013 | wake_up(&s->callback_wait); |
---|
784 | 1014 | |
---|
785 | | - cycle = compute_cycle_count(tstamp); |
---|
786 | | - |
---|
787 | 1015 | if (s->direction == AMDTP_IN_STREAM) { |
---|
788 | | - packets = header_length / IN_PACKET_HEADER_SIZE; |
---|
789 | | - cycle = decrement_cycle_count(cycle, packets); |
---|
| 1016 | + cycle = compute_cycle_count(ctx_header[1]); |
---|
| 1017 | + |
---|
790 | 1018 | context->callback.sc = in_stream_callback; |
---|
791 | | - if (s->flags & CIP_NO_HEADER) |
---|
792 | | - s->handle_packet = handle_in_packet_without_header; |
---|
793 | | - else |
---|
794 | | - s->handle_packet = handle_in_packet; |
---|
795 | 1019 | } else { |
---|
796 | | - packets = header_length / 4; |
---|
797 | | - cycle = increment_cycle_count(cycle, QUEUE_LENGTH - packets); |
---|
798 | | - context->callback.sc = out_stream_callback; |
---|
799 | | - if (s->flags & CIP_NO_HEADER) |
---|
800 | | - s->handle_packet = handle_out_packet_without_header; |
---|
| 1020 | + cycle = compute_it_cycle(*ctx_header, s->queue_size); |
---|
| 1021 | + |
---|
| 1022 | + if (s == s->domain->irq_target) |
---|
| 1023 | + context->callback.sc = irq_target_callback; |
---|
801 | 1024 | else |
---|
802 | | - s->handle_packet = handle_out_packet; |
---|
| 1025 | + context->callback.sc = out_stream_callback; |
---|
803 | 1026 | } |
---|
804 | 1027 | |
---|
805 | 1028 | s->start_cycle = cycle; |
---|
.. | .. |
---|
812 | 1035 | * @s: the AMDTP stream to start |
---|
813 | 1036 | * @channel: the isochronous channel on the bus |
---|
814 | 1037 | * @speed: firewire speed code |
---|
| 1038 | + * @start_cycle: the isochronous cycle to start the context. Start immediately |
---|
| 1039 | + * if negative value is given. |
---|
| 1040 | + * @queue_size: The number of packets in the queue. |
---|
| 1041 | + * @idle_irq_interval: the interval to queue packet during initial state. |
---|
815 | 1042 | * |
---|
816 | 1043 | * The stream cannot be started until it has been configured with |
---|
817 | 1044 | * amdtp_stream_set_parameters() and it must be started before any PCM or MIDI |
---|
818 | 1045 | * device can be started. |
---|
819 | 1046 | */ |
---|
820 | | -int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed) |
---|
| 1047 | +static int amdtp_stream_start(struct amdtp_stream *s, int channel, int speed, |
---|
| 1048 | + int start_cycle, unsigned int queue_size, |
---|
| 1049 | + unsigned int idle_irq_interval) |
---|
821 | 1050 | { |
---|
822 | | - static const struct { |
---|
823 | | - unsigned int data_block; |
---|
824 | | - unsigned int syt_offset; |
---|
825 | | - } initial_state[] = { |
---|
826 | | - [CIP_SFC_32000] = { 4, 3072 }, |
---|
827 | | - [CIP_SFC_48000] = { 6, 1024 }, |
---|
828 | | - [CIP_SFC_96000] = { 12, 1024 }, |
---|
829 | | - [CIP_SFC_192000] = { 24, 1024 }, |
---|
830 | | - [CIP_SFC_44100] = { 0, 67 }, |
---|
831 | | - [CIP_SFC_88200] = { 0, 67 }, |
---|
832 | | - [CIP_SFC_176400] = { 0, 67 }, |
---|
833 | | - }; |
---|
834 | | - unsigned int header_size; |
---|
| 1051 | + bool is_irq_target = (s == s->domain->irq_target); |
---|
| 1052 | + unsigned int ctx_header_size; |
---|
| 1053 | + unsigned int max_ctx_payload_size; |
---|
835 | 1054 | enum dma_data_direction dir; |
---|
836 | 1055 | int type, tag, err; |
---|
837 | 1056 | |
---|
.. | .. |
---|
843 | 1062 | goto err_unlock; |
---|
844 | 1063 | } |
---|
845 | 1064 | |
---|
846 | | - if (s->direction == AMDTP_IN_STREAM) |
---|
847 | | - s->data_block_counter = UINT_MAX; |
---|
848 | | - else |
---|
849 | | - s->data_block_counter = 0; |
---|
850 | | - s->data_block_state = initial_state[s->sfc].data_block; |
---|
851 | | - s->syt_offset_state = initial_state[s->sfc].syt_offset; |
---|
852 | | - s->last_syt_offset = TICKS_PER_CYCLE; |
---|
| 1065 | + if (s->direction == AMDTP_IN_STREAM) { |
---|
| 1066 | + // NOTE: IT context should be used for constant IRQ. |
---|
| 1067 | + if (is_irq_target) { |
---|
| 1068 | + err = -EINVAL; |
---|
| 1069 | + goto err_unlock; |
---|
| 1070 | + } |
---|
853 | 1071 | |
---|
854 | | - /* initialize packet buffer */ |
---|
| 1072 | + s->data_block_counter = UINT_MAX; |
---|
| 1073 | + } else { |
---|
| 1074 | + s->data_block_counter = 0; |
---|
| 1075 | + } |
---|
| 1076 | + |
---|
| 1077 | + // initialize packet buffer. |
---|
| 1078 | + max_ctx_payload_size = amdtp_stream_get_max_payload(s); |
---|
855 | 1079 | if (s->direction == AMDTP_IN_STREAM) { |
---|
856 | 1080 | dir = DMA_FROM_DEVICE; |
---|
857 | 1081 | type = FW_ISO_CONTEXT_RECEIVE; |
---|
858 | | - header_size = IN_PACKET_HEADER_SIZE; |
---|
| 1082 | + if (!(s->flags & CIP_NO_HEADER)) { |
---|
| 1083 | + max_ctx_payload_size -= 8; |
---|
| 1084 | + ctx_header_size = IR_CTX_HEADER_SIZE_CIP; |
---|
| 1085 | + } else { |
---|
| 1086 | + ctx_header_size = IR_CTX_HEADER_SIZE_NO_CIP; |
---|
| 1087 | + } |
---|
859 | 1088 | } else { |
---|
860 | 1089 | dir = DMA_TO_DEVICE; |
---|
861 | 1090 | type = FW_ISO_CONTEXT_TRANSMIT; |
---|
862 | | - header_size = OUT_PACKET_HEADER_SIZE; |
---|
| 1091 | + ctx_header_size = 0; // No effect for IT context. |
---|
| 1092 | + |
---|
| 1093 | + if (!(s->flags & CIP_NO_HEADER)) |
---|
| 1094 | + max_ctx_payload_size -= IT_PKT_HEADER_SIZE_CIP; |
---|
863 | 1095 | } |
---|
864 | | - err = iso_packets_buffer_init(&s->buffer, s->unit, QUEUE_LENGTH, |
---|
865 | | - amdtp_stream_get_max_payload(s), dir); |
---|
| 1096 | + |
---|
| 1097 | + err = iso_packets_buffer_init(&s->buffer, s->unit, queue_size, |
---|
| 1098 | + max_ctx_payload_size, dir); |
---|
866 | 1099 | if (err < 0) |
---|
867 | 1100 | goto err_unlock; |
---|
| 1101 | + s->queue_size = queue_size; |
---|
868 | 1102 | |
---|
869 | 1103 | s->context = fw_iso_context_create(fw_parent_device(s->unit)->card, |
---|
870 | | - type, channel, speed, header_size, |
---|
871 | | - amdtp_stream_first_callback, s); |
---|
| 1104 | + type, channel, speed, ctx_header_size, |
---|
| 1105 | + amdtp_stream_first_callback, s); |
---|
872 | 1106 | if (IS_ERR(s->context)) { |
---|
873 | 1107 | err = PTR_ERR(s->context); |
---|
874 | 1108 | if (err == -EBUSY) |
---|
.. | .. |
---|
879 | 1113 | |
---|
880 | 1114 | amdtp_stream_update(s); |
---|
881 | 1115 | |
---|
882 | | - if (s->direction == AMDTP_IN_STREAM) |
---|
883 | | - s->max_payload_length = amdtp_stream_get_max_payload(s); |
---|
| 1116 | + if (s->direction == AMDTP_IN_STREAM) { |
---|
| 1117 | + s->ctx_data.tx.max_ctx_payload_length = max_ctx_payload_size; |
---|
| 1118 | + s->ctx_data.tx.ctx_header_size = ctx_header_size; |
---|
| 1119 | + } |
---|
884 | 1120 | |
---|
885 | 1121 | if (s->flags & CIP_NO_HEADER) |
---|
886 | 1122 | s->tag = TAG_NO_CIP_HEADER; |
---|
887 | 1123 | else |
---|
888 | 1124 | s->tag = TAG_CIP; |
---|
889 | 1125 | |
---|
| 1126 | + s->pkt_descs = kcalloc(s->queue_size, sizeof(*s->pkt_descs), |
---|
| 1127 | + GFP_KERNEL); |
---|
| 1128 | + if (!s->pkt_descs) { |
---|
| 1129 | + err = -ENOMEM; |
---|
| 1130 | + goto err_context; |
---|
| 1131 | + } |
---|
| 1132 | + |
---|
890 | 1133 | s->packet_index = 0; |
---|
891 | 1134 | do { |
---|
892 | | - if (s->direction == AMDTP_IN_STREAM) |
---|
893 | | - err = queue_in_packet(s); |
---|
894 | | - else |
---|
895 | | - err = queue_out_packet(s, 0); |
---|
| 1135 | + struct fw_iso_packet params; |
---|
| 1136 | + |
---|
| 1137 | + if (s->direction == AMDTP_IN_STREAM) { |
---|
| 1138 | + err = queue_in_packet(s, ¶ms); |
---|
| 1139 | + } else { |
---|
| 1140 | + bool sched_irq = false; |
---|
| 1141 | + |
---|
| 1142 | + params.header_length = 0; |
---|
| 1143 | + params.payload_length = 0; |
---|
| 1144 | + |
---|
| 1145 | + if (is_irq_target) { |
---|
| 1146 | + sched_irq = !((s->packet_index + 1) % |
---|
| 1147 | + idle_irq_interval); |
---|
| 1148 | + } |
---|
| 1149 | + |
---|
| 1150 | + err = queue_out_packet(s, ¶ms, sched_irq); |
---|
| 1151 | + } |
---|
896 | 1152 | if (err < 0) |
---|
897 | | - goto err_context; |
---|
| 1153 | + goto err_pkt_descs; |
---|
898 | 1154 | } while (s->packet_index > 0); |
---|
899 | 1155 | |
---|
900 | 1156 | /* NOTE: TAG1 matches CIP. This just affects in stream. */ |
---|
.. | .. |
---|
903 | 1159 | tag |= FW_ISO_CONTEXT_MATCH_TAG0; |
---|
904 | 1160 | |
---|
905 | 1161 | s->callbacked = false; |
---|
906 | | - err = fw_iso_context_start(s->context, -1, 0, tag); |
---|
| 1162 | + err = fw_iso_context_start(s->context, start_cycle, 0, tag); |
---|
907 | 1163 | if (err < 0) |
---|
908 | | - goto err_context; |
---|
| 1164 | + goto err_pkt_descs; |
---|
909 | 1165 | |
---|
910 | 1166 | mutex_unlock(&s->mutex); |
---|
911 | 1167 | |
---|
912 | 1168 | return 0; |
---|
913 | | - |
---|
| 1169 | +err_pkt_descs: |
---|
| 1170 | + kfree(s->pkt_descs); |
---|
914 | 1171 | err_context: |
---|
915 | 1172 | fw_iso_context_destroy(s->context); |
---|
916 | 1173 | s->context = ERR_PTR(-1); |
---|
.. | .. |
---|
921 | 1178 | |
---|
922 | 1179 | return err; |
---|
923 | 1180 | } |
---|
924 | | -EXPORT_SYMBOL(amdtp_stream_start); |
---|
925 | 1181 | |
---|
926 | 1182 | /** |
---|
927 | | - * amdtp_stream_pcm_pointer - get the PCM buffer position |
---|
| 1183 | + * amdtp_domain_stream_pcm_pointer - get the PCM buffer position |
---|
| 1184 | + * @d: the AMDTP domain. |
---|
928 | 1185 | * @s: the AMDTP stream that transports the PCM data |
---|
929 | 1186 | * |
---|
930 | 1187 | * Returns the current buffer position, in frames. |
---|
931 | 1188 | */ |
---|
932 | | -unsigned long amdtp_stream_pcm_pointer(struct amdtp_stream *s) |
---|
| 1189 | +unsigned long amdtp_domain_stream_pcm_pointer(struct amdtp_domain *d, |
---|
| 1190 | + struct amdtp_stream *s) |
---|
933 | 1191 | { |
---|
934 | | - /* |
---|
935 | | - * This function is called in software IRQ context of period_tasklet or |
---|
936 | | - * process context. |
---|
937 | | - * |
---|
938 | | - * When the software IRQ context was scheduled by software IRQ context |
---|
939 | | - * of IR/IT contexts, queued packets were already handled. Therefore, |
---|
940 | | - * no need to flush the queue in buffer anymore. |
---|
941 | | - * |
---|
942 | | - * When the process context reach here, some packets will be already |
---|
943 | | - * queued in the buffer. These packets should be handled immediately |
---|
944 | | - * to keep better granularity of PCM pointer. |
---|
945 | | - * |
---|
946 | | - * Later, the process context will sometimes schedules software IRQ |
---|
947 | | - * context of the period_tasklet. Then, no need to flush the queue by |
---|
948 | | - * the same reason as described for IR/IT contexts. |
---|
949 | | - */ |
---|
950 | | - if (!in_interrupt() && amdtp_stream_running(s)) |
---|
951 | | - fw_iso_context_flush_completions(s->context); |
---|
| 1192 | + struct amdtp_stream *irq_target = d->irq_target; |
---|
| 1193 | + |
---|
| 1194 | + if (irq_target && amdtp_stream_running(irq_target)) { |
---|
| 1195 | + // This function is called in software IRQ context of |
---|
| 1196 | + // period_work or process context. |
---|
| 1197 | + // |
---|
| 1198 | + // When the software IRQ context was scheduled by software IRQ |
---|
| 1199 | + // context of IT contexts, queued packets were already handled. |
---|
| 1200 | + // Therefore, no need to flush the queue in the buffer anymore. |
---|
| 1201 | + // |
---|
| 1202 | + // When the process context reaches here, some packets will be |
---|
| 1203 | + // already queued in the buffer. These packets should be handled |
---|
| 1204 | + // immediately to keep better granularity of PCM pointer. |
---|
| 1205 | + // |
---|
| 1206 | + // Later, the process context will sometimes schedule software |
---|
| 1207 | + // IRQ context of the period_work. Then, no need to flush the |
---|
| 1208 | + // queue by the same reason as described in the above |
---|
| 1209 | + if (current_work() != &s->period_work) { |
---|
| 1210 | + // Queued packet should be processed without any kernel |
---|
| 1211 | + // preemption to keep latency against bus cycle. |
---|
| 1212 | + preempt_disable(); |
---|
| 1213 | + fw_iso_context_flush_completions(irq_target->context); |
---|
| 1214 | + preempt_enable(); |
---|
| 1215 | + } |
---|
| 1216 | + } |
---|
952 | 1217 | |
---|
953 | 1218 | return READ_ONCE(s->pcm_buffer_pointer); |
---|
954 | 1219 | } |
---|
955 | | -EXPORT_SYMBOL(amdtp_stream_pcm_pointer); |
---|
| 1220 | +EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_pointer); |
---|
956 | 1221 | |
---|
957 | 1222 | /** |
---|
958 | | - * amdtp_stream_pcm_ack - acknowledge queued PCM frames |
---|
| 1223 | + * amdtp_domain_stream_pcm_ack - acknowledge queued PCM frames |
---|
| 1224 | + * @d: the AMDTP domain. |
---|
959 | 1225 | * @s: the AMDTP stream that transfers the PCM frames |
---|
960 | 1226 | * |
---|
961 | 1227 | * Returns zero always. |
---|
962 | 1228 | */ |
---|
963 | | -int amdtp_stream_pcm_ack(struct amdtp_stream *s) |
---|
| 1229 | +int amdtp_domain_stream_pcm_ack(struct amdtp_domain *d, struct amdtp_stream *s) |
---|
964 | 1230 | { |
---|
965 | | - /* |
---|
966 | | - * Process isochronous packets for recent isochronous cycle to handle |
---|
967 | | - * queued PCM frames. |
---|
968 | | - */ |
---|
969 | | - if (amdtp_stream_running(s)) |
---|
970 | | - fw_iso_context_flush_completions(s->context); |
---|
| 1231 | + struct amdtp_stream *irq_target = d->irq_target; |
---|
| 1232 | + |
---|
| 1233 | + // Process isochronous packets for recent isochronous cycle to handle |
---|
| 1234 | + // queued PCM frames. |
---|
| 1235 | + if (irq_target && amdtp_stream_running(irq_target)) { |
---|
| 1236 | + // Queued packet should be processed without any kernel |
---|
| 1237 | + // preemption to keep latency against bus cycle. |
---|
| 1238 | + preempt_disable(); |
---|
| 1239 | + fw_iso_context_flush_completions(irq_target->context); |
---|
| 1240 | + preempt_enable(); |
---|
| 1241 | + } |
---|
971 | 1242 | |
---|
972 | 1243 | return 0; |
---|
973 | 1244 | } |
---|
974 | | -EXPORT_SYMBOL(amdtp_stream_pcm_ack); |
---|
| 1245 | +EXPORT_SYMBOL_GPL(amdtp_domain_stream_pcm_ack); |
---|
975 | 1246 | |
---|
976 | 1247 | /** |
---|
977 | 1248 | * amdtp_stream_update - update the stream after a bus reset |
---|
.. | .. |
---|
992 | 1263 | * All PCM and MIDI devices of the stream must be stopped before the stream |
---|
993 | 1264 | * itself can be stopped. |
---|
994 | 1265 | */ |
---|
995 | | -void amdtp_stream_stop(struct amdtp_stream *s) |
---|
| 1266 | +static void amdtp_stream_stop(struct amdtp_stream *s) |
---|
996 | 1267 | { |
---|
997 | 1268 | mutex_lock(&s->mutex); |
---|
998 | 1269 | |
---|
.. | .. |
---|
1001 | 1272 | return; |
---|
1002 | 1273 | } |
---|
1003 | 1274 | |
---|
1004 | | - tasklet_kill(&s->period_tasklet); |
---|
| 1275 | + cancel_work_sync(&s->period_work); |
---|
1005 | 1276 | fw_iso_context_stop(s->context); |
---|
1006 | 1277 | fw_iso_context_destroy(s->context); |
---|
1007 | 1278 | s->context = ERR_PTR(-1); |
---|
1008 | 1279 | iso_packets_buffer_destroy(&s->buffer, s->unit); |
---|
| 1280 | + kfree(s->pkt_descs); |
---|
1009 | 1281 | |
---|
1010 | 1282 | s->callbacked = false; |
---|
1011 | 1283 | |
---|
1012 | 1284 | mutex_unlock(&s->mutex); |
---|
1013 | 1285 | } |
---|
1014 | | -EXPORT_SYMBOL(amdtp_stream_stop); |
---|
1015 | 1286 | |
---|
1016 | 1287 | /** |
---|
1017 | 1288 | * amdtp_stream_pcm_abort - abort the running PCM device |
---|
.. | .. |
---|
1029 | 1300 | snd_pcm_stop_xrun(pcm); |
---|
1030 | 1301 | } |
---|
1031 | 1302 | EXPORT_SYMBOL(amdtp_stream_pcm_abort); |
---|
| 1303 | + |
---|
| 1304 | +/** |
---|
| 1305 | + * amdtp_domain_init - initialize an AMDTP domain structure |
---|
| 1306 | + * @d: the AMDTP domain to initialize. |
---|
| 1307 | + */ |
---|
| 1308 | +int amdtp_domain_init(struct amdtp_domain *d) |
---|
| 1309 | +{ |
---|
| 1310 | + INIT_LIST_HEAD(&d->streams); |
---|
| 1311 | + |
---|
| 1312 | + d->events_per_period = 0; |
---|
| 1313 | + |
---|
| 1314 | + d->seq_descs = NULL; |
---|
| 1315 | + |
---|
| 1316 | + return 0; |
---|
| 1317 | +} |
---|
| 1318 | +EXPORT_SYMBOL_GPL(amdtp_domain_init); |
---|
| 1319 | + |
---|
| 1320 | +/** |
---|
| 1321 | + * amdtp_domain_destroy - destroy an AMDTP domain structure |
---|
| 1322 | + * @d: the AMDTP domain to destroy. |
---|
| 1323 | + */ |
---|
| 1324 | +void amdtp_domain_destroy(struct amdtp_domain *d) |
---|
| 1325 | +{ |
---|
| 1326 | + // At present nothing to do. |
---|
| 1327 | + return; |
---|
| 1328 | +} |
---|
| 1329 | +EXPORT_SYMBOL_GPL(amdtp_domain_destroy); |
---|
| 1330 | + |
---|
| 1331 | +/** |
---|
| 1332 | + * amdtp_domain_add_stream - register isoc context into the domain. |
---|
| 1333 | + * @d: the AMDTP domain. |
---|
| 1334 | + * @s: the AMDTP stream. |
---|
| 1335 | + * @channel: the isochronous channel on the bus. |
---|
| 1336 | + * @speed: firewire speed code. |
---|
| 1337 | + */ |
---|
| 1338 | +int amdtp_domain_add_stream(struct amdtp_domain *d, struct amdtp_stream *s, |
---|
| 1339 | + int channel, int speed) |
---|
| 1340 | +{ |
---|
| 1341 | + struct amdtp_stream *tmp; |
---|
| 1342 | + |
---|
| 1343 | + list_for_each_entry(tmp, &d->streams, list) { |
---|
| 1344 | + if (s == tmp) |
---|
| 1345 | + return -EBUSY; |
---|
| 1346 | + } |
---|
| 1347 | + |
---|
| 1348 | + list_add(&s->list, &d->streams); |
---|
| 1349 | + |
---|
| 1350 | + s->channel = channel; |
---|
| 1351 | + s->speed = speed; |
---|
| 1352 | + s->domain = d; |
---|
| 1353 | + |
---|
| 1354 | + return 0; |
---|
| 1355 | +} |
---|
| 1356 | +EXPORT_SYMBOL_GPL(amdtp_domain_add_stream); |
---|
| 1357 | + |
---|
| 1358 | +static int get_current_cycle_time(struct fw_card *fw_card, int *cur_cycle) |
---|
| 1359 | +{ |
---|
| 1360 | + int generation; |
---|
| 1361 | + int rcode; |
---|
| 1362 | + __be32 reg; |
---|
| 1363 | + u32 data; |
---|
| 1364 | + |
---|
| 1365 | + // This is a request to local 1394 OHCI controller and expected to |
---|
| 1366 | + // complete without any event waiting. |
---|
| 1367 | + generation = fw_card->generation; |
---|
| 1368 | + smp_rmb(); // node_id vs. generation. |
---|
| 1369 | + rcode = fw_run_transaction(fw_card, TCODE_READ_QUADLET_REQUEST, |
---|
| 1370 | + fw_card->node_id, generation, SCODE_100, |
---|
| 1371 | + CSR_REGISTER_BASE + CSR_CYCLE_TIME, |
---|
| 1372 | + ®, sizeof(reg)); |
---|
| 1373 | + if (rcode != RCODE_COMPLETE) |
---|
| 1374 | + return -EIO; |
---|
| 1375 | + |
---|
| 1376 | + data = be32_to_cpu(reg); |
---|
| 1377 | + *cur_cycle = data >> 12; |
---|
| 1378 | + |
---|
| 1379 | + return 0; |
---|
| 1380 | +} |
---|
| 1381 | + |
---|
| 1382 | +/** |
---|
| 1383 | + * amdtp_domain_start - start sending packets for isoc context in the domain. |
---|
| 1384 | + * @d: the AMDTP domain. |
---|
| 1385 | + * @ir_delay_cycle: the cycle delay to start all IR contexts. |
---|
| 1386 | + */ |
---|
| 1387 | +int amdtp_domain_start(struct amdtp_domain *d, unsigned int ir_delay_cycle) |
---|
| 1388 | +{ |
---|
| 1389 | + static const struct { |
---|
| 1390 | + unsigned int data_block; |
---|
| 1391 | + unsigned int syt_offset; |
---|
| 1392 | + } *entry, initial_state[] = { |
---|
| 1393 | + [CIP_SFC_32000] = { 4, 3072 }, |
---|
| 1394 | + [CIP_SFC_48000] = { 6, 1024 }, |
---|
| 1395 | + [CIP_SFC_96000] = { 12, 1024 }, |
---|
| 1396 | + [CIP_SFC_192000] = { 24, 1024 }, |
---|
| 1397 | + [CIP_SFC_44100] = { 0, 67 }, |
---|
| 1398 | + [CIP_SFC_88200] = { 0, 67 }, |
---|
| 1399 | + [CIP_SFC_176400] = { 0, 67 }, |
---|
| 1400 | + }; |
---|
| 1401 | + unsigned int events_per_buffer = d->events_per_buffer; |
---|
| 1402 | + unsigned int events_per_period = d->events_per_period; |
---|
| 1403 | + unsigned int idle_irq_interval; |
---|
| 1404 | + unsigned int queue_size; |
---|
| 1405 | + struct amdtp_stream *s; |
---|
| 1406 | + int cycle; |
---|
| 1407 | + bool found = false; |
---|
| 1408 | + int err; |
---|
| 1409 | + |
---|
| 1410 | + // Select an IT context as IRQ target. |
---|
| 1411 | + list_for_each_entry(s, &d->streams, list) { |
---|
| 1412 | + if (s->direction == AMDTP_OUT_STREAM) { |
---|
| 1413 | + found = true; |
---|
| 1414 | + break; |
---|
| 1415 | + } |
---|
| 1416 | + } |
---|
| 1417 | + if (!found) |
---|
| 1418 | + return -ENXIO; |
---|
| 1419 | + d->irq_target = s; |
---|
| 1420 | + |
---|
| 1421 | + // This is a case that AMDTP streams in domain run just for MIDI |
---|
| 1422 | + // substream. Use the number of events equivalent to 10 msec as |
---|
| 1423 | + // interval of hardware IRQ. |
---|
| 1424 | + if (events_per_period == 0) |
---|
| 1425 | + events_per_period = amdtp_rate_table[d->irq_target->sfc] / 100; |
---|
| 1426 | + if (events_per_buffer == 0) |
---|
| 1427 | + events_per_buffer = events_per_period * 3; |
---|
| 1428 | + |
---|
| 1429 | + queue_size = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_buffer, |
---|
| 1430 | + amdtp_rate_table[d->irq_target->sfc]); |
---|
| 1431 | + |
---|
| 1432 | + d->seq_descs = kcalloc(queue_size, sizeof(*d->seq_descs), GFP_KERNEL); |
---|
| 1433 | + if (!d->seq_descs) |
---|
| 1434 | + return -ENOMEM; |
---|
| 1435 | + d->seq_size = queue_size; |
---|
| 1436 | + d->seq_tail = 0; |
---|
| 1437 | + |
---|
| 1438 | + entry = &initial_state[s->sfc]; |
---|
| 1439 | + d->data_block_state = entry->data_block; |
---|
| 1440 | + d->syt_offset_state = entry->syt_offset; |
---|
| 1441 | + d->last_syt_offset = TICKS_PER_CYCLE; |
---|
| 1442 | + |
---|
| 1443 | + if (ir_delay_cycle > 0) { |
---|
| 1444 | + struct fw_card *fw_card = fw_parent_device(s->unit)->card; |
---|
| 1445 | + |
---|
| 1446 | + err = get_current_cycle_time(fw_card, &cycle); |
---|
| 1447 | + if (err < 0) |
---|
| 1448 | + goto error; |
---|
| 1449 | + |
---|
| 1450 | + // No need to care overflow in cycle field because of enough |
---|
| 1451 | + // width. |
---|
| 1452 | + cycle += ir_delay_cycle; |
---|
| 1453 | + |
---|
| 1454 | + // Round up to sec field. |
---|
| 1455 | + if ((cycle & 0x00001fff) >= CYCLES_PER_SECOND) { |
---|
| 1456 | + unsigned int sec; |
---|
| 1457 | + |
---|
| 1458 | + // The sec field can overflow. |
---|
| 1459 | + sec = (cycle & 0xffffe000) >> 13; |
---|
| 1460 | + cycle = (++sec << 13) | |
---|
| 1461 | + ((cycle & 0x00001fff) / CYCLES_PER_SECOND); |
---|
| 1462 | + } |
---|
| 1463 | + |
---|
| 1464 | + // In OHCI 1394 specification, lower 2 bits are available for |
---|
| 1465 | + // sec field. |
---|
| 1466 | + cycle &= 0x00007fff; |
---|
| 1467 | + } else { |
---|
| 1468 | + cycle = -1; |
---|
| 1469 | + } |
---|
| 1470 | + |
---|
| 1471 | + list_for_each_entry(s, &d->streams, list) { |
---|
| 1472 | + int cycle_match; |
---|
| 1473 | + |
---|
| 1474 | + if (s->direction == AMDTP_IN_STREAM) { |
---|
| 1475 | + cycle_match = cycle; |
---|
| 1476 | + } else { |
---|
| 1477 | + // IT context starts immediately. |
---|
| 1478 | + cycle_match = -1; |
---|
| 1479 | + s->ctx_data.rx.seq_index = 0; |
---|
| 1480 | + } |
---|
| 1481 | + |
---|
| 1482 | + if (s != d->irq_target) { |
---|
| 1483 | + err = amdtp_stream_start(s, s->channel, s->speed, |
---|
| 1484 | + cycle_match, queue_size, 0); |
---|
| 1485 | + if (err < 0) |
---|
| 1486 | + goto error; |
---|
| 1487 | + } |
---|
| 1488 | + } |
---|
| 1489 | + |
---|
| 1490 | + s = d->irq_target; |
---|
| 1491 | + s->ctx_data.rx.events_per_period = events_per_period; |
---|
| 1492 | + s->ctx_data.rx.event_count = 0; |
---|
| 1493 | + s->ctx_data.rx.seq_index = 0; |
---|
| 1494 | + |
---|
| 1495 | + idle_irq_interval = DIV_ROUND_UP(CYCLES_PER_SECOND * events_per_period, |
---|
| 1496 | + amdtp_rate_table[d->irq_target->sfc]); |
---|
| 1497 | + err = amdtp_stream_start(s, s->channel, s->speed, -1, queue_size, |
---|
| 1498 | + idle_irq_interval); |
---|
| 1499 | + if (err < 0) |
---|
| 1500 | + goto error; |
---|
| 1501 | + |
---|
| 1502 | + return 0; |
---|
| 1503 | +error: |
---|
| 1504 | + list_for_each_entry(s, &d->streams, list) |
---|
| 1505 | + amdtp_stream_stop(s); |
---|
| 1506 | + kfree(d->seq_descs); |
---|
| 1507 | + d->seq_descs = NULL; |
---|
| 1508 | + return err; |
---|
| 1509 | +} |
---|
| 1510 | +EXPORT_SYMBOL_GPL(amdtp_domain_start); |
---|
| 1511 | + |
---|
| 1512 | +/** |
---|
| 1513 | + * amdtp_domain_stop - stop sending packets for isoc context in the same domain. |
---|
| 1514 | + * @d: the AMDTP domain to which the isoc contexts belong. |
---|
| 1515 | + */ |
---|
| 1516 | +void amdtp_domain_stop(struct amdtp_domain *d) |
---|
| 1517 | +{ |
---|
| 1518 | + struct amdtp_stream *s, *next; |
---|
| 1519 | + |
---|
| 1520 | + if (d->irq_target) |
---|
| 1521 | + amdtp_stream_stop(d->irq_target); |
---|
| 1522 | + |
---|
| 1523 | + list_for_each_entry_safe(s, next, &d->streams, list) { |
---|
| 1524 | + list_del(&s->list); |
---|
| 1525 | + |
---|
| 1526 | + if (s != d->irq_target) |
---|
| 1527 | + amdtp_stream_stop(s); |
---|
| 1528 | + } |
---|
| 1529 | + |
---|
| 1530 | + d->events_per_period = 0; |
---|
| 1531 | + d->irq_target = NULL; |
---|
| 1532 | + |
---|
| 1533 | + kfree(d->seq_descs); |
---|
| 1534 | + d->seq_descs = NULL; |
---|
| 1535 | +} |
---|
| 1536 | +EXPORT_SYMBOL_GPL(amdtp_domain_stop); |
---|