...
 // SPDX-License-Identifier: GPL-2.0
-/**
+/*
  * xhci-dbgcap.c - xHCI debug capability support
  *
  * Copyright (C) 2017 Intel Corporation
...
 #include "xhci-trace.h"
 #include "xhci-dbgcap.h"
 
-static inline void *
-dbc_dma_alloc_coherent(struct xhci_hcd *xhci, size_t size,
-                       dma_addr_t *dma_handle, gfp_t flags)
+static void dbc_free_ctx(struct device *dev, struct xhci_container_ctx *ctx)
 {
-        void *vaddr;
-
-        vaddr = dma_alloc_coherent(xhci_to_hcd(xhci)->self.sysdev,
-                                   size, dma_handle, flags);
-        memset(vaddr, 0, size);
-        return vaddr;
+        if (!ctx)
+                return;
+        dma_free_coherent(dev, ctx->size, ctx->bytes, ctx->dma);
+        kfree(ctx);
 }
 
-static inline void
-dbc_dma_free_coherent(struct xhci_hcd *xhci, size_t size,
-                      void *cpu_addr, dma_addr_t dma_handle)
+/* we use only one segment for DbC rings */
+static void dbc_ring_free(struct device *dev, struct xhci_ring *ring)
 {
-        if (cpu_addr)
-                dma_free_coherent(xhci_to_hcd(xhci)->self.sysdev,
-                                  size, cpu_addr, dma_handle);
+        if (!ring)
+                return;
+
+        if (ring->first_seg && ring->first_seg->trbs) {
+                dma_free_coherent(dev, TRB_SEGMENT_SIZE,
+                                  ring->first_seg->trbs,
+                                  ring->first_seg->dma);
+                kfree(ring->first_seg);
+        }
+        kfree(ring);
 }
 
 static u32 xhci_dbc_populate_strings(struct dbc_str_descs *strings)
...
         return string_length;
 }
 
-static void xhci_dbc_init_contexts(struct xhci_hcd *xhci, u32 string_length)
+static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
 {
-        struct xhci_dbc *dbc;
         struct dbc_info_context *info;
         struct xhci_ep_ctx *ep_ctx;
         u32 dev_info;
         dma_addr_t deq, dma;
         unsigned int max_burst;
 
-        dbc = xhci->dbc;
         if (!dbc)
                 return;
 
...
         ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
 
         /* Set DbC context and info registers: */
-        xhci_write_64(xhci, dbc->ctx->dma, &dbc->regs->dccp);
+        lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
 
         dev_info = cpu_to_le32((DBC_VENDOR_ID << 16) | DBC_PROTOCOL);
         writel(dev_info, &dbc->regs->devinfo1);
...
         __releases(&dbc->lock)
         __acquires(&dbc->lock)
 {
-        struct dbc_ep *dep = req->dep;
-        struct xhci_dbc *dbc = dep->dbc;
-        struct xhci_hcd *xhci = dbc->xhci;
-        struct device *dev = xhci_to_hcd(dbc->xhci)->self.sysdev;
+        struct xhci_dbc *dbc = req->dbc;
+        struct device *dev = dbc->dev;
 
         list_del_init(&req->list_pending);
         req->trb_dma = 0;
...
         dma_unmap_single(dev,
                          req->dma,
                          req->length,
-                         dbc_ep_dma_direction(dep));
+                         dbc_ep_dma_direction(req));
 
         /* Give back the transfer request: */
         spin_unlock(&dbc->lock);
-        req->complete(xhci, req);
+        req->complete(dbc, req);
         spin_lock(&dbc->lock);
 }
 
...
                 xhci_dbc_flush_single_request(req);
 }
 
-static void xhci_dbc_flush_reqests(struct xhci_dbc *dbc)
+static void xhci_dbc_flush_requests(struct xhci_dbc *dbc)
 {
         xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_OUT]);
         xhci_dbc_flush_endpoint_requests(&dbc->eps[BULK_IN]);
 }
 
 struct dbc_request *
-dbc_alloc_request(struct dbc_ep *dep, gfp_t gfp_flags)
+dbc_alloc_request(struct xhci_dbc *dbc, unsigned int direction, gfp_t flags)
 {
         struct dbc_request *req;
 
-        req = kzalloc(sizeof(*req), gfp_flags);
+        if (direction != BULK_IN &&
+            direction != BULK_OUT)
+                return NULL;
+
+        if (!dbc)
+                return NULL;
+
+        req = kzalloc(sizeof(*req), flags);
         if (!req)
                 return NULL;
 
-        req->dep = dep;
+        req->dbc = dbc;
         INIT_LIST_HEAD(&req->list_pending);
         INIT_LIST_HEAD(&req->list_pool);
-        req->direction = dep->direction;
+        req->direction = direction;
 
         trace_xhci_dbc_alloc_request(req);
 
...
 }
 
 void
-dbc_free_request(struct dbc_ep *dep, struct dbc_request *req)
+dbc_free_request(struct dbc_request *req)
 {
         trace_xhci_dbc_free_request(req);
 
...
         u64 addr;
         union xhci_trb *trb;
         unsigned int num_trbs;
-        struct xhci_dbc *dbc = dep->dbc;
+        struct xhci_dbc *dbc = req->dbc;
         struct xhci_ring *ring = dep->ring;
         u32 length, control, cycle;
 
...
 }
 
 static int
-dbc_ep_do_queue(struct dbc_ep *dep, struct dbc_request *req)
+dbc_ep_do_queue(struct dbc_request *req)
 {
         int ret;
-        struct device *dev;
-        struct xhci_dbc *dbc = dep->dbc;
-        struct xhci_hcd *xhci = dbc->xhci;
-
-        dev = xhci_to_hcd(xhci)->self.sysdev;
+        struct xhci_dbc *dbc = req->dbc;
+        struct device *dev = dbc->dev;
+        struct dbc_ep *dep = &dbc->eps[req->direction];
 
         if (!req->length || !req->buf)
                 return -EINVAL;
...
                                   req->length,
                                   dbc_ep_dma_direction(dep));
         if (dma_mapping_error(dev, req->dma)) {
-                xhci_err(xhci, "failed to map buffer\n");
+                dev_err(dbc->dev, "failed to map buffer\n");
                 return -EFAULT;
         }
 
         ret = xhci_dbc_queue_bulk_tx(dep, req);
         if (ret) {
-                xhci_err(xhci, "failed to queue trbs\n");
+                dev_err(dbc->dev, "failed to queue trbs\n");
                 dma_unmap_single(dev,
                                  req->dma,
                                  req->length,
...
         return 0;
 }
 
-int dbc_ep_queue(struct dbc_ep *dep, struct dbc_request *req,
-                 gfp_t gfp_flags)
+int dbc_ep_queue(struct dbc_request *req)
 {
         unsigned long flags;
-        struct xhci_dbc *dbc = dep->dbc;
+        struct xhci_dbc *dbc = req->dbc;
         int ret = -ESHUTDOWN;
+
+        if (!dbc)
+                return -ENODEV;
+
+        if (req->direction != BULK_IN &&
+            req->direction != BULK_OUT)
+                return -EINVAL;
 
         spin_lock_irqsave(&dbc->lock, flags);
         if (dbc->state == DS_CONFIGURED)
-                ret = dbc_ep_do_queue(dep, req);
+                ret = dbc_ep_do_queue(req);
         spin_unlock_irqrestore(&dbc->lock, flags);
 
         mod_delayed_work(system_wq, &dbc->event_work, 0);
...
         return ret;
 }
 
-static inline void xhci_dbc_do_eps_init(struct xhci_hcd *xhci, bool direction)
+static inline void xhci_dbc_do_eps_init(struct xhci_dbc *dbc, bool direction)
 {
         struct dbc_ep *dep;
-        struct xhci_dbc *dbc = xhci->dbc;
 
         dep = &dbc->eps[direction];
         dep->dbc = dbc;
...
         INIT_LIST_HEAD(&dep->list_pending);
 }
 
-static void xhci_dbc_eps_init(struct xhci_hcd *xhci)
+static void xhci_dbc_eps_init(struct xhci_dbc *dbc)
 {
-        xhci_dbc_do_eps_init(xhci, BULK_OUT);
-        xhci_dbc_do_eps_init(xhci, BULK_IN);
+        xhci_dbc_do_eps_init(dbc, BULK_OUT);
+        xhci_dbc_do_eps_init(dbc, BULK_IN);
 }
 
-static void xhci_dbc_eps_exit(struct xhci_hcd *xhci)
+static void xhci_dbc_eps_exit(struct xhci_dbc *dbc)
 {
-        struct xhci_dbc *dbc = xhci->dbc;
-
         memset(dbc->eps, 0, sizeof(struct dbc_ep) * ARRAY_SIZE(dbc->eps));
 }
 
-static int xhci_dbc_mem_init(struct xhci_hcd *xhci, gfp_t flags)
+static int dbc_erst_alloc(struct device *dev, struct xhci_ring *evt_ring,
+                          struct xhci_erst *erst, gfp_t flags)
+{
+        erst->entries = dma_alloc_coherent(dev, sizeof(struct xhci_erst_entry),
+                                           &erst->erst_dma_addr, flags);
+        if (!erst->entries)
+                return -ENOMEM;
+
+        erst->num_entries = 1;
+        erst->entries[0].seg_addr = cpu_to_le64(evt_ring->first_seg->dma);
+        erst->entries[0].seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+        erst->entries[0].rsvd = 0;
+        return 0;
+}
+
+static void dbc_erst_free(struct device *dev, struct xhci_erst *erst)
+{
+        if (erst->entries)
+                dma_free_coherent(dev, sizeof(struct xhci_erst_entry),
+                                  erst->entries, erst->erst_dma_addr);
+        erst->entries = NULL;
+}
+
+static struct xhci_container_ctx *
+dbc_alloc_ctx(struct device *dev, gfp_t flags)
+{
+        struct xhci_container_ctx *ctx;
+
+        ctx = kzalloc(sizeof(*ctx), flags);
+        if (!ctx)
+                return NULL;
+
+        /* xhci 7.6.9, all three contexts; info, ep-out and ep-in. Each 64 bytes*/
+        ctx->size = 3 * DBC_CONTEXT_SIZE;
+        ctx->bytes = dma_alloc_coherent(dev, ctx->size, &ctx->dma, flags);
+        if (!ctx->bytes) {
+                kfree(ctx);
+                return NULL;
+        }
+        return ctx;
+}
+
+static struct xhci_ring *
+xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
+{
+        struct xhci_ring *ring;
+        struct xhci_segment *seg;
+        dma_addr_t dma;
+
+        ring = kzalloc(sizeof(*ring), flags);
+        if (!ring)
+                return NULL;
+
+        ring->num_segs = 1;
+        ring->type = type;
+
+        seg = kzalloc(sizeof(*seg), flags);
+        if (!seg)
+                goto seg_fail;
+
+        ring->first_seg = seg;
+        ring->last_seg = seg;
+        seg->next = seg;
+
+        seg->trbs = dma_alloc_coherent(dev, TRB_SEGMENT_SIZE, &dma, flags);
+        if (!seg->trbs)
+                goto dma_fail;
+
+        seg->dma = dma;
+
+        /* Only event ring does not use link TRB */
+        if (type != TYPE_EVENT) {
+                union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+
+                trb->link.segment_ptr = cpu_to_le64(dma);
+                trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+        }
+        INIT_LIST_HEAD(&ring->td_list);
+        xhci_initialize_ring_info(ring, 1);
+        return ring;
+dma_fail:
+        kfree(seg);
+seg_fail:
+        kfree(ring);
+        return NULL;
+}
+
+static int xhci_dbc_mem_init(struct xhci_dbc *dbc, gfp_t flags)
 {
         int ret;
         dma_addr_t deq;
         u32 string_length;
-        struct xhci_dbc *dbc = xhci->dbc;
+        struct device *dev = dbc->dev;
 
         /* Allocate various rings for events and transfers: */
-        dbc->ring_evt = xhci_ring_alloc(xhci, 1, 1, TYPE_EVENT, 0, flags);
+        dbc->ring_evt = xhci_dbc_ring_alloc(dev, TYPE_EVENT, flags);
         if (!dbc->ring_evt)
                 goto evt_fail;
 
-        dbc->ring_in = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
+        dbc->ring_in = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
         if (!dbc->ring_in)
                 goto in_fail;
 
-        dbc->ring_out = xhci_ring_alloc(xhci, 1, 1, TYPE_BULK, 0, flags);
+        dbc->ring_out = xhci_dbc_ring_alloc(dev, TYPE_BULK, flags);
         if (!dbc->ring_out)
                 goto out_fail;
 
         /* Allocate and populate ERST: */
-        ret = xhci_alloc_erst(xhci, dbc->ring_evt, &dbc->erst, flags);
+        ret = dbc_erst_alloc(dev, dbc->ring_evt, &dbc->erst, flags);
         if (ret)
                 goto erst_fail;
 
         /* Allocate context data structure: */
-        dbc->ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
+        dbc->ctx = dbc_alloc_ctx(dev, flags); /* was sysdev, and is still */
         if (!dbc->ctx)
                 goto ctx_fail;
 
         /* Allocate the string table: */
         dbc->string_size = sizeof(struct dbc_str_descs);
-        dbc->string = dbc_dma_alloc_coherent(xhci,
-                                             dbc->string_size,
-                                             &dbc->string_dma,
-                                             flags);
+        dbc->string = dma_alloc_coherent(dev, dbc->string_size,
+                                         &dbc->string_dma, flags);
         if (!dbc->string)
                 goto string_fail;
 
         /* Setup ERST register: */
         writel(dbc->erst.erst_size, &dbc->regs->ersts);
-        xhci_write_64(xhci, dbc->erst.erst_dma_addr, &dbc->regs->erstba);
+
+        lo_hi_writeq(dbc->erst.erst_dma_addr, &dbc->regs->erstba);
         deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                    dbc->ring_evt->dequeue);
-        xhci_write_64(xhci, deq, &dbc->regs->erdp);
+        lo_hi_writeq(deq, &dbc->regs->erdp);
 
         /* Setup strings and contexts: */
         string_length = xhci_dbc_populate_strings(dbc->string);
-        xhci_dbc_init_contexts(xhci, string_length);
+        xhci_dbc_init_contexts(dbc, string_length);
 
-        mmiowb();
-
-        xhci_dbc_eps_init(xhci);
+        xhci_dbc_eps_init(dbc);
         dbc->state = DS_INITIALIZED;
 
         return 0;
 
 string_fail:
-        xhci_free_container_ctx(xhci, dbc->ctx);
+        dbc_free_ctx(dev, dbc->ctx);
         dbc->ctx = NULL;
 ctx_fail:
-        xhci_free_erst(xhci, &dbc->erst);
+        dbc_erst_free(dev, &dbc->erst);
 erst_fail:
-        xhci_ring_free(xhci, dbc->ring_out);
+        dbc_ring_free(dev, dbc->ring_out);
         dbc->ring_out = NULL;
 out_fail:
-        xhci_ring_free(xhci, dbc->ring_in);
+        dbc_ring_free(dev, dbc->ring_in);
         dbc->ring_in = NULL;
 in_fail:
-        xhci_ring_free(xhci, dbc->ring_evt);
+        dbc_ring_free(dev, dbc->ring_evt);
         dbc->ring_evt = NULL;
 evt_fail:
         return -ENOMEM;
 }
 
-static void xhci_dbc_mem_cleanup(struct xhci_hcd *xhci)
+static void xhci_dbc_mem_cleanup(struct xhci_dbc *dbc)
 {
-        struct xhci_dbc *dbc = xhci->dbc;
-
         if (!dbc)
                 return;
 
-        xhci_dbc_eps_exit(xhci);
+        xhci_dbc_eps_exit(dbc);
 
         if (dbc->string) {
-                dbc_dma_free_coherent(xhci,
-                                      dbc->string_size,
-                                      dbc->string, dbc->string_dma);
+                dma_free_coherent(dbc->dev, dbc->string_size,
+                                  dbc->string, dbc->string_dma);
                 dbc->string = NULL;
         }
 
-        xhci_free_container_ctx(xhci, dbc->ctx);
+        dbc_free_ctx(dbc->dev, dbc->ctx);
         dbc->ctx = NULL;
 
-        xhci_free_erst(xhci, &dbc->erst);
-        xhci_ring_free(xhci, dbc->ring_out);
-        xhci_ring_free(xhci, dbc->ring_in);
-        xhci_ring_free(xhci, dbc->ring_evt);
+        dbc_erst_free(dbc->dev, &dbc->erst);
+        dbc_ring_free(dbc->dev, dbc->ring_out);
+        dbc_ring_free(dbc->dev, dbc->ring_in);
+        dbc_ring_free(dbc->dev, dbc->ring_evt);
         dbc->ring_in = NULL;
         dbc->ring_out = NULL;
         dbc->ring_evt = NULL;
 }
 
-static int xhci_do_dbc_start(struct xhci_hcd *xhci)
+static int xhci_do_dbc_start(struct xhci_dbc *dbc)
 {
         int ret;
         u32 ctrl;
-        struct xhci_dbc *dbc = xhci->dbc;
 
         if (dbc->state != DS_DISABLED)
                 return -EINVAL;
...
         if (ret)
                 return ret;
 
-        ret = xhci_dbc_mem_init(xhci, GFP_ATOMIC);
+        ret = xhci_dbc_mem_init(dbc, GFP_ATOMIC);
         if (ret)
                 return ret;
 
...
         return 0;
 }
 
-static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
+static int xhci_do_dbc_stop(struct xhci_dbc *dbc)
 {
-        struct xhci_dbc *dbc = xhci->dbc;
-
         if (dbc->state == DS_DISABLED)
                 return -1;
 
...
         return 0;
 }
 
-static int xhci_dbc_start(struct xhci_hcd *xhci)
+static int xhci_dbc_start(struct xhci_dbc *dbc)
 {
         int ret;
         unsigned long flags;
-        struct xhci_dbc *dbc = xhci->dbc;
 
         WARN_ON(!dbc);
 
-        pm_runtime_get_sync(xhci_to_hcd(xhci)->self.controller);
+        pm_runtime_get_sync(dbc->dev); /* note this was self.controller */
 
         spin_lock_irqsave(&dbc->lock, flags);
-        ret = xhci_do_dbc_start(xhci);
+        ret = xhci_do_dbc_start(dbc);
         spin_unlock_irqrestore(&dbc->lock, flags);
 
         if (ret) {
-                pm_runtime_put(xhci_to_hcd(xhci)->self.controller);
+                pm_runtime_put(dbc->dev); /* note this was self.controller */
                 return ret;
         }
 
         return mod_delayed_work(system_wq, &dbc->event_work, 1);
 }
 
-static void xhci_dbc_stop(struct xhci_hcd *xhci)
+static void xhci_dbc_stop(struct xhci_dbc *dbc)
 {
         int ret;
         unsigned long flags;
-        struct xhci_dbc *dbc = xhci->dbc;
-        struct dbc_port *port = &dbc->port;
 
         WARN_ON(!dbc);
 
+        switch (dbc->state) {
+        case DS_DISABLED:
+                return;
+        case DS_CONFIGURED:
+        case DS_STALLED:
+                if (dbc->driver->disconnect)
+                        dbc->driver->disconnect(dbc);
+                break;
+        default:
+                break;
+        }
+
         cancel_delayed_work_sync(&dbc->event_work);
 
-        if (port->registered)
-                xhci_dbc_tty_unregister_device(xhci);
-
         spin_lock_irqsave(&dbc->lock, flags);
-        ret = xhci_do_dbc_stop(xhci);
+        ret = xhci_do_dbc_stop(dbc);
         spin_unlock_irqrestore(&dbc->lock, flags);
 
         if (!ret) {
-                xhci_dbc_mem_cleanup(xhci);
-                pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+                xhci_dbc_mem_cleanup(dbc);
+                pm_runtime_put_sync(dbc->dev); /* note, was self.controller */
         }
 }
 
 static void
-dbc_handle_port_status(struct xhci_hcd *xhci, union xhci_trb *event)
+dbc_handle_port_status(struct xhci_dbc *dbc, union xhci_trb *event)
 {
         u32 portsc;
-        struct xhci_dbc *dbc = xhci->dbc;
 
         portsc = readl(&dbc->regs->portsc);
         if (portsc & DBC_PORTSC_CONN_CHANGE)
-                xhci_info(xhci, "DbC port connect change\n");
+                dev_info(dbc->dev, "DbC port connect change\n");
 
         if (portsc & DBC_PORTSC_RESET_CHANGE)
-                xhci_info(xhci, "DbC port reset change\n");
+                dev_info(dbc->dev, "DbC port reset change\n");
 
         if (portsc & DBC_PORTSC_LINK_CHANGE)
-                xhci_info(xhci, "DbC port link status change\n");
+                dev_info(dbc->dev, "DbC port link status change\n");
 
         if (portsc & DBC_PORTSC_CONFIG_CHANGE)
-                xhci_info(xhci, "DbC config error change\n");
+                dev_info(dbc->dev, "DbC config error change\n");
 
         /* Port reset change bit will be cleared in other place: */
         writel(portsc & ~DBC_PORTSC_RESET_CHANGE, &dbc->regs->portsc);
 }
 
-static void dbc_handle_xfer_event(struct xhci_hcd *xhci, union xhci_trb *event)
+static void dbc_handle_xfer_event(struct xhci_dbc *dbc, union xhci_trb *event)
 {
         struct dbc_ep *dep;
         struct xhci_ring *ring;
...
         remain_length = EVENT_TRB_LEN(le32_to_cpu(event->generic.field[2]));
         ep_id = TRB_TO_EP_ID(le32_to_cpu(event->generic.field[3]));
         dep = (ep_id == EPID_OUT) ?
-                get_out_ep(xhci) : get_in_ep(xhci);
+                get_out_ep(dbc) : get_in_ep(dbc);
         ring = dep->ring;
 
         switch (comp_code) {
         case COMP_SUCCESS:
                 remain_length = 0;
-        /* FALLTHROUGH */
+                fallthrough;
         case COMP_SHORT_PACKET:
                 status = 0;
                 break;
...
         case COMP_BABBLE_DETECTED_ERROR:
         case COMP_USB_TRANSACTION_ERROR:
         case COMP_STALL_ERROR:
-                xhci_warn(xhci, "tx error %d detected\n", comp_code);
+                dev_warn(dbc->dev, "tx error %d detected\n", comp_code);
                 status = -comp_code;
                 break;
         default:
-                xhci_err(xhci, "unknown tx error %d\n", comp_code);
+                dev_err(dbc->dev, "unknown tx error %d\n", comp_code);
                 status = -comp_code;
                 break;
         }
...
         }
 
         if (!req) {
-                xhci_warn(xhci, "no matched request\n");
+                dev_warn(dbc->dev, "no matched request\n");
                 return;
         }
 
...
         xhci_dbc_giveback(req, status);
 }
 
+static void inc_evt_deq(struct xhci_ring *ring)
+{
+        /* If on the last TRB of the segment go back to the beginning */
+        if (ring->dequeue == &ring->deq_seg->trbs[TRBS_PER_SEGMENT - 1]) {
+                ring->cycle_state ^= 1;
+                ring->dequeue = ring->deq_seg->trbs;
+                return;
+        }
+        ring->dequeue++;
+}
+
 static enum evtreturn xhci_dbc_do_handle_events(struct xhci_dbc *dbc)
 {
         dma_addr_t deq;
         struct dbc_ep *dep;
         union xhci_trb *evt;
         u32 ctrl, portsc;
-        struct xhci_hcd *xhci = dbc->xhci;
         bool update_erdp = false;
 
         /* DbC state machine: */
...
                 portsc = readl(&dbc->regs->portsc);
                 if (portsc & DBC_PORTSC_CONN_STATUS) {
                         dbc->state = DS_CONNECTED;
-                        xhci_info(xhci, "DbC connected\n");
+                        dev_info(dbc->dev, "DbC connected\n");
                 }
 
                 return EVT_DONE;
...
                 ctrl = readl(&dbc->regs->control);
                 if (ctrl & DBC_CTRL_DBC_RUN) {
                         dbc->state = DS_CONFIGURED;
-                        xhci_info(xhci, "DbC configured\n");
+                        dev_info(dbc->dev, "DbC configured\n");
                         portsc = readl(&dbc->regs->portsc);
                         writel(portsc, &dbc->regs->portsc);
                         return EVT_GSER;
...
                 portsc = readl(&dbc->regs->portsc);
                 if (!(portsc & DBC_PORTSC_PORT_ENABLED) &&
                     !(portsc & DBC_PORTSC_CONN_STATUS)) {
-                        xhci_info(xhci, "DbC cable unplugged\n");
+                        dev_info(dbc->dev, "DbC cable unplugged\n");
                         dbc->state = DS_ENABLED;
-                        xhci_dbc_flush_reqests(dbc);
+                        xhci_dbc_flush_requests(dbc);
 
                         return EVT_DISC;
                 }
 
                 /* Handle debug port reset event: */
                 if (portsc & DBC_PORTSC_RESET_CHANGE) {
-                        xhci_info(xhci, "DbC port reset\n");
+                        dev_info(dbc->dev, "DbC port reset\n");
                         writel(portsc, &dbc->regs->portsc);
                         dbc->state = DS_ENABLED;
-                        xhci_dbc_flush_reqests(dbc);
+                        xhci_dbc_flush_requests(dbc);
 
                         return EVT_DISC;
                 }
...
                 ctrl = readl(&dbc->regs->control);
                 if ((ctrl & DBC_CTRL_HALT_IN_TR) ||
                     (ctrl & DBC_CTRL_HALT_OUT_TR)) {
-                        xhci_info(xhci, "DbC Endpoint stall\n");
+                        dev_info(dbc->dev, "DbC Endpoint stall\n");
                         dbc->state = DS_STALLED;
 
                         if (ctrl & DBC_CTRL_HALT_IN_TR) {
-                                dep = get_in_ep(xhci);
+                                dep = get_in_ep(dbc);
                                 xhci_dbc_flush_endpoint_requests(dep);
                         }
 
                         if (ctrl & DBC_CTRL_HALT_OUT_TR) {
-                                dep = get_out_ep(xhci);
+                                dep = get_out_ep(dbc);
                                 xhci_dbc_flush_endpoint_requests(dep);
                         }
 
...
 
                 return EVT_DONE;
         default:
-                xhci_err(xhci, "Unknown DbC state %d\n", dbc->state);
+                dev_err(dbc->dev, "Unknown DbC state %d\n", dbc->state);
                 break;
         }
 
...
 
                 switch (le32_to_cpu(evt->event_cmd.flags) & TRB_TYPE_BITMASK) {
                 case TRB_TYPE(TRB_PORT_STATUS):
-                        dbc_handle_port_status(xhci, evt);
+                        dbc_handle_port_status(dbc, evt);
                         break;
                 case TRB_TYPE(TRB_TRANSFER):
-                        dbc_handle_xfer_event(xhci, evt);
+                        dbc_handle_xfer_event(dbc, evt);
                         break;
                 default:
                         break;
                 }
 
-                inc_deq(xhci, dbc->ring_evt);
+                inc_evt_deq(dbc->ring_evt);
+
                 evt = dbc->ring_evt->dequeue;
                 update_erdp = true;
         }
 
...
         if (update_erdp) {
                 deq = xhci_trb_virt_to_dma(dbc->ring_evt->deq_seg,
                                            dbc->ring_evt->dequeue);
-                xhci_write_64(xhci, deq, &dbc->regs->erdp);
+                lo_hi_writeq(deq, &dbc->regs->erdp);
         }
 
         return EVT_DONE;
...
 
 static void xhci_dbc_handle_events(struct work_struct *work)
 {
-        int ret;
         enum evtreturn evtr;
         struct xhci_dbc *dbc;
         unsigned long flags;
-        struct xhci_hcd *xhci;
 
         dbc = container_of(to_delayed_work(work), struct xhci_dbc, event_work);
-        xhci = dbc->xhci;
 
         spin_lock_irqsave(&dbc->lock, flags);
         evtr = xhci_dbc_do_handle_events(dbc);
...
 
         switch (evtr) {
         case EVT_GSER:
-                ret = xhci_dbc_tty_register_device(xhci);
-                if (ret) {
-                        xhci_err(xhci, "failed to alloc tty device\n");
-                        break;
-                }
-
-                xhci_info(xhci, "DbC now attached to /dev/ttyDBC0\n");
+                if (dbc->driver->configure)
+                        dbc->driver->configure(dbc);
                 break;
         case EVT_DISC:
-                xhci_dbc_tty_unregister_device(xhci);
+                if (dbc->driver->disconnect)
+                        dbc->driver->disconnect(dbc);
                 break;
         case EVT_DONE:
                 break;
         default:
-                xhci_info(xhci, "stop handling dbc events\n");
+                dev_info(dbc->dev, "stop handling dbc events\n");
                 return;
         }
 
...
         spin_unlock_irqrestore(&xhci->lock, flags);
 
         dbc->xhci = xhci;
+        dbc->dev = xhci_to_hcd(xhci)->self.sysdev;
         INIT_DELAYED_WORK(&dbc->event_work, xhci_dbc_handle_events);
         spin_lock_init(&dbc->lock);
 
...
                          const char *buf, size_t count)
 {
         struct xhci_hcd *xhci;
+        struct xhci_dbc *dbc;
 
         xhci = hcd_to_xhci(dev_get_drvdata(dev));
+        dbc = xhci->dbc;
 
         if (!strncmp(buf, "enable", 6))
-                xhci_dbc_start(xhci);
+                xhci_dbc_start(dbc);
         else if (!strncmp(buf, "disable", 7))
-                xhci_dbc_stop(xhci);
+                xhci_dbc_stop(dbc);
         else
                 return -EINVAL;
 
...
         if (ret)
                 goto init_err3;
 
-        ret = xhci_dbc_tty_register_driver(xhci);
+        ret = xhci_dbc_tty_probe(xhci);
         if (ret)
                 goto init_err2;
 
...
         return 0;
 
 init_err1:
-        xhci_dbc_tty_unregister_driver();
+        xhci_dbc_tty_remove(xhci->dbc);
 init_err2:
         xhci_do_dbc_exit(xhci);
 init_err3:
...
                 return;
 
         device_remove_file(dev, &dev_attr_dbc);
-        xhci_dbc_tty_unregister_driver();
-        xhci_dbc_stop(xhci);
+        xhci_dbc_tty_remove(xhci->dbc);
+        xhci_dbc_stop(xhci->dbc);
         xhci_do_dbc_exit(xhci);
 }
 
...
         if (dbc->state == DS_CONFIGURED)
                 dbc->resume_required = 1;
 
-        xhci_dbc_stop(xhci);
+        xhci_dbc_stop(dbc);
 
         return 0;
 }
...
 
         if (dbc->resume_required) {
                 dbc->resume_required = 0;
-                xhci_dbc_start(xhci);
+                xhci_dbc_start(dbc);
         }
 
         return ret;
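
Side note on the reworked request API: after this change a DbC function driver deals only with struct xhci_dbc and struct dbc_request; the struct dbc_ep and xhci_hcd parameters are gone from the public helpers. Below is a minimal sketch of a consumer queueing one transfer under the new signatures, based only on what is visible in this diff. The function names sample_complete() and sample_queue_bulk_out(), the 4096-byte buffer, and the GFP_KERNEL context are illustrative assumptions, not code from the kernel tree.

#include <linux/slab.h>
#include "xhci-dbgcap.h"

/* Completion callback: per this patch, complete() is now invoked with the
 * xhci_dbc instance instead of the xhci_hcd (see xhci_dbc_giveback()). */
static void sample_complete(struct xhci_dbc *dbc, struct dbc_request *req)
{
        dev_info(dbc->dev, "DbC request completed\n");
}

/* Allocate and queue one transfer on the BULK_OUT endpoint. */
static int sample_queue_bulk_out(struct xhci_dbc *dbc)
{
        struct dbc_request *req;
        int ret;

        /* Direction is passed explicitly now, instead of via a dbc_ep. */
        req = dbc_alloc_request(dbc, BULK_OUT, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->buf = kzalloc(4096, GFP_KERNEL);   /* buffer size is illustrative */
        if (!req->buf) {
                dbc_free_request(req);          /* no dbc_ep argument anymore */
                return -ENOMEM;
        }
        req->length = 4096;
        req->complete = sample_complete;

        ret = dbc_ep_queue(req);                /* endpoint taken from req->direction */
        if (ret) {
                kfree(req->buf);
                dbc_free_request(req);
        }
        return ret;
}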