@@ -12,7 +12,7 @@
 
 #include "internals.h"
 
-#define SPI_MEM_MAX_BUSWIDTH		4
+#define SPI_MEM_MAX_BUSWIDTH		8
 
 /**
  * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
@@ -108,15 +108,24 @@
 		return 0;
 
 	case 2:
-		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
-		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
+		if ((tx &&
+		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
+		    (!tx &&
+		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
 			return 0;
 
 		break;
 
 	case 4:
-		if ((tx && (mode & SPI_TX_QUAD)) ||
-		    (!tx && (mode & SPI_RX_QUAD)))
+		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
+		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
+			return 0;
+
+		break;
+
+	case 8:
+		if ((tx && (mode & SPI_TX_OCTAL)) ||
+		    (!tx && (mode & SPI_RX_OCTAL)))
 			return 0;
 
 		break;
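With the maximum buswidth raised to 8 and the new case 8 above, a memory driver can now describe octal transfers in its operation templates. A minimal sketch of an octal-I/O read using the SPI_MEM_OP() helpers from <linux/spi/spi-mem.h> (the 0xec opcode, the address/dummy lengths, and the mem/addr/len/buf variables are illustrative, not taken from this patch):

	struct spi_mem_op op =
		SPI_MEM_OP(SPI_MEM_OP_CMD(0xec, 8),
			   SPI_MEM_OP_ADDR(4, addr, 8),
			   SPI_MEM_OP_DUMMY(20, 8),
			   SPI_MEM_OP_DATA_IN(len, buf, 8));

	/* Rejected unless spi->mode advertises SPI_TX_OCTAL | SPI_RX_OCTAL. */
	if (!spi_mem_supports_op(mem, &op))
		return -ENOTSUPP;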
@@ -128,8 +137,8 @@
 	return -ENOTSUPP;
 }
 
-static bool spi_mem_default_supports_op(struct spi_mem *mem,
-					const struct spi_mem_op *op)
+static bool spi_mem_check_buswidth(struct spi_mem *mem,
+				   const struct spi_mem_op *op)
 {
 	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
 		return false;
@@ -142,12 +151,34 @@
 	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
 		return false;
 
-	if (op->data.nbytes &&
+	if (op->data.dir != SPI_MEM_NO_DATA &&
 	    spi_check_buswidth_req(mem, op->data.buswidth,
 				   op->data.dir == SPI_MEM_DATA_OUT))
 		return false;
 
 	return true;
+}
+
+bool spi_mem_dtr_supports_op(struct spi_mem *mem,
+			     const struct spi_mem_op *op)
+{
+	if (op->cmd.nbytes != 2)
+		return false;
+
+	return spi_mem_check_buswidth(mem, op);
+}
+EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);
+
+bool spi_mem_default_supports_op(struct spi_mem *mem,
+				 const struct spi_mem_op *op)
+{
+	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
+		return false;
+
+	if (op->cmd.nbytes != 1)
+		return false;
+
+	return spi_mem_check_buswidth(mem, op);
 }
 EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
 
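Since spi_mem_default_supports_op() now rejects any op with a DTR field set or with an opcode that isn't exactly one byte, a controller that does handle 2-byte-opcode DTR operations is expected to supply its own ->supports_op() and route DTR ops through the new exported helper. A hedged sketch (my_supports_op is hypothetical; a real driver would add its own controller-specific checks):

	static bool my_supports_op(struct spi_mem *mem,
				   const struct spi_mem_op *op)
	{
		/* DTR ops carry a 2-byte opcode; use the DTR-aware check. */
		if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
			return spi_mem_dtr_supports_op(mem, op);

		return spi_mem_default_supports_op(mem, op);
	}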
@@ -161,7 +192,7 @@
 
 static int spi_mem_check_op(const struct spi_mem_op *op)
 {
-	if (!op->cmd.buswidth)
+	if (!op->cmd.buswidth || !op->cmd.nbytes)
 		return -EINVAL;
 
 	if ((op->addr.nbytes && !op->addr.buswidth) ||
@@ -213,6 +244,45 @@
 }
 EXPORT_SYMBOL_GPL(spi_mem_supports_op);
 
+static int spi_mem_access_start(struct spi_mem *mem)
+{
+	struct spi_controller *ctlr = mem->spi->controller;
+
+	/*
+	 * Flush the message queue before executing our SPI memory
+	 * operation to prevent preemption of regular SPI transfers.
+	 */
+	spi_flush_queue(ctlr);
+
+	if (ctlr->auto_runtime_pm) {
+		int ret;
+
+		ret = pm_runtime_get_sync(ctlr->dev.parent);
+		if (ret < 0) {
+			pm_runtime_put_noidle(ctlr->dev.parent);
+			dev_err(&ctlr->dev, "Failed to power device: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	mutex_lock(&ctlr->bus_lock_mutex);
+	mutex_lock(&ctlr->io_mutex);
+
+	return 0;
+}
+
+static void spi_mem_access_end(struct spi_mem *mem)
+{
+	struct spi_controller *ctlr = mem->spi->controller;
+
+	mutex_unlock(&ctlr->io_mutex);
+	mutex_unlock(&ctlr->bus_lock_mutex);
+
+	if (ctlr->auto_runtime_pm)
+		pm_runtime_put(ctlr->dev.parent);
+}
+
 /**
  * spi_mem_exec_op() - Execute a memory operation
  * @mem: the SPI memory
@@ -241,31 +311,14 @@
 	if (!spi_mem_internal_supports_op(mem, op))
 		return -ENOTSUPP;
 
-	if (ctlr->mem_ops) {
-		/*
-		 * Flush the message queue before executing our SPI memory
-		 * operation to prevent preemption of regular SPI transfers.
-		 */
-		spi_flush_queue(ctlr);
+	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
+		ret = spi_mem_access_start(mem);
+		if (ret)
+			return ret;
 
-		if (ctlr->auto_runtime_pm) {
-			ret = pm_runtime_get_sync(ctlr->dev.parent);
-			if (ret < 0) {
-				dev_err(&ctlr->dev,
-					"Failed to power device: %d\n",
-					ret);
-				return ret;
-			}
-		}
-
-		mutex_lock(&ctlr->bus_lock_mutex);
-		mutex_lock(&ctlr->io_mutex);
 		ret = ctlr->mem_ops->exec_op(mem, op);
-		mutex_unlock(&ctlr->io_mutex);
-		mutex_unlock(&ctlr->bus_lock_mutex);
 
-		if (ctlr->auto_runtime_pm)
-			pm_runtime_put(ctlr->dev.parent);
+		spi_mem_access_end(mem);
 
 		/*
 		 * Some controllers only optimize specific paths (typically the
@@ -276,8 +329,7 @@
 		return ret;
 	}
 
-	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
-		     op->dummy.nbytes;
+	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
 
 	/*
 	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
@@ -292,7 +344,7 @@
 
 	tmpbuf[0] = op->cmd.opcode;
 	xfers[xferpos].tx_buf = tmpbuf;
-	xfers[xferpos].len = sizeof(op->cmd.opcode);
+	xfers[xferpos].len = op->cmd.nbytes;
 	xfers[xferpos].tx_nbits = op->cmd.buswidth;
 	spi_message_add_tail(&xfers[xferpos], &msg);
 	xferpos++;
@@ -390,12 +442,12 @@
 	struct spi_controller *ctlr = mem->spi->controller;
 	size_t len;
 
-	len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
-
 	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
 		return ctlr->mem_ops->adjust_op_size(mem, op);
 
 	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
+		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
+
 		if (len > spi_max_transfer_size(mem->spi))
 			return -EINVAL;
 
@@ -411,6 +463,280 @@
 }
 EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
 
+static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
+				      u64 offs, size_t len, void *buf)
+{
+	struct spi_mem_op op = desc->info.op_tmpl;
+	int ret;
+
+	op.addr.val = desc->info.offset + offs;
+	op.data.buf.in = buf;
+	op.data.nbytes = len;
+	ret = spi_mem_adjust_op_size(desc->mem, &op);
+	if (ret)
+		return ret;
+
+	ret = spi_mem_exec_op(desc->mem, &op);
+	if (ret)
+		return ret;
+
+	return op.data.nbytes;
+}
+
+static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
+				       u64 offs, size_t len, const void *buf)
+{
+	struct spi_mem_op op = desc->info.op_tmpl;
+	int ret;
+
+	op.addr.val = desc->info.offset + offs;
+	op.data.buf.out = buf;
+	op.data.nbytes = len;
+	ret = spi_mem_adjust_op_size(desc->mem, &op);
+	if (ret)
+		return ret;
+
+	ret = spi_mem_exec_op(desc->mem, &op);
+	if (ret)
+		return ret;
+
+	return op.data.nbytes;
+}
+
+/**
+ * spi_mem_dirmap_create() - Create a direct mapping descriptor
+ * @mem: SPI mem device this direct mapping should be created for
+ * @info: direct mapping information
+ *
+ * This function creates a direct mapping descriptor which can then be used
+ * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
+ * If the SPI controller driver does not support direct mapping, this function
+ * falls back to an implementation using spi_mem_exec_op(), so that the caller
+ * doesn't have to bother implementing a fallback on their own.
+ *
+ * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
+ */
+struct spi_mem_dirmap_desc *
+spi_mem_dirmap_create(struct spi_mem *mem,
+		      const struct spi_mem_dirmap_info *info)
+{
+	struct spi_controller *ctlr = mem->spi->controller;
+	struct spi_mem_dirmap_desc *desc;
+	int ret = -ENOTSUPP;
+
+	/* Make sure the address length is between 1 and 8 bytes. */
+	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
+		return ERR_PTR(-EINVAL);
+
+	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
+	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
+		return ERR_PTR(-EINVAL);
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc)
+		return ERR_PTR(-ENOMEM);
+
+	desc->mem = mem;
+	desc->info = *info;
+	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
+		ret = ctlr->mem_ops->dirmap_create(desc);
+
+	if (ret) {
+		desc->nodirmap = true;
+		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
+			ret = -ENOTSUPP;
+		else
+			ret = 0;
+	}
+
+	if (ret) {
+		kfree(desc);
+		return ERR_PTR(ret);
+	}
+
+	return desc;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
+
+/**
+ * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
+ * @desc: the direct mapping descriptor to destroy
+ *
+ * This function destroys a direct mapping descriptor previously created by
+ * spi_mem_dirmap_create().
+ */
+void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
+{
+	struct spi_controller *ctlr = desc->mem->spi->controller;
+
+	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
+		ctlr->mem_ops->dirmap_destroy(desc);
+
+	kfree(desc);
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
+
+static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
+{
+	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;
+
+	spi_mem_dirmap_destroy(desc);
+}
+
+/**
+ * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
+ *				  it to a device
+ * @dev: device the dirmap desc will be attached to
+ * @mem: SPI mem device this direct mapping should be created for
+ * @info: direct mapping information
+ *
+ * devm_ variant of the spi_mem_dirmap_create() function. See
+ * spi_mem_dirmap_create() for more details.
+ *
+ * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
+ */
+struct spi_mem_dirmap_desc *
+devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
+			   const struct spi_mem_dirmap_info *info)
+{
+	struct spi_mem_dirmap_desc **ptr, *desc;
+
+	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
+			   GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	desc = spi_mem_dirmap_create(mem, info);
+	if (IS_ERR(desc)) {
+		devres_free(ptr);
+	} else {
+		*ptr = desc;
+		devres_add(dev, ptr);
+	}
+
+	return desc;
+}
+EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
+
+static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
+{
+	struct spi_mem_dirmap_desc **ptr = res;
+
+	if (WARN_ON(!ptr || !*ptr))
+		return 0;
+
+	return *ptr == data;
+}
+
+/**
+ * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
+ *				   to a device
+ * @dev: device the dirmap desc is attached to
+ * @desc: the direct mapping descriptor to destroy
+ *
+ * devm_ variant of the spi_mem_dirmap_destroy() function. See
+ * spi_mem_dirmap_destroy() for more details.
+ */
+void devm_spi_mem_dirmap_destroy(struct device *dev,
+				 struct spi_mem_dirmap_desc *desc)
+{
+	devres_release(dev, devm_spi_mem_dirmap_release,
+		       devm_spi_mem_dirmap_match, desc);
+}
+EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
+
+/**
+ * spi_mem_dirmap_read() - Read data through a direct mapping
+ * @desc: direct mapping descriptor
+ * @offs: offset to start reading from. Note that this is not an absolute
+ *	  offset, but the offset within the direct mapping which already has
+ *	  its own offset
+ * @len: length in bytes
+ * @buf: destination buffer. This buffer must be DMA-able
+ *
+ * This function reads data from a memory device using a direct mapping
+ * previously instantiated with spi_mem_dirmap_create().
+ *
+ * Return: the amount of data read from the memory device or a negative error
+ * code. Note that the returned size might be smaller than @len, and the caller
+ * is responsible for calling spi_mem_dirmap_read() again when that happens.
+ */
+ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
+			    u64 offs, size_t len, void *buf)
+{
+	struct spi_controller *ctlr = desc->mem->spi->controller;
+	ssize_t ret;
+
+	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
+		return -EINVAL;
+
+	if (!len)
+		return 0;
+
+	if (desc->nodirmap) {
+		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
+	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
+		ret = spi_mem_access_start(desc->mem);
+		if (ret)
+			return ret;
+
+		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);
+
+		spi_mem_access_end(desc->mem);
+	} else {
+		ret = -ENOTSUPP;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
+
+/**
+ * spi_mem_dirmap_write() - Write data through a direct mapping
+ * @desc: direct mapping descriptor
+ * @offs: offset to start writing to. Note that this is not an absolute
+ *	  offset, but the offset within the direct mapping which already has
+ *	  its own offset
+ * @len: length in bytes
+ * @buf: source buffer. This buffer must be DMA-able
+ *
+ * This function writes data to a memory device using a direct mapping
+ * previously instantiated with spi_mem_dirmap_create().
+ *
+ * Return: the amount of data written to the memory device or a negative error
+ * code. Note that the returned size might be smaller than @len, and the caller
+ * is responsible for calling spi_mem_dirmap_write() again when that happens.
+ */
+ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
+			     u64 offs, size_t len, const void *buf)
+{
+	struct spi_controller *ctlr = desc->mem->spi->controller;
+	ssize_t ret;
+
+	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
+		return -EINVAL;
+
+	if (!len)
+		return 0;
+
+	if (desc->nodirmap) {
+		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
+	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
+		ret = spi_mem_access_start(desc->mem);
+		if (ret)
+			return ret;
+
+		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);
+
+		spi_mem_access_end(desc->mem);
+	} else {
+		ret = -ENOTSUPP;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);
+
 static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
 {
 	return container_of(drv, struct spi_mem_driver, spidrv.driver);
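For reference, this is how a memory driver would consume the new dirmap API end to end; a minimal sketch under assumed names (dev, mem, flash_size, offs, len, and a u8 *buf are illustrative, and the 0x0b fast-read opcode and 3-byte addressing are just one plausible template), looping on short accesses as the kernel-doc above requires:

	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_DUMMY(1, 1),
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = flash_size,
	};
	struct spi_mem_dirmap_desc *desc;

	/* Falls back to spi_mem_exec_op() if the controller has no dirmap. */
	desc = devm_spi_mem_dirmap_create(dev, mem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	while (len) {
		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);

		if (ret < 0)
			return ret;

		/* Short reads are legal: advance and retry. */
		offs += ret;
		len -= ret;
		buf += ret;
	}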