2024-11-01 2f529f9b558ca1c1bd74be7437a84e4711743404
kernel/drivers/spi/spi.c
@@ -2729,6 +2729,9 @@
 	spin_lock_init(&ctlr->bus_lock_spinlock);
 	mutex_init(&ctlr->bus_lock_mutex);
 	mutex_init(&ctlr->io_mutex);
+#ifdef CONFIG_SPI_OOB
+	sema_init(&ctlr->bus_oob_lock_sem, 1);
+#endif
 	ctlr->bus_lock_flag = 0;
 	init_completion(&ctlr->xfer_completion);
 	if (!ctlr->max_dma_len)
@@ -3804,6 +3807,22 @@
  * inline functions.
  */

+static void get_spi_bus(struct spi_controller *ctlr)
+{
+	mutex_lock(&ctlr->bus_lock_mutex);
+#ifdef CONFIG_SPI_OOB
+	down(&ctlr->bus_oob_lock_sem);
+#endif
+}
+
+static void put_spi_bus(struct spi_controller *ctlr)
+{
+#ifdef CONFIG_SPI_OOB
+	up(&ctlr->bus_oob_lock_sem);
+#endif
+	mutex_unlock(&ctlr->bus_lock_mutex);
+}
+
 static void spi_complete(void *arg)
 {
 	complete(arg);
@@ -3888,9 +3907,9 @@
 {
 	int ret;

-	mutex_lock(&spi->controller->bus_lock_mutex);
+	get_spi_bus(spi->controller);
 	ret = __spi_sync(spi, message);
-	mutex_unlock(&spi->controller->bus_lock_mutex);
+	put_spi_bus(spi->controller);

 	return ret;
 }
@@ -3937,7 +3956,7 @@
 {
 	unsigned long flags;

-	mutex_lock(&ctlr->bus_lock_mutex);
+	get_spi_bus(ctlr);

 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
 	ctlr->bus_lock_flag = 1;
@@ -3966,7 +3985,7 @@
 {
 	ctlr->bus_lock_flag = 0;

-	mutex_unlock(&ctlr->bus_lock_mutex);
+	put_spi_bus(ctlr);

 	return 0;
 }
@@ -4051,6 +4070,274 @@
 }
 EXPORT_SYMBOL_GPL(spi_write_then_read);

+#ifdef CONFIG_SPI_OOB
+
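+/*
+ * Editor's note, not in the original patch: in-band callers serialize
+ * via get_spi_bus(), sleeping first on bus_lock_mutex and then on
+ * bus_oob_lock_sem. The oob path below cannot sleep on the semaphore
+ * while holding bus_lock_spinlock, so it uses down_trylock() and
+ * reports -EBUSY whenever an in-band user or a spi_bus_lock() holder
+ * (bus_lock_flag set) currently owns the bus.
+ */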
+static int bus_lock_oob(struct spi_controller *ctlr)
+{
+	unsigned long flags;
+	int ret = -EBUSY;
+
+	mutex_lock(&ctlr->bus_lock_mutex);
+
+	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+
+	if (!ctlr->bus_lock_flag && !down_trylock(&ctlr->bus_oob_lock_sem)) {
+		ctlr->bus_lock_flag = 1;
+		ret = 0;
+	}
+
+	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
+	mutex_unlock(&ctlr->bus_lock_mutex);
+
+	return ret;
+}
+
+static int bus_unlock_oob(struct spi_controller *ctlr)
+{
+	ctlr->bus_lock_flag = 0;
+	up(&ctlr->bus_oob_lock_sem);
+
+	return 0;
+}
+
+static int prepare_oob_dma(struct spi_controller *ctlr,
+			   struct spi_oob_transfer *xfer)
+{
+	struct dma_async_tx_descriptor *desc;
+	size_t len = xfer->setup.frame_len;
+	dma_cookie_t cookie;
+	dma_addr_t addr;
+	int ret;
+
+	/* TX to second half of I/O buffer. */
+	addr = xfer->dma_addr + xfer->aligned_frame_len;
+	desc = dmaengine_prep_slave_single(ctlr->dma_tx, addr, len,
+					   DMA_MEM_TO_DEV,
+					   DMA_OOB_INTERRUPT|DMA_OOB_PULSE);
+	if (!desc)
+		return -EIO;
+
+	xfer->txd = desc;
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret)
+		return ret;
+
+	dma_async_issue_pending(ctlr->dma_tx);
+
+	/* RX to first half of I/O buffer. */
+	addr = xfer->dma_addr;
+	desc = dmaengine_prep_slave_single(ctlr->dma_rx, addr, len,
+					   DMA_DEV_TO_MEM,
+					   DMA_OOB_INTERRUPT|DMA_OOB_PULSE);
+	if (!desc) {
+		ret = -EIO;
+		goto fail_rx;
+	}
+
+	desc->callback = xfer->setup.xfer_done;
+	desc->callback_param = xfer;
+
+	xfer->rxd = desc;
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret)
+		goto fail_rx;
+
+	dma_async_issue_pending(ctlr->dma_rx);
+
+	return 0;
+
+fail_rx:
+	dmaengine_terminate_sync(ctlr->dma_tx);
+
+	return ret;
+}
+
+static void unprepare_oob_dma(struct spi_controller *ctlr)
+{
+	dmaengine_terminate_sync(ctlr->dma_rx);
+	dmaengine_terminate_sync(ctlr->dma_tx);
+}
+
+/*
+ * A simpler version of __spi_validate() for oob transfers.
+ */
+static int validate_oob_xfer(struct spi_device *spi,
+			     struct spi_oob_transfer *xfer)
+{
+	struct spi_controller *ctlr = spi->controller;
+	struct spi_oob_setup *p = &xfer->setup;
+	int w_size;
+
+	if (p->frame_len == 0)
+		return -EINVAL;
+
+	if (!p->bits_per_word)
+		p->bits_per_word = spi->bits_per_word;
+
+	if (!p->speed_hz)
+		p->speed_hz = spi->max_speed_hz;
+
+	if (ctlr->max_speed_hz && p->speed_hz > ctlr->max_speed_hz)
+		p->speed_hz = ctlr->max_speed_hz;
+
+	if (__spi_validate_bits_per_word(ctlr, p->bits_per_word))
+		return -EINVAL;
+
+	if (p->bits_per_word <= 8)
+		w_size = 1;
+	else if (p->bits_per_word <= 16)
+		w_size = 2;
+	else
+		w_size = 4;
+
+	if (p->frame_len % w_size)
+		return -EINVAL;
+
+	if (p->speed_hz && ctlr->min_speed_hz &&
+	    p->speed_hz < ctlr->min_speed_hz)
+		return -EINVAL;
+
+	return 0;
+}
+
+int spi_prepare_oob_transfer(struct spi_device *spi,
+			     struct spi_oob_transfer *xfer)
+{
+	struct spi_controller *ctlr;
+	dma_addr_t dma_addr;
+	size_t alen, iolen;
+	void *iobuf;
+	int ret;
+
+	/* Controller must support oob transactions. */
+	ctlr = spi->controller;
+	if (!ctlr->prepare_oob_transfer)
+		return -ENOTSUPP;
+
+	/* Out-of-band transfers require DMA support. */
+	if (!ctlr->can_dma)
+		return -ENODEV;
+
+	ret = validate_oob_xfer(spi, xfer);
+	if (ret)
+		return ret;
+
+	alen = L1_CACHE_ALIGN(xfer->setup.frame_len);
+	/*
+	 * Allocate a single coherent I/O buffer which is twice as
+	 * large as the user specified transfer length, TX data goes
+	 * to the upper half, RX data to the lower half.
+	 */
+	iolen = alen * 2;
+	iobuf = dma_alloc_coherent(ctlr->dev.parent, iolen,
+				   &dma_addr, GFP_KERNEL);
+	if (iobuf == NULL)
+		return -ENOMEM;
+
+	xfer->spi = spi;
+	xfer->dma_addr = dma_addr;
+	xfer->io_buffer = iobuf;
+	xfer->aligned_frame_len = alen;
+	xfer->effective_speed_hz = 0;
+
+	ret = prepare_oob_dma(ctlr, xfer);
+	if (ret)
+		goto fail_prep_dma;
+
+	ret = bus_lock_oob(ctlr);
+	if (ret)
+		goto fail_bus_lock;
+
+	ret = ctlr->prepare_oob_transfer(ctlr, xfer);
+	if (ret)
+		goto fail_prep_xfer;
+
+	return 0;
+
+fail_prep_xfer:
+	bus_unlock_oob(ctlr);
+fail_bus_lock:
+	unprepare_oob_dma(ctlr);
+fail_prep_dma:
+	dma_free_coherent(ctlr->dev.parent, iolen, iobuf, dma_addr);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_prepare_oob_transfer);
+
+void spi_start_oob_transfer(struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+
+	ctlr->start_oob_transfer(ctlr, xfer);
+}
+EXPORT_SYMBOL_GPL(spi_start_oob_transfer);
+
+int spi_pulse_oob_transfer(struct spi_oob_transfer *xfer) /* oob stage */
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+	int ret;
+
+	if (ctlr->pulse_oob_transfer)
+		ctlr->pulse_oob_transfer(ctlr, xfer);
+
+	ret = dma_pulse_oob(ctlr->dma_rx);
+	if (likely(!ret))
+		ret = dma_pulse_oob(ctlr->dma_tx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_pulse_oob_transfer);
+
+void spi_terminate_oob_transfer(struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+
+	if (ctlr->terminate_oob_transfer)
+		ctlr->terminate_oob_transfer(ctlr, xfer);
+
+	unprepare_oob_dma(ctlr);
+	bus_unlock_oob(ctlr);
+	dma_free_coherent(ctlr->dev.parent, xfer->aligned_frame_len * 2,
+			  xfer->io_buffer, xfer->dma_addr);
+}
+EXPORT_SYMBOL_GPL(spi_terminate_oob_transfer);
+
+int spi_mmap_oob_transfer(struct vm_area_struct *vma,
+			  struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+	size_t len;
+	int ret;
+
+	/*
+	 * We may have an IOMMU, rely on dma_mmap_coherent() for
+	 * dealing with the nitty-gritty details of mapping a coherent
+	 * buffer.
+	 */
+	len = vma->vm_end - vma->vm_start;
+	if (spi_get_oob_iolen(xfer) <= len)
+		ret = dma_mmap_coherent(ctlr->dev.parent,
+					vma,
+					xfer->io_buffer,
+					xfer->dma_addr,
+					len);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mmap_oob_transfer);
+
+#endif /* SPI_OOB */
+
 /*-------------------------------------------------------------------------*/

 #if IS_ENABLED(CONFIG_OF)
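
Usage sketch (editor's addition, not part of the patch): the exported
entry points suggest the lifecycle below for a client driver. Only the
spi_*_oob_transfer() calls and the spi_oob_transfer/spi_oob_setup field
names are taken from the patch; the demo_* names, the frame length and
the simplified single-pulse flow are invented for illustration, and
spi_pulse_oob_transfer() is expected to run from the oob stage.

#include <linux/spi/spi.h>

/* Hypothetical RX-completion callback, invoked from the oob stage
 * with the spi_oob_transfer as argument (see prepare_oob_dma()). */
static void demo_xfer_done(void *arg)
{
	struct spi_oob_transfer *xfer = arg;

	/* RX data for the last frame sits in the lower half of the
	 * I/O area, i.e. at xfer->io_buffer, offset 0. */
	(void)xfer;
}

static int demo_run_one_frame(struct spi_device *spi)
{
	struct spi_oob_transfer xfer = {
		.setup = {
			.frame_len = 64,	/* bytes per fixed-size frame */
			.bits_per_word = 0,	/* 0 = inherit spi->bits_per_word */
			.speed_hz = 0,		/* 0 = inherit spi->max_speed_hz */
			.xfer_done = demo_xfer_done,
		},
	};
	int ret;

	/* Validate the setup, allocate the coherent I/O area, prep
	 * both DMA channels and lock the bus for oob use. */
	ret = spi_prepare_oob_transfer(spi, &xfer);
	if (ret)
		return ret;

	/* Arm the controller (hardware-specific part of the setup). */
	spi_start_oob_transfer(&xfer);

	/* From the oob stage: trigger one TX+RX frame. TX data is
	 * fetched from the upper half of the I/O area, at offset
	 * xfer.aligned_frame_len. */
	ret = spi_pulse_oob_transfer(&xfer);

	/* Unwind: terminate DMA, drop the bus lock, free the I/O area. */
	spi_terminate_oob_transfer(&xfer);

	return ret;
}

For userspace access, the same I/O area can be exposed through
spi_mmap_oob_transfer() from a character device's mmap handler; RX data
then appears at offset 0 of the mapping and TX data at offset
aligned_frame_len, with the vma required to cover spi_get_oob_iolen()
bytes.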