2024-12-19 9370bb92b2d16684ee45cf24e879c93c509162da
kernel/drivers/misc/mei/client.c
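Summary inferred from the hunks below (no commit message was captured): convert the license boilerplate to an SPDX tag; drop mei_cl_cmp_id() in favor of direct client-pointer comparison; add per-file-pointer vtag bookkeeping (vtag_map, mei_cl_vtag_alloc(), mei_cl_fp_by_vtag(), pending_read tracking) so several file descriptors can share one host client; protect the rd_completed list with rd_completed_lock; allocate struct mei_msg_hdr dynamically so a vtag extended header can ride on the first fragment; and let both TX paths spill large payloads into the DMA ring.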
@@ -1,17 +1,7 @@
+// SPDX-License-Identifier: GPL-2.0
 /*
- *
+ * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
  * Intel Management Engine Interface (Intel MEI) Linux driver
- * Copyright (c) 2003-2012, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
  */
 
 #include <linux/sched/signal.h>
@@ -320,23 +310,6 @@
 }
 
 /**
- * mei_cl_cmp_id - tells if the clients are the same
- *
- * @cl1: host client 1
- * @cl2: host client 2
- *
- * Return: true - if the clients has same host and me ids
- *         false - otherwise
- */
-static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
-				 const struct mei_cl *cl2)
-{
-	return cl1 && cl2 &&
-		(cl1->host_client_id == cl2->host_client_id) &&
-		(mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
-}
-
-/**
  * mei_io_cb_free - free mei_cb_private related memory
  *
  * @cb: mei callback struct
@@ -382,6 +355,27 @@
 }
 
 /**
+ * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
+ *
+ * Locking: called under "dev->device_lock" lock
+ *
+ * @cl: mei client
+ * @fp: pointer to file structure
+ */
+static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
+				  const struct file *fp)
+{
+	struct mei_cl_vtag *cl_vtag;
+
+	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
+		if (cl_vtag->fp == fp) {
+			cl_vtag->pending_read = true;
+			return;
+		}
+	}
+}
+
+/**
  * mei_io_cb_init - allocate and initialize io callback
  *
  * @cl: mei client
@@ -396,7 +390,7 @@
 {
 	struct mei_cl_cb *cb;
 
-	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
+	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
 	if (!cb)
 		return NULL;
 
@@ -405,6 +399,8 @@
 	cb->cl = cl;
 	cb->buf_idx = 0;
 	cb->fop_type = type;
+	cb->vtag = 0;
+
 	return cb;
 }
 
@@ -420,8 +416,11 @@
 	struct mei_cl_cb *cb, *next;
 
 	list_for_each_entry_safe(cb, next, head, list) {
-		if (mei_cl_cmp_id(cl, cb->cl))
+		if (cl == cb->cl) {
 			list_del_init(&cb->list);
+			if (cb->fop_type == MEI_FOP_READ)
+				mei_io_cb_free(cb);
+		}
 	}
 }
 
@@ -430,14 +429,16 @@
  *
  * @head: An instance of our list structure
  * @cl: host client
+ * @fp: file pointer (matching cb file object), may be NULL
  */
 static void mei_io_tx_list_free_cl(struct list_head *head,
-				   const struct mei_cl *cl)
+				   const struct mei_cl *cl,
+				   const struct file *fp)
 {
 	struct mei_cl_cb *cb, *next;
 
 	list_for_each_entry_safe(cb, next, head, list) {
-		if (mei_cl_cmp_id(cl, cb->cl))
+		if (cl == cb->cl && (!fp || fp == cb->fp))
 			mei_tx_cb_dequeue(cb);
 	}
 }
@@ -455,6 +456,19 @@
 	list_for_each_entry_safe(cb, next, head, list)
 		if (!fp || fp == cb->fp)
 			mei_io_cb_free(cb);
+}
+
+/**
+ * mei_cl_free_pending - free pending cb
+ *
+ * @cl: host client
+ */
+static void mei_cl_free_pending(struct mei_cl *cl)
+{
+	struct mei_cl_cb *cb;
+
+	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
+	mei_io_cb_free(cb);
 }
 
 /**
@@ -480,7 +494,7 @@
 	if (length == 0)
 		return cb;
 
-	cb->buf.data = kmalloc(length, GFP_KERNEL);
+	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
 	if (!cb->buf.data) {
 		mei_io_cb_free(cb);
 		return NULL;
@@ -529,15 +543,19 @@
  *
  * Return: cb on success, NULL if cb is not found
  */
-struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
+struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
 {
 	struct mei_cl_cb *cb;
+	struct mei_cl_cb *ret_cb = NULL;
 
+	spin_lock(&cl->rd_completed_lock);
 	list_for_each_entry(cb, &cl->rd_completed, list)
-		if (!fp || fp == cb->fp)
-			return cb;
-
-	return NULL;
+		if (!fp || fp == cb->fp) {
+			ret_cb = cb;
+			break;
+		}
+	spin_unlock(&cl->rd_completed_lock);
+	return ret_cb;
 }
 
 /**
@@ -558,12 +576,17 @@
 	dev = cl->dev;
 
 	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
-	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
-	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
-	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
-	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
-	mei_io_list_free_fp(&cl->rd_pending, fp);
+	mei_io_tx_list_free_cl(&cl->dev->write_list, cl, fp);
+	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl, fp);
+	/* free pending and control cb only in final flush */
+	if (!fp) {
+		mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
+		mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
+		mei_cl_free_pending(cl);
+	}
+	spin_lock(&cl->rd_completed_lock);
 	mei_io_list_free_fp(&cl->rd_completed, fp);
+	spin_unlock(&cl->rd_completed_lock);
 
 	return 0;
 }
@@ -576,11 +599,13 @@
  */
 static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
 {
-	memset(cl, 0, sizeof(struct mei_cl));
+	memset(cl, 0, sizeof(*cl));
 	init_waitqueue_head(&cl->wait);
 	init_waitqueue_head(&cl->rx_wait);
 	init_waitqueue_head(&cl->tx_wait);
 	init_waitqueue_head(&cl->ev_wait);
+	INIT_LIST_HEAD(&cl->vtag_map);
+	spin_lock_init(&cl->rd_completed_lock);
 	INIT_LIST_HEAD(&cl->rd_completed);
 	INIT_LIST_HEAD(&cl->rd_pending);
 	INIT_LIST_HEAD(&cl->link);
@@ -599,7 +624,7 @@
 {
 	struct mei_cl *cl;
 
-	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
+	cl = kmalloc(sizeof(*cl), GFP_KERNEL);
 	if (!cl)
 		return NULL;
 
@@ -695,7 +720,7 @@
 
 void mei_host_client_init(struct mei_device *dev)
 {
-	dev->dev_state = MEI_DEV_ENABLED;
+	mei_set_devstate(dev, MEI_DEV_ENABLED);
 	dev->reset_count = 0;
 
 	schedule_work(&dev->bus_rescan_work);
@@ -776,8 +801,8 @@
 		return;
 
 	cl->state = MEI_FILE_DISCONNECTED;
-	mei_io_tx_list_free_cl(&dev->write_list, cl);
-	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
+	mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
+	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
 	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
 	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
 	mei_cl_wake_all(cl);
@@ -1253,6 +1278,157 @@
 }
 
 /**
+ * mei_cl_vtag_alloc - allocate and fill the vtag structure
+ *
+ * @fp: pointer to file structure
+ * @vtag: vm tag
+ *
+ * Return:
+ * * Pointer to allocated struct - on success
+ * * ERR_PTR(-ENOMEM) on memory allocation failure
+ */
+struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
+{
+	struct mei_cl_vtag *cl_vtag;
+
+	cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
+	if (!cl_vtag)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&cl_vtag->list);
+	cl_vtag->vtag = vtag;
+	cl_vtag->fp = fp;
+
+	return cl_vtag;
+}
+
+/**
+ * mei_cl_fp_by_vtag - obtain the file pointer by vtag
+ *
+ * @cl: host client
+ * @vtag: vm tag
+ *
+ * Return:
+ * * A file pointer - on success
+ * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
+ */
+const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
+{
+	struct mei_cl_vtag *vtag_l;
+
+	list_for_each_entry(vtag_l, &cl->vtag_map, list)
+		if (vtag_l->vtag == vtag)
+			return vtag_l->fp;
+
+	return ERR_PTR(-ENOENT);
+}
+
+/**
+ * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
+ *
+ * @cl: host client
+ * @vtag: vm tag
+ */
+static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
+{
+	struct mei_cl_vtag *vtag_l;
+
+	list_for_each_entry(vtag_l, &cl->vtag_map, list) {
+		if (vtag_l->vtag == vtag) {
+			vtag_l->pending_read = false;
+			break;
+		}
+	}
+}
+
+/**
+ * mei_cl_read_vtag_add_fc - add flow control for next pending reader
+ *                           in the vtag list
+ *
+ * @cl: host client
+ */
+static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
+{
+	struct mei_cl_vtag *cl_vtag;
+
+	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
+		if (cl_vtag->pending_read) {
+			if (mei_cl_enqueue_ctrl_wr_cb(cl,
+						      mei_cl_mtu(cl),
+						      MEI_FOP_READ,
+						      cl_vtag->fp))
+				cl->rx_flow_ctrl_creds++;
+			break;
+		}
+	}
+}
+
+/**
+ * mei_cl_vt_support_check - check if client support vtags
+ *
+ * @cl: host client
+ *
+ * Return:
+ * * 0 - supported, or not connected at all
+ * * -EOPNOTSUPP - vtags are not supported by client
+ */
+int mei_cl_vt_support_check(const struct mei_cl *cl)
+{
+	struct mei_device *dev = cl->dev;
+
+	if (!dev->hbm_f_vt_supported)
+		return -EOPNOTSUPP;
+
+	if (!cl->me_cl)
+		return 0;
+
+	return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
+}
+
+/**
+ * mei_cl_add_rd_completed - add read completed callback to list with lock
+ *                           and vtag check
+ *
+ * @cl: host client
+ * @cb: callback block
+ *
+ */
+void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+	const struct file *fp;
+
+	if (!mei_cl_vt_support_check(cl)) {
+		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
+		if (IS_ERR(fp)) {
+			/* client already disconnected, discarding */
+			mei_io_cb_free(cb);
+			return;
+		}
+		cb->fp = fp;
+		mei_cl_reset_read_by_vtag(cl, cb->vtag);
+		mei_cl_read_vtag_add_fc(cl);
+	}
+
+	spin_lock(&cl->rd_completed_lock);
+	list_add_tail(&cb->list, &cl->rd_completed);
+	spin_unlock(&cl->rd_completed_lock);
+}
+
+/**
+ * mei_cl_del_rd_completed - free read completed callback with lock
+ *
+ * @cl: host client
+ * @cb: callback block
+ *
+ */
+void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
+{
+	spin_lock(&cl->rd_completed_lock);
+	mei_io_cb_free(cb);
+	spin_unlock(&cl->rd_completed_lock);
+}
+
+/**
  * mei_cl_notify_fop2req - convert fop to proper request
  *
  * @fop: client notification start response command
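The vtag map above ties each file descriptor sharing a host client to its virtual tag; mei_cl_add_rd_completed() uses mei_cl_fp_by_vtag() to route a completed read back to the owning fp. A minimal, hypothetical caller-side sketch of binding an fp to a vtag with these helpers (example_bind_vtag is an invented name; the real ioctl plumbing lives outside client.c; dev->device_lock assumed held):

static int example_bind_vtag(struct mei_cl *cl, struct file *fp, u8 vtag)
{
	struct mei_cl_vtag *cl_vtag;

	/* nothing to do if this vtag is already mapped to some fp */
	if (!IS_ERR(mei_cl_fp_by_vtag(cl, vtag)))
		return 0;

	cl_vtag = mei_cl_vtag_alloc(fp, vtag);
	if (IS_ERR(cl_vtag))
		return PTR_ERR(cl_vtag);

	list_add_tail(&cl_vtag->list, &cl->vtag_map);
	return 0;
}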
@@ -1376,7 +1552,9 @@
 
 	mutex_unlock(&dev->device_lock);
 	wait_event_timeout(cl->wait,
-			   cl->notify_en == request || !mei_cl_is_connected(cl),
+			   cl->notify_en == request ||
+			   cl->status ||
+			   !mei_cl_is_connected(cl),
 			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
 	mutex_lock(&dev->device_lock);
 
@@ -1505,12 +1683,16 @@
 		return 0;
 
 	/* HW currently supports only one pending read */
-	if (cl->rx_flow_ctrl_creds)
+	if (cl->rx_flow_ctrl_creds) {
+		mei_cl_set_read_by_fp(cl, fp);
 		return -EBUSY;
+	}
 
 	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
 	if (!cb)
 		return -ENOMEM;
+
+	mei_cl_set_read_by_fp(cl, fp);
 
 	rets = pm_runtime_get(dev->dev);
 	if (rets < 0 && rets != -EINPROGRESS) {
@@ -1540,21 +1722,67 @@
 	return rets;
 }
 
-/**
- * mei_msg_hdr_init - initialize mei message header
- *
- * @mei_hdr: mei message header
- * @cb: message callback structure
- */
-static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
+static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
 {
+	ext->type = MEI_EXT_HDR_VTAG;
+	ext->ext_payload[0] = vtag;
+	ext->length = mei_data2slots(sizeof(*ext));
+	return ext->length;
+}
+
+/**
+ * mei_msg_hdr_init - allocate and initialize mei message header
+ *
+ * @cb: message callback structure
+ *
+ * Return: a pointer to initialized header
+ */
+static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
+{
+	size_t hdr_len;
+	struct mei_ext_meta_hdr *meta;
+	struct mei_ext_hdr *ext;
+	struct mei_msg_hdr *mei_hdr;
+	bool is_ext, is_vtag;
+
+	if (!cb)
+		return ERR_PTR(-EINVAL);
+
+	/* Extended header for vtag is attached only on the first fragment */
+	is_vtag = (cb->vtag && cb->buf_idx == 0);
+	is_ext = is_vtag;
+
+	/* Compute extended header size */
+	hdr_len = sizeof(*mei_hdr);
+
+	if (!is_ext)
+		goto setup_hdr;
+
+	hdr_len += sizeof(*meta);
+	if (is_vtag)
+		hdr_len += sizeof(*ext);
+
+setup_hdr:
+	mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
+	if (!mei_hdr)
+		return ERR_PTR(-ENOMEM);
+
 	mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
 	mei_hdr->me_addr = mei_cl_me_id(cb->cl);
-	mei_hdr->length = 0;
-	mei_hdr->reserved = 0;
-	mei_hdr->msg_complete = 0;
-	mei_hdr->dma_ring = 0;
 	mei_hdr->internal = cb->internal;
+	mei_hdr->extended = is_ext;
+
+	if (!is_ext)
+		goto out;
+
+	meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
+	if (is_vtag) {
+		meta->count++;
+		meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
+	}
+out:
+	mei_hdr->length = hdr_len - sizeof(*mei_hdr);
+	return mei_hdr;
 }
 
 /**
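For orientation, a first-fragment message carrying a vtag is laid out as sketched below; the ordering is taken from mei_msg_hdr_init() above, the rest is illustrative:

/*
 * struct mei_msg_hdr      - base bitfield header
 * struct mei_ext_meta_hdr - count and total size of the extended headers
 * struct mei_ext_hdr      - type MEI_EXT_HDR_VTAG, vtag in ext_payload[0]
 * payload ...
 *
 * hdr_len = sizeof(struct mei_msg_hdr) +
 *           sizeof(struct mei_ext_meta_hdr) +
 *           sizeof(struct mei_ext_hdr);
 *
 * mei_msg_hdr_init() returns with mei_hdr->length covering only the
 * extension area; the writers add the payload length afterwards.
 */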
@@ -1572,13 +1800,17 @@
 {
 	struct mei_device *dev;
 	struct mei_msg_data *buf;
-	struct mei_msg_hdr mei_hdr;
-	size_t hdr_len = sizeof(mei_hdr);
-	size_t len;
-	size_t hbuf_len;
+	struct mei_msg_hdr *mei_hdr = NULL;
+	size_t hdr_len;
+	size_t hbuf_len, dr_len;
+	size_t buf_len;
+	size_t data_len;
 	int hbuf_slots;
+	u32 dr_slots;
+	u32 dma_len;
 	int rets;
 	bool first_chunk;
+	const void *data;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
@@ -1598,41 +1830,66 @@
 		return 0;
 	}
 
-	len = buf->size - cb->buf_idx;
+	buf_len = buf->size - cb->buf_idx;
+	data = buf->data + cb->buf_idx;
 	hbuf_slots = mei_hbuf_empty_slots(dev);
 	if (hbuf_slots < 0) {
 		rets = -EOVERFLOW;
 		goto err;
 	}
 
-	hbuf_len = mei_slots2data(hbuf_slots);
+	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
+	dr_slots = mei_dma_ring_empty_slots(dev);
+	dr_len = mei_slots2data(dr_slots);
 
-	mei_msg_hdr_init(&mei_hdr, cb);
+	mei_hdr = mei_msg_hdr_init(cb);
+	if (IS_ERR(mei_hdr)) {
+		rets = PTR_ERR(mei_hdr);
+		mei_hdr = NULL;
+		goto err;
+	}
+
+	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
+	       mei_hdr->extended, cb->vtag);
+
+	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
 
 	/**
 	 * Split the message only if we can write the whole host buffer
 	 * otherwise wait for next time the host buffer is empty.
 	 */
-	if (len + hdr_len <= hbuf_len) {
-		mei_hdr.length = len;
-		mei_hdr.msg_complete = 1;
+	if (hdr_len + buf_len <= hbuf_len) {
+		data_len = buf_len;
+		mei_hdr->msg_complete = 1;
+	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
+		mei_hdr->dma_ring = 1;
+		if (buf_len > dr_len)
+			buf_len = dr_len;
+		else
+			mei_hdr->msg_complete = 1;
+
+		data_len = sizeof(dma_len);
+		dma_len = buf_len;
+		data = &dma_len;
 	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
-		mei_hdr.length = hbuf_len - hdr_len;
+		buf_len = hbuf_len - hdr_len;
+		data_len = buf_len;
 	} else {
+		kfree(mei_hdr);
 		return 0;
 	}
+	mei_hdr->length += data_len;
 
-	cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
-	       cb->buf.size, cb->buf_idx);
+	if (mei_hdr->dma_ring)
+		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
+	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
 
-	rets = mei_write_message(dev, &mei_hdr, hdr_len,
-				 buf->data + cb->buf_idx, mei_hdr.length);
 	if (rets)
 		goto err;
 
 	cl->status = 0;
 	cl->writing_state = MEI_WRITING;
-	cb->buf_idx += mei_hdr.length;
+	cb->buf_idx += buf_len;
 
 	if (first_chunk) {
 		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
@@ -1641,12 +1898,14 @@
 		}
 	}
 
-	if (mei_hdr.msg_complete)
+	if (mei_hdr->msg_complete)
 		list_move_tail(&cb->list, &dev->write_waiting_list);
 
+	kfree(mei_hdr);
 	return 0;
 
 err:
+	kfree(mei_hdr);
 	cl->status = rets;
 	list_move_tail(&cb->list, cmpl_list);
 	return rets;
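Both writers now share the same transmit decision; restated as an annotated sketch (commentary on the branch above, not new code):

/*
 * if (hdr_len + buf_len <= hbuf_len)
 *         whole message fits: send header and payload inline, msg_complete;
 * else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len))
 *         copy the payload (capped at dr_len) into the DMA ring and send
 *         only its u32 length inline, with mei_hdr->dma_ring set;
 * else if ((u32)hbuf_slots == mei_hbuf_depth(dev))
 *         host buffer is empty but too small: send the largest inline
 *         fragment that fits;
 * else
 *         free the header and retry when the host buffer drains.
 */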
@@ -1665,13 +1924,17 @@
 {
 	struct mei_device *dev;
 	struct mei_msg_data *buf;
-	struct mei_msg_hdr mei_hdr;
-	size_t hdr_len = sizeof(mei_hdr);
-	size_t len;
-	size_t hbuf_len;
+	struct mei_msg_hdr *mei_hdr = NULL;
+	size_t hdr_len;
+	size_t hbuf_len, dr_len;
+	size_t buf_len;
+	size_t data_len;
 	int hbuf_slots;
+	u32 dr_slots;
+	u32 dma_len;
 	ssize_t rets;
 	bool blocking;
+	const void *data;
 
 	if (WARN_ON(!cl || !cl->dev))
 		return -ENODEV;
@@ -1682,10 +1945,12 @@
 	dev = cl->dev;
 
 	buf = &cb->buf;
-	len = buf->size;
-	blocking = cb->blocking;
+	buf_len = buf->size;
 
-	cl_dbg(dev, cl, "len=%zd\n", len);
+	cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
+
+	blocking = cb->blocking;
+	data = buf->data;
 
 	rets = pm_runtime_get(dev->dev);
 	if (rets < 0 && rets != -EINPROGRESS) {
@@ -1702,17 +1967,27 @@
 	if (rets < 0)
 		goto err;
 
-	mei_msg_hdr_init(&mei_hdr, cb);
+	mei_hdr = mei_msg_hdr_init(cb);
+	if (IS_ERR(mei_hdr)) {
+		rets = -PTR_ERR(mei_hdr);
+		mei_hdr = NULL;
+		goto err;
+	}
+
+	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
+	       mei_hdr->extended, cb->vtag);
+
+	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
 
 	if (rets == 0) {
 		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
-		rets = len;
+		rets = buf_len;
 		goto out;
 	}
 
 	if (!mei_hbuf_acquire(dev)) {
 		cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
-		rets = len;
+		rets = buf_len;
 		goto out;
 	}
 
@@ -1722,17 +1997,34 @@
 		goto out;
 	}
 
-	hbuf_len = mei_slots2data(hbuf_slots);
+	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
+	dr_slots = mei_dma_ring_empty_slots(dev);
+	dr_len = mei_slots2data(dr_slots);
 
-	if (len + hdr_len <= hbuf_len) {
-		mei_hdr.length = len;
-		mei_hdr.msg_complete = 1;
+	if (hdr_len + buf_len <= hbuf_len) {
+		data_len = buf_len;
+		mei_hdr->msg_complete = 1;
+	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
+		mei_hdr->dma_ring = 1;
+		if (buf_len > dr_len)
+			buf_len = dr_len;
+		else
+			mei_hdr->msg_complete = 1;
+
+		data_len = sizeof(dma_len);
+		dma_len = buf_len;
+		data = &dma_len;
 	} else {
-		mei_hdr.length = hbuf_len - hdr_len;
+		buf_len = hbuf_len - hdr_len;
+		data_len = buf_len;
 	}
 
-	rets = mei_write_message(dev, &mei_hdr, hdr_len,
-				 buf->data, mei_hdr.length);
+	mei_hdr->length += data_len;
+
+	if (mei_hdr->dma_ring)
+		mei_dma_ring_write(dev, buf->data, buf_len);
+	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
+
 	if (rets)
 		goto err;
 
@@ -1741,10 +2033,12 @@
 		goto err;
 
 	cl->writing_state = MEI_WRITING;
-	cb->buf_idx = mei_hdr.length;
+	cb->buf_idx = buf_len;
+	/* restore return value */
+	buf_len = buf->size;
 
 out:
-	if (mei_hdr.msg_complete)
+	if (mei_hdr->msg_complete)
 		mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
 	else
 		mei_tx_cb_enqueue(cb, &dev->write_list);
@@ -1769,7 +2063,7 @@
 		}
 	}
 
-	rets = len;
+	rets = buf_len;
 err:
 	cl_dbg(dev, cl, "rpm: autosuspend\n");
 	pm_runtime_mark_last_busy(dev->dev);
@@ -1777,9 +2071,10 @@
 free:
 	mei_io_cb_free(cb);
 
+	kfree(mei_hdr);
+
 	return rets;
 }
-
 
 /**
  * mei_cl_complete - processes completed operation for a client
@@ -1804,7 +2099,7 @@
 		break;
 
 	case MEI_FOP_READ:
-		list_add_tail(&cb->list, &cl->rd_completed);
+		mei_cl_add_rd_completed(cl, cb);
 		if (!mei_cl_is_fixed_address(cl) &&
 		    !WARN_ON(!cl->rx_flow_ctrl_creds))
 			cl->rx_flow_ctrl_creds--;