forked from ~ljy/RK356X_SDK_RELEASE

hc
2024-05-10 61598093bbdd283a7edc367d900f223070ead8d2
kernel/drivers/char/ipmi/ipmi_msghandler.c
@@ -11,6 +11,9 @@
  * Copyright 2002 MontaVista Software Inc.
  */
 
+#define pr_fmt(fmt) "IPMI message handler: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/poll.h>
@@ -31,37 +34,17 @@
 #include <linux/uuid.h>
 #include <linux/nospec.h>
 #include <linux/vmalloc.h>
-
-#define PFX "IPMI message handler: "
+#include <linux/delay.h>
 
 #define IPMI_DRIVER_VERSION "39.2"
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(unsigned long);
+static void smi_recv_tasklet(struct tasklet_struct *t);
 static void handle_new_recv_msgs(struct ipmi_smi *intf);
 static void need_waiter(struct ipmi_smi *intf);
 static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);
-
-#ifdef DEBUG
-static void ipmi_debug_msg(const char *title, unsigned char *data,
-			   unsigned int len)
-{
-	int i, pos;
-	char buf[100];
-
-	pos = snprintf(buf, sizeof(buf), "%s: ", title);
-	for (i = 0; i < len; i++)
-		pos += snprintf(buf + pos, sizeof(buf) - pos,
-				" %2.2x", data[i]);
-	pr_debug("%s\n", buf);
-}
-#else
-static void ipmi_debug_msg(const char *title, unsigned char *data,
-			   unsigned int len)
-{ }
-#endif
 
 static bool initialized;
 static bool drvregistered;
@@ -78,6 +61,7 @@
 #else
 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
 #endif
+
 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
 
 static int panic_op_write_handler(const char *val,
@@ -107,19 +91,19 @@
 {
	switch (ipmi_send_panic_event) {
	case IPMI_SEND_PANIC_EVENT_NONE:
-		strcpy(buffer, "none");
+		strcpy(buffer, "none\n");
		break;
 
	case IPMI_SEND_PANIC_EVENT:
-		strcpy(buffer, "event");
+		strcpy(buffer, "event\n");
		break;
 
	case IPMI_SEND_PANIC_EVENT_STRING:
-		strcpy(buffer, "string");
+		strcpy(buffer, "string\n");
		break;
 
	default:
-		strcpy(buffer, "???");
+		strcpy(buffer, "???\n");
		break;
	}
 
@@ -337,6 +321,7 @@
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
+	unsigned char cc; /* completion code */
 };
 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
 
@@ -536,9 +521,27 @@
	unsigned int waiting_events_count; /* How many events in queue? */
	char delivering_events;
	char event_msg_printed;
+
+	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;
-	int last_needs_timer;
+
+	spinlock_t watch_lock; /* For dealing with watch stuff below. */
+
+	/* How many users are waiting for commands? */
+	unsigned int command_waiters;
+
+	/* How many users are waiting for watchdogs? */
+	unsigned int watchdog_waiters;
+
+	/* How many users are waiting for message responses? */
+	unsigned int response_waiters;
+
+	/*
+	 * Tells what the lower layer has last been asked to watch for,
+	 * messages and/or watchdogs. Protected by watch_lock.
+	 */
+	unsigned int last_watch_mask;
 
	/*
	 * The event receiver for my BMC, only really used at panic
@@ -621,7 +624,9 @@
 
 static LIST_HEAD(ipmi_interfaces);
 static DEFINE_MUTEX(ipmi_interfaces_mutex);
-struct srcu_struct ipmi_interfaces_srcu;
+#define ipmi_interfaces_mutex_held() \
+	lockdep_is_held(&ipmi_interfaces_mutex)
+static struct srcu_struct ipmi_interfaces_srcu;
 
 /*
  * List of watchers that want to know when smi's are added and deleted.
@@ -890,12 +895,14 @@
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
-	} else if (!oops_in_progress) {
+	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't much meaning and has a deadlock
		 * risk. At this moment, simply skip it in that case.
		 */
+		ipmi_free_recv_msg(msg);
+	} else {
		int index;
		struct ipmi_user *user = acquire_ipmi_user(msg->user, &index);
 
@@ -930,6 +937,64 @@
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
+}
+
+static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+	unsigned long iflags;
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	spin_lock_irqsave(&intf->watch_lock, iflags);
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		intf->response_waiters++;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+		intf->watchdog_waiters++;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+		intf->command_waiters++;
+
+	if ((intf->last_watch_mask & flags) != flags) {
+		intf->last_watch_mask |= flags;
+		intf->handlers->set_need_watch(intf->send_info,
+					       intf->last_watch_mask);
+	}
+	spin_unlock_irqrestore(&intf->watch_lock, iflags);
+}
+
+static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
+{
+	unsigned long iflags;
+
+	if (!intf->handlers->set_need_watch)
+		return;
+
+	spin_lock_irqsave(&intf->watch_lock, iflags);
+	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
+		intf->response_waiters--;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
+		intf->watchdog_waiters--;
+
+	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
+		intf->command_waiters--;
+
+	flags = 0;
+	if (intf->response_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
+	if (intf->watchdog_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
+	if (intf->command_waiters)
+		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;
+
+	if (intf->last_watch_mask != flags) {
+		intf->last_watch_mask = flags;
+		intf->handlers->set_need_watch(intf->send_info,
+					       intf->last_watch_mask);
+	}
+	spin_unlock_irqrestore(&intf->watch_lock, iflags);
 }
 
 /*
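
For readers following the new watch bookkeeping above: smi_add_watch()/smi_remove_watch() keep one counter per watch category and only call the lower layer's set_need_watch() when the combined mask actually changes. Below is a minimal, self-contained userspace model of that counting scheme -- an illustrative sketch, not part of the patch; the flag values and the notify() stub stand in for the real IPMI_WATCH_MASK_* constants and driver callback.

/*
 * Illustrative sketch only: userspace model of the per-category watch
 * counting, with stand-in flag values and a printf in place of the
 * driver's set_need_watch() callback.
 */
#include <stdio.h>

#define WATCH_MESSAGES (1 << 0)
#define WATCH_WATCHDOG (1 << 1)
#define WATCH_COMMANDS (1 << 2)

static unsigned int response_waiters, watchdog_waiters, command_waiters;
static unsigned int last_watch_mask;

static void notify(unsigned int mask)	/* stands in for set_need_watch() */
{
	printf("set_need_watch(0x%x)\n", mask);
}

static void add_watch(unsigned int flags)
{
	if (flags & WATCH_MESSAGES)
		response_waiters++;
	if (flags & WATCH_WATCHDOG)
		watchdog_waiters++;
	if (flags & WATCH_COMMANDS)
		command_waiters++;

	/* Only notify when the effective mask gains a bit. */
	if ((last_watch_mask & flags) != flags) {
		last_watch_mask |= flags;
		notify(last_watch_mask);
	}
}

static void remove_watch(unsigned int flags)
{
	unsigned int mask = 0;

	if (flags & WATCH_MESSAGES)
		response_waiters--;
	if (flags & WATCH_WATCHDOG)
		watchdog_waiters--;
	if (flags & WATCH_COMMANDS)
		command_waiters--;

	/* Recompute the mask from the remaining waiters. */
	if (response_waiters)
		mask |= WATCH_MESSAGES;
	if (watchdog_waiters)
		mask |= WATCH_WATCHDOG;
	if (command_waiters)
		mask |= WATCH_COMMANDS;

	if (last_watch_mask != mask) {
		last_watch_mask = mask;
		notify(mask);
	}
}

int main(void)
{
	add_watch(WATCH_MESSAGES);	/* notifies: mask goes 0 -> MESSAGES */
	add_watch(WATCH_MESSAGES);	/* silent: mask unchanged */
	remove_watch(WATCH_MESSAGES);	/* silent: one waiter remains */
	remove_watch(WATCH_MESSAGES);	/* notifies: mask drops back to 0 */
	return 0;
}
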
@@ -975,6 +1040,7 @@
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
@@ -1013,6 +1079,7 @@
	    && (ipmi_addr_equal(addr, &msg->addr))) {
		*recv_msg = msg;
		intf->seq_table[seq].inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		rv = 0;
	}
 }
@@ -1074,6 +1141,7 @@
		struct seq_table *ent = &intf->seq_table[seq];
 
		ent->inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
@@ -1084,7 +1152,6 @@
 
	return rv;
 }
-
 
 static void free_user_work(struct work_struct *work)
 {
@@ -1162,11 +1229,9 @@
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
-	if (handler->ipmi_watchdog_pretimeout) {
+	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
-		if (atomic_inc_return(&intf->event_waiters) == 1)
-			need_waiter(intf);
-	}
+		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
	*user = new_user;
	return 0;
@@ -1219,6 +1284,7 @@
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
+	struct module *owner;
 
	if (!acquire_ipmi_user(user, &i)) {
		/*
@@ -1238,7 +1304,7 @@
		user->handler->shutdown(user->handler_data);
 
	if (user->handler->ipmi_watchdog_pretimeout)
-		atomic_dec(&intf->event_waiters);
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);
 
	if (user->gets_events)
		atomic_dec(&intf->event_waiters);
@@ -1251,6 +1317,7 @@
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
+			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
@@ -1263,7 +1330,8 @@
	 * synchronize_srcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
-	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
@@ -1278,8 +1346,9 @@
		kfree(rcvr);
	}
 
+	owner = intf->owner;
	kref_put(&intf->refcount, intf_free);
-	module_put(intf->owner);
+	module_put(owner);
 }
 
 int ipmi_destroy_user(struct ipmi_user *user)
@@ -1509,8 +1578,7 @@
		list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
-			dev_warn(intf->si_dev,
-				 PFX "Event queue no longer full\n");
+			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}
 
@@ -1542,7 +1610,8 @@
 {
	struct cmd_rcvr *rcvr;
 
-	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
@@ -1557,7 +1626,8 @@
 {
	struct cmd_rcvr *rcvr;
 
-	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
+	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
+				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
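
The cmd_rcvrs traversals above now pass a lockdep expression as the optional fourth argument of list_for_each_entry_rcu(), telling RCU-lockdep that holding cmd_rcvrs_mutex is a legitimate alternative to rcu_read_lock() for this walk. A kernel-context sketch of the same pattern follows (illustrative only, with made-up struct and lock names, assuming a list read under RCU but modified under a mutex):

/*
 * Illustrative sketch only (kernel context, not a standalone program).
 * The fourth argument of list_for_each_entry_rcu() suppresses the
 * RCU-lockdep splat when the caller holds the update-side mutex instead
 * of rcu_read_lock().
 */
#include <linux/rculist.h>
#include <linux/mutex.h>

struct item {
	struct list_head link;
	int key;
};

static LIST_HEAD(items);
static DEFINE_MUTEX(items_mutex);

/* Safe when called either under rcu_read_lock() or with items_mutex held. */
static struct item *find_item(int key)
{
	struct item *it;

	list_for_each_entry_rcu(it, &items, link,
				lockdep_is_held(&items_mutex)) {
		if (it->key == key)
			return it;
	}
	return NULL;
}
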
@@ -1595,8 +1665,7 @@
		goto out_unlock;
	}
 
-	if (atomic_inc_return(&intf->event_waiters) == 1)
-		need_waiter(intf);
+	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
 
	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
 
@@ -1646,7 +1715,7 @@
	synchronize_rcu();
	release_ipmi_user(user, index);
	while (rcvrs) {
-		atomic_dec(&intf->event_waiters);
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
@@ -1763,22 +1832,19 @@
	return smi_msg;
 }
 
-
 static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
 {
	int run_to_completion = intf->run_to_completion;
+	unsigned long flags = 0;
 
-	if (run_to_completion) {
-		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
-	} else {
-		unsigned long flags;
-
+	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
-		smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
+
+	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
-	}
 
	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
@@ -2158,7 +2224,8 @@
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
-			ipmi_free_recv_msg(recv_msg);
+			if (!supplied_recv)
+				ipmi_free_recv_msg(recv_msg);
			rv = -ENOMEM;
			goto out;
		}
@@ -2202,7 +2269,7 @@
		ipmi_free_smi_msg(smi_msg);
		ipmi_free_recv_msg(recv_msg);
	} else {
-		ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size);
+		pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data);
 
		smi_send(intf, intf->handlers, smi_msg, priority);
	}
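
The pr_debug() line above (and the similar "Recv:"/"Resend:"/"Invalid command:" lines later in the patch) replace the removed ipmi_debug_msg() helper with printk's %*ph extension, which hex-dumps a small buffer (up to 64 bytes): the field width carries the byte count and the pointer follows. A short kernel-context sketch, illustrative only and not standard printf:

/*
 * Illustrative sketch only (kernel printk extension, not userspace printf).
 */
#include <linux/printk.h>

static void dump_example(void)
{
	unsigned char data[4] = { 0x18, 0x00, 0x01, 0x9c };

	/* With dynamic debug enabled, prints "Send: 18 00 01 9c". */
	pr_debug("Send: %*ph\n", 4, data);
}
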
@@ -2312,16 +2379,17 @@
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
		dev_warn(intf->si_dev,
-			 PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
-			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
+			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
+			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
		return;
	}
 
	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
	if (rv) {
-		dev_warn(intf->si_dev,
-			 PFX "device id demangle failed: %d\n", rv);
+		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
+		/* record completion code when error */
+		intf->bmc->cc = msg->msg.data[0];
		intf->bmc->dyn_id_set = 0;
	} else {
		/*
@@ -2367,23 +2435,39 @@
 static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
 {
	int rv;
-
-	bmc->dyn_id_set = 2;
+	unsigned int retry_count = 0;
 
	intf->null_user_handler = bmc_device_id_handler;
 
+retry:
+	bmc->cc = 0;
+	bmc->dyn_id_set = 2;
+
	rv = send_get_device_id_cmd(intf);
	if (rv)
-		return rv;
+		goto out_reset_handler;
 
	wait_event(intf->waitq, bmc->dyn_id_set != 2);
 
-	if (!bmc->dyn_id_set)
+	if (!bmc->dyn_id_set) {
+		if ((bmc->cc == IPMI_DEVICE_IN_FW_UPDATE_ERR
+		     || bmc->cc == IPMI_DEVICE_IN_INIT_ERR
+		     || bmc->cc == IPMI_NOT_IN_MY_STATE_ERR)
+		    && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
+			msleep(500);
+			dev_warn(intf->si_dev,
+				 "BMC returned 0x%2.2x, retry get bmc device id\n",
+				 bmc->cc);
+			goto retry;
+		}
+
		rv = -EIO; /* Something went wrong in the fetch. */
+	}
 
	/* dyn_id_set makes the id data available. */
	smp_rmb();
 
+out_reset_handler:
	intf->null_user_handler = NULL;
 
	return rv;
@@ -2703,7 +2787,7 @@
	if (!guid_set)
		return -ENOENT;
 
-	return snprintf(buf, 38, "%pUl\n", guid.b);
+	return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid);
 }
 static DEVICE_ATTR_RO(guid);
 
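
The guid_show() change above spells the magic 38 out as UUID_STRING_LEN + 1 + 1 (36 formatted characters plus a newline and the terminating NUL) and passes the guid_t by pointer, which is what the %pUl printk specifier expects. A kernel-context sketch of the same call, illustrative only and with a made-up function name:

/*
 * Illustrative sketch only (kernel context). %pUl formats a guid_t as
 * "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" (UUID_STRING_LEN == 36 chars).
 */
#include <linux/uuid.h>
#include <linux/kernel.h>

static ssize_t guid_example(char *buf, const guid_t *guid)
{
	/* 36 characters + '\n' + '\0' = UUID_STRING_LEN + 1 + 1 bytes. */
	return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", guid);
}
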
@@ -2758,9 +2842,9 @@
	.groups = bmc_dev_attr_groups,
 };
 
-static int __find_bmc_guid(struct device *dev, void *data)
+static int __find_bmc_guid(struct device *dev, const void *data)
 {
-	guid_t *guid = data;
+	const guid_t *guid = data;
	struct bmc_device *bmc;
	int rv;
 
@@ -2796,9 +2880,9 @@
	unsigned char device_id;
 };
 
-static int __find_bmc_prod_dev_id(struct device *dev, void *data)
+static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
 {
-	struct prod_dev_id *cid = data;
+	const struct prod_dev_id *cid = data;
	struct bmc_device *bmc;
	int rv;
 
@@ -2944,8 +3028,7 @@
	mutex_unlock(&bmc->dyn_mutex);
 
	dev_info(intf->si_dev,
-		 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
-		 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
+		 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
		 bmc->id.manufacturer_id,
		 bmc->id.product_id,
		 bmc->id.device_id);
@@ -2987,7 +3070,7 @@
	rv = platform_device_register(&bmc->pdev);
	if (rv) {
		dev_err(intf->si_dev,
-			PFX " Unable to register bmc device: %d\n",
+			"Unable to register bmc device: %d\n",
			rv);
		goto out_list_del;
	}
@@ -3005,8 +3088,7 @@
	 */
	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
	if (rv) {
-		dev_err(intf->si_dev,
-			PFX "Unable to create bmc symlink: %d\n", rv);
+		dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
		goto out_put_bmc;
	}
 
@@ -3015,18 +3097,16 @@
	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
	if (!intf->my_dev_name) {
		rv = -ENOMEM;
-		dev_err(intf->si_dev,
-			PFX "Unable to allocate link from BMC: %d\n", rv);
+		dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
+			rv);
		goto out_unlink1;
	}
 
	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
-		kfree(intf->my_dev_name);
-		intf->my_dev_name = NULL;
-		dev_err(intf->si_dev,
-			PFX "Unable to create symlink to bmc: %d\n", rv);
+		dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
+			rv);
		goto out_free_my_dev_name;
	}
 
@@ -3107,15 +3187,15 @@
		goto out;
	}
 
-	if (msg->msg.data_len < 17) {
+	if (msg->msg.data_len < UUID_SIZE + 1) {
		bmc->dyn_guid_set = 0;
		dev_warn(intf->si_dev,
-			 PFX "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n",
-			 msg->msg.data_len);
+			 "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n",
+			 msg->msg.data_len, UUID_SIZE + 1);
		goto out;
	}
 
-	memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
+	import_guid(&bmc->fetch_guid, msg->msg.data + 1);
	/*
	 * Make sure the guid data is available before setting
	 * dyn_guid_set.
@@ -3190,7 +3270,6 @@
	/* It's the one we want */
	if (msg->msg.data[0] != 0) {
		/* Got an error from the channel, just go on. */
-
		if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
			/*
			 * If the MC does not support this
@@ -3234,7 +3313,7 @@
		if (rv) {
			/* Got an error somehow, just give up. */
			dev_warn(intf->si_dev,
-				 PFX "Error sending channel information for channel %d: %d\n",
+				 "Error sending channel information for channel %d: %d\n",
				 intf->curr_channel, rv);
 
			intf->channel_list = intf->wchannels + set;
@@ -3274,6 +3353,7 @@
		dev_warn(intf->si_dev,
			 "Error sending channel information for channel 0, %d\n",
			 rv);
+		intf->null_user_handler = NULL;
		return -EIO;
	}
 
@@ -3375,14 +3455,14 @@
	intf->curr_seq = 0;
	spin_lock_init(&intf->waiting_rcv_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
-	tasklet_init(&intf->recv_tasklet,
-		     smi_recv_tasklet,
-		     (unsigned long) intf);
+	tasklet_setup(&intf->recv_tasklet,
+		      smi_recv_tasklet);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->xmit_msgs_lock);
	INIT_LIST_HEAD(&intf->xmit_msgs);
	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
	spin_lock_init(&intf->events_lock);
+	spin_lock_init(&intf->watch_lock);
	atomic_set(&intf->event_waiters, 0);
	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
	INIT_LIST_HEAD(&intf->waiting_events);
@@ -3398,7 +3478,8 @@
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
-	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
+	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link,
+				ipmi_interfaces_mutex_held()) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
@@ -3461,12 +3542,16 @@
				  struct ipmi_smi_msg *msg,
				  unsigned char err)
 {
+	int rv;
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = err;
	msg->rsp_size = 3;
-	/* It's an error, so it will never requeue, no need to check return. */
-	handle_one_recv_msg(intf, msg);
+
+	/* This will never requeue, but it may ask us to free the message. */
+	rv = handle_one_recv_msg(intf, msg);
+	if (rv == 0)
+		ipmi_free_smi_msg(msg);
 }
 
 static void cleanup_smi_msgs(struct ipmi_smi *intf)
@@ -3671,7 +3756,7 @@
	msg->data[10] = ipmb_checksum(&msg->data[6], 4);
	msg->data_size = 11;
 
-	ipmi_debug_msg("Invalid command:", msg->data, msg->data_size);
+	pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data);
 
	rcu_read_lock();
	if (!intf->in_shutdown) {
@@ -4107,7 +4192,7 @@
		 * message.
		 */
		dev_warn(intf->si_dev,
-			 PFX "Event queue full, discarding incoming events\n");
+			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}
 
@@ -4126,7 +4211,7 @@
	recv_msg = (struct ipmi_recv_msg *) msg->user_data;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
-			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vender for assistance\n");
+			 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
		return 0;
	}
 
@@ -4158,11 +4243,57 @@
	int requeue;
	int chan;
 
-	ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size);
-	if (msg->rsp_size < 2) {
+	pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp);
+
+	if ((msg->data_size >= 2)
+	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
+	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
+	    && (msg->user_data == NULL)) {
+
+		if (intf->in_shutdown)
+			goto free_msg;
+
+		/*
+		 * This is the local response to a command send, start
+		 * the timer for these. The user_data will not be
+		 * NULL if this is a response send, and we will let
+		 * response sends just go through.
+		 */
+
+		/*
+		 * Check for errors, if we get certain errors (ones
+		 * that mean basically we can try again later), we
+		 * ignore them and start the timer. Otherwise we
+		 * report the error immediately.
+		 */
+		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
+		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
+		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
+		    && (msg->rsp[2] != IPMI_BUS_ERR)
+		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
+			int ch = msg->rsp[3] & 0xf;
+			struct ipmi_channel *chans;
+
+			/* Got an error sending the message, handle it. */
+
+			chans = READ_ONCE(intf->channel_list)->c;
+			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
+			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
+				ipmi_inc_stat(intf, sent_lan_command_errs);
+			else
+				ipmi_inc_stat(intf, sent_ipmb_command_errs);
+			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
+		} else
+			/* The message was sent, start the timer. */
+			intf_start_seq_timer(intf, msg->msgid);
+free_msg:
+		requeue = 0;
+		goto out;
+
+	} else if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn(intf->si_dev,
-			 PFX "BMC returned to small a message for netfn %x cmd %x, got %d bytes\n",
+			 "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
			 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
 
		/* Generate an error response for the message. */
@@ -4177,7 +4308,7 @@
		 * marginally correct.
		 */
		dev_warn(intf->si_dev,
-			 PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
+			 "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
			 (msg->data[0] >> 2) | 1, msg->data[1],
			 msg->rsp[0] >> 2, msg->rsp[1]);
 
@@ -4364,10 +4495,10 @@
	}
 }
 
-static void smi_recv_tasklet(unsigned long val)
+static void smi_recv_tasklet(struct tasklet_struct *t)
 {
	unsigned long flags = 0; /* keep us warning-free. */
-	struct ipmi_smi *intf = (struct ipmi_smi *) val;
+	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;
 
@@ -4398,6 +4529,7 @@
			intf->curr_msg = newmsg;
		}
	}
+
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	if (newmsg)
@@ -4415,62 +4547,16 @@
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;
 
-	if ((msg->data_size >= 2)
-	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
-	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
-	    && (msg->user_data == NULL)) {
-
-		if (intf->in_shutdown)
-			goto free_msg;
-
-		/*
-		 * This is the local response to a command send, start
-		 * the timer for these. The user_data will not be
-		 * NULL if this is a response send, and we will let
-		 * response sends just go through.
-		 */
-
-		/*
-		 * Check for errors, if we get certain errors (ones
-		 * that mean basically we can try again later), we
-		 * ignore them and start the timer. Otherwise we
-		 * report the error immediately.
-		 */
-		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
-		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
-		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
-		    && (msg->rsp[2] != IPMI_BUS_ERR)
-		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
-			int ch = msg->rsp[3] & 0xf;
-			struct ipmi_channel *chans;
-
-			/* Got an error sending the message, handle it. */
-
-			chans = READ_ONCE(intf->channel_list)->c;
-			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
-			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
-				ipmi_inc_stat(intf, sent_lan_command_errs);
-			else
-				ipmi_inc_stat(intf, sent_ipmb_command_errs);
-			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
-		} else
-			/* The message was sent, start the timer. */
-			intf_start_seq_timer(intf, msg->msgid);
-
-free_msg:
-		ipmi_free_smi_msg(msg);
-	} else {
-		/*
-		 * To preserve message order, we keep a queue and deliver from
-		 * a tasklet.
-		 */
-		if (!run_to_completion)
-			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
-		list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
-		if (!run_to_completion)
-			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
-					       flags);
-	}
+	/*
+	 * To preserve message order, we keep a queue and deliver from
+	 * a tasklet.
+	 */
+	if (!run_to_completion)
+		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+	if (!run_to_completion)
+		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);
 
	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
@@ -4484,7 +4570,7 @@
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 
	if (run_to_completion)
-		smi_recv_tasklet((unsigned long) intf);
+		smi_recv_tasklet(&intf->recv_tasklet);
	else
		tasklet_schedule(&intf->recv_tasklet);
 }
@@ -4516,7 +4602,7 @@
	smi_msg->data_size = recv_msg->msg.data_len;
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
 
-	ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size);
+	pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data);
 
	return smi_msg;
 }
@@ -4525,7 +4611,7 @@
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
-			      unsigned int *waiting_msgs)
+			      bool *need_timer)
 {
	struct ipmi_recv_msg *msg;
 
@@ -4537,13 +4623,14 @@
 
	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
-		(*waiting_msgs)++;
+		*need_timer = true;
		return;
	}
 
	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
+		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
@@ -4556,7 +4643,7 @@
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */
 
-		(*waiting_msgs)++;
+		*need_timer = true;
 
		/*
		 * Start with the max timer, set to normal timer after
@@ -4601,20 +4688,20 @@
	}
 }
 
-static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf,
-					 unsigned long timeout_period)
+static bool ipmi_timeout_handler(struct ipmi_smi *intf,
+				 unsigned long timeout_period)
 {
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long flags;
	int i;
-	unsigned int waiting_msgs = 0;
+	bool need_timer = false;
 
	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
-			waiting_msgs++;
+			need_timer = true;
		}
	}
 
@@ -4634,7 +4721,7 @@
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
-				  &flags, &waiting_msgs);
+				  &flags, &need_timer);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
	list_for_each_entry_safe(msg, msg2, &timeouts, link)
@@ -4665,7 +4752,7 @@
 
	tasklet_schedule(&intf->recv_tasklet);
 
-	return waiting_msgs;
+	return need_timer;
 }
 
 static void ipmi_request_event(struct ipmi_smi *intf)
@@ -4685,37 +4772,28 @@
 static void ipmi_timeout(struct timer_list *unused)
 {
	struct ipmi_smi *intf;
-	int nt = 0, index;
+	bool need_timer = false;
+	int index;
 
	if (atomic_read(&stop_operation))
		return;
 
	index = srcu_read_lock(&ipmi_interfaces_srcu);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		int lnt = 0;
-
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
-			lnt++;
+			need_timer = true;
		}
 
-		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
-
-		lnt = !!lnt;
-		if (lnt != intf->last_needs_timer &&
-		    intf->handlers->set_need_watch)
-			intf->handlers->set_need_watch(intf->send_info, lnt);
-		intf->last_needs_timer = lnt;
-
-		nt += lnt;
+		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	srcu_read_unlock(&ipmi_interfaces_srcu, index);
 
-	if (nt)
+	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
 
@@ -4732,7 +4810,9 @@
 static void free_smi_msg(struct ipmi_smi_msg *msg)
 {
	atomic_dec(&smi_msg_inuse_count);
-	kfree(msg);
+	/* Try to keep as much stuff out of the panic path as possible. */
+	if (!oops_in_progress)
+		kfree(msg);
 }
 
 struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
@@ -4751,7 +4831,9 @@
 static void free_recv_msg(struct ipmi_recv_msg *msg)
 {
	atomic_dec(&recv_msg_inuse_count);
-	kfree(msg);
+	/* Try to keep as much stuff out of the panic path as possible. */
+	if (!oops_in_progress)
+		kfree(msg);
 }
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
@@ -4769,7 +4851,7 @@
 
 void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
 {
-	if (msg->user)
+	if (msg->user && !oops_in_progress)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
 }
@@ -5144,7 +5226,7 @@
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
-		atomic_inc(&stop_operation);
+		atomic_set(&stop_operation, 1);
		del_timer_sync(&ipmi_timer);
 
		initialized = false;
@@ -5152,10 +5234,11 @@
		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
-			pr_warn(PFX "SMI message count %d at exit\n", count);
+			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
-			pr_warn(PFX "recv message count %d at exit\n", count);
+			pr_warn("recv message count %d at exit\n", count);
+
		cleanup_srcu_struct(&ipmi_interfaces_srcu);
	}
	if (drvregistered)
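
A closing note on the tasklet conversion earlier in this patch (tasklet_init() to tasklet_setup(), and smi_recv_tasklet() now taking a struct tasklet_struct *): the callback recovers its containing object with from_tasklet(), a container_of() wrapper, instead of casting an unsigned long cookie. A kernel-context sketch of that pattern, illustrative only and with made-up names:

/*
 * Illustrative sketch only (kernel context), showing the tasklet_setup()/
 * from_tasklet() pattern used above for intf->recv_tasklet.
 */
#include <linux/interrupt.h>

struct my_dev {
	struct tasklet_struct rx_tasklet;
	int pending;
};

static void rx_tasklet_fn(struct tasklet_struct *t)
{
	/* from_tasklet() is container_of() keyed on the tasklet member. */
	struct my_dev *dev = from_tasklet(dev, t, rx_tasklet);

	dev->pending = 0;
}

static void my_dev_init(struct my_dev *dev)
{
	tasklet_setup(&dev->rx_tasklet, rx_tasklet_fn);
}
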