From b22da3d8526a935aa31e086e63f60ff3246cb61c Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Sat, 09 Dec 2023 07:24:11 +0000
Subject: [PATCH] add stmac read mac from eeprom

---
 kernel/drivers/char/ipmi/ipmi_msghandler.c | 443 ++++++++++++++++++++++++++++++++----------------------
 1 files changed, 260 insertions(+), 183 deletions(-)

diff --git a/kernel/drivers/char/ipmi/ipmi_msghandler.c b/kernel/drivers/char/ipmi/ipmi_msghandler.c
index 4cf3ef4..05e7339 100644
--- a/kernel/drivers/char/ipmi/ipmi_msghandler.c
+++ b/kernel/drivers/char/ipmi/ipmi_msghandler.c
@@ -11,6 +11,9 @@
  * Copyright 2002 MontaVista Software Inc.
  */
 
+#define pr_fmt(fmt) "IPMI message handler: " fmt
+#define dev_fmt(fmt) pr_fmt(fmt)
+
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/poll.h>
@@ -31,37 +34,17 @@
 #include <linux/uuid.h>
 #include <linux/nospec.h>
 #include <linux/vmalloc.h>
-
-#define PFX "IPMI message handler: "
+#include <linux/delay.h>
 
 #define IPMI_DRIVER_VERSION "39.2"
 
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(unsigned long);
+static void smi_recv_tasklet(struct tasklet_struct *t);
 static void handle_new_recv_msgs(struct ipmi_smi *intf);
 static void need_waiter(struct ipmi_smi *intf);
 static int handle_one_recv_msg(struct ipmi_smi *intf,
 			       struct ipmi_smi_msg *msg);
-
-#ifdef DEBUG
-static void ipmi_debug_msg(const char *title, unsigned char *data,
-			   unsigned int len)
-{
-	int i, pos;
-	char buf[100];
-
-	pos = snprintf(buf, sizeof(buf), "%s: ", title);
-	for (i = 0; i < len; i++)
-		pos += snprintf(buf + pos, sizeof(buf) - pos,
-				" %2.2x", data[i]);
-	pr_debug("%s\n", buf);
-}
-#else
-static void ipmi_debug_msg(const char *title, unsigned char *data,
-			   unsigned int len)
-{ }
-#endif
 
 static bool initialized;
 static bool drvregistered;
@@ -78,6 +61,7 @@
 #else
 #define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
 #endif
+
 static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
 
 static int panic_op_write_handler(const char *val,
@@ -107,19 +91,19 @@
 {
 	switch (ipmi_send_panic_event) {
 	case IPMI_SEND_PANIC_EVENT_NONE:
-		strcpy(buffer, "none");
+		strcpy(buffer, "none\n");
 		break;
 
 	case IPMI_SEND_PANIC_EVENT:
-		strcpy(buffer, "event");
+		strcpy(buffer, "event\n");
 		break;
 
 	case IPMI_SEND_PANIC_EVENT_STRING:
-		strcpy(buffer, "string");
+		strcpy(buffer, "string\n");
 		break;
 
 	default:
-		strcpy(buffer, "???");
+		strcpy(buffer, "???\n");
 		break;
 	}
 
@@ -337,6 +321,7 @@
 	int			dyn_guid_set;
 	struct kref		usecount;
 	struct work_struct	remove_work;
+	unsigned char		cc; /* completion code */
 };
 #define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
 
@@ -536,9 +521,27 @@
 	unsigned int		waiting_events_count; /* How many events in queue? */
 	char			delivering_events;
 	char			event_msg_printed;
+
+	/* How many users are waiting for events? */
 	atomic_t		event_waiters;
 	unsigned int		ticks_to_req_ev;
-	int			last_needs_timer;
+
+	spinlock_t		watch_lock; /* For dealing with watch stuff below. */
+
+	/* How many users are waiting for commands? */
+	unsigned int		command_waiters;
+
+	/* How many users are waiting for watchdogs? */
+	unsigned int		watchdog_waiters;
+
+	/* How many users are waiting for message responses? */
+	unsigned int		response_waiters;
+
+	/*
+	 * Tells what the lower layer has last been asked to watch for,
+	 * messages and/or watchdogs. Protected by watch_lock.
+ */ + unsigned int last_watch_mask; /* * The event receiver for my BMC, only really used at panic @@ -621,7 +624,9 @@ static LIST_HEAD(ipmi_interfaces); static DEFINE_MUTEX(ipmi_interfaces_mutex); -struct srcu_struct ipmi_interfaces_srcu; +#define ipmi_interfaces_mutex_held() \ + lockdep_is_held(&ipmi_interfaces_mutex) +static struct srcu_struct ipmi_interfaces_srcu; /* * List of watchers that want to know when smi's are added and deleted. @@ -890,12 +895,14 @@ rv = -EINVAL; } ipmi_free_recv_msg(msg); - } else if (!oops_in_progress) { + } else if (oops_in_progress) { /* * If we are running in the panic context, calling the * receive handler doesn't much meaning and has a deadlock * risk. At this moment, simply skip it in that case. */ + ipmi_free_recv_msg(msg); + } else { int index; struct ipmi_user *user = acquire_ipmi_user(msg->user, &index); @@ -930,6 +937,64 @@ msg->msg.data_len = 1; msg->msg.data = msg->msg_data; deliver_local_response(intf, msg); +} + +static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags) +{ + unsigned long iflags; + + if (!intf->handlers->set_need_watch) + return; + + spin_lock_irqsave(&intf->watch_lock, iflags); + if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) + intf->response_waiters++; + + if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) + intf->watchdog_waiters++; + + if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) + intf->command_waiters++; + + if ((intf->last_watch_mask & flags) != flags) { + intf->last_watch_mask |= flags; + intf->handlers->set_need_watch(intf->send_info, + intf->last_watch_mask); + } + spin_unlock_irqrestore(&intf->watch_lock, iflags); +} + +static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags) +{ + unsigned long iflags; + + if (!intf->handlers->set_need_watch) + return; + + spin_lock_irqsave(&intf->watch_lock, iflags); + if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES) + intf->response_waiters--; + + if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG) + intf->watchdog_waiters--; + + if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS) + intf->command_waiters--; + + flags = 0; + if (intf->response_waiters) + flags |= IPMI_WATCH_MASK_CHECK_MESSAGES; + if (intf->watchdog_waiters) + flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG; + if (intf->command_waiters) + flags |= IPMI_WATCH_MASK_CHECK_COMMANDS; + + if (intf->last_watch_mask != flags) { + intf->last_watch_mask = flags; + intf->handlers->set_need_watch(intf->send_info, + intf->last_watch_mask); + } + spin_unlock_irqrestore(&intf->watch_lock, iflags); } /* @@ -975,6 +1040,7 @@ *seq = i; *seqid = intf->seq_table[i].seqid; intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ; + smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); need_waiter(intf); } else { rv = -EAGAIN; @@ -1013,6 +1079,7 @@ && (ipmi_addr_equal(addr, &msg->addr))) { *recv_msg = msg; intf->seq_table[seq].inuse = 0; + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); rv = 0; } } @@ -1074,6 +1141,7 @@ struct seq_table *ent = &intf->seq_table[seq]; ent->inuse = 0; + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); msg = ent->recv_msg; rv = 0; } @@ -1084,7 +1152,6 @@ return rv; } - static void free_user_work(struct work_struct *work) { @@ -1162,11 +1229,9 @@ spin_lock_irqsave(&intf->seq_lock, flags); list_add_rcu(&new_user->link, &intf->users); spin_unlock_irqrestore(&intf->seq_lock, flags); - if (handler->ipmi_watchdog_pretimeout) { + if (handler->ipmi_watchdog_pretimeout) /* User wants pretimeouts, so make sure to watch for them. 
*/ - if (atomic_inc_return(&intf->event_waiters) == 1) - need_waiter(intf); - } + smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); srcu_read_unlock(&ipmi_interfaces_srcu, index); *user = new_user; return 0; @@ -1238,7 +1303,7 @@ user->handler->shutdown(user->handler_data); if (user->handler->ipmi_watchdog_pretimeout) - atomic_dec(&intf->event_waiters); + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG); if (user->gets_events) atomic_dec(&intf->event_waiters); @@ -1251,6 +1316,7 @@ if (intf->seq_table[i].inuse && (intf->seq_table[i].recv_msg->user == user)) { intf->seq_table[i].inuse = 0; + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); ipmi_free_recv_msg(intf->seq_table[i].recv_msg); } } @@ -1263,7 +1329,8 @@ * synchronize_srcu()) then free everything in that list. */ mutex_lock(&intf->cmd_rcvrs_mutex); - list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, + lockdep_is_held(&intf->cmd_rcvrs_mutex)) { if (rcvr->user == user) { list_del_rcu(&rcvr->link); rcvr->next = rcvrs; @@ -1509,8 +1576,7 @@ list_move_tail(&msg->link, &msgs); intf->waiting_events_count = 0; if (intf->event_msg_printed) { - dev_warn(intf->si_dev, - PFX "Event queue no longer full\n"); + dev_warn(intf->si_dev, "Event queue no longer full\n"); intf->event_msg_printed = 0; } @@ -1542,7 +1608,8 @@ { struct cmd_rcvr *rcvr; - list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, + lockdep_is_held(&intf->cmd_rcvrs_mutex)) { if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) && (rcvr->chans & (1 << chan))) return rcvr; @@ -1557,7 +1624,8 @@ { struct cmd_rcvr *rcvr; - list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) { + list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link, + lockdep_is_held(&intf->cmd_rcvrs_mutex)) { if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd) && (rcvr->chans & chans)) return 0; @@ -1595,8 +1663,7 @@ goto out_unlock; } - if (atomic_inc_return(&intf->event_waiters) == 1) - need_waiter(intf); + smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); list_add_rcu(&rcvr->link, &intf->cmd_rcvrs); @@ -1646,7 +1713,7 @@ synchronize_rcu(); release_ipmi_user(user, index); while (rcvrs) { - atomic_dec(&intf->event_waiters); + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS); rcvr = rcvrs; rcvrs = rcvr->next; kfree(rcvr); @@ -1763,22 +1830,19 @@ return smi_msg; } - static void smi_send(struct ipmi_smi *intf, const struct ipmi_smi_handlers *handlers, struct ipmi_smi_msg *smi_msg, int priority) { int run_to_completion = intf->run_to_completion; + unsigned long flags = 0; - if (run_to_completion) { - smi_msg = smi_add_send_msg(intf, smi_msg, priority); - } else { - unsigned long flags; - + if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); - smi_msg = smi_add_send_msg(intf, smi_msg, priority); + smi_msg = smi_add_send_msg(intf, smi_msg, priority); + + if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); - } if (smi_msg) handlers->sender(intf->send_info, smi_msg); @@ -2158,7 +2222,8 @@ else { smi_msg = ipmi_alloc_smi_msg(); if (smi_msg == NULL) { - ipmi_free_recv_msg(recv_msg); + if (!supplied_recv) + ipmi_free_recv_msg(recv_msg); rv = -ENOMEM; goto out; } @@ -2202,7 +2267,7 @@ ipmi_free_smi_msg(smi_msg); ipmi_free_recv_msg(recv_msg); } else { - ipmi_debug_msg("Send", smi_msg->data, smi_msg->data_size); + pr_debug("Send: %*ph\n", smi_msg->data_size, smi_msg->data); smi_send(intf, intf->handlers, smi_msg, priority); } @@ -2312,16 +2377,17 
@@ || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) { dev_warn(intf->si_dev, - PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", - msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); + "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n", + msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd); return; } rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd, msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id); if (rv) { - dev_warn(intf->si_dev, - PFX "device id demangle failed: %d\n", rv); + dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv); + /* record completion code when error */ + intf->bmc->cc = msg->msg.data[0]; intf->bmc->dyn_id_set = 0; } else { /* @@ -2367,23 +2433,39 @@ static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc) { int rv; - - bmc->dyn_id_set = 2; + unsigned int retry_count = 0; intf->null_user_handler = bmc_device_id_handler; +retry: + bmc->cc = 0; + bmc->dyn_id_set = 2; + rv = send_get_device_id_cmd(intf); if (rv) - return rv; + goto out_reset_handler; wait_event(intf->waitq, bmc->dyn_id_set != 2); - if (!bmc->dyn_id_set) + if (!bmc->dyn_id_set) { + if ((bmc->cc == IPMI_DEVICE_IN_FW_UPDATE_ERR + || bmc->cc == IPMI_DEVICE_IN_INIT_ERR + || bmc->cc == IPMI_NOT_IN_MY_STATE_ERR) + && ++retry_count <= GET_DEVICE_ID_MAX_RETRY) { + msleep(500); + dev_warn(intf->si_dev, + "BMC returned 0x%2.2x, retry get bmc device id\n", + bmc->cc); + goto retry; + } + rv = -EIO; /* Something went wrong in the fetch. */ + } /* dyn_id_set makes the id data available. */ smp_rmb(); +out_reset_handler: intf->null_user_handler = NULL; return rv; @@ -2703,7 +2785,7 @@ if (!guid_set) return -ENOENT; - return snprintf(buf, 38, "%pUl\n", guid.b); + return snprintf(buf, UUID_STRING_LEN + 1 + 1, "%pUl\n", &guid); } static DEVICE_ATTR_RO(guid); @@ -2758,9 +2840,9 @@ .groups = bmc_dev_attr_groups, }; -static int __find_bmc_guid(struct device *dev, void *data) +static int __find_bmc_guid(struct device *dev, const void *data) { - guid_t *guid = data; + const guid_t *guid = data; struct bmc_device *bmc; int rv; @@ -2796,9 +2878,9 @@ unsigned char device_id; }; -static int __find_bmc_prod_dev_id(struct device *dev, void *data) +static int __find_bmc_prod_dev_id(struct device *dev, const void *data) { - struct prod_dev_id *cid = data; + const struct prod_dev_id *cid = data; struct bmc_device *bmc; int rv; @@ -2944,8 +3026,7 @@ mutex_unlock(&bmc->dyn_mutex); dev_info(intf->si_dev, - "ipmi: interfacing existing BMC (man_id: 0x%6.6x," - " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", + "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", bmc->id.manufacturer_id, bmc->id.product_id, bmc->id.device_id); @@ -2987,7 +3068,7 @@ rv = platform_device_register(&bmc->pdev); if (rv) { dev_err(intf->si_dev, - PFX " Unable to register bmc device: %d\n", + "Unable to register bmc device: %d\n", rv); goto out_list_del; } @@ -3005,8 +3086,7 @@ */ rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc"); if (rv) { - dev_err(intf->si_dev, - PFX "Unable to create bmc symlink: %d\n", rv); + dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv); goto out_put_bmc; } @@ -3015,18 +3095,16 @@ intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num); if (!intf->my_dev_name) { rv = -ENOMEM; - dev_err(intf->si_dev, - PFX "Unable to allocate link from BMC: %d\n", rv); + dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n", + rv); goto out_unlink1; } rv = 
sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj, intf->my_dev_name); if (rv) { - kfree(intf->my_dev_name); - intf->my_dev_name = NULL; - dev_err(intf->si_dev, - PFX "Unable to create symlink to bmc: %d\n", rv); + dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n", + rv); goto out_free_my_dev_name; } @@ -3107,15 +3185,15 @@ goto out; } - if (msg->msg.data_len < 17) { + if (msg->msg.data_len < UUID_SIZE + 1) { bmc->dyn_guid_set = 0; dev_warn(intf->si_dev, - PFX "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n", - msg->msg.data_len); + "The GUID response from the BMC was too short, it was %d but should have been %d. Assuming GUID is not available.\n", + msg->msg.data_len, UUID_SIZE + 1); goto out; } - memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16); + import_guid(&bmc->fetch_guid, msg->msg.data + 1); /* * Make sure the guid data is available before setting * dyn_guid_set. @@ -3190,7 +3268,6 @@ /* It's the one we want */ if (msg->msg.data[0] != 0) { /* Got an error from the channel, just go on. */ - if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { /* * If the MC does not support this @@ -3234,7 +3311,7 @@ if (rv) { /* Got an error somehow, just give up. */ dev_warn(intf->si_dev, - PFX "Error sending channel information for channel %d: %d\n", + "Error sending channel information for channel %d: %d\n", intf->curr_channel, rv); intf->channel_list = intf->wchannels + set; @@ -3274,6 +3351,7 @@ dev_warn(intf->si_dev, "Error sending channel information for channel 0, %d\n", rv); + intf->null_user_handler = NULL; return -EIO; } @@ -3375,14 +3453,14 @@ intf->curr_seq = 0; spin_lock_init(&intf->waiting_rcv_msgs_lock); INIT_LIST_HEAD(&intf->waiting_rcv_msgs); - tasklet_init(&intf->recv_tasklet, - smi_recv_tasklet, - (unsigned long) intf); + tasklet_setup(&intf->recv_tasklet, + smi_recv_tasklet); atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0); spin_lock_init(&intf->xmit_msgs_lock); INIT_LIST_HEAD(&intf->xmit_msgs); INIT_LIST_HEAD(&intf->hp_xmit_msgs); spin_lock_init(&intf->events_lock); + spin_lock_init(&intf->watch_lock); atomic_set(&intf->event_waiters, 0); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; INIT_LIST_HEAD(&intf->waiting_events); @@ -3398,7 +3476,8 @@ /* Look for a hole in the numbers. */ i = 0; link = &ipmi_interfaces; - list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) { + list_for_each_entry_rcu(tintf, &ipmi_interfaces, link, + ipmi_interfaces_mutex_held()) { if (tintf->intf_num != i) { link = &tintf->link; break; @@ -3671,7 +3750,7 @@ msg->data[10] = ipmb_checksum(&msg->data[6], 4); msg->data_size = 11; - ipmi_debug_msg("Invalid command:", msg->data, msg->data_size); + pr_debug("Invalid command: %*ph\n", msg->data_size, msg->data); rcu_read_lock(); if (!intf->in_shutdown) { @@ -4107,7 +4186,7 @@ * message. */ dev_warn(intf->si_dev, - PFX "Event queue full, discarding incoming events\n"); + "Event queue full, discarding incoming events\n"); intf->event_msg_printed = 1; } @@ -4126,7 +4205,7 @@ recv_msg = (struct ipmi_recv_msg *) msg->user_data; if (recv_msg == NULL) { dev_warn(intf->si_dev, - "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vender for assistance\n"); + "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. 
Contact your hardware vendor for assistance.\n"); return 0; } @@ -4158,11 +4237,57 @@ int requeue; int chan; - ipmi_debug_msg("Recv:", msg->rsp, msg->rsp_size); - if (msg->rsp_size < 2) { + pr_debug("Recv: %*ph\n", msg->rsp_size, msg->rsp); + + if ((msg->data_size >= 2) + && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) + && (msg->data[1] == IPMI_SEND_MSG_CMD) + && (msg->user_data == NULL)) { + + if (intf->in_shutdown) + goto free_msg; + + /* + * This is the local response to a command send, start + * the timer for these. The user_data will not be + * NULL if this is a response send, and we will let + * response sends just go through. + */ + + /* + * Check for errors, if we get certain errors (ones + * that mean basically we can try again later), we + * ignore them and start the timer. Otherwise we + * report the error immediately. + */ + if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) + && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) + && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) + && (msg->rsp[2] != IPMI_BUS_ERR) + && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { + int ch = msg->rsp[3] & 0xf; + struct ipmi_channel *chans; + + /* Got an error sending the message, handle it. */ + + chans = READ_ONCE(intf->channel_list)->c; + if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) + || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) + ipmi_inc_stat(intf, sent_lan_command_errs); + else + ipmi_inc_stat(intf, sent_ipmb_command_errs); + intf_err_seq(intf, msg->msgid, msg->rsp[2]); + } else + /* The message was sent, start the timer. */ + intf_start_seq_timer(intf, msg->msgid); +free_msg: + requeue = 0; + goto out; + + } else if (msg->rsp_size < 2) { /* Message is too small to be correct. */ dev_warn(intf->si_dev, - PFX "BMC returned to small a message for netfn %x cmd %x, got %d bytes\n", + "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n", (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size); /* Generate an error response for the message. */ @@ -4177,7 +4302,7 @@ * marginally correct. */ dev_warn(intf->si_dev, - PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", + "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n", (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp[0] >> 2, msg->rsp[1]); @@ -4364,10 +4489,10 @@ } } -static void smi_recv_tasklet(unsigned long val) +static void smi_recv_tasklet(struct tasklet_struct *t) { unsigned long flags = 0; /* keep us warning-free. */ - struct ipmi_smi *intf = (struct ipmi_smi *) val; + struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet); int run_to_completion = intf->run_to_completion; struct ipmi_smi_msg *newmsg = NULL; @@ -4398,6 +4523,7 @@ intf->curr_msg = newmsg; } } + if (!run_to_completion) spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (newmsg) @@ -4415,62 +4541,16 @@ unsigned long flags = 0; /* keep us warning-free. */ int run_to_completion = intf->run_to_completion; - if ((msg->data_size >= 2) - && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) - && (msg->data[1] == IPMI_SEND_MSG_CMD) - && (msg->user_data == NULL)) { - - if (intf->in_shutdown) - goto free_msg; - - /* - * This is the local response to a command send, start - * the timer for these. The user_data will not be - * NULL if this is a response send, and we will let - * response sends just go through. - */ - - /* - * Check for errors, if we get certain errors (ones - * that mean basically we can try again later), we - * ignore them and start the timer. 
Otherwise we - * report the error immediately. - */ - if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) - && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) - && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) - && (msg->rsp[2] != IPMI_BUS_ERR) - && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { - int ch = msg->rsp[3] & 0xf; - struct ipmi_channel *chans; - - /* Got an error sending the message, handle it. */ - - chans = READ_ONCE(intf->channel_list)->c; - if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN) - || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC)) - ipmi_inc_stat(intf, sent_lan_command_errs); - else - ipmi_inc_stat(intf, sent_ipmb_command_errs); - intf_err_seq(intf, msg->msgid, msg->rsp[2]); - } else - /* The message was sent, start the timer. */ - intf_start_seq_timer(intf, msg->msgid); - -free_msg: - ipmi_free_smi_msg(msg); - } else { - /* - * To preserve message order, we keep a queue and deliver from - * a tasklet. - */ - if (!run_to_completion) - spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); - list_add_tail(&msg->link, &intf->waiting_rcv_msgs); - if (!run_to_completion) - spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, - flags); - } + /* + * To preserve message order, we keep a queue and deliver from + * a tasklet. + */ + if (!run_to_completion) + spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags); + list_add_tail(&msg->link, &intf->waiting_rcv_msgs); + if (!run_to_completion) + spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, + flags); if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); @@ -4484,7 +4564,7 @@ spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags); if (run_to_completion) - smi_recv_tasklet((unsigned long) intf); + smi_recv_tasklet(&intf->recv_tasklet); else tasklet_schedule(&intf->recv_tasklet); } @@ -4516,7 +4596,7 @@ smi_msg->data_size = recv_msg->msg.data_len; smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); - ipmi_debug_msg("Resend: ", smi_msg->data, smi_msg->data_size); + pr_debug("Resend: %*ph\n", smi_msg->data_size, smi_msg->data); return smi_msg; } @@ -4525,7 +4605,7 @@ struct list_head *timeouts, unsigned long timeout_period, int slot, unsigned long *flags, - unsigned int *waiting_msgs) + bool *need_timer) { struct ipmi_recv_msg *msg; @@ -4537,13 +4617,14 @@ if (timeout_period < ent->timeout) { ent->timeout -= timeout_period; - (*waiting_msgs)++; + *need_timer = true; return; } if (ent->retries_left == 0) { /* The message has used all its retries. */ ent->inuse = 0; + smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES); msg = ent->recv_msg; list_add_tail(&msg->link, timeouts); if (ent->broadcast) @@ -4556,7 +4637,7 @@ struct ipmi_smi_msg *smi_msg; /* More retries, send again. 
*/ - (*waiting_msgs)++; + *need_timer = true; /* * Start with the max timer, set to normal timer after @@ -4601,20 +4682,20 @@ } } -static unsigned int ipmi_timeout_handler(struct ipmi_smi *intf, - unsigned long timeout_period) +static bool ipmi_timeout_handler(struct ipmi_smi *intf, + unsigned long timeout_period) { struct list_head timeouts; struct ipmi_recv_msg *msg, *msg2; unsigned long flags; int i; - unsigned int waiting_msgs = 0; + bool need_timer = false; if (!intf->bmc_registered) { kref_get(&intf->refcount); if (!schedule_work(&intf->bmc_reg_work)) { kref_put(&intf->refcount, intf_free); - waiting_msgs++; + need_timer = true; } } @@ -4634,7 +4715,7 @@ for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) check_msg_timeout(intf, &intf->seq_table[i], &timeouts, timeout_period, i, - &flags, &waiting_msgs); + &flags, &need_timer); spin_unlock_irqrestore(&intf->seq_lock, flags); list_for_each_entry_safe(msg, msg2, &timeouts, link) @@ -4665,7 +4746,7 @@ tasklet_schedule(&intf->recv_tasklet); - return waiting_msgs; + return need_timer; } static void ipmi_request_event(struct ipmi_smi *intf) @@ -4685,37 +4766,28 @@ static void ipmi_timeout(struct timer_list *unused) { struct ipmi_smi *intf; - int nt = 0, index; + bool need_timer = false; + int index; if (atomic_read(&stop_operation)) return; index = srcu_read_lock(&ipmi_interfaces_srcu); list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { - int lnt = 0; - if (atomic_read(&intf->event_waiters)) { intf->ticks_to_req_ev--; if (intf->ticks_to_req_ev == 0) { ipmi_request_event(intf); intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME; } - lnt++; + need_timer = true; } - lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); - - lnt = !!lnt; - if (lnt != intf->last_needs_timer && - intf->handlers->set_need_watch) - intf->handlers->set_need_watch(intf->send_info, lnt); - intf->last_needs_timer = lnt; - - nt += lnt; + need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME); } srcu_read_unlock(&ipmi_interfaces_srcu, index); - if (nt) + if (need_timer) mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES); } @@ -4732,7 +4804,9 @@ static void free_smi_msg(struct ipmi_smi_msg *msg) { atomic_dec(&smi_msg_inuse_count); - kfree(msg); + /* Try to keep as much stuff out of the panic path as possible. */ + if (!oops_in_progress) + kfree(msg); } struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) @@ -4751,7 +4825,9 @@ static void free_recv_msg(struct ipmi_recv_msg *msg) { atomic_dec(&recv_msg_inuse_count); - kfree(msg); + /* Try to keep as much stuff out of the panic path as possible. */ + if (!oops_in_progress) + kfree(msg); } static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) @@ -4769,7 +4845,7 @@ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) { - if (msg->user) + if (msg->user && !oops_in_progress) kref_put(&msg->user->refcount, free_user); msg->done(msg); } @@ -5144,7 +5220,7 @@ * avoids problems with race conditions removing the timer * here. */ - atomic_inc(&stop_operation); + atomic_set(&stop_operation, 1); del_timer_sync(&ipmi_timer); initialized = false; @@ -5152,10 +5228,11 @@ /* Check for buffer leaks. */ count = atomic_read(&smi_msg_inuse_count); if (count != 0) - pr_warn(PFX "SMI message count %d at exit\n", count); + pr_warn("SMI message count %d at exit\n", count); count = atomic_read(&recv_msg_inuse_count); if (count != 0) - pr_warn(PFX "recv message count %d at exit\n", count); + pr_warn("recv message count %d at exit\n", count); + cleanup_srcu_struct(&ipmi_interfaces_srcu); } if (drvregistered) -- Gitblit v1.6.2
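Illustrative note on the watch-mask bookkeeping this patch backports: smi_add_watch() and smi_remove_watch() keep per-type waiter counts (responses, watchdog, commands) and collapse them into a single mask that is handed to the lower layer only when the mask actually changes. The sketch below is a minimal, self-contained userspace model of that logic, not kernel code; struct model_intf and the set_need_watch() stub are stand-ins for the real interface structure and the SMI driver's set_need_watch handler. It compiles with any C compiler.

/* Minimal model of the watch-mask reference counting (assumption: stand-in names). */
#include <stdio.h>

#define IPMI_WATCH_MASK_CHECK_MESSAGES  (1 << 0)
#define IPMI_WATCH_MASK_CHECK_WATCHDOG  (1 << 1)
#define IPMI_WATCH_MASK_CHECK_COMMANDS  (1 << 2)

struct model_intf {
	unsigned int response_waiters;
	unsigned int watchdog_waiters;
	unsigned int command_waiters;
	unsigned int last_watch_mask;
};

/* Stand-in for intf->handlers->set_need_watch(send_info, mask). */
static void set_need_watch(unsigned int mask)
{
	printf("set_need_watch(0x%x)\n", mask);
}

static void smi_add_watch(struct model_intf *intf, unsigned int flags)
{
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;
	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;
	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	/* Only poke the lower layer when a new bit appears in the mask. */
	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		set_need_watch(intf->last_watch_mask);
	}
}

static void smi_remove_watch(struct model_intf *intf, unsigned int flags)
{
	unsigned int mask = 0;

	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;
	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;
	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	/* Recompute the mask from the counts; a bit drops only at zero. */
	if (intf->response_waiters)
		mask |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		mask |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		mask |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != mask) {
		intf->last_watch_mask = mask;
		set_need_watch(mask);
	}
}

int main(void)
{
	struct model_intf intf = { 0 };

	/* Two pending responses: the mask is raised once, not twice. */
	smi_add_watch(&intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
	smi_add_watch(&intf, IPMI_WATCH_MASK_CHECK_MESSAGES);

	/* A watchdog user registers a pretimeout handler. */
	smi_add_watch(&intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	/* Responses complete; the message bit clears on the second remove. */
	smi_remove_watch(&intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
	smi_remove_watch(&intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
	return 0;
}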