Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel
os
linux
1// SPDX-License-Identifier: GPL-2.0+
2/*
3 * ipmi_msghandler.c
4 *
5 * Incoming and outgoing message routing for an IPMI interface.
6 *
7 * Author: MontaVista Software, Inc.
8 * Corey Minyard <minyard@mvista.com>
9 * source@mvista.com
10 *
11 * Copyright 2002 MontaVista Software Inc.
12 */
13
14#include <linux/module.h>
15#include <linux/errno.h>
16#include <linux/poll.h>
17#include <linux/sched.h>
18#include <linux/seq_file.h>
19#include <linux/spinlock.h>
20#include <linux/mutex.h>
21#include <linux/slab.h>
22#include <linux/ipmi.h>
23#include <linux/ipmi_smi.h>
24#include <linux/notifier.h>
25#include <linux/init.h>
26#include <linux/proc_fs.h>
27#include <linux/rcupdate.h>
28#include <linux/interrupt.h>
29#include <linux/moduleparam.h>
30#include <linux/workqueue.h>
31#include <linux/uuid.h>
32
33#define PFX "IPMI message handler: "
34
35#define IPMI_DRIVER_VERSION "39.2"
36
37static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
38static int ipmi_init_msghandler(void);
39static void smi_recv_tasklet(unsigned long);
40static void handle_new_recv_msgs(ipmi_smi_t intf);
41static void need_waiter(ipmi_smi_t intf);
42static int handle_one_recv_msg(ipmi_smi_t intf,
43 struct ipmi_smi_msg *msg);
44
45static int initialized;
46
47enum ipmi_panic_event_op {
48 IPMI_SEND_PANIC_EVENT_NONE,
49 IPMI_SEND_PANIC_EVENT,
50 IPMI_SEND_PANIC_EVENT_STRING
51};
52#ifdef CONFIG_IPMI_PANIC_STRING
53#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
54#elif defined(CONFIG_IPMI_PANIC_EVENT)
55#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
56#else
57#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
58#endif
59static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;
60
61static int panic_op_write_handler(const char *val,
62 const struct kernel_param *kp)
63{
64 char valcp[16];
65 char *s;
66
67 strncpy(valcp, val, 15);
68 valcp[15] = '\0';
69
70 s = strstrip(valcp);
71
72 if (strcmp(s, "none") == 0)
73 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_NONE;
74 else if (strcmp(s, "event") == 0)
75 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT;
76 else if (strcmp(s, "string") == 0)
77 ipmi_send_panic_event = IPMI_SEND_PANIC_EVENT_STRING;
78 else
79 return -EINVAL;
80
81 return 0;
82}
83
84static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
85{
86 switch (ipmi_send_panic_event) {
87 case IPMI_SEND_PANIC_EVENT_NONE:
88 strcpy(buffer, "none");
89 break;
90
91 case IPMI_SEND_PANIC_EVENT:
92 strcpy(buffer, "event");
93 break;
94
95 case IPMI_SEND_PANIC_EVENT_STRING:
96 strcpy(buffer, "string");
97 break;
98
99 default:
100 strcpy(buffer, "???");
101 break;
102 }
103
104 return strlen(buffer);
105}
106
107static const struct kernel_param_ops panic_op_ops = {
108 .set = panic_op_write_handler,
109 .get = panic_op_read_handler
110};
111module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
112MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
113
114
115#ifdef CONFIG_IPMI_PROC_INTERFACE
116static struct proc_dir_entry *proc_ipmi_root;
117#endif /* CONFIG_IPMI_PROC_INTERFACE */
118
119/* Remain in auto-maintenance mode for this amount of time (in ms). */
120#define IPMI_MAINTENANCE_MODE_TIMEOUT 30000
121
122#define MAX_EVENTS_IN_QUEUE 25
123
124/*
125 * Don't let a message sit in a queue forever, always time it with at least
126 * the max message timer. This is in milliseconds.
127 */
128#define MAX_MSG_TIMEOUT 60000
129
130/* Call every ~1000 ms. */
131#define IPMI_TIMEOUT_TIME 1000
132
133/* How many jiffies does it take to get to the timeout time. */
134#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)
135
136/*
137 * Request events from the queue every second (this is the number of
138 * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the
139 * future, IPMI will add a way to know immediately if an event is in
140 * the queue and this silliness can go away.
141 */
142#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))
143
144/* How long should we cache dynamic device IDs? */
145#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)
146
/*
 * The main "user" data structure.  One of these exists for each
 * registered user of an IPMI interface; it links the user into the
 * interface's user list and records the callbacks that receive
 * messages for it.
 */
struct ipmi_user {
	struct list_head link;

	/* Set to false when the user is destroyed. */
	bool valid;

	/* Refcount; the structure is freed when this drops to zero. */
	struct kref refcount;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	/* Opaque cookie passed back to the handler's callbacks. */
	void *handler_data;

	/* The interface this user is bound to. */
	ipmi_smi_t intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;
};
168
/*
 * A registration for delivery of incoming commands: commands with the
 * given netfn/cmd on the given channels are routed to @user.
 */
struct cmd_rcvr {
	struct list_head link;

	/* The user that receives matching commands. */
	ipmi_user_t user;
	unsigned char netfn;
	unsigned char cmd;
	/* Bitmask of channels this registration applies to. */
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};
187
/*
 * One slot of the per-interface sequence table, tracking a single
 * outstanding IPMB message so its response can be matched back up.
 */
struct seq_table {
	/* Slot currently holds an outstanding message. */
	unsigned int inuse : 1;
	/* The outstanding message was a broadcast. */
	unsigned int broadcast : 1;

	/* Remaining time (ms) before this entry times out. */
	unsigned long timeout;
	/* Timeout value to restore when the send response arrives. */
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};
210
211/*
212 * Store the information in a msgid (long) to allow us to find a
213 * sequence table entry from the msgid.
214 */
215#define STORE_SEQ_IN_MSGID(seq, seqid) \
216 ((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))
217
218#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
219 do { \
220 seq = (((msgid) >> 26) & 0x3f); \
221 seqid = ((msgid) & 0x3ffffff); \
222 } while (0)
223
224#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
225
226#define IPMI_MAX_CHANNELS 16
227struct ipmi_channel {
228 unsigned char medium;
229 unsigned char protocol;
230};
231
232struct ipmi_channel_set {
233 struct ipmi_channel c[IPMI_MAX_CHANNELS];
234};
235
236struct ipmi_my_addrinfo {
237 /*
238 * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR,
239 * but may be changed by the user.
240 */
241 unsigned char address;
242
243 /*
244 * My LUN. This should generally stay the SMS LUN, but just in
245 * case...
246 */
247 unsigned char lun;
248};
249
250#ifdef CONFIG_IPMI_PROC_INTERFACE
251struct ipmi_proc_entry {
252 char *name;
253 struct ipmi_proc_entry *next;
254};
255#endif
256
257/*
258 * Note that the product id, manufacturer id, guid, and device id are
259 * immutable in this structure, so dyn_mutex is not required for
260 * accessing those. If those change on a BMC, a new BMC is allocated.
261 */
262struct bmc_device {
263 struct platform_device pdev;
264 struct list_head intfs; /* Interfaces on this BMC. */
265 struct ipmi_device_id id;
266 struct ipmi_device_id fetch_id;
267 int dyn_id_set;
268 unsigned long dyn_id_expiry;
269 struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
270 guid_t guid;
271 guid_t fetch_guid;
272 int dyn_guid_set;
273 struct kref usecount;
274 struct work_struct remove_work;
275};
276#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
277
278static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
279 struct ipmi_device_id *id,
280 bool *guid_set, guid_t *guid);
281
282/*
283 * Various statistics for IPMI, these index stats[] in the ipmi_smi
284 * structure.
285 */
286enum ipmi_stat_indexes {
287 /* Commands we got from the user that were invalid. */
288 IPMI_STAT_sent_invalid_commands = 0,
289
290 /* Commands we sent to the MC. */
291 IPMI_STAT_sent_local_commands,
292
293 /* Responses from the MC that were delivered to a user. */
294 IPMI_STAT_handled_local_responses,
295
296 /* Responses from the MC that were not delivered to a user. */
297 IPMI_STAT_unhandled_local_responses,
298
299 /* Commands we sent out to the IPMB bus. */
300 IPMI_STAT_sent_ipmb_commands,
301
302 /* Commands sent on the IPMB that had errors on the SEND CMD */
303 IPMI_STAT_sent_ipmb_command_errs,
304
305 /* Each retransmit increments this count. */
306 IPMI_STAT_retransmitted_ipmb_commands,
307
308 /*
309 * When a message times out (runs out of retransmits) this is
310 * incremented.
311 */
312 IPMI_STAT_timed_out_ipmb_commands,
313
314 /*
315 * This is like above, but for broadcasts. Broadcasts are
316 * *not* included in the above count (they are expected to
317 * time out).
318 */
319 IPMI_STAT_timed_out_ipmb_broadcasts,
320
321 /* Responses I have sent to the IPMB bus. */
322 IPMI_STAT_sent_ipmb_responses,
323
324 /* The response was delivered to the user. */
325 IPMI_STAT_handled_ipmb_responses,
326
327 /* The response had invalid data in it. */
328 IPMI_STAT_invalid_ipmb_responses,
329
330 /* The response didn't have anyone waiting for it. */
331 IPMI_STAT_unhandled_ipmb_responses,
332
333 /* Commands we sent out to the IPMB bus. */
334 IPMI_STAT_sent_lan_commands,
335
336 /* Commands sent on the IPMB that had errors on the SEND CMD */
337 IPMI_STAT_sent_lan_command_errs,
338
339 /* Each retransmit increments this count. */
340 IPMI_STAT_retransmitted_lan_commands,
341
342 /*
343 * When a message times out (runs out of retransmits) this is
344 * incremented.
345 */
346 IPMI_STAT_timed_out_lan_commands,
347
348 /* Responses I have sent to the IPMB bus. */
349 IPMI_STAT_sent_lan_responses,
350
351 /* The response was delivered to the user. */
352 IPMI_STAT_handled_lan_responses,
353
354 /* The response had invalid data in it. */
355 IPMI_STAT_invalid_lan_responses,
356
357 /* The response didn't have anyone waiting for it. */
358 IPMI_STAT_unhandled_lan_responses,
359
360 /* The command was delivered to the user. */
361 IPMI_STAT_handled_commands,
362
363 /* The command had invalid data in it. */
364 IPMI_STAT_invalid_commands,
365
366 /* The command didn't have anyone waiting for it. */
367 IPMI_STAT_unhandled_commands,
368
369 /* Invalid data in an event. */
370 IPMI_STAT_invalid_events,
371
372 /* Events that were received with the proper format. */
373 IPMI_STAT_events,
374
375 /* Retransmissions on IPMB that failed. */
376 IPMI_STAT_dropped_rexmit_ipmb_commands,
377
378 /* Retransmissions on LAN that failed. */
379 IPMI_STAT_dropped_rexmit_lan_commands,
380
381 /* This *must* remain last, add new values above this. */
382 IPMI_NUM_STATS
383};
384
385
386#define IPMI_IPMB_NUM_SEQ 64
387struct ipmi_smi {
388 /* What interface number are we? */
389 int intf_num;
390
391 struct kref refcount;
392
393 /* Set when the interface is being unregistered. */
394 bool in_shutdown;
395
396 /* Used for a list of interfaces. */
397 struct list_head link;
398
399 /*
400 * The list of upper layers that are using me. seq_lock
401 * protects this.
402 */
403 struct list_head users;
404
405 /* Used for wake ups at startup. */
406 wait_queue_head_t waitq;
407
408 /*
409 * Prevents the interface from being unregistered when the
410 * interface is used by being looked up through the BMC
411 * structure.
412 */
413 struct mutex bmc_reg_mutex;
414
415 struct bmc_device tmp_bmc;
416 struct bmc_device *bmc;
417 bool bmc_registered;
418 struct list_head bmc_link;
419 char *my_dev_name;
420 bool in_bmc_register; /* Handle recursive situations. Yuck. */
421 struct work_struct bmc_reg_work;
422
423 /*
424 * This is the lower-layer's sender routine. Note that you
425 * must either be holding the ipmi_interfaces_mutex or be in
426 * a non-preemptible region to use this. You must fetch the
427 * value into a local variable and make sure it is not NULL.
428 */
429 const struct ipmi_smi_handlers *handlers;
430 void *send_info;
431
432#ifdef CONFIG_IPMI_PROC_INTERFACE
433 /* A list of proc entries for this interface. */
434 struct mutex proc_entry_lock;
435 struct ipmi_proc_entry *proc_entries;
436
437 struct proc_dir_entry *proc_dir;
438 char proc_dir_name[10];
439#endif
440
441 /* Driver-model device for the system interface. */
442 struct device *si_dev;
443
444 /*
445 * A table of sequence numbers for this interface. We use the
446 * sequence numbers for IPMB messages that go out of the
447 * interface to match them up with their responses. A routine
448 * is called periodically to time the items in this list.
449 */
450 spinlock_t seq_lock;
451 struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
452 int curr_seq;
453
454 /*
455 * Messages queued for delivery. If delivery fails (out of memory
456 * for instance), They will stay in here to be processed later in a
457 * periodic timer interrupt. The tasklet is for handling received
458 * messages directly from the handler.
459 */
460 spinlock_t waiting_rcv_msgs_lock;
461 struct list_head waiting_rcv_msgs;
462 atomic_t watchdog_pretimeouts_to_deliver;
463 struct tasklet_struct recv_tasklet;
464
465 spinlock_t xmit_msgs_lock;
466 struct list_head xmit_msgs;
467 struct ipmi_smi_msg *curr_msg;
468 struct list_head hp_xmit_msgs;
469
470 /*
471 * The list of command receivers that are registered for commands
472 * on this interface.
473 */
474 struct mutex cmd_rcvrs_mutex;
475 struct list_head cmd_rcvrs;
476
477 /*
478 * Events that were queued because no one was there to receive
479 * them.
480 */
481 spinlock_t events_lock; /* For dealing with event stuff. */
482 struct list_head waiting_events;
483 unsigned int waiting_events_count; /* How many events in queue? */
484 char delivering_events;
485 char event_msg_printed;
486 atomic_t event_waiters;
487 unsigned int ticks_to_req_ev;
488 int last_needs_timer;
489
490 /*
491 * The event receiver for my BMC, only really used at panic
492 * shutdown as a place to store this.
493 */
494 unsigned char event_receiver;
495 unsigned char event_receiver_lun;
496 unsigned char local_sel_device;
497 unsigned char local_event_generator;
498
499 /* For handling of maintenance mode. */
500 int maintenance_mode;
501 bool maintenance_mode_enable;
502 int auto_maintenance_timeout;
503 spinlock_t maintenance_mode_lock; /* Used in a timer... */
504
505 /*
506 * A cheap hack, if this is non-null and a message to an
507 * interface comes in with a NULL user, call this routine with
508 * it. Note that the message will still be freed by the
509 * caller. This only works on the system interface.
510 *
511 * Protected by bmc_reg_mutex.
512 */
513 void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg);
514
515 /*
516 * When we are scanning the channels for an SMI, this will
517 * tell which channel we are scanning.
518 */
519 int curr_channel;
520
521 /* Channel information */
522 struct ipmi_channel_set *channel_list;
523 unsigned int curr_working_cset; /* First index into the following. */
524 struct ipmi_channel_set wchannels[2];
525 struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
526 bool channels_ready;
527
528 atomic_t stats[IPMI_NUM_STATS];
529
530 /*
531 * run_to_completion duplicate of smb_info, smi_info
532 * and ipmi_serial_info structures. Used to decrease numbers of
533 * parameters passed by "low" level IPMI code.
534 */
535 int run_to_completion;
536};
537#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)
538
539static void __get_guid(ipmi_smi_t intf);
540static void __ipmi_bmc_unregister(ipmi_smi_t intf);
541static int __ipmi_bmc_register(ipmi_smi_t intf,
542 struct ipmi_device_id *id,
543 bool guid_set, guid_t *guid, int intf_num);
544static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id);
545
546
547/**
548 * The driver model view of the IPMI messaging driver.
549 */
550static struct platform_driver ipmidriver = {
551 .driver = {
552 .name = "ipmi",
553 .bus = &platform_bus_type
554 }
555};
556/*
557 * This mutex keeps us from adding the same BMC twice.
558 */
559static DEFINE_MUTEX(ipmidriver_mutex);
560
561static LIST_HEAD(ipmi_interfaces);
562static DEFINE_MUTEX(ipmi_interfaces_mutex);
563
564/*
565 * List of watchers that want to know when smi's are added and deleted.
566 */
567static LIST_HEAD(smi_watchers);
568static DEFINE_MUTEX(smi_watchers_mutex);
569
570#define ipmi_inc_stat(intf, stat) \
571 atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
572#define ipmi_get_stat(intf, stat) \
573 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
574
575static const char * const addr_src_to_str[] = {
576 "invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
577 "device-tree", "platform"
578};
579
580const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
581{
582 if (src >= SI_LAST)
583 src = 0; /* Invalid */
584 return addr_src_to_str[src];
585}
586EXPORT_SYMBOL(ipmi_addr_src_to_str);
587
/* True if @addr is a LAN (session-based) address. */
static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}
592
/* True if @addr is a directed IPMB address. */
static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}
597
/* True if @addr is an IPMB broadcast address. */
static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}
602
603static void free_recv_msg_list(struct list_head *q)
604{
605 struct ipmi_recv_msg *msg, *msg2;
606
607 list_for_each_entry_safe(msg, msg2, q, link) {
608 list_del(&msg->link);
609 ipmi_free_recv_msg(msg);
610 }
611}
612
613static void free_smi_msg_list(struct list_head *q)
614{
615 struct ipmi_smi_msg *msg, *msg2;
616
617 list_for_each_entry_safe(msg, msg2, q, link) {
618 list_del(&msg->link);
619 ipmi_free_smi_msg(msg);
620 }
621}
622
/*
 * Tear down the per-interface message state: stop the receive
 * tasklet, drain the queued messages and events, remove all
 * registered command receivers, and release any receive messages
 * still parked in the sequence table.  Called when the last reference
 * to the interface is dropped (see intf_free()).
 */
static void clean_up_interface_data(ipmi_smi_t intf)
{
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;
	struct list_head list;

	/* Ensure the tasklet cannot run again before we free its data. */
	tasklet_kill(&intf->recv_tasklet);

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface and wait for RCU to know that none are in use.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	INIT_LIST_HEAD(&list);
	list_splice_init_rcu(&intf->cmd_rcvrs, &list, synchronize_rcu);
	mutex_unlock(&intf->cmd_rcvrs_mutex);

	/* Safe to free now: no RCU reader can still see these entries. */
	list_for_each_entry_safe(rcvr, rcvr2, &list, link)
		kfree(rcvr);

	/* Free receive messages still owned by in-flight sequences. */
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}
}
652
653static void intf_free(struct kref *ref)
654{
655 ipmi_smi_t intf = container_of(ref, struct ipmi_smi, refcount);
656
657 clean_up_interface_data(intf);
658 kfree(intf);
659}
660
/*
 * A pending new-interface notification, built under the locks in
 * ipmi_smi_watcher_register() and delivered after they are released.
 */
struct watcher_entry {
	int intf_num;
	ipmi_smi_t intf;
	struct list_head link;
};
666
/*
 * Register @watcher to be told about all current and future IPMI
 * interfaces.  Notifications for already-existing interfaces are
 * collected under the locks and delivered afterwards, outside
 * ipmi_interfaces_mutex, so the watcher callback may itself use the
 * IPMI interfaces.  Returns 0 on success, -ENOMEM if the temporary
 * notification list could not be built (in which case the watcher is
 * not registered).
 */
int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	ipmi_smi_t intf;
	LIST_HEAD(to_deliver);
	struct watcher_entry *e, *e2;

	mutex_lock(&smi_watchers_mutex);

	mutex_lock(&ipmi_interfaces_mutex);

	/* Build a list of things to deliver. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		/* intf_num == -1 means the interface is not ready yet. */
		if (intf->intf_num == -1)
			continue;
		e = kmalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			goto out_err;
		/* Hold a ref so the interface survives until delivery. */
		kref_get(&intf->refcount);
		e->intf = intf;
		e->intf_num = intf->intf_num;
		list_add_tail(&e->link, &to_deliver);
	}

	/* We will succeed, so add it to the list. */
	list_add(&watcher->link, &smi_watchers);

	mutex_unlock(&ipmi_interfaces_mutex);

	/* Deliver with only smi_watchers_mutex held. */
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		watcher->new_smi(e->intf_num, e->intf->si_dev);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}

	mutex_unlock(&smi_watchers_mutex);

	return 0;

 out_err:
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	/* Drop the interface refs taken for the partial list. */
	list_for_each_entry_safe(e, e2, &to_deliver, link) {
		list_del(&e->link);
		kref_put(&e->intf->refcount, intf_free);
		kfree(e);
	}
	return -ENOMEM;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);
717
718int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
719{
720 mutex_lock(&smi_watchers_mutex);
721 list_del(&(watcher->link));
722 mutex_unlock(&smi_watchers_mutex);
723 return 0;
724}
725EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
726
727/*
728 * Must be called with smi_watchers_mutex held.
729 */
730static void
731call_smi_watchers(int i, struct device *dev)
732{
733 struct ipmi_smi_watcher *w;
734
735 list_for_each_entry(w, &smi_watchers, link) {
736 if (try_module_get(w->owner)) {
737 w->new_smi(i, dev);
738 module_put(w->owner);
739 }
740 }
741}
742
743static int
744ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
745{
746 if (addr1->addr_type != addr2->addr_type)
747 return 0;
748
749 if (addr1->channel != addr2->channel)
750 return 0;
751
752 if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
753 struct ipmi_system_interface_addr *smi_addr1
754 = (struct ipmi_system_interface_addr *) addr1;
755 struct ipmi_system_interface_addr *smi_addr2
756 = (struct ipmi_system_interface_addr *) addr2;
757 return (smi_addr1->lun == smi_addr2->lun);
758 }
759
760 if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
761 struct ipmi_ipmb_addr *ipmb_addr1
762 = (struct ipmi_ipmb_addr *) addr1;
763 struct ipmi_ipmb_addr *ipmb_addr2
764 = (struct ipmi_ipmb_addr *) addr2;
765
766 return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
767 && (ipmb_addr1->lun == ipmb_addr2->lun));
768 }
769
770 if (is_lan_addr(addr1)) {
771 struct ipmi_lan_addr *lan_addr1
772 = (struct ipmi_lan_addr *) addr1;
773 struct ipmi_lan_addr *lan_addr2
774 = (struct ipmi_lan_addr *) addr2;
775
776 return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
777 && (lan_addr1->local_SWID == lan_addr2->local_SWID)
778 && (lan_addr1->session_handle
779 == lan_addr2->session_handle)
780 && (lan_addr1->lun == lan_addr2->lun));
781 }
782
783 return 1;
784}
785
786int ipmi_validate_addr(struct ipmi_addr *addr, int len)
787{
788 if (len < sizeof(struct ipmi_system_interface_addr))
789 return -EINVAL;
790
791 if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
792 if (addr->channel != IPMI_BMC_CHANNEL)
793 return -EINVAL;
794 return 0;
795 }
796
797 if ((addr->channel == IPMI_BMC_CHANNEL)
798 || (addr->channel >= IPMI_MAX_CHANNELS)
799 || (addr->channel < 0))
800 return -EINVAL;
801
802 if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
803 if (len < sizeof(struct ipmi_ipmb_addr))
804 return -EINVAL;
805 return 0;
806 }
807
808 if (is_lan_addr(addr)) {
809 if (len < sizeof(struct ipmi_lan_addr))
810 return -EINVAL;
811 return 0;
812 }
813
814 return -EINVAL;
815}
816EXPORT_SYMBOL(ipmi_validate_addr);
817
818unsigned int ipmi_addr_length(int addr_type)
819{
820 if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
821 return sizeof(struct ipmi_system_interface_addr);
822
823 if ((addr_type == IPMI_IPMB_ADDR_TYPE)
824 || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
825 return sizeof(struct ipmi_ipmb_addr);
826
827 if (addr_type == IPMI_LAN_ADDR_TYPE)
828 return sizeof(struct ipmi_lan_addr);
829
830 return 0;
831}
832EXPORT_SYMBOL(ipmi_addr_length);
833
/*
 * Hand a receive message to its owner.  If the message has no user,
 * it is routed to the interface's null_user_handler (if any) and then
 * freed here; otherwise the user's receive handler takes ownership of
 * the message.  During a panic the user handler is skipped entirely
 * (the message is then neither delivered nor freed, which is
 * acceptable in that context).
 */
static void deliver_response(struct ipmi_recv_msg *msg)
{
	if (!msg->user) {
		/* For user-less messages, user_msg_data holds the intf. */
		ipmi_smi_t intf = msg->user_msg_data;

		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
			ipmi_inc_stat(intf, handled_local_responses);
		} else {
			/* No handler, so give up. */
			ipmi_inc_stat(intf, unhandled_local_responses);
		}
		ipmi_free_recv_msg(msg);
	} else if (!oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk.  At this moment, simply skip it in
		 * that case.
		 */

		ipmi_user_t user = msg->user;
		user->handler->ipmi_recv_hndl(msg, user->handler_data);
	}
}
859
860static void
861deliver_err_response(struct ipmi_recv_msg *msg, int err)
862{
863 msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
864 msg->msg_data[0] = err;
865 msg->msg.netfn |= 1; /* Convert to a response. */
866 msg->msg.data_len = 1;
867 msg->msg.data = msg->msg_data;
868 deliver_response(msg);
869}
870
/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table. This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(ipmi_smi_t intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	/*
	 * Scan at most one full pass of the table, starting at the
	 * slot after the one last handed out.
	 */
	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	/* Re-check: the loop also ends when the table is full. */
	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		/* Bump the seqid so stale responses can be rejected. */
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		/* Make sure timeout processing runs for this entry. */
		need_waiter(intf);
	} else {
		/* Every slot is in use; caller must retry later. */
		rv = -EAGAIN;
	}

	return rv;
}
916
917/*
918 * Return the receive message for the given sequence number and
919 * release the sequence number so it can be reused. Some other data
920 * is passed in to be sure the message matches up correctly (to help
921 * guard against message coming in after their timeout and the
922 * sequence number being reused).
923 */
924static int intf_find_seq(ipmi_smi_t intf,
925 unsigned char seq,
926 short channel,
927 unsigned char cmd,
928 unsigned char netfn,
929 struct ipmi_addr *addr,
930 struct ipmi_recv_msg **recv_msg)
931{
932 int rv = -ENODEV;
933 unsigned long flags;
934
935 if (seq >= IPMI_IPMB_NUM_SEQ)
936 return -EINVAL;
937
938 spin_lock_irqsave(&(intf->seq_lock), flags);
939 if (intf->seq_table[seq].inuse) {
940 struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;
941
942 if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
943 && (msg->msg.netfn == netfn)
944 && (ipmi_addr_equal(addr, &(msg->addr)))) {
945 *recv_msg = msg;
946 intf->seq_table[seq].inuse = 0;
947 rv = 0;
948 }
949 }
950 spin_unlock_irqrestore(&(intf->seq_lock), flags);
951
952 return rv;
953}
954
955
956/* Start the timer for a specific sequence table entry. */
957static int intf_start_seq_timer(ipmi_smi_t intf,
958 long msgid)
959{
960 int rv = -ENODEV;
961 unsigned long flags;
962 unsigned char seq;
963 unsigned long seqid;
964
965
966 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
967
968 spin_lock_irqsave(&(intf->seq_lock), flags);
969 /*
970 * We do this verification because the user can be deleted
971 * while a message is outstanding.
972 */
973 if ((intf->seq_table[seq].inuse)
974 && (intf->seq_table[seq].seqid == seqid)) {
975 struct seq_table *ent = &(intf->seq_table[seq]);
976 ent->timeout = ent->orig_timeout;
977 rv = 0;
978 }
979 spin_unlock_irqrestore(&(intf->seq_lock), flags);
980
981 return rv;
982}
983
984/* Got an error for the send message for a specific sequence number. */
985static int intf_err_seq(ipmi_smi_t intf,
986 long msgid,
987 unsigned int err)
988{
989 int rv = -ENODEV;
990 unsigned long flags;
991 unsigned char seq;
992 unsigned long seqid;
993 struct ipmi_recv_msg *msg = NULL;
994
995
996 GET_SEQ_FROM_MSGID(msgid, seq, seqid);
997
998 spin_lock_irqsave(&(intf->seq_lock), flags);
999 /*
1000 * We do this verification because the user can be deleted
1001 * while a message is outstanding.
1002 */
1003 if ((intf->seq_table[seq].inuse)
1004 && (intf->seq_table[seq].seqid == seqid)) {
1005 struct seq_table *ent = &(intf->seq_table[seq]);
1006
1007 ent->inuse = 0;
1008 msg = ent->recv_msg;
1009 rv = 0;
1010 }
1011 spin_unlock_irqrestore(&(intf->seq_lock), flags);
1012
1013 if (msg)
1014 deliver_err_response(msg, err);
1015
1016 return rv;
1017}
1018
1019
/*
 * Create a new user of interface @if_num.  @handler supplies the
 * callbacks that receive messages/events, and @handler_data is passed
 * back to those callbacks.  On success *@user is set to the new user,
 * which holds a reference on the interface and pins the lower-layer
 * module.  Returns 0 on success, -EINVAL for a bad handler or unknown
 * interface, -ENOMEM/-ENODEV on resource failures.
 */
int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     ipmi_user_t *user)
{
	unsigned long flags;
	ipmi_user_t new_user;
	int rv = 0;
	ipmi_smi_t intf;

	/*
	 * There is no module usecount here, because it's not
	 * required. Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;

		/*
		 * The init code doesn't return an error if it was turned
		 * off, but it won't initialize. Check that.
		 */
		if (!initialized)
			return -ENODEV;
	}

	/* Allocate before taking the lock so we never sleep under it. */
	new_user = kmalloc(sizeof(*new_user), GFP_KERNEL);
	if (!new_user)
		return -ENOMEM;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_kfree;

 found:
	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	kref_init(&new_user->refcount);
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	/* Pin the lower-layer module for the lifetime of this user. */
	if (!try_module_get(intf->handlers->owner)) {
		rv = -ENODEV;
		goto out_kref;
	}

	if (intf->handlers->inc_usecount) {
		rv = intf->handlers->inc_usecount(intf->send_info);
		if (rv) {
			module_put(intf->handlers->owner);
			goto out_kref;
		}
	}

	/*
	 * Hold the lock so intf->handlers is guaranteed to be good
	 * until now
	 */
	mutex_unlock(&ipmi_interfaces_mutex);

	new_user->valid = true;
	/* seq_lock also protects the interface's user list. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_add_rcu(&new_user->link, &intf->users);
	spin_unlock_irqrestore(&intf->seq_lock, flags);
	if (handler->ipmi_watchdog_pretimeout) {
		/* User wants pretimeouts, so make sure to watch for them. */
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	}
	*user = new_user;
	return 0;

out_kref:
	kref_put(&intf->refcount, intf_free);
out_kfree:
	mutex_unlock(&ipmi_interfaces_mutex);
	kfree(new_user);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
1120
1121int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
1122{
1123 int rv = 0;
1124 ipmi_smi_t intf;
1125 const struct ipmi_smi_handlers *handlers;
1126
1127 mutex_lock(&ipmi_interfaces_mutex);
1128 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
1129 if (intf->intf_num == if_num)
1130 goto found;
1131 }
1132 /* Not found, return an error */
1133 rv = -EINVAL;
1134 mutex_unlock(&ipmi_interfaces_mutex);
1135 return rv;
1136
1137found:
1138 handlers = intf->handlers;
1139 rv = -ENOSYS;
1140 if (handlers->get_smi_info)
1141 rv = handlers->get_smi_info(intf->send_info, data);
1142 mutex_unlock(&ipmi_interfaces_mutex);
1143
1144 return rv;
1145}
1146EXPORT_SYMBOL(ipmi_get_smi_info);
1147
/* kref release callback: the last reference to the user is gone. */
static void free_user(struct kref *ref)
{
	ipmi_user_t user = container_of(ref, struct ipmi_user, refcount);
	kfree(user);
}
1153
/*
 * Tear down a user created by ipmi_create_user(): mark it invalid,
 * drop its event/watchdog waiter counts, cancel its outstanding
 * sequence-table entries, unregister its command receivers, and
 * release the module, interface, and user references.  Always
 * returns 0.
 */
int ipmi_destroy_user(ipmi_user_t user)
{
	ipmi_smi_t intf = user->intf;
	int i;
	unsigned long flags;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;

	/* Stop new message deliveries to this user. */
	user->valid = false;

	if (user->handler->ipmi_watchdog_pretimeout)
		atomic_dec(&intf->event_waiters);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's sequence table. */
	spin_lock_irqsave(&intf->seq_lock, flags);
	list_del_rcu(&user->link);

	/* Drop any in-flight messages still owned by this user. */
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	spin_unlock_irqrestore(&intf->seq_lock, flags);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	/* Wait out all RCU readers before freeing the unlinked entries. */
	synchronize_rcu();
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	/* Release the module/usecount taken in ipmi_create_user(). */
	mutex_lock(&ipmi_interfaces_mutex);
	if (intf->handlers) {
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	kref_put(&intf->refcount, intf_free);

	kref_put(&user->refcount, free_user);

	return 0;
}
EXPORT_SYMBOL(ipmi_destroy_user);
1220
1221int ipmi_get_version(ipmi_user_t user,
1222 unsigned char *major,
1223 unsigned char *minor)
1224{
1225 struct ipmi_device_id id;
1226 int rv;
1227
1228 rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
1229 if (rv)
1230 return rv;
1231
1232 *major = ipmi_version_major(&id);
1233 *minor = ipmi_version_minor(&id);
1234
1235 return 0;
1236}
1237EXPORT_SYMBOL(ipmi_get_version);
1238
1239int ipmi_set_my_address(ipmi_user_t user,
1240 unsigned int channel,
1241 unsigned char address)
1242{
1243 if (channel >= IPMI_MAX_CHANNELS)
1244 return -EINVAL;
1245 user->intf->addrinfo[channel].address = address;
1246 return 0;
1247}
1248EXPORT_SYMBOL(ipmi_set_my_address);
1249
1250int ipmi_get_my_address(ipmi_user_t user,
1251 unsigned int channel,
1252 unsigned char *address)
1253{
1254 if (channel >= IPMI_MAX_CHANNELS)
1255 return -EINVAL;
1256 *address = user->intf->addrinfo[channel].address;
1257 return 0;
1258}
1259EXPORT_SYMBOL(ipmi_get_my_address);
1260
1261int ipmi_set_my_LUN(ipmi_user_t user,
1262 unsigned int channel,
1263 unsigned char LUN)
1264{
1265 if (channel >= IPMI_MAX_CHANNELS)
1266 return -EINVAL;
1267 user->intf->addrinfo[channel].lun = LUN & 0x3;
1268 return 0;
1269}
1270EXPORT_SYMBOL(ipmi_set_my_LUN);
1271
1272int ipmi_get_my_LUN(ipmi_user_t user,
1273 unsigned int channel,
1274 unsigned char *address)
1275{
1276 if (channel >= IPMI_MAX_CHANNELS)
1277 return -EINVAL;
1278 *address = user->intf->addrinfo[channel].lun;
1279 return 0;
1280}
1281EXPORT_SYMBOL(ipmi_get_my_LUN);
1282
1283int ipmi_get_maintenance_mode(ipmi_user_t user)
1284{
1285 int mode;
1286 unsigned long flags;
1287
1288 spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
1289 mode = user->intf->maintenance_mode;
1290 spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
1291
1292 return mode;
1293}
1294EXPORT_SYMBOL(ipmi_get_maintenance_mode);
1295
1296static void maintenance_mode_update(ipmi_smi_t intf)
1297{
1298 if (intf->handlers->set_maintenance_mode)
1299 intf->handlers->set_maintenance_mode(
1300 intf->send_info, intf->maintenance_mode_enable);
1301}
1302
1303int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
1304{
1305 int rv = 0;
1306 unsigned long flags;
1307 ipmi_smi_t intf = user->intf;
1308
1309 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
1310 if (intf->maintenance_mode != mode) {
1311 switch (mode) {
1312 case IPMI_MAINTENANCE_MODE_AUTO:
1313 intf->maintenance_mode_enable
1314 = (intf->auto_maintenance_timeout > 0);
1315 break;
1316
1317 case IPMI_MAINTENANCE_MODE_OFF:
1318 intf->maintenance_mode_enable = false;
1319 break;
1320
1321 case IPMI_MAINTENANCE_MODE_ON:
1322 intf->maintenance_mode_enable = true;
1323 break;
1324
1325 default:
1326 rv = -EINVAL;
1327 goto out_unlock;
1328 }
1329 intf->maintenance_mode = mode;
1330
1331 maintenance_mode_update(intf);
1332 }
1333 out_unlock:
1334 spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
1335
1336 return rv;
1337}
1338EXPORT_SYMBOL(ipmi_set_maintenance_mode);
1339
/*
 * Turn event delivery to this user on or off.  When turning it on,
 * any events already queued on the interface are delivered to the
 * user immediately.  Always returns 0.
 */
int ipmi_set_gets_events(ipmi_user_t user, bool val)
{
	unsigned long flags;
	ipmi_smi_t intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;

	INIT_LIST_HEAD(&msgs);

	spin_lock_irqsave(&intf->events_lock, flags);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	/* Keep the interface's waiter count in sync with the flag. */
	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	if (intf->delivering_events)
		/*
		 * Another thread is delivering events for this, so
		 * let it handle any new events.
		 */
		goto out;

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		/* Move the whole queue to a private list first. */
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev,
				 PFX "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		/*
		 * delivering_events guards against concurrent deliverers
		 * while the lock is dropped for the callbacks below.
		 */
		intf->delivering_events = 1;
		spin_unlock_irqrestore(&intf->events_lock, flags);

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			msg->user = user;
			kref_get(&user->refcount);
			deliver_response(msg);
		}

		spin_lock_irqsave(&intf->events_lock, flags);
		intf->delivering_events = 0;
	}

 out:
	spin_unlock_irqrestore(&intf->events_lock, flags);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);
1399
1400static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf,
1401 unsigned char netfn,
1402 unsigned char cmd,
1403 unsigned char chan)
1404{
1405 struct cmd_rcvr *rcvr;
1406
1407 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1408 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1409 && (rcvr->chans & (1 << chan)))
1410 return rcvr;
1411 }
1412 return NULL;
1413}
1414
1415static int is_cmd_rcvr_exclusive(ipmi_smi_t intf,
1416 unsigned char netfn,
1417 unsigned char cmd,
1418 unsigned int chans)
1419{
1420 struct cmd_rcvr *rcvr;
1421
1422 list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link) {
1423 if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
1424 && (rcvr->chans & chans))
1425 return 0;
1426 }
1427 return 1;
1428}
1429
1430int ipmi_register_for_cmd(ipmi_user_t user,
1431 unsigned char netfn,
1432 unsigned char cmd,
1433 unsigned int chans)
1434{
1435 ipmi_smi_t intf = user->intf;
1436 struct cmd_rcvr *rcvr;
1437 int rv = 0;
1438
1439
1440 rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
1441 if (!rcvr)
1442 return -ENOMEM;
1443 rcvr->cmd = cmd;
1444 rcvr->netfn = netfn;
1445 rcvr->chans = chans;
1446 rcvr->user = user;
1447
1448 mutex_lock(&intf->cmd_rcvrs_mutex);
1449 /* Make sure the command/netfn is not already registered. */
1450 if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
1451 rv = -EBUSY;
1452 goto out_unlock;
1453 }
1454
1455 if (atomic_inc_return(&intf->event_waiters) == 1)
1456 need_waiter(intf);
1457
1458 list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
1459
1460 out_unlock:
1461 mutex_unlock(&intf->cmd_rcvrs_mutex);
1462 if (rv)
1463 kfree(rcvr);
1464
1465 return rv;
1466}
1467EXPORT_SYMBOL(ipmi_register_for_cmd);
1468
/*
 * Remove 'user' as the receiver for (netfn, cmd) on the channels in
 * 'chans'.  Returns -ENOENT if no matching registration owned by this
 * user was found on any requested channel.
 */
int ipmi_unregister_for_cmd(ipmi_user_t user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	ipmi_smi_t intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				/* No channels left: unlink now, free after
				 * the RCU grace period below. */
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	/* Wait out all RCU readers before freeing the unlinked entries. */
	synchronize_rcu();
	while (rcvrs) {
		atomic_dec(&intf->event_waiters);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);
1507
/*
 * IPMB two's-complement checksum: the sum of all message bytes plus
 * the checksum byte must equal zero modulo 256.
 */
static unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char sum = 0;
	int i;

	for (i = 0; i < size; i++)
		sum += data[i];

	return -sum;
}
1518
/*
 * Build a Send Message command carrying an IPMB-framed request into
 * smi_msg->data.  'broadcast' must be 0 or 1; when 1, a leading zero
 * byte is inserted and every subsequent field shifts up by one (the
 * 'i' offset below).  Two checksums are emitted: one covering the
 * two-byte connection header, one covering the rest of the message.
 */
static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	/* Offset for the optional broadcast byte (0 or 1). */
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;	/* broadcast marker byte */
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	/* Checksum over the destination address/netfn pair. */
	smi_msg->data[i+5] = ipmb_checksum(&(smi_msg->data[i+3]), 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[i+9]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[i+6]),
				smi_msg->data_size-6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
1562
/*
 * Build a Send Message command carrying a LAN-framed request into
 * smi_msg->data.  Same framing idea as format_ipmb_msg() but with a
 * session handle and SWIDs instead of slave addresses, and no
 * broadcast offset.
 */
static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	/* Checksum over the remote SWID/netfn pair. */
	smi_msg->data[6] = ipmb_checksum(&(smi_msg->data[4]), 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&(smi_msg->data[10]), msg->data,
		       msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&(smi_msg->data[7]),
				smi_msg->data_size-7);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}
1601
1602static struct ipmi_smi_msg *smi_add_send_msg(ipmi_smi_t intf,
1603 struct ipmi_smi_msg *smi_msg,
1604 int priority)
1605{
1606 if (intf->curr_msg) {
1607 if (priority > 0)
1608 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1609 else
1610 list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1611 smi_msg = NULL;
1612 } else {
1613 intf->curr_msg = smi_msg;
1614 }
1615
1616 return smi_msg;
1617}
1618
1619
1620static void smi_send(ipmi_smi_t intf, const struct ipmi_smi_handlers *handlers,
1621 struct ipmi_smi_msg *smi_msg, int priority)
1622{
1623 int run_to_completion = intf->run_to_completion;
1624
1625 if (run_to_completion) {
1626 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1627 } else {
1628 unsigned long flags;
1629
1630 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1631 smi_msg = smi_add_send_msg(intf, smi_msg, priority);
1632 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1633 }
1634
1635 if (smi_msg)
1636 handlers->sender(intf->send_info, smi_msg);
1637}
1638
1639/*
1640 * Separate from ipmi_request so that the user does not have to be
1641 * supplied in certain circumstances (mainly at panic time). If
1642 * messages are supplied, they will be freed, even if an error
1643 * occurs.
1644 */
static int i_ipmi_request(ipmi_user_t user,
			  ipmi_smi_t intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  void *user_msg_data,
			  void *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int priority,
			  unsigned char source_address,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	int rv = 0;
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	unsigned long flags;

	/* Use caller-supplied message buffers when given, else allocate. */
	if (supplied_recv)
		recv_msg = supplied_recv;
	else {
		recv_msg = ipmi_alloc_recv_msg();
		if (recv_msg == NULL)
			return -ENOMEM;
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = (struct ipmi_smi_msg *) supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			ipmi_free_recv_msg(recv_msg);
			return -ENOMEM;
		}
	}

	rcu_read_lock();
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->user = user;
	if (user)
		kref_get(&user->refcount);
	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	/* Format smi_msg according to the destination address type. */
	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		/* Message to the local BMC over the system interface. */
		struct ipmi_system_interface_addr *smi_addr;

		if (msg->netfn & 1) {
			/* Responses are not allowed to the SMI. */
			rv = -EINVAL;
			goto out_err;
		}

		smi_addr = (struct ipmi_system_interface_addr *) addr;
		if (smi_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

		if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
		    && ((msg->cmd == IPMI_SEND_MSG_CMD)
			|| (msg->cmd == IPMI_GET_MSG_CMD)
			|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
			/*
			 * We don't let the user do these, since we manage
			 * the sequence numbers.
			 */
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		/* Reset/firmware commands put the interface into
		 * maintenance mode for a while. */
		if (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		      && ((msg->cmd == IPMI_COLD_RESET_CMD)
			  || (msg->cmd == IPMI_WARM_RESET_CMD)))
		     || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) {
			spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
			intf->auto_maintenance_timeout
				= IPMI_MAINTENANCE_MODE_TIMEOUT;
			if (!intf->maintenance_mode
			    && !intf->maintenance_mode_enable) {
				intf->maintenance_mode_enable = true;
				maintenance_mode_update(intf);
			}
			spin_unlock_irqrestore(&intf->maintenance_mode_lock,
					       flags);
		}

		if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
		smi_msg->data[1] = msg->cmd;
		smi_msg->msgid = msgid;
		smi_msg->user_data = recv_msg;
		if (msg->data_len > 0)
			memcpy(&(smi_msg->data[2]), msg->data, msg->data_len);
		smi_msg->data_size = msg->data_len + 2;
		ipmi_inc_stat(intf, sent_local_commands);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		/* IPMB destination: wrap in a Send Message command. */
		struct ipmi_ipmb_addr *ipmb_addr;
		unsigned char ipmb_seq;
		long seqid;
		int broadcast = 0;
		struct ipmi_channel *chans;

		if (addr->channel >= IPMI_MAX_CHANNELS) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		if (retries < 0) {
			if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)
				retries = 0; /* Don't retry broadcasts. */
			else
				retries = 4;
		}
		if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
			/*
			 * Broadcasts add a zero at the beginning of the
			 * message, but otherwise is the same as an IPMB
			 * address.
			 */
			addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			broadcast = 1;
		}


		/* Default to 1 second retries. */
		if (retry_time_ms == 0)
			retry_time_ms = 1000;

		/*
		 * 9 for the header and 1 for the checksum, plus
		 * possibly one for the broadcast.
		 */
		if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		ipmb_addr = (struct ipmi_ipmb_addr *) addr;
		if (ipmb_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

		if (recv_msg->msg.netfn & 0x1) {
			/*
			 * It's a response, so use the user's sequence
			 * from msgid.
			 */
			ipmi_inc_stat(intf, sent_ipmb_responses);
			format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
					msgid, broadcast,
					source_address, source_lun);

			/*
			 * Save the receive message so we can use it
			 * to deliver the response.
			 */
			smi_msg->user_data = recv_msg;
		} else {
			/* It's a command, so get a sequence for it. */

			spin_lock_irqsave(&(intf->seq_lock), flags);

			/*
			 * Create a sequence number with a 1 second
			 * timeout and 4 retries.
			 */
			rv = intf_next_seq(intf,
					   recv_msg,
					   retry_time_ms,
					   retries,
					   broadcast,
					   &ipmb_seq,
					   &seqid);
			if (rv) {
				/*
				 * We have used up all the sequence numbers,
				 * probably, so abort.
				 */
				spin_unlock_irqrestore(&(intf->seq_lock),
						       flags);
				goto out_err;
			}

			ipmi_inc_stat(intf, sent_ipmb_commands);

			/*
			 * Store the sequence number in the message,
			 * so that when the send message response
			 * comes back we can start the timer.
			 */
			format_ipmb_msg(smi_msg, msg, ipmb_addr,
					STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
					ipmb_seq, broadcast,
					source_address, source_lun);

			/*
			 * Copy the message into the recv message data, so we
			 * can retransmit it later if necessary.
			 */
			memcpy(recv_msg->msg_data, smi_msg->data,
			       smi_msg->data_size);
			recv_msg->msg.data = recv_msg->msg_data;
			recv_msg->msg.data_len = smi_msg->data_size;

			/*
			 * We don't unlock until here, because we need
			 * to copy the completed message into the
			 * recv_msg before we release the lock.
			 * Otherwise, race conditions may bite us.  I
			 * know that's pretty paranoid, but I prefer
			 * to be correct.
			 */
			spin_unlock_irqrestore(&(intf->seq_lock), flags);
		}
	} else if (is_lan_addr(addr)) {
		/* LAN destination: same Send Message wrapping as IPMB. */
		struct ipmi_lan_addr *lan_addr;
		unsigned char ipmb_seq;
		long seqid;
		struct ipmi_channel *chans;

		if (addr->channel >= IPMI_MAX_CHANNELS) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		if ((chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_8023LAN)
		    && (chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		retries = 4;

		/* Default to 1 second retries. */
		if (retry_time_ms == 0)
			retry_time_ms = 1000;

		/* 11 for the header and 1 for the checksum. */
		if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EMSGSIZE;
			goto out_err;
		}

		lan_addr = (struct ipmi_lan_addr *) addr;
		if (lan_addr->lun > 3) {
			ipmi_inc_stat(intf, sent_invalid_commands);
			rv = -EINVAL;
			goto out_err;
		}

		memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

		if (recv_msg->msg.netfn & 0x1) {
			/*
			 * It's a response, so use the user's sequence
			 * from msgid.
			 */
			ipmi_inc_stat(intf, sent_lan_responses);
			format_lan_msg(smi_msg, msg, lan_addr, msgid,
				       msgid, source_lun);

			/*
			 * Save the receive message so we can use it
			 * to deliver the response.
			 */
			smi_msg->user_data = recv_msg;
		} else {
			/* It's a command, so get a sequence for it. */

			spin_lock_irqsave(&(intf->seq_lock), flags);

			/*
			 * Create a sequence number with a 1 second
			 * timeout and 4 retries.
			 */
			rv = intf_next_seq(intf,
					   recv_msg,
					   retry_time_ms,
					   retries,
					   0,
					   &ipmb_seq,
					   &seqid);
			if (rv) {
				/*
				 * We have used up all the sequence numbers,
				 * probably, so abort.
				 */
				spin_unlock_irqrestore(&(intf->seq_lock),
						       flags);
				goto out_err;
			}

			ipmi_inc_stat(intf, sent_lan_commands);

			/*
			 * Store the sequence number in the message,
			 * so that when the send message response
			 * comes back we can start the timer.
			 */
			format_lan_msg(smi_msg, msg, lan_addr,
				       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				       ipmb_seq, source_lun);

			/*
			 * Copy the message into the recv message data, so we
			 * can retransmit it later if necessary.
			 */
			memcpy(recv_msg->msg_data, smi_msg->data,
			       smi_msg->data_size);
			recv_msg->msg.data = recv_msg->msg_data;
			recv_msg->msg.data_len = smi_msg->data_size;

			/*
			 * We don't unlock until here, because we need
			 * to copy the completed message into the
			 * recv_msg before we release the lock.
			 * Otherwise, race conditions may bite us.  I
			 * know that's pretty paranoid, but I prefer
			 * to be correct.
			 */
			spin_unlock_irqrestore(&(intf->seq_lock), flags);
		}
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
		goto out_err;
	}

#ifdef DEBUG_MSGING
	{
		int m;
		for (m = 0; m < smi_msg->data_size; m++)
			printk(" %2.2x", smi_msg->data[m]);
		printk("\n");
	}
#endif

	smi_send(intf, intf->handlers, smi_msg, priority);
	rcu_read_unlock();

	return 0;

 out_err:
	/* Per the contract above, both messages are freed on error. */
	rcu_read_unlock();
	ipmi_free_smi_msg(smi_msg);
	ipmi_free_recv_msg(recv_msg);
	return rv;
}
2036
2037static int check_addr(ipmi_smi_t intf,
2038 struct ipmi_addr *addr,
2039 unsigned char *saddr,
2040 unsigned char *lun)
2041{
2042 if (addr->channel >= IPMI_MAX_CHANNELS)
2043 return -EINVAL;
2044 *lun = intf->addrinfo[addr->channel].lun;
2045 *saddr = intf->addrinfo[addr->channel].address;
2046 return 0;
2047}
2048
2049int ipmi_request_settime(ipmi_user_t user,
2050 struct ipmi_addr *addr,
2051 long msgid,
2052 struct kernel_ipmi_msg *msg,
2053 void *user_msg_data,
2054 int priority,
2055 int retries,
2056 unsigned int retry_time_ms)
2057{
2058 unsigned char saddr = 0, lun = 0;
2059 int rv;
2060
2061 if (!user)
2062 return -EINVAL;
2063 rv = check_addr(user->intf, addr, &saddr, &lun);
2064 if (rv)
2065 return rv;
2066 return i_ipmi_request(user,
2067 user->intf,
2068 addr,
2069 msgid,
2070 msg,
2071 user_msg_data,
2072 NULL, NULL,
2073 priority,
2074 saddr,
2075 lun,
2076 retries,
2077 retry_time_ms);
2078}
2079EXPORT_SYMBOL(ipmi_request_settime);
2080
2081int ipmi_request_supply_msgs(ipmi_user_t user,
2082 struct ipmi_addr *addr,
2083 long msgid,
2084 struct kernel_ipmi_msg *msg,
2085 void *user_msg_data,
2086 void *supplied_smi,
2087 struct ipmi_recv_msg *supplied_recv,
2088 int priority)
2089{
2090 unsigned char saddr = 0, lun = 0;
2091 int rv;
2092
2093 if (!user)
2094 return -EINVAL;
2095 rv = check_addr(user->intf, addr, &saddr, &lun);
2096 if (rv)
2097 return rv;
2098 return i_ipmi_request(user,
2099 user->intf,
2100 addr,
2101 msgid,
2102 msg,
2103 user_msg_data,
2104 supplied_smi,
2105 supplied_recv,
2106 priority,
2107 saddr,
2108 lun,
2109 -1, 0);
2110}
2111EXPORT_SYMBOL(ipmi_request_supply_msgs);
2112
/*
 * null_user_handler installed while fetching the BMC device id (see
 * __get_device_id()).  Validates that the response really is a Get
 * Device ID response, demangles it into intf->bmc->fetch_id, and
 * wakes the waiter on intf->waitq.
 */
static void bmc_device_id_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	int rv;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
			|| (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
			|| (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
		dev_warn(intf->si_dev,
			 PFX "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
		return;
	}

	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
	if (rv) {
		dev_warn(intf->si_dev,
			 PFX "device id demangle failed: %d\n", rv);
		/* Signal failure to the waiter (dyn_id_set 2 -> 0). */
		intf->bmc->dyn_id_set = 0;
	} else {
		/*
		 * Make sure the id data is available before setting
		 * dyn_id_set.
		 */
		smp_wmb();
		intf->bmc->dyn_id_set = 1;
	}

	wake_up(&intf->waitq);
}
2143
2144static int
2145send_get_device_id_cmd(ipmi_smi_t intf)
2146{
2147 struct ipmi_system_interface_addr si;
2148 struct kernel_ipmi_msg msg;
2149
2150 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
2151 si.channel = IPMI_BMC_CHANNEL;
2152 si.lun = 0;
2153
2154 msg.netfn = IPMI_NETFN_APP_REQUEST;
2155 msg.cmd = IPMI_GET_DEVICE_ID_CMD;
2156 msg.data = NULL;
2157 msg.data_len = 0;
2158
2159 return i_ipmi_request(NULL,
2160 intf,
2161 (struct ipmi_addr *) &si,
2162 0,
2163 &msg,
2164 intf,
2165 NULL,
2166 NULL,
2167 0,
2168 intf->addrinfo[0].address,
2169 intf->addrinfo[0].lun,
2170 -1, 0);
2171}
2172
2173static int __get_device_id(ipmi_smi_t intf, struct bmc_device *bmc)
2174{
2175 int rv;
2176
2177 bmc->dyn_id_set = 2;
2178
2179 intf->null_user_handler = bmc_device_id_handler;
2180
2181 rv = send_get_device_id_cmd(intf);
2182 if (rv)
2183 return rv;
2184
2185 wait_event(intf->waitq, bmc->dyn_id_set != 2);
2186
2187 if (!bmc->dyn_id_set)
2188 rv = -EIO; /* Something went wrong in the fetch. */
2189
2190 /* dyn_id_set makes the id data available. */
2191 smp_rmb();
2192
2193 intf->null_user_handler = NULL;
2194
2195 return rv;
2196}
2197
2198/*
2199 * Fetch the device id for the bmc/interface. You must pass in either
2200 * bmc or intf, this code will get the other one. If the data has
2201 * been recently fetched, this will just use the cached data. Otherwise
2202 * it will run a new fetch.
2203 *
2204 * Except for the first time this is called (in ipmi_register_smi()),
2205 * this will always return good data;
2206 */
static int __bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
			       struct ipmi_device_id *id,
			       bool *guid_set, guid_t *guid, int intf_num)
{
	int rv = 0;
	int prev_dyn_id_set, prev_guid_set;
	bool intf_set = intf != NULL;

	/*
	 * Lock ordering is bmc_reg_mutex before dyn_mutex.  When only
	 * the bmc is given, pick its first interface, then reacquire
	 * in the right order and re-check that it is still first.
	 */
	if (!intf) {
		mutex_lock(&bmc->dyn_mutex);
retry_bmc_lock:
		if (list_empty(&bmc->intfs)) {
			mutex_unlock(&bmc->dyn_mutex);
			return -ENOENT;
		}
		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
					bmc_link);
		kref_get(&intf->refcount);
		mutex_unlock(&bmc->dyn_mutex);
		mutex_lock(&intf->bmc_reg_mutex);
		mutex_lock(&bmc->dyn_mutex);
		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
					     bmc_link)) {
			/* The interface list changed while unlocked; retry. */
			mutex_unlock(&intf->bmc_reg_mutex);
			kref_put(&intf->refcount, intf_free);
			goto retry_bmc_lock;
		}
	} else {
		mutex_lock(&intf->bmc_reg_mutex);
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		kref_get(&intf->refcount);
	}

	/* If we have a valid and current ID, just return that. */
	if (intf->in_bmc_register ||
	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
		goto out_noprocessing;

	prev_guid_set = bmc->dyn_guid_set;
	__get_guid(intf);

	prev_dyn_id_set = bmc->dyn_id_set;
	rv = __get_device_id(intf, bmc);
	if (rv)
		goto out;

	/*
	 * The guid, device id, manufacturer id, and product id should
	 * not change on a BMC.  If it does we have to do some dancing.
	 */
	if (!intf->bmc_registered
	    || (!prev_guid_set && bmc->dyn_guid_set)
	    || (!prev_dyn_id_set && bmc->dyn_id_set)
	    || (prev_guid_set && bmc->dyn_guid_set
		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
	    || bmc->id.device_id != bmc->fetch_id.device_id
	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
	    || bmc->id.product_id != bmc->fetch_id.product_id) {
		/* BMC identity changed: re-register under the new id. */
		struct ipmi_device_id id = bmc->fetch_id;
		int guid_set = bmc->dyn_guid_set;
		guid_t guid;

		guid = bmc->fetch_guid;
		mutex_unlock(&bmc->dyn_mutex);

		__ipmi_bmc_unregister(intf);
		/* Fill in the temporary BMC for good measure. */
		intf->bmc->id = id;
		intf->bmc->dyn_guid_set = guid_set;
		intf->bmc->guid = guid;
		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
			need_waiter(intf); /* Retry later on an error. */
		else
			__scan_channels(intf, &id);


		if (!intf_set) {
			/*
			 * We weren't given the interface on the
			 * command line, so restart the operation on
			 * the next interface for the BMC.
			 */
			mutex_unlock(&intf->bmc_reg_mutex);
			mutex_lock(&bmc->dyn_mutex);
			goto retry_bmc_lock;
		}

		/* We have a new BMC, set it up. */
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		goto out_noprocessing;
	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
		/* Version info changes, scan the channels again. */
		__scan_channels(intf, &bmc->fetch_id);

	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

out:
	if (rv && prev_dyn_id_set) {
		rv = 0; /* Ignore failures if we have previous data. */
		bmc->dyn_id_set = prev_dyn_id_set;
	}
	if (!rv) {
		/* Commit the freshly fetched data. */
		bmc->id = bmc->fetch_id;
		if (bmc->dyn_guid_set)
			bmc->guid = bmc->fetch_guid;
		else if (prev_guid_set)
			/*
			 * The guid used to be valid and it failed to fetch,
			 * just use the cached value.
			 */
			bmc->dyn_guid_set = prev_guid_set;
	}
out_noprocessing:
	/* Copy out whatever the caller asked for. */
	if (!rv) {
		if (id)
			*id = bmc->id;

		if (guid_set)
			*guid_set = bmc->dyn_guid_set;

		if (guid && bmc->dyn_guid_set)
			*guid = bmc->guid;
	}

	mutex_unlock(&bmc->dyn_mutex);
	mutex_unlock(&intf->bmc_reg_mutex);

	kref_put(&intf->refcount, intf_free);
	return rv;
}
2339
/*
 * Convenience wrapper for __bmc_get_device_id() with no specific
 * interface number (-1).  See __bmc_get_device_id() for the rules on
 * passing bmc vs. intf.
 */
static int bmc_get_device_id(ipmi_smi_t intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid)
{
	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
}
2346
2347#ifdef CONFIG_IPMI_PROC_INTERFACE
2348static int smi_ipmb_proc_show(struct seq_file *m, void *v)
2349{
2350 ipmi_smi_t intf = m->private;
2351 int i;
2352
2353 seq_printf(m, "%x", intf->addrinfo[0].address);
2354 for (i = 1; i < IPMI_MAX_CHANNELS; i++)
2355 seq_printf(m, " %x", intf->addrinfo[i].address);
2356 seq_putc(m, '\n');
2357
2358 return 0;
2359}
2360
/* Open hook: bind the interface (stashed in the PDE) to the show routine. */
static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_ipmb_proc_show, PDE_DATA(inode));
}
2365
/* File operations for the per-interface /proc "ipmb" file. */
static const struct file_operations smi_ipmb_proc_ops = {
	.open		= smi_ipmb_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2372
2373static int smi_version_proc_show(struct seq_file *m, void *v)
2374{
2375 ipmi_smi_t intf = m->private;
2376 struct ipmi_device_id id;
2377 int rv;
2378
2379 rv = bmc_get_device_id(intf, NULL, &id, NULL, NULL);
2380 if (rv)
2381 return rv;
2382
2383 seq_printf(m, "%u.%u\n",
2384 ipmi_version_major(&id),
2385 ipmi_version_minor(&id));
2386
2387 return 0;
2388}
2389
/* Open hook: bind the interface (stashed in the PDE) to the show routine. */
static int smi_version_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_version_proc_show, PDE_DATA(inode));
}
2394
/* File operations for the per-interface /proc "version" file. */
static const struct file_operations smi_version_proc_ops = {
	.open		= smi_version_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2401
2402static int smi_stats_proc_show(struct seq_file *m, void *v)
2403{
2404 ipmi_smi_t intf = m->private;
2405
2406 seq_printf(m, "sent_invalid_commands: %u\n",
2407 ipmi_get_stat(intf, sent_invalid_commands));
2408 seq_printf(m, "sent_local_commands: %u\n",
2409 ipmi_get_stat(intf, sent_local_commands));
2410 seq_printf(m, "handled_local_responses: %u\n",
2411 ipmi_get_stat(intf, handled_local_responses));
2412 seq_printf(m, "unhandled_local_responses: %u\n",
2413 ipmi_get_stat(intf, unhandled_local_responses));
2414 seq_printf(m, "sent_ipmb_commands: %u\n",
2415 ipmi_get_stat(intf, sent_ipmb_commands));
2416 seq_printf(m, "sent_ipmb_command_errs: %u\n",
2417 ipmi_get_stat(intf, sent_ipmb_command_errs));
2418 seq_printf(m, "retransmitted_ipmb_commands: %u\n",
2419 ipmi_get_stat(intf, retransmitted_ipmb_commands));
2420 seq_printf(m, "timed_out_ipmb_commands: %u\n",
2421 ipmi_get_stat(intf, timed_out_ipmb_commands));
2422 seq_printf(m, "timed_out_ipmb_broadcasts: %u\n",
2423 ipmi_get_stat(intf, timed_out_ipmb_broadcasts));
2424 seq_printf(m, "sent_ipmb_responses: %u\n",
2425 ipmi_get_stat(intf, sent_ipmb_responses));
2426 seq_printf(m, "handled_ipmb_responses: %u\n",
2427 ipmi_get_stat(intf, handled_ipmb_responses));
2428 seq_printf(m, "invalid_ipmb_responses: %u\n",
2429 ipmi_get_stat(intf, invalid_ipmb_responses));
2430 seq_printf(m, "unhandled_ipmb_responses: %u\n",
2431 ipmi_get_stat(intf, unhandled_ipmb_responses));
2432 seq_printf(m, "sent_lan_commands: %u\n",
2433 ipmi_get_stat(intf, sent_lan_commands));
2434 seq_printf(m, "sent_lan_command_errs: %u\n",
2435 ipmi_get_stat(intf, sent_lan_command_errs));
2436 seq_printf(m, "retransmitted_lan_commands: %u\n",
2437 ipmi_get_stat(intf, retransmitted_lan_commands));
2438 seq_printf(m, "timed_out_lan_commands: %u\n",
2439 ipmi_get_stat(intf, timed_out_lan_commands));
2440 seq_printf(m, "sent_lan_responses: %u\n",
2441 ipmi_get_stat(intf, sent_lan_responses));
2442 seq_printf(m, "handled_lan_responses: %u\n",
2443 ipmi_get_stat(intf, handled_lan_responses));
2444 seq_printf(m, "invalid_lan_responses: %u\n",
2445 ipmi_get_stat(intf, invalid_lan_responses));
2446 seq_printf(m, "unhandled_lan_responses: %u\n",
2447 ipmi_get_stat(intf, unhandled_lan_responses));
2448 seq_printf(m, "handled_commands: %u\n",
2449 ipmi_get_stat(intf, handled_commands));
2450 seq_printf(m, "invalid_commands: %u\n",
2451 ipmi_get_stat(intf, invalid_commands));
2452 seq_printf(m, "unhandled_commands: %u\n",
2453 ipmi_get_stat(intf, unhandled_commands));
2454 seq_printf(m, "invalid_events: %u\n",
2455 ipmi_get_stat(intf, invalid_events));
2456 seq_printf(m, "events: %u\n",
2457 ipmi_get_stat(intf, events));
2458 seq_printf(m, "failed rexmit LAN msgs: %u\n",
2459 ipmi_get_stat(intf, dropped_rexmit_lan_commands));
2460 seq_printf(m, "failed rexmit IPMB msgs: %u\n",
2461 ipmi_get_stat(intf, dropped_rexmit_ipmb_commands));
2462 return 0;
2463}
2464
/* Open hook: bind the interface (stashed in the PDE) to the show routine. */
static int smi_stats_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
}
2469
/* File operations for the per-interface /proc "stats" file. */
static const struct file_operations smi_stats_proc_ops = {
	.open		= smi_stats_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
2476
2477int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name,
2478 const struct file_operations *proc_ops,
2479 void *data)
2480{
2481 int rv = 0;
2482 struct proc_dir_entry *file;
2483 struct ipmi_proc_entry *entry;
2484
2485 /* Create a list element. */
2486 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2487 if (!entry)
2488 return -ENOMEM;
2489 entry->name = kstrdup(name, GFP_KERNEL);
2490 if (!entry->name) {
2491 kfree(entry);
2492 return -ENOMEM;
2493 }
2494
2495 file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data);
2496 if (!file) {
2497 kfree(entry->name);
2498 kfree(entry);
2499 rv = -ENOMEM;
2500 } else {
2501 mutex_lock(&smi->proc_entry_lock);
2502 /* Stick it on the list. */
2503 entry->next = smi->proc_entries;
2504 smi->proc_entries = entry;
2505 mutex_unlock(&smi->proc_entry_lock);
2506 }
2507
2508 return rv;
2509}
2510EXPORT_SYMBOL(ipmi_smi_add_proc_entry);
2511
2512static int add_proc_entries(ipmi_smi_t smi, int num)
2513{
2514 int rv = 0;
2515
2516 sprintf(smi->proc_dir_name, "%d", num);
2517 smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root);
2518 if (!smi->proc_dir)
2519 rv = -ENOMEM;
2520
2521 if (rv == 0)
2522 rv = ipmi_smi_add_proc_entry(smi, "stats",
2523 &smi_stats_proc_ops,
2524 smi);
2525
2526 if (rv == 0)
2527 rv = ipmi_smi_add_proc_entry(smi, "ipmb",
2528 &smi_ipmb_proc_ops,
2529 smi);
2530
2531 if (rv == 0)
2532 rv = ipmi_smi_add_proc_entry(smi, "version",
2533 &smi_version_proc_ops,
2534 smi);
2535
2536 return rv;
2537}
2538
2539static void remove_proc_entries(ipmi_smi_t smi)
2540{
2541 struct ipmi_proc_entry *entry;
2542
2543 mutex_lock(&smi->proc_entry_lock);
2544 while (smi->proc_entries) {
2545 entry = smi->proc_entries;
2546 smi->proc_entries = entry->next;
2547
2548 remove_proc_entry(entry->name, smi->proc_dir);
2549 kfree(entry->name);
2550 kfree(entry);
2551 }
2552 mutex_unlock(&smi->proc_entry_lock);
2553 remove_proc_entry(smi->proc_dir_name, proc_ipmi_root);
2554}
2555#endif /* CONFIG_IPMI_PROC_INTERFACE */
2556
2557static ssize_t device_id_show(struct device *dev,
2558 struct device_attribute *attr,
2559 char *buf)
2560{
2561 struct bmc_device *bmc = to_bmc_device(dev);
2562 struct ipmi_device_id id;
2563 int rv;
2564
2565 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2566 if (rv)
2567 return rv;
2568
2569 return snprintf(buf, 10, "%u\n", id.device_id);
2570}
2571static DEVICE_ATTR_RO(device_id);
2572
2573static ssize_t provides_device_sdrs_show(struct device *dev,
2574 struct device_attribute *attr,
2575 char *buf)
2576{
2577 struct bmc_device *bmc = to_bmc_device(dev);
2578 struct ipmi_device_id id;
2579 int rv;
2580
2581 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2582 if (rv)
2583 return rv;
2584
2585 return snprintf(buf, 10, "%u\n", (id.device_revision & 0x80) >> 7);
2586}
2587static DEVICE_ATTR_RO(provides_device_sdrs);
2588
2589static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2590 char *buf)
2591{
2592 struct bmc_device *bmc = to_bmc_device(dev);
2593 struct ipmi_device_id id;
2594 int rv;
2595
2596 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2597 if (rv)
2598 return rv;
2599
2600 return snprintf(buf, 20, "%u\n", id.device_revision & 0x0F);
2601}
2602static DEVICE_ATTR_RO(revision);
2603
2604static ssize_t firmware_revision_show(struct device *dev,
2605 struct device_attribute *attr,
2606 char *buf)
2607{
2608 struct bmc_device *bmc = to_bmc_device(dev);
2609 struct ipmi_device_id id;
2610 int rv;
2611
2612 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2613 if (rv)
2614 return rv;
2615
2616 return snprintf(buf, 20, "%u.%x\n", id.firmware_revision_1,
2617 id.firmware_revision_2);
2618}
2619static DEVICE_ATTR_RO(firmware_revision);
2620
2621static ssize_t ipmi_version_show(struct device *dev,
2622 struct device_attribute *attr,
2623 char *buf)
2624{
2625 struct bmc_device *bmc = to_bmc_device(dev);
2626 struct ipmi_device_id id;
2627 int rv;
2628
2629 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2630 if (rv)
2631 return rv;
2632
2633 return snprintf(buf, 20, "%u.%u\n",
2634 ipmi_version_major(&id),
2635 ipmi_version_minor(&id));
2636}
2637static DEVICE_ATTR_RO(ipmi_version);
2638
2639static ssize_t add_dev_support_show(struct device *dev,
2640 struct device_attribute *attr,
2641 char *buf)
2642{
2643 struct bmc_device *bmc = to_bmc_device(dev);
2644 struct ipmi_device_id id;
2645 int rv;
2646
2647 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2648 if (rv)
2649 return rv;
2650
2651 return snprintf(buf, 10, "0x%02x\n", id.additional_device_support);
2652}
2653static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
2654 NULL);
2655
2656static ssize_t manufacturer_id_show(struct device *dev,
2657 struct device_attribute *attr,
2658 char *buf)
2659{
2660 struct bmc_device *bmc = to_bmc_device(dev);
2661 struct ipmi_device_id id;
2662 int rv;
2663
2664 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2665 if (rv)
2666 return rv;
2667
2668 return snprintf(buf, 20, "0x%6.6x\n", id.manufacturer_id);
2669}
2670static DEVICE_ATTR_RO(manufacturer_id);
2671
2672static ssize_t product_id_show(struct device *dev,
2673 struct device_attribute *attr,
2674 char *buf)
2675{
2676 struct bmc_device *bmc = to_bmc_device(dev);
2677 struct ipmi_device_id id;
2678 int rv;
2679
2680 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2681 if (rv)
2682 return rv;
2683
2684 return snprintf(buf, 10, "0x%4.4x\n", id.product_id);
2685}
2686static DEVICE_ATTR_RO(product_id);
2687
2688static ssize_t aux_firmware_rev_show(struct device *dev,
2689 struct device_attribute *attr,
2690 char *buf)
2691{
2692 struct bmc_device *bmc = to_bmc_device(dev);
2693 struct ipmi_device_id id;
2694 int rv;
2695
2696 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2697 if (rv)
2698 return rv;
2699
2700 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2701 id.aux_firmware_revision[3],
2702 id.aux_firmware_revision[2],
2703 id.aux_firmware_revision[1],
2704 id.aux_firmware_revision[0]);
2705}
2706static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2707
2708static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2709 char *buf)
2710{
2711 struct bmc_device *bmc = to_bmc_device(dev);
2712 bool guid_set;
2713 guid_t guid;
2714 int rv;
2715
2716 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
2717 if (rv)
2718 return rv;
2719 if (!guid_set)
2720 return -ENOENT;
2721
2722 return snprintf(buf, 38, "%pUl\n", guid.b);
2723}
2724static DEVICE_ATTR_RO(guid);
2725
/*
 * All sysfs attributes exported for a BMC platform device.  Visibility
 * of aux_firmware_revision and guid is filtered at runtime by
 * bmc_dev_attr_is_visible().
 */
static struct attribute *bmc_dev_attrs[] = {
	&dev_attr_device_id.attr,
	&dev_attr_provides_device_sdrs.attr,
	&dev_attr_revision.attr,
	&dev_attr_firmware_revision.attr,
	&dev_attr_ipmi_version.attr,
	&dev_attr_additional_device_support.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_aux_firmware_revision.attr,
	&dev_attr_guid.attr,
	NULL
};
2739
2740static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
2741 struct attribute *attr, int idx)
2742{
2743 struct device *dev = kobj_to_dev(kobj);
2744 struct bmc_device *bmc = to_bmc_device(dev);
2745 umode_t mode = attr->mode;
2746 int rv;
2747
2748 if (attr == &dev_attr_aux_firmware_revision.attr) {
2749 struct ipmi_device_id id;
2750
2751 rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
2752 return (!rv && id.aux_firmware_revision_set) ? mode : 0;
2753 }
2754 if (attr == &dev_attr_guid.attr) {
2755 bool guid_set;
2756
2757 rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
2758 return (!rv && guid_set) ? mode : 0;
2759 }
2760 return mode;
2761}
2762
/* Attribute group tying the BMC attributes to the visibility filter. */
static const struct attribute_group bmc_dev_attr_group = {
	.attrs		= bmc_dev_attrs,
	.is_visible	= bmc_dev_attr_is_visible,
};

/* NULL-terminated group list as required by struct device_type. */
static const struct attribute_group *bmc_dev_attr_groups[] = {
	&bmc_dev_attr_group,
	NULL
};

/* Device type for BMC platform devices; also used by the __find_bmc_*
 * match functions to recognize BMC devices on the driver's list. */
static const struct device_type bmc_device_type = {
	.groups		= bmc_dev_attr_groups,
};
2776
2777static int __find_bmc_guid(struct device *dev, void *data)
2778{
2779 guid_t *guid = data;
2780 struct bmc_device *bmc;
2781 int rv;
2782
2783 if (dev->type != &bmc_device_type)
2784 return 0;
2785
2786 bmc = to_bmc_device(dev);
2787 rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
2788 if (rv)
2789 rv = kref_get_unless_zero(&bmc->usecount);
2790 return rv;
2791}
2792
2793/*
2794 * Returns with the bmc's usecount incremented, if it is non-NULL.
2795 */
2796static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2797 guid_t *guid)
2798{
2799 struct device *dev;
2800 struct bmc_device *bmc = NULL;
2801
2802 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2803 if (dev) {
2804 bmc = to_bmc_device(dev);
2805 put_device(dev);
2806 }
2807 return bmc;
2808}
2809
/* Lookup key for matching a BMC by product id + device id pair. */
struct prod_dev_id {
	unsigned int  product_id;
	unsigned char device_id;
};
2814
2815static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2816{
2817 struct prod_dev_id *cid = data;
2818 struct bmc_device *bmc;
2819 int rv;
2820
2821 if (dev->type != &bmc_device_type)
2822 return 0;
2823
2824 bmc = to_bmc_device(dev);
2825 rv = (bmc->id.product_id == cid->product_id
2826 && bmc->id.device_id == cid->device_id);
2827 if (rv)
2828 rv = kref_get_unless_zero(&bmc->usecount);
2829 return rv;
2830}
2831
2832/*
2833 * Returns with the bmc's usecount incremented, if it is non-NULL.
2834 */
2835static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2836 struct device_driver *drv,
2837 unsigned int product_id, unsigned char device_id)
2838{
2839 struct prod_dev_id id = {
2840 .product_id = product_id,
2841 .device_id = device_id,
2842 };
2843 struct device *dev;
2844 struct bmc_device *bmc = NULL;
2845
2846 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2847 if (dev) {
2848 bmc = to_bmc_device(dev);
2849 put_device(dev);
2850 }
2851 return bmc;
2852}
2853
/* Allocator for unique "ipmi_bmc" platform device ids. */
static DEFINE_IDA(ipmi_bmc_ida);
2855
/*
 * Device-core release callback: frees the containing bmc_device once
 * the last reference to its embedded platform device is dropped.
 */
static void
release_bmc_device(struct device *dev)
{
	kfree(to_bmc_device(dev));
}
2861
/*
 * Deferred BMC teardown (scheduled from cleanup_bmc_device()):
 * unregister the platform device and return its id to the IDA.
 */
static void cleanup_bmc_work(struct work_struct *work)
{
	struct bmc_device *bmc = container_of(work, struct bmc_device,
					      remove_work);
	int id = bmc->pdev.id; /* Unregister overwrites id */

	platform_device_unregister(&bmc->pdev);
	ida_simple_remove(&ipmi_bmc_ida, id);
}
2871
/*
 * kref release callback for a bmc_device's usecount.  The actual
 * teardown is punted to a workqueue (see comment below).
 */
static void
cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);

	/*
	 * Remove the platform device in a work queue to avoid issues
	 * with removing the device attributes while reading a device
	 * attribute.
	 */
	schedule_work(&bmc->remove_work);
}
2884
2885/*
2886 * Must be called with intf->bmc_reg_mutex held.
2887 */
2888static void __ipmi_bmc_unregister(ipmi_smi_t intf)
2889{
2890 struct bmc_device *bmc = intf->bmc;
2891
2892 if (!intf->bmc_registered)
2893 return;
2894
2895 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2896 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2897 kfree(intf->my_dev_name);
2898 intf->my_dev_name = NULL;
2899
2900 mutex_lock(&bmc->dyn_mutex);
2901 list_del(&intf->bmc_link);
2902 mutex_unlock(&bmc->dyn_mutex);
2903 intf->bmc = &intf->tmp_bmc;
2904 kref_put(&bmc->usecount, cleanup_bmc_device);
2905 intf->bmc_registered = false;
2906}
2907
/* Locked wrapper around __ipmi_bmc_unregister(). */
static void ipmi_bmc_unregister(ipmi_smi_t intf)
{
	mutex_lock(&intf->bmc_reg_mutex);
	__ipmi_bmc_unregister(intf);
	mutex_unlock(&intf->bmc_reg_mutex);
}
2914
2915/*
2916 * Must be called with intf->bmc_reg_mutex held.
2917 */
2918static int __ipmi_bmc_register(ipmi_smi_t intf,
2919 struct ipmi_device_id *id,
2920 bool guid_set, guid_t *guid, int intf_num)
2921{
2922 int rv;
2923 struct bmc_device *bmc;
2924 struct bmc_device *old_bmc;
2925
2926 /*
2927 * platform_device_register() can cause bmc_reg_mutex to
2928 * be claimed because of the is_visible functions of
2929 * the attributes. Eliminate possible recursion and
2930 * release the lock.
2931 */
2932 intf->in_bmc_register = true;
2933 mutex_unlock(&intf->bmc_reg_mutex);
2934
2935 /*
2936 * Try to find if there is an bmc_device struct
2937 * representing the interfaced BMC already
2938 */
2939 mutex_lock(&ipmidriver_mutex);
2940 if (guid_set)
2941 old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
2942 else
2943 old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2944 id->product_id,
2945 id->device_id);
2946
2947 /*
2948 * If there is already an bmc_device, free the new one,
2949 * otherwise register the new BMC device
2950 */
2951 if (old_bmc) {
2952 bmc = old_bmc;
2953 /*
2954 * Note: old_bmc already has usecount incremented by
2955 * the BMC find functions.
2956 */
2957 intf->bmc = old_bmc;
2958 mutex_lock(&bmc->dyn_mutex);
2959 list_add_tail(&intf->bmc_link, &bmc->intfs);
2960 mutex_unlock(&bmc->dyn_mutex);
2961
2962 dev_info(intf->si_dev,
2963 "ipmi: interfacing existing BMC (man_id: 0x%6.6x,"
2964 " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
2965 bmc->id.manufacturer_id,
2966 bmc->id.product_id,
2967 bmc->id.device_id);
2968 } else {
2969 bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
2970 if (!bmc) {
2971 rv = -ENOMEM;
2972 goto out;
2973 }
2974 INIT_LIST_HEAD(&bmc->intfs);
2975 mutex_init(&bmc->dyn_mutex);
2976 INIT_WORK(&bmc->remove_work, cleanup_bmc_work);
2977
2978 bmc->id = *id;
2979 bmc->dyn_id_set = 1;
2980 bmc->dyn_guid_set = guid_set;
2981 bmc->guid = *guid;
2982 bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;
2983
2984 bmc->pdev.name = "ipmi_bmc";
2985
2986 rv = ida_simple_get(&ipmi_bmc_ida, 0, 0, GFP_KERNEL);
2987 if (rv < 0)
2988 goto out;
2989 bmc->pdev.dev.driver = &ipmidriver.driver;
2990 bmc->pdev.id = rv;
2991 bmc->pdev.dev.release = release_bmc_device;
2992 bmc->pdev.dev.type = &bmc_device_type;
2993 kref_init(&bmc->usecount);
2994
2995 intf->bmc = bmc;
2996 mutex_lock(&bmc->dyn_mutex);
2997 list_add_tail(&intf->bmc_link, &bmc->intfs);
2998 mutex_unlock(&bmc->dyn_mutex);
2999
3000 rv = platform_device_register(&bmc->pdev);
3001 if (rv) {
3002 dev_err(intf->si_dev,
3003 PFX " Unable to register bmc device: %d\n",
3004 rv);
3005 goto out_list_del;
3006 }
3007
3008 dev_info(intf->si_dev,
3009 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
3010 bmc->id.manufacturer_id,
3011 bmc->id.product_id,
3012 bmc->id.device_id);
3013 }
3014
3015 /*
3016 * create symlink from system interface device to bmc device
3017 * and back.
3018 */
3019 rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
3020 if (rv) {
3021 dev_err(intf->si_dev,
3022 PFX "Unable to create bmc symlink: %d\n", rv);
3023 goto out_put_bmc;
3024 }
3025
3026 if (intf_num == -1)
3027 intf_num = intf->intf_num;
3028 intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
3029 if (!intf->my_dev_name) {
3030 rv = -ENOMEM;
3031 dev_err(intf->si_dev,
3032 PFX "Unable to allocate link from BMC: %d\n", rv);
3033 goto out_unlink1;
3034 }
3035
3036 rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
3037 intf->my_dev_name);
3038 if (rv) {
3039 kfree(intf->my_dev_name);
3040 intf->my_dev_name = NULL;
3041 dev_err(intf->si_dev,
3042 PFX "Unable to create symlink to bmc: %d\n", rv);
3043 goto out_free_my_dev_name;
3044 }
3045
3046 intf->bmc_registered = true;
3047
3048out:
3049 mutex_unlock(&ipmidriver_mutex);
3050 mutex_lock(&intf->bmc_reg_mutex);
3051 intf->in_bmc_register = false;
3052 return rv;
3053
3054
3055out_free_my_dev_name:
3056 kfree(intf->my_dev_name);
3057 intf->my_dev_name = NULL;
3058
3059out_unlink1:
3060 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
3061
3062out_put_bmc:
3063 mutex_lock(&bmc->dyn_mutex);
3064 list_del(&intf->bmc_link);
3065 mutex_unlock(&bmc->dyn_mutex);
3066 intf->bmc = &intf->tmp_bmc;
3067 kref_put(&bmc->usecount, cleanup_bmc_device);
3068 goto out;
3069
3070out_list_del:
3071 mutex_lock(&bmc->dyn_mutex);
3072 list_del(&intf->bmc_link);
3073 mutex_unlock(&bmc->dyn_mutex);
3074 intf->bmc = &intf->tmp_bmc;
3075 put_device(&bmc->pdev.dev);
3076 goto out;
3077}
3078
3079static int
3080send_guid_cmd(ipmi_smi_t intf, int chan)
3081{
3082 struct kernel_ipmi_msg msg;
3083 struct ipmi_system_interface_addr si;
3084
3085 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3086 si.channel = IPMI_BMC_CHANNEL;
3087 si.lun = 0;
3088
3089 msg.netfn = IPMI_NETFN_APP_REQUEST;
3090 msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
3091 msg.data = NULL;
3092 msg.data_len = 0;
3093 return i_ipmi_request(NULL,
3094 intf,
3095 (struct ipmi_addr *) &si,
3096 0,
3097 &msg,
3098 intf,
3099 NULL,
3100 NULL,
3101 0,
3102 intf->addrinfo[0].address,
3103 intf->addrinfo[0].lun,
3104 -1, 0);
3105}
3106
/*
 * Null-user handler for the Get Device GUID response.  Stores the
 * fetched GUID in bmc->fetch_guid and moves dyn_guid_set from 2
 * ("fetch in progress", set by __get_guid()) to 1 (valid) or
 * 0 (no GUID), then wakes the waiter in __get_guid().
 */
static void guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	struct bmc_device *bmc = intf->bmc;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		bmc->dyn_guid_set = 0;
		goto out;
	}

	/* Need the completion code plus 16 GUID bytes. */
	if (msg->msg.data_len < 17) {
		bmc->dyn_guid_set = 0;
		dev_warn(intf->si_dev,
			 PFX "The GUID response from the BMC was too short, it was %d but should have been 17. Assuming GUID is not available.\n",
			 msg->msg.data_len);
		goto out;
	}

	memcpy(bmc->fetch_guid.b, msg->msg.data + 1, 16);
	/*
	 * Make sure the guid data is available before setting
	 * dyn_guid_set.
	 */
	smp_wmb();
	bmc->dyn_guid_set = 1;
 out:
	wake_up(&intf->waitq);
}
3141
/*
 * Synchronously fetch the BMC's GUID.  Marks the fetch in progress
 * (dyn_guid_set == 2), sends the command, and sleeps until
 * guid_handler() resolves the state to 1 (valid) or 0 (unavailable).
 */
static void __get_guid(ipmi_smi_t intf)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;

	/* 2 == fetch in progress; guid_handler() moves it to 0 or 1. */
	bmc->dyn_guid_set = 2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		bmc->dyn_guid_set = 0;

	wait_event(intf->waitq, bmc->dyn_guid_set != 2);

	/* dyn_guid_set makes the guid data available. */
	smp_rmb();

	intf->null_user_handler = NULL;
}
3161
3162static int
3163send_channel_info_cmd(ipmi_smi_t intf, int chan)
3164{
3165 struct kernel_ipmi_msg msg;
3166 unsigned char data[1];
3167 struct ipmi_system_interface_addr si;
3168
3169 si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
3170 si.channel = IPMI_BMC_CHANNEL;
3171 si.lun = 0;
3172
3173 msg.netfn = IPMI_NETFN_APP_REQUEST;
3174 msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
3175 msg.data = data;
3176 msg.data_len = 1;
3177 data[0] = chan;
3178 return i_ipmi_request(NULL,
3179 intf,
3180 (struct ipmi_addr *) &si,
3181 0,
3182 &msg,
3183 intf,
3184 NULL,
3185 NULL,
3186 0,
3187 intf->addrinfo[0].address,
3188 intf->addrinfo[0].lun,
3189 -1, 0);
3190}
3191
/*
 * Null-user handler driving the channel scan started by
 * __scan_channels().  Records medium/protocol for the current channel
 * into the working channel set, then either requests the next channel
 * or publishes the completed set and wakes the waiter.
 */
static void
channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;
	int ch;
	unsigned int set = intf->curr_working_cset;
	struct ipmi_channel *chans;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */

			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/*
				 * If the MC does not support this
				 * command, that is legal. We just
				 * assume it has one IPMB at channel
				 * zero.
				 */
				intf->wchannels[set].c[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->wchannels[set].c[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;

				intf->channel_list = intf->wchannels + set;
				intf->channels_ready = true;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		/* Bytes 2/3 of the response carry medium and protocol. */
		ch = intf->curr_channel;
		chans = intf->wchannels[set].c;
		chans[ch].medium = msg->msg.data[2] & 0x7f;
		chans[ch].protocol = msg->msg.data[3] & 0x1f;

	next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		} else {
			/*
			 * NOTE(review): the partially-filled set is
			 * published before the next request is sent —
			 * presumably intentional so partial results are
			 * usable; confirm before restructuring.
			 */
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			rv = send_channel_info_cmd(intf, intf->curr_channel);
		}

		if (rv) {
			/* Got an error somehow, just give up. */
			dev_warn(intf->si_dev,
				 PFX "Error sending channel information for channel %d: %d\n",
				 intf->curr_channel, rv);

			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		}
	}
 out:
	return;
}
3261
3262/*
3263 * Must be holding intf->bmc_reg_mutex to call this.
3264 */
3265static int __scan_channels(ipmi_smi_t intf, struct ipmi_device_id *id)
3266{
3267 int rv;
3268
3269 if (ipmi_version_major(id) > 1
3270 || (ipmi_version_major(id) == 1
3271 && ipmi_version_minor(id) >= 5)) {
3272 unsigned int set;
3273
3274 /*
3275 * Start scanning the channels to see what is
3276 * available.
3277 */
3278 set = !intf->curr_working_cset;
3279 intf->curr_working_cset = set;
3280 memset(&intf->wchannels[set], 0,
3281 sizeof(struct ipmi_channel_set));
3282
3283 intf->null_user_handler = channel_handler;
3284 intf->curr_channel = 0;
3285 rv = send_channel_info_cmd(intf, 0);
3286 if (rv) {
3287 dev_warn(intf->si_dev,
3288 "Error sending channel information for channel 0, %d\n",
3289 rv);
3290 return -EIO;
3291 }
3292
3293 /* Wait for the channel info to be read. */
3294 wait_event(intf->waitq, intf->channels_ready);
3295 intf->null_user_handler = NULL;
3296 } else {
3297 unsigned int set = intf->curr_working_cset;
3298
3299 /* Assume a single IPMB channel at zero. */
3300 intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
3301 intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
3302 intf->channel_list = intf->wchannels + set;
3303 intf->channels_ready = true;
3304 }
3305
3306 return 0;
3307}
3308
/* Poll the low-level driver (if it supports polling), then process
 * anything that arrived as a result. */
static void ipmi_poll(ipmi_smi_t intf)
{
	if (intf->handlers->poll)
		intf->handlers->poll(intf->send_info);
	/* In case something came in */
	handle_new_recv_msgs(intf);
}
3316
/* Public entry point: poll the interface owning this user. */
void ipmi_poll_interface(ipmi_user_t user)
{
	ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);
3322
/*
 * Deferred BMC re-registration (scheduled via intf->bmc_reg_work):
 * re-fetch the device id unless the interface is shutting down, then
 * drop the interface reference taken when the work was queued.
 */
static void redo_bmc_reg(struct work_struct *work)
{
	ipmi_smi_t intf = container_of(work, struct ipmi_smi, bmc_reg_work);

	if (!intf->in_shutdown)
		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);

	kref_put(&intf->refcount, intf_free);
}
3332
/*
 * Register a new low-level (SMI) interface with the message handler.
 * Allocates and initializes the per-interface state, inserts the
 * interface into the RCU-protected global list at the lowest free
 * interface number, starts the low-level driver, fetches the BMC
 * device id, scans channels and (optionally) creates /proc entries.
 * On any failure everything is torn back down and an errno returned.
 */
int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
		      void		       *send_info,
		      struct device            *si_dev,
		      unsigned char            slave_addr)
{
	int              i, j;
	int              rv;
	ipmi_smi_t       intf;
	ipmi_smi_t       tintf;
	struct list_head *link;
	struct ipmi_device_id id;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	if (!initialized) {
		rv = ipmi_init_msghandler();
		if (rv)
			return rv;
		/*
		 * The init code doesn't return an error if it was turned
		 * off, but it won't initialize.  Check that.
		 */
		if (!initialized)
			return -ENODEV;
	}

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	/* Use the embedded placeholder BMC until registration succeeds. */
	intf->bmc = &intf->tmp_bmc;
	INIT_LIST_HEAD(&intf->bmc->intfs);
	mutex_init(&intf->bmc->dyn_mutex);
	INIT_LIST_HEAD(&intf->bmc_link);
	mutex_init(&intf->bmc_reg_mutex);
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
	intf->si_dev = si_dev;
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->addrinfo[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->addrinfo[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->users);
	intf->handlers = handlers;
	intf->send_info = send_info;
	spin_lock_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
#ifdef CONFIG_IPMI_PROC_INTERFACE
	mutex_init(&intf->proc_entry_lock);
#endif
	spin_lock_init(&intf->waiting_rcv_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
	tasklet_init(&intf->recv_tasklet,
		     smi_recv_tasklet,
		     (unsigned long) intf);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->xmit_msgs_lock);
	INIT_LIST_HEAD(&intf->xmit_msgs);
	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
	spin_lock_init(&intf->events_lock);
	atomic_set(&intf->event_waiters, 0);
	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

#ifdef CONFIG_IPMI_PROC_INTERFACE
	intf->proc_dir = NULL;
#endif

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry_rcu(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add_rcu(&intf->link, &ipmi_interfaces);
	else
		list_add_tail_rcu(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out;

	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
	if (rv) {
		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
		goto out;
	}

	mutex_lock(&intf->bmc_reg_mutex);
	rv = __scan_channels(intf, &id);
	mutex_unlock(&intf->bmc_reg_mutex);
	if (rv)
		goto out;

#ifdef CONFIG_IPMI_PROC_INTERFACE
	rv = add_proc_entries(intf, i);
#endif

 out:
	if (rv) {
		/* Tear down everything set up above, in reverse. */
		ipmi_bmc_unregister(intf);
#ifdef CONFIG_IPMI_PROC_INTERFACE
		if (intf->proc_dir)
			remove_proc_entries(intf);
#endif
		intf->handlers = NULL;
		list_del_rcu(&intf->link);
		mutex_unlock(&ipmi_interfaces_mutex);
		mutex_unlock(&smi_watchers_mutex);
		/* Wait for RCU readers before dropping the last ref. */
		synchronize_rcu();
		kref_put(&intf->refcount, intf_free);
	} else {
		/*
		 * Keep memory order straight for RCU readers.  Make
		 * sure everything else is committed to memory before
		 * setting intf_num to mark the interface valid.
		 */
		smp_wmb();
		intf->intf_num = i;
		mutex_unlock(&ipmi_interfaces_mutex);
		/* After this point the interface is legal to use. */
		call_smi_watchers(i, intf->si_dev);
		mutex_unlock(&smi_watchers_mutex);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_register_smi);
3485
3486static void deliver_smi_err_response(ipmi_smi_t intf,
3487 struct ipmi_smi_msg *msg,
3488 unsigned char err)
3489{
3490 msg->rsp[0] = msg->data[0] | 4;
3491 msg->rsp[1] = msg->data[1];
3492 msg->rsp[2] = err;
3493 msg->rsp_size = 3;
3494 /* It's an error, so it will never requeue, no need to check return. */
3495 handle_one_recv_msg(intf, msg);
3496}
3497
/*
 * Flush all SMI messages pending on a downed interface.
 *
 * The transmit queues are spliced off and every held message gets an
 * IPMI_ERR_UNSPECIFIED error response; the same error is then
 * delivered for each sequence-table entry still waiting on a remote
 * response.  The caller must have already taken the interface down
 * (in_shutdown), which is why no locks are needed below.
 */
static void cleanup_smi_msgs(ipmi_smi_t intf)
{
	int i;
	struct seq_table *ent;
	struct ipmi_smi_msg *msg;
	struct list_head *entry;
	struct list_head tmplist;

	/* Clear out our transmit queues and hold the messages. */
	INIT_LIST_HEAD(&tmplist);
	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
	list_splice_tail(&intf->xmit_msgs, &tmplist);

	/* Current message first, to preserve order */
	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
		/*
		 * Wait for the message to clear out.
		 * NOTE(review): schedule_timeout() without first setting
		 * the task state acts as a plain yield here — confirm
		 * that is the intent.
		 */
		schedule_timeout(1);
	}

	/* No need for locks, the interface is down. */

	/*
	 * Return errors for all pending messages in queue and in the
	 * tables waiting for remote responses.
	 */
	while (!list_empty(&tmplist)) {
		entry = tmplist.next;
		list_del(entry);
		msg = list_entry(entry, struct ipmi_smi_msg, link);
		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
	}

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &(intf->seq_table[i]);
		if (!ent->inuse)
			continue;
		deliver_err_response(ent->recv_msg, IPMI_ERR_UNSPECIFIED);
	}
}
3537
/*
 * Tear down a registered SMI interface.
 *
 * The interface is first made invisible (intf_num set to -1, removed
 * from the RCU-protected interface list) and marked in_shutdown, then
 * pending messages are flushed, per-user references on the lower layer
 * are released, proc/BMC state is removed, and finally the registered
 * watchers are told the interface is gone.  The lock order (watchers
 * mutex outside interfaces mutex) matches ipmi_register_smi().
 *
 * Always returns 0.
 */
int ipmi_unregister_smi(ipmi_smi_t intf)
{
	struct ipmi_smi_watcher *w;
	int intf_num = intf->intf_num;
	ipmi_user_t user;

	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	intf->intf_num = -1;
	intf->in_shutdown = true;
	list_del_rcu(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	/* Wait out any RCU readers still traversing the interface list. */
	synchronize_rcu();

	cleanup_smi_msgs(intf);

	/* Clean up the effects of users on the lower-level software. */
	mutex_lock(&ipmi_interfaces_mutex);
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		/* Drop the module/usecount reference each user holds. */
		module_put(intf->handlers->owner);
		if (intf->handlers->dec_usecount)
			intf->handlers->dec_usecount(intf->send_info);
	}
	rcu_read_unlock();
	intf->handlers = NULL;
	mutex_unlock(&ipmi_interfaces_mutex);

#ifdef CONFIG_IPMI_PROC_INTERFACE
	remove_proc_entries(intf);
#endif
	ipmi_bmc_unregister(intf);

	/*
	 * Call all the watcher interfaces to tell them that
	 * an interface is gone.
	 */
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	kref_put(&intf->refcount, intf_free);
	return 0;
}
EXPORT_SYMBOL(ipmi_unregister_smi);
3583
3584static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf,
3585 struct ipmi_smi_msg *msg)
3586{
3587 struct ipmi_ipmb_addr ipmb_addr;
3588 struct ipmi_recv_msg *recv_msg;
3589
3590 /*
3591 * This is 11, not 10, because the response must contain a
3592 * completion code.
3593 */
3594 if (msg->rsp_size < 11) {
3595 /* Message not big enough, just ignore it. */
3596 ipmi_inc_stat(intf, invalid_ipmb_responses);
3597 return 0;
3598 }
3599
3600 if (msg->rsp[2] != 0) {
3601 /* An error getting the response, just ignore it. */
3602 return 0;
3603 }
3604
3605 ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
3606 ipmb_addr.slave_addr = msg->rsp[6];
3607 ipmb_addr.channel = msg->rsp[3] & 0x0f;
3608 ipmb_addr.lun = msg->rsp[7] & 3;
3609
3610 /*
3611 * It's a response from a remote entity. Look up the sequence
3612 * number and handle the response.
3613 */
3614 if (intf_find_seq(intf,
3615 msg->rsp[7] >> 2,
3616 msg->rsp[3] & 0x0f,
3617 msg->rsp[8],
3618 (msg->rsp[4] >> 2) & (~1),
3619 (struct ipmi_addr *) &(ipmb_addr),
3620 &recv_msg)) {
3621 /*
3622 * We were unable to find the sequence number,
3623 * so just nuke the message.
3624 */
3625 ipmi_inc_stat(intf, unhandled_ipmb_responses);
3626 return 0;
3627 }
3628
3629 memcpy(recv_msg->msg_data,
3630 &(msg->rsp[9]),
3631 msg->rsp_size - 9);
3632 /*
3633 * The other fields matched, so no need to set them, except
3634 * for netfn, which needs to be the response that was
3635 * returned, not the request value.
3636 */
3637 recv_msg->msg.netfn = msg->rsp[4] >> 2;
3638 recv_msg->msg.data = recv_msg->msg_data;
3639 recv_msg->msg.data_len = msg->rsp_size - 10;
3640 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3641 ipmi_inc_stat(intf, handled_ipmb_responses);
3642 deliver_response(recv_msg);
3643
3644 return 0;
3645}
3646
/*
 * Handle a command sent to us over IPMB by another entity on the bus.
 * If a user has registered for the (netfn, cmd, channel) triple the
 * command is delivered to it; otherwise an "invalid command" error
 * response is sent back over IPMB.
 *
 * Returns 0 if the SMI message may be freed, 1 to requeue it for
 * later handling (allocation failure), or -1 if the message was
 * reused to carry the error response and must be neither freed nor
 * requeued.
 */
static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_ipmb_addr *ipmb_addr;
	struct ipmi_recv_msg *recv_msg;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/* Pull the routing triple out of the IPMB framing. */
	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		/* Pin the user so it cannot vanish during delivery. */
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Build an "invalid command" IPMB response in place,
		 * wrapped in a Send Message command back to the BMC.
		 */
		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&(msg->data[3]), 2);
		msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&(msg->data[6]), 4);
		msg->data_size = 11;

#ifdef DEBUG_MSGING
		{
			int m;
			printk("Invalid command:");
			for (m = 0; m < msg->data_size; m++)
				printk(" %2.2x", msg->data[m]);
			printk("\n");
		}
#endif
		rcu_read_lock();
		if (!intf->in_shutdown) {
			smi_send(intf, intf->handlers, msg, 0);
			/*
			 * We used the message, so return the value
			 * that causes it to not be freed or
			 * queued.
			 */
			rv = -1;
		}
		rcu_read_unlock();
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
			ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb_addr->slave_addr = msg->rsp[6];
			ipmb_addr->lun = msg->rsp[7] & 3;
			ipmb_addr->channel = msg->rsp[3] & 0xf;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[7] >> 2;
			recv_msg->msg.netfn = msg->rsp[4] >> 2;
			recv_msg->msg.cmd = msg->rsp[8];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 10, not 9 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 10;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[9]),
			       msg->rsp_size - 10);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
3767
3768static int handle_lan_get_msg_rsp(ipmi_smi_t intf,
3769 struct ipmi_smi_msg *msg)
3770{
3771 struct ipmi_lan_addr lan_addr;
3772 struct ipmi_recv_msg *recv_msg;
3773
3774
3775 /*
3776 * This is 13, not 12, because the response must contain a
3777 * completion code.
3778 */
3779 if (msg->rsp_size < 13) {
3780 /* Message not big enough, just ignore it. */
3781 ipmi_inc_stat(intf, invalid_lan_responses);
3782 return 0;
3783 }
3784
3785 if (msg->rsp[2] != 0) {
3786 /* An error getting the response, just ignore it. */
3787 return 0;
3788 }
3789
3790 lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
3791 lan_addr.session_handle = msg->rsp[4];
3792 lan_addr.remote_SWID = msg->rsp[8];
3793 lan_addr.local_SWID = msg->rsp[5];
3794 lan_addr.channel = msg->rsp[3] & 0x0f;
3795 lan_addr.privilege = msg->rsp[3] >> 4;
3796 lan_addr.lun = msg->rsp[9] & 3;
3797
3798 /*
3799 * It's a response from a remote entity. Look up the sequence
3800 * number and handle the response.
3801 */
3802 if (intf_find_seq(intf,
3803 msg->rsp[9] >> 2,
3804 msg->rsp[3] & 0x0f,
3805 msg->rsp[10],
3806 (msg->rsp[6] >> 2) & (~1),
3807 (struct ipmi_addr *) &(lan_addr),
3808 &recv_msg)) {
3809 /*
3810 * We were unable to find the sequence number,
3811 * so just nuke the message.
3812 */
3813 ipmi_inc_stat(intf, unhandled_lan_responses);
3814 return 0;
3815 }
3816
3817 memcpy(recv_msg->msg_data,
3818 &(msg->rsp[11]),
3819 msg->rsp_size - 11);
3820 /*
3821 * The other fields matched, so no need to set them, except
3822 * for netfn, which needs to be the response that was
3823 * returned, not the request value.
3824 */
3825 recv_msg->msg.netfn = msg->rsp[6] >> 2;
3826 recv_msg->msg.data = recv_msg->msg_data;
3827 recv_msg->msg.data_len = msg->rsp_size - 12;
3828 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
3829 ipmi_inc_stat(intf, handled_lan_responses);
3830 deliver_response(recv_msg);
3831
3832 return 0;
3833}
3834
/*
 * Handle a command that arrived over a LAN channel.  If a user has
 * registered for the (netfn, cmd, channel) triple it is delivered;
 * unlike the IPMB path, no error response is generated when there is
 * no receiver -- the message is simply dropped.
 *
 * Returns 0 if the SMI message may be freed, or 1 to requeue it for
 * later handling (allocation failure).
 */
static int handle_lan_get_msg_cmd(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/* Pull the routing triple out of the LAN framing. */
	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		/* Pin the user so it cannot vanish during delivery. */
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */
		rv = 0;
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/* Extract the source address from the data. */
			lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
			lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
			lan_addr->session_handle = msg->rsp[4];
			lan_addr->remote_SWID = msg->rsp[8];
			lan_addr->local_SWID = msg->rsp[5];
			lan_addr->lun = msg->rsp[9] & 3;
			lan_addr->channel = msg->rsp[3] & 0xf;
			lan_addr->privilege = msg->rsp[3] >> 4;

			/*
			 * Extract the rest of the message information
			 * from the IPMB header.
			 */
			recv_msg->user = user;
			recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
			recv_msg->msgid = msg->rsp[9] >> 2;
			recv_msg->msg.netfn = msg->rsp[6] >> 2;
			recv_msg->msg.cmd = msg->rsp[10];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * We chop off 12, not 11 bytes because the checksum
			 * at the end also needs to be removed.
			 */
			recv_msg->msg.data_len = msg->rsp_size - 12;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[11]),
			       msg->rsp_size - 12);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
3928
3929/*
3930 * This routine will handle "Get Message" command responses with
3931 * channels that use an OEM Medium. The message format belongs to
3932 * the OEM. See IPMI 2.0 specification, Chapter 6 and
3933 * Chapter 22, sections 22.6 and 22.24 for more details.
3934 */
/*
 * Handle a Get Message response from a channel with an OEM medium.
 * The payload is opaque to us; it is delivered unmodified to the
 * user registered for (netfn, cmd, channel), or dropped if nobody
 * has registered.
 *
 * Returns 0 if the SMI message may be freed, or 1 to requeue it for
 * later handling (allocation failure).
 */
static int handle_oem_get_msg_cmd(ipmi_smi_t intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	ipmi_user_t user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * We expect the OEM SW to perform error checking
	 * so we just do some basic sanity checks
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM Message so the OEM needs to know how
	 * handle the message. We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		/* Pin the user so it cannot vanish during delivery. */
		kref_get(&user->refcount);
	} else
		user = NULL;
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */

		rv = 0;
	} else {
		/* Deliver the message to the user. */
		ipmi_inc_stat(intf, handled_commands);

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			kref_put(&user->refcount, free_user);
		} else {
			/*
			 * OEM Messages are expected to be delivered via
			 * the system interface to SMS software. We might
			 * need to visit this again depending on OEM
			 * requirements
			 */
			smi_addr = ((struct ipmi_system_interface_addr *)
				    &(recv_msg->addr));
			smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			smi_addr->channel = IPMI_BMC_CHANNEL;
			smi_addr->lun = msg->rsp[0] & 3;

			recv_msg->user = user;
			recv_msg->user_msg_data = NULL;
			recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
			recv_msg->msg.netfn = msg->rsp[0] >> 2;
			recv_msg->msg.cmd = msg->rsp[1];
			recv_msg->msg.data = recv_msg->msg_data;

			/*
			 * The message starts at byte 4 which follows the
			 * the Channel Byte in the "GET MESSAGE" command
			 */
			recv_msg->msg.data_len = msg->rsp_size - 4;
			memcpy(recv_msg->msg_data,
			       &(msg->rsp[4]),
			       msg->rsp_size - 4);
			deliver_response(recv_msg);
		}
	}

	return rv;
}
4036
4037static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
4038 struct ipmi_smi_msg *msg)
4039{
4040 struct ipmi_system_interface_addr *smi_addr;
4041
4042 recv_msg->msgid = 0;
4043 smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr);
4044 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4045 smi_addr->channel = IPMI_BMC_CHANNEL;
4046 smi_addr->lun = msg->rsp[0] & 3;
4047 recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
4048 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4049 recv_msg->msg.cmd = msg->rsp[1];
4050 memcpy(recv_msg->msg_data, &(msg->rsp[3]), msg->rsp_size - 3);
4051 recv_msg->msg.data = recv_msg->msg_data;
4052 recv_msg->msg.data_len = msg->rsp_size - 3;
4053}
4054
/*
 * Handle an asynchronous event read from the event message buffer.
 * A copy of the event is delivered to every user that has enabled
 * event reception; if nobody wants events it is queued on the
 * interface (bounded by MAX_EVENTS_IN_QUEUE) for a future consumer.
 *
 * Returns 0 if the SMI message may be freed, or 1 to requeue it when
 * a receive-message allocation fails.
 */
static int handle_read_event_rsp(ipmi_smi_t intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	ipmi_user_t user;
	int rv = 0;
	int deliver_count = 0;
	unsigned long flags;

	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	/* Serializes event delivery and the waiting-events queue. */
	spin_lock_irqsave(&intf->events_lock, flags);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/* Allocation failed: undo the copies made so far. */
			rcu_read_unlock();
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		recv_msg->user = user;
		/* Each copy holds its own reference on the user. */
		kref_get(&user->refcount);
		list_add_tail(&(recv_msg->link), &msgs);
	}
	rcu_read_unlock();

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_response(recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, put it in queue if there's
		 * not already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg();
		if (!recv_msg) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&(recv_msg->link), &(intf->waiting_events));
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There's too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 PFX "Event queue full, discarding incoming events\n");
		/* Warn only once until the queue drains. */
		intf->event_msg_printed = 1;
	}

 out:
	spin_unlock_irqrestore(&(intf->events_lock), flags);

	return rv;
}
4157
4158static int handle_bmc_rsp(ipmi_smi_t intf,
4159 struct ipmi_smi_msg *msg)
4160{
4161 struct ipmi_recv_msg *recv_msg;
4162 struct ipmi_user *user;
4163
4164 recv_msg = (struct ipmi_recv_msg *) msg->user_data;
4165 if (recv_msg == NULL) {
4166 dev_warn(intf->si_dev,
4167 "IPMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vender for assistance\n");
4168 return 0;
4169 }
4170
4171 user = recv_msg->user;
4172 /* Make sure the user still exists. */
4173 if (user && !user->valid) {
4174 /* The user for the message went away, so give up. */
4175 ipmi_inc_stat(intf, unhandled_local_responses);
4176 ipmi_free_recv_msg(recv_msg);
4177 } else {
4178 struct ipmi_system_interface_addr *smi_addr;
4179
4180 ipmi_inc_stat(intf, handled_local_responses);
4181 recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4182 recv_msg->msgid = msg->msgid;
4183 smi_addr = ((struct ipmi_system_interface_addr *)
4184 &(recv_msg->addr));
4185 smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
4186 smi_addr->channel = IPMI_BMC_CHANNEL;
4187 smi_addr->lun = msg->rsp[0] & 3;
4188 recv_msg->msg.netfn = msg->rsp[0] >> 2;
4189 recv_msg->msg.cmd = msg->rsp[1];
4190 memcpy(recv_msg->msg_data,
4191 &(msg->rsp[2]),
4192 msg->rsp_size - 2);
4193 recv_msg->msg.data = recv_msg->msg_data;
4194 recv_msg->msg.data_len = msg->rsp_size - 2;
4195 deliver_response(recv_msg);
4196 }
4197
4198 return 0;
4199}
4200
4201/*
4202 * Handle a received message. Return 1 if the message should be requeued,
4203 * 0 if the message should be freed, or -1 if the message should not
4204 * be freed or requeued.
4205 */
4206static int handle_one_recv_msg(ipmi_smi_t intf,
4207 struct ipmi_smi_msg *msg)
4208{
4209 int requeue;
4210 int chan;
4211
4212#ifdef DEBUG_MSGING
4213 int m;
4214 printk("Recv:");
4215 for (m = 0; m < msg->rsp_size; m++)
4216 printk(" %2.2x", msg->rsp[m]);
4217 printk("\n");
4218#endif
4219 if (msg->rsp_size < 2) {
4220 /* Message is too small to be correct. */
4221 dev_warn(intf->si_dev,
4222 PFX "BMC returned to small a message for netfn %x cmd %x, got %d bytes\n",
4223 (msg->data[0] >> 2) | 1, msg->data[1], msg->rsp_size);
4224
4225 /* Generate an error response for the message. */
4226 msg->rsp[0] = msg->data[0] | (1 << 2);
4227 msg->rsp[1] = msg->data[1];
4228 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4229 msg->rsp_size = 3;
4230 } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
4231 || (msg->rsp[1] != msg->data[1])) {
4232 /*
4233 * The NetFN and Command in the response is not even
4234 * marginally correct.
4235 */
4236 dev_warn(intf->si_dev,
4237 PFX "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
4238 (msg->data[0] >> 2) | 1, msg->data[1],
4239 msg->rsp[0] >> 2, msg->rsp[1]);
4240
4241 /* Generate an error response for the message. */
4242 msg->rsp[0] = msg->data[0] | (1 << 2);
4243 msg->rsp[1] = msg->data[1];
4244 msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
4245 msg->rsp_size = 3;
4246 }
4247
4248 if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4249 && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
4250 && (msg->user_data != NULL)) {
4251 /*
4252 * It's a response to a response we sent. For this we
4253 * deliver a send message response to the user.
4254 */
4255 struct ipmi_recv_msg *recv_msg = msg->user_data;
4256
4257 requeue = 0;
4258 if (msg->rsp_size < 2)
4259 /* Message is too small to be correct. */
4260 goto out;
4261
4262 chan = msg->data[2] & 0x0f;
4263 if (chan >= IPMI_MAX_CHANNELS)
4264 /* Invalid channel number */
4265 goto out;
4266
4267 if (!recv_msg)
4268 goto out;
4269
4270 /* Make sure the user still exists. */
4271 if (!recv_msg->user || !recv_msg->user->valid)
4272 goto out;
4273
4274 recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
4275 recv_msg->msg.data = recv_msg->msg_data;
4276 recv_msg->msg.data_len = 1;
4277 recv_msg->msg_data[0] = msg->rsp[2];
4278 deliver_response(recv_msg);
4279 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4280 && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
4281 struct ipmi_channel *chans;
4282
4283 /* It's from the receive queue. */
4284 chan = msg->rsp[3] & 0xf;
4285 if (chan >= IPMI_MAX_CHANNELS) {
4286 /* Invalid channel number */
4287 requeue = 0;
4288 goto out;
4289 }
4290
4291 /*
4292 * We need to make sure the channels have been initialized.
4293 * The channel_handler routine will set the "curr_channel"
4294 * equal to or greater than IPMI_MAX_CHANNELS when all the
4295 * channels for this interface have been initialized.
4296 */
4297 if (!intf->channels_ready) {
4298 requeue = 0; /* Throw the message away */
4299 goto out;
4300 }
4301
4302 chans = READ_ONCE(intf->channel_list)->c;
4303
4304 switch (chans[chan].medium) {
4305 case IPMI_CHANNEL_MEDIUM_IPMB:
4306 if (msg->rsp[4] & 0x04) {
4307 /*
4308 * It's a response, so find the
4309 * requesting message and send it up.
4310 */
4311 requeue = handle_ipmb_get_msg_rsp(intf, msg);
4312 } else {
4313 /*
4314 * It's a command to the SMS from some other
4315 * entity. Handle that.
4316 */
4317 requeue = handle_ipmb_get_msg_cmd(intf, msg);
4318 }
4319 break;
4320
4321 case IPMI_CHANNEL_MEDIUM_8023LAN:
4322 case IPMI_CHANNEL_MEDIUM_ASYNC:
4323 if (msg->rsp[6] & 0x04) {
4324 /*
4325 * It's a response, so find the
4326 * requesting message and send it up.
4327 */
4328 requeue = handle_lan_get_msg_rsp(intf, msg);
4329 } else {
4330 /*
4331 * It's a command to the SMS from some other
4332 * entity. Handle that.
4333 */
4334 requeue = handle_lan_get_msg_cmd(intf, msg);
4335 }
4336 break;
4337
4338 default:
4339 /* Check for OEM Channels. Clients had better
4340 register for these commands. */
4341 if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
4342 && (chans[chan].medium
4343 <= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
4344 requeue = handle_oem_get_msg_cmd(intf, msg);
4345 } else {
4346 /*
4347 * We don't handle the channel type, so just
4348 * free the message.
4349 */
4350 requeue = 0;
4351 }
4352 }
4353
4354 } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
4355 && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
4356 /* It's an asynchronous event. */
4357 requeue = handle_read_event_rsp(intf, msg);
4358 } else {
4359 /* It's a response from the local BMC. */
4360 requeue = handle_bmc_rsp(intf, msg);
4361 }
4362
4363 out:
4364 return requeue;
4365}
4366
4367/*
4368 * If there are messages in the queue or pretimeouts, handle them.
4369 */
4370static void handle_new_recv_msgs(ipmi_smi_t intf)
4371{
4372 struct ipmi_smi_msg *smi_msg;
4373 unsigned long flags = 0;
4374 int rv;
4375 int run_to_completion = intf->run_to_completion;
4376
4377 /* See if any waiting messages need to be processed. */
4378 if (!run_to_completion)
4379 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4380 while (!list_empty(&intf->waiting_rcv_msgs)) {
4381 smi_msg = list_entry(intf->waiting_rcv_msgs.next,
4382 struct ipmi_smi_msg, link);
4383 list_del(&smi_msg->link);
4384 if (!run_to_completion)
4385 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
4386 flags);
4387 rv = handle_one_recv_msg(intf, smi_msg);
4388 if (!run_to_completion)
4389 spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
4390 if (rv > 0) {
4391 /*
4392 * To preserve message order, quit if we
4393 * can't handle a message. Add the message
4394 * back at the head, this is safe because this
4395 * tasklet is the only thing that pulls the
4396 * messages.
4397 */
4398 list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
4399 break;
4400 } else {
4401 if (rv == 0)
4402 /* Message handled */
4403 ipmi_free_smi_msg(smi_msg);
4404 /* If rv < 0, fatal error, del but don't free. */
4405 }
4406 }
4407 if (!run_to_completion)
4408 spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
4409
4410 /*
4411 * If the pretimout count is non-zero, decrement one from it and
4412 * deliver pretimeouts to all the users.
4413 */
4414 if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
4415 ipmi_user_t user;
4416
4417 rcu_read_lock();
4418 list_for_each_entry_rcu(user, &intf->users, link) {
4419 if (user->handler->ipmi_watchdog_pretimeout)
4420 user->handler->ipmi_watchdog_pretimeout(
4421 user->handler_data);
4422 }
4423 rcu_read_unlock();
4424 }
4425}
4426
/*
 * Receive tasklet body: starts the next queued transmit (if any) and
 * then processes any newly received messages.  'val' is the interface
 * pointer cast to unsigned long, as required by the tasklet API.
 */
static void smi_recv_tasklet(unsigned long val)
{
	unsigned long flags = 0; /* keep us warning-free. */
	ipmi_smi_t intf = (ipmi_smi_t) val;
	int run_to_completion = intf->run_to_completion;
	struct ipmi_smi_msg *newmsg = NULL;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may deadlock
	 * because the lower layer is allowed to hold locks while calling
	 * message delivery.
	 */

	rcu_read_lock();

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	/* Send outside the lock; the lower layer may take its own locks. */
	if (newmsg)
		intf->handlers->sender(intf->send_info, newmsg);

	rcu_read_unlock();

	handle_new_recv_msgs(intf);
}
4470
/*
 * Handle a new message from the lower layer.
 *
 * Local responses to a Send Message command we originated are
 * consumed here (starting or erroring the sequence timer); anything
 * else is queued on waiting_rcv_msgs for the receive tasklet so that
 * message order is preserved.  In run-to-completion mode no locks
 * are taken and the tasklet body is called directly.
 */
void ipmi_smi_msg_received(ipmi_smi_t intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = intf->run_to_completion;

	if ((msg->data_size >= 2)
	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
	    && (msg->user_data == NULL)) {

		if (intf->in_shutdown)
			goto free_msg;

		/*
		 * This is the local response to a command send, start
		 * the timer for these.  The user_data will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer.  Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);

free_msg:
		ipmi_free_smi_msg(msg);
	} else {
		/*
		 * To preserve message order, we keep a queue and deliver from
		 * a tasklet.
		 */
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
	}

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_recv_tasklet((unsigned long) intf);
	else
		tasklet_schedule(&intf->recv_tasklet);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);
4552
4553void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
4554{
4555 if (intf->in_shutdown)
4556 return;
4557
4558 atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
4559 tasklet_schedule(&intf->recv_tasklet);
4560}
4561EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
4562
4563static struct ipmi_smi_msg *
4564smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
4565 unsigned char seq, long seqid)
4566{
4567 struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
4568 if (!smi_msg)
4569 /*
4570 * If we can't allocate the message, then just return, we
4571 * get 4 retries, so this should be ok.
4572 */
4573 return NULL;
4574
4575 memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
4576 smi_msg->data_size = recv_msg->msg.data_len;
4577 smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);
4578
4579#ifdef DEBUG_MSGING
4580 {
4581 int m;
4582 printk("Resend: ");
4583 for (m = 0; m < smi_msg->data_size; m++)
4584 printk(" %2.2x", smi_msg->data[m]);
4585 printk("\n");
4586 }
4587#endif
4588 return smi_msg;
4589}
4590
/*
 * Process timeout bookkeeping for one sequence-table slot.
 *
 * Called with intf->seq_lock held (irq state in *flags); the lock is
 * dropped and reacquired around the retransmit path below.
 *
 * Messages that have exhausted their retries are moved onto @timeouts
 * for the caller to error out after releasing the lock.  Every message
 * still pending (waiting or retried) bumps *waiting_msgs.
 */
static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, unsigned long *flags,
			      unsigned int *waiting_msgs)
{
	struct ipmi_recv_msg *msg;
	const struct ipmi_smi_handlers *handlers;

	/* No timeout processing while the interface is going away. */
	if (intf->in_shutdown)
		return;

	/* Empty slot, nothing to time out. */
	if (!ent->inuse)
		return;

	/* Not expired yet: just age the entry and count it as waiting. */
	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		(*waiting_msgs)++;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		(*waiting_msgs)++;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			/* Couldn't allocate the resend; count it dropped. */
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		/* Drop seq_lock: smi_send() must not be called under it. */
		spin_unlock_irqrestore(&intf->seq_lock, *flags);

		/*
		 * Send the new message. We send with a zero
		 * priority. It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		handlers = intf->handlers;
		if (handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, handlers, smi_msg, 0);
		} else
			/* Interface went away; free the unsent message. */
			ipmi_free_smi_msg(smi_msg);

		/* Reacquire for the caller, who expects the lock held. */
		spin_lock_irqsave(&intf->seq_lock, *flags);
	}
}
4672
4673static unsigned int ipmi_timeout_handler(ipmi_smi_t intf,
4674 unsigned long timeout_period)
4675{
4676 struct list_head timeouts;
4677 struct ipmi_recv_msg *msg, *msg2;
4678 unsigned long flags;
4679 int i;
4680 unsigned int waiting_msgs = 0;
4681
4682 if (!intf->bmc_registered) {
4683 kref_get(&intf->refcount);
4684 if (!schedule_work(&intf->bmc_reg_work)) {
4685 kref_put(&intf->refcount, intf_free);
4686 waiting_msgs++;
4687 }
4688 }
4689
4690 /*
4691 * Go through the seq table and find any messages that
4692 * have timed out, putting them in the timeouts
4693 * list.
4694 */
4695 INIT_LIST_HEAD(&timeouts);
4696 spin_lock_irqsave(&intf->seq_lock, flags);
4697 for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
4698 check_msg_timeout(intf, &(intf->seq_table[i]),
4699 &timeouts, timeout_period, i,
4700 &flags, &waiting_msgs);
4701 spin_unlock_irqrestore(&intf->seq_lock, flags);
4702
4703 list_for_each_entry_safe(msg, msg2, &timeouts, link)
4704 deliver_err_response(msg, IPMI_TIMEOUT_COMPLETION_CODE);
4705
4706 /*
4707 * Maintenance mode handling. Check the timeout
4708 * optimistically before we claim the lock. It may
4709 * mean a timeout gets missed occasionally, but that
4710 * only means the timeout gets extended by one period
4711 * in that case. No big deal, and it avoids the lock
4712 * most of the time.
4713 */
4714 if (intf->auto_maintenance_timeout > 0) {
4715 spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
4716 if (intf->auto_maintenance_timeout > 0) {
4717 intf->auto_maintenance_timeout
4718 -= timeout_period;
4719 if (!intf->maintenance_mode
4720 && (intf->auto_maintenance_timeout <= 0)) {
4721 intf->maintenance_mode_enable = false;
4722 maintenance_mode_update(intf);
4723 }
4724 }
4725 spin_unlock_irqrestore(&intf->maintenance_mode_lock,
4726 flags);
4727 }
4728
4729 tasklet_schedule(&intf->recv_tasklet);
4730
4731 return waiting_msgs;
4732}
4733
4734static void ipmi_request_event(ipmi_smi_t intf)
4735{
4736 /* No event requests when in maintenance mode. */
4737 if (intf->maintenance_mode_enable)
4738 return;
4739
4740 if (!intf->in_shutdown)
4741 intf->handlers->request_events(intf->send_info);
4742}
4743
/* Periodic timer that drives message timeout/retransmit processing. */
static struct timer_list ipmi_timer;

/* Set nonzero at module cleanup to keep the timer from rearming. */
static atomic_t stop_operation;
4747
4748static void ipmi_timeout(struct timer_list *unused)
4749{
4750 ipmi_smi_t intf;
4751 int nt = 0;
4752
4753 if (atomic_read(&stop_operation))
4754 return;
4755
4756 rcu_read_lock();
4757 list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
4758 int lnt = 0;
4759
4760 if (atomic_read(&intf->event_waiters)) {
4761 intf->ticks_to_req_ev--;
4762 if (intf->ticks_to_req_ev == 0) {
4763 ipmi_request_event(intf);
4764 intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
4765 }
4766 lnt++;
4767 }
4768
4769 lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
4770
4771 lnt = !!lnt;
4772 if (lnt != intf->last_needs_timer &&
4773 intf->handlers->set_need_watch)
4774 intf->handlers->set_need_watch(intf->send_info, lnt);
4775 intf->last_needs_timer = lnt;
4776
4777 nt += lnt;
4778 }
4779 rcu_read_unlock();
4780
4781 if (nt)
4782 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4783}
4784
4785static void need_waiter(ipmi_smi_t intf)
4786{
4787 /* Racy, but worst case we start the timer twice. */
4788 if (!timer_pending(&ipmi_timer))
4789 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
4790}
4791
/* Outstanding-allocation counters; checked for leaks at module exit. */
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
4794
/* Default ->done handler for SMI messages from ipmi_alloc_smi_msg(). */
static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	kfree(msg);
}
4800
4801struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
4802{
4803 struct ipmi_smi_msg *rv;
4804 rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
4805 if (rv) {
4806 rv->done = free_smi_msg;
4807 rv->user_data = NULL;
4808 atomic_inc(&smi_msg_inuse_count);
4809 }
4810 return rv;
4811}
4812EXPORT_SYMBOL(ipmi_alloc_smi_msg);
4813
/* Default ->done handler for messages from ipmi_alloc_recv_msg(). */
static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	kfree(msg);
}
4819
4820static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
4821{
4822 struct ipmi_recv_msg *rv;
4823
4824 rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
4825 if (rv) {
4826 rv->user = NULL;
4827 rv->done = free_recv_msg;
4828 atomic_inc(&recv_msg_inuse_count);
4829 }
4830 return rv;
4831}
4832
/*
 * Release a receive message: drop the reference it holds on its user
 * (if any), then run its ->done destructor.
 */
void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user)
		kref_put(&msg->user->refcount, free_user);
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);
4840
4841static atomic_t panic_done_count = ATOMIC_INIT(0);
4842
/* Panic-time ->done handler: just count the completion, never free. */
static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}
4847
/* Panic-time ->done handler: just count the completion, never free. */
static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}
4852
4853/*
4854 * Inside a panic, send a message and wait for a response.
4855 */
4856static void ipmi_panic_request_and_wait(ipmi_smi_t intf,
4857 struct ipmi_addr *addr,
4858 struct kernel_ipmi_msg *msg)
4859{
4860 struct ipmi_smi_msg smi_msg;
4861 struct ipmi_recv_msg recv_msg;
4862 int rv;
4863
4864 smi_msg.done = dummy_smi_done_handler;
4865 recv_msg.done = dummy_recv_done_handler;
4866 atomic_add(2, &panic_done_count);
4867 rv = i_ipmi_request(NULL,
4868 intf,
4869 addr,
4870 0,
4871 msg,
4872 intf,
4873 &smi_msg,
4874 &recv_msg,
4875 0,
4876 intf->addrinfo[0].address,
4877 intf->addrinfo[0].lun,
4878 0, 1); /* Don't retry, and don't wait. */
4879 if (rv)
4880 atomic_sub(2, &panic_done_count);
4881 else if (intf->handlers->flush_messages)
4882 intf->handlers->flush_messages(intf->send_info);
4883
4884 while (atomic_read(&panic_done_count) != 0)
4885 ipmi_poll(intf);
4886}
4887
4888static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
4889{
4890 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4891 && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
4892 && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
4893 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4894 /* A get event receiver command, save it. */
4895 intf->event_receiver = msg->msg.data[1];
4896 intf->event_receiver_lun = msg->msg.data[2] & 0x3;
4897 }
4898}
4899
4900static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg)
4901{
4902 if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
4903 && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
4904 && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
4905 && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
4906 /*
4907 * A get device id command, save if we are an event
4908 * receiver or generator.
4909 */
4910 intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
4911 intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
4912 }
4913}
4914
/*
 * Send panic event(s) to every registered interface.  Runs in panic
 * context: requests are issued synchronously via
 * ipmi_panic_request_and_wait().
 *
 * First an "OS Critical Stop" platform event is sent on every ready
 * interface.  Then, if panic-string events are enabled and @str is
 * non-NULL, the string is chopped into 11-byte chunks and written as
 * OEM SEL records — either to the interface's event receiver or, if
 * the local MC is an SEL device, directly into its SEL.
 */
static void send_panic_events(char *str)
{
	struct kernel_ipmi_msg msg;
	ipmi_smi_t intf;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in. Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}
	/*
	 * NOTE(review): when str is NULL, data[3], data[6] and data[7]
	 * are sent uninitialized from the stack — consider zeroing
	 * data[] up front.
	 */

	/* For every registered interface, send the event. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || !intf->handlers->poll)
			/* Interface is not ready or can't run at panic time. */
			continue;

		/* Send the event announcing the panic. */
		ipmi_panic_request_and_wait(intf, &addr, &msg);
	}

	/*
	 * On every interface, dump a bunch of OEM event holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/* For every registered interface, send the event. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		char *p = str;
		struct ipmi_ipmb_addr *ipmb;
		int j;

		if (intf->intf_num == -1)
			/* Interface was not ready yet. */
			continue;

		/*
		 * intf_num is used as an marker to tell if the
		 * interface is valid. Thus we need a read barrier to
		 * make sure data fetched before checking intf_num
		 * won't be used.
		 */
		smp_rmb();

		/*
		 * First job here is to figure out where to send the
		 * OEM events. There's no way in IPMI to send OEM
		 * events using an event send command, so we have to
		 * find the SEL to put them in and stick them in
		 * there.
		 */

		/* Get capabilities from the get device id. */
		intf->local_sel_device = 0;
		intf->local_event_generator = 0;
		intf->event_receiver = 0;

		/* Request the device info from the local MC. */
		msg.netfn = IPMI_NETFN_APP_REQUEST;
		msg.cmd = IPMI_GET_DEVICE_ID_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = device_id_fetcher;
		ipmi_panic_request_and_wait(intf, &addr, &msg);

		if (intf->local_event_generator) {
			/* Request the event receiver from the local MC. */
			msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
			msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
			msg.data = NULL;
			msg.data_len = 0;
			intf->null_user_handler = event_receiver_fetcher;
			ipmi_panic_request_and_wait(intf, &addr, &msg);
		}
		intf->null_user_handler = NULL;

		/*
		 * Validate the event receiver. The low bit must not
		 * be 1 (it must be a valid IPMB address), it cannot
		 * be zero, and it must not be my address.
		 */
		if (((intf->event_receiver & 1) == 0)
		    && (intf->event_receiver != 0)
		    && (intf->event_receiver != intf->addrinfo[0].address)) {
			/*
			 * The event receiver is valid, send an IPMB
			 * message.
			 */
			ipmb = (struct ipmi_ipmb_addr *) &addr;
			ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
			ipmb->channel = 0; /* FIXME - is this right? */
			ipmb->lun = intf->event_receiver_lun;
			ipmb->slave_addr = intf->event_receiver;
		} else if (intf->local_sel_device) {
			/*
			 * The event receiver was not valid (or was
			 * me), but I am an SEL device, just dump it
			 * in my SEL.
			 */
			si = (struct ipmi_system_interface_addr *) &addr;
			si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
			si->channel = IPMI_BMC_CHANNEL;
			si->lun = 0;
		} else
			continue; /* No where to send the event. */

		msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
		msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
		msg.data = data;
		msg.data_len = 16;

		/* Emit the string 11 bytes at a time as OEM SEL records. */
		j = 0;
		while (*p) {
			int size = strlen(p);

			if (size > 11)
				size = 11;
			data[0] = 0;
			data[1] = 0;
			data[2] = 0xf0; /* OEM event without timestamp. */
			data[3] = intf->addrinfo[0].address;
			data[4] = j++; /* sequence # */
			/*
			 * Always give 11 bytes, so strncpy will fill
			 * it with zeroes for me.
			 */
			strncpy(data+5, p, 11);
			p += size;

			ipmi_panic_request_and_wait(intf, &addr, &msg);
		}
	}
}
5076
5077static int has_panicked;
5078
/*
 * Panic notifier callback: put every ready interface into
 * run-to-completion (polled, lock-free) mode, then emit panic events.
 * @ptr is the panic string passed through the notifier chain.
 */
static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	ipmi_smi_t intf;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
		if (!intf->handlers)
			/* Interface is not ready. */
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted. In this case, drop items on the list for
		 * the safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);
	}

	send_panic_events(ptr);

	return NOTIFY_DONE;
}
5122
/* Registered on panic_notifier_list; higher priority runs earlier. */
static struct notifier_block panic_block = {
	.notifier_call	= panic_event,
	.next		= NULL,
	.priority	= 200	/* priority: INT_MAX >= x >= 0 */
};
5128
5129static int ipmi_init_msghandler(void)
5130{
5131 int rv;
5132
5133 if (initialized)
5134 return 0;
5135
5136 rv = driver_register(&ipmidriver.driver);
5137 if (rv) {
5138 pr_err(PFX "Could not register IPMI driver\n");
5139 return rv;
5140 }
5141
5142 pr_info("ipmi message handler version " IPMI_DRIVER_VERSION "\n");
5143
5144#ifdef CONFIG_IPMI_PROC_INTERFACE
5145 proc_ipmi_root = proc_mkdir("ipmi", NULL);
5146 if (!proc_ipmi_root) {
5147 pr_err(PFX "Unable to create IPMI proc dir");
5148 driver_unregister(&ipmidriver.driver);
5149 return -ENOMEM;
5150 }
5151
5152#endif /* CONFIG_IPMI_PROC_INTERFACE */
5153
5154 timer_setup(&ipmi_timer, ipmi_timeout, 0);
5155 mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
5156
5157 atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
5158
5159 initialized = 1;
5160
5161 return 0;
5162}
5163
5164static int __init ipmi_init_msghandler_mod(void)
5165{
5166 ipmi_init_msghandler();
5167 return 0;
5168}
5169
/*
 * Module exit: unhook the panic notifier, stop the timeout timer,
 * remove the proc directory, unregister the driver, and warn about
 * any leaked message buffers.
 */
static void __exit cleanup_ipmi(void)
{
	int count;

	if (!initialized)
		return;

	atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block);

	/*
	 * This can't be called if any interfaces exist, so no worry
	 * about shutting down the interfaces.
	 */

	/*
	 * Tell the timer to stop, then wait for it to stop. This
	 * avoids problems with race conditions removing the timer
	 * here.
	 */
	atomic_inc(&stop_operation);
	del_timer_sync(&ipmi_timer);

#ifdef CONFIG_IPMI_PROC_INTERFACE
	proc_remove(proc_ipmi_root);
#endif /* CONFIG_IPMI_PROC_INTERFACE */

	driver_unregister(&ipmidriver.driver);

	initialized = 0;

	/* Check for buffer leaks. */
	count = atomic_read(&smi_msg_inuse_count);
	if (count != 0)
		pr_warn(PFX "SMI message count %d at exit\n", count);
	count = atomic_read(&recv_msg_inuse_count);
	if (count != 0)
		pr_warn(PFX "recv message count %d at exit\n", count);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI"
		   " interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
/* Load the /dev interface module after this one so devices appear. */
MODULE_SOFTDEP("post: ipmi_devintf");