GitHub Repository: torvalds/linux
Path: blob/master/drivers/char/ipmi/ipmi_msghandler.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_msghandler.c
 *
 * Incoming and outgoing message routing for an IPMI interface.
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <[email protected]>
 *         [email protected]
 *
 * Copyright 2002 MontaVista Software Inc.
 */

#define pr_fmt(fmt) "IPMI message handler: " fmt
#define dev_fmt(fmt) pr_fmt(fmt)

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/panic_notifier.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/workqueue.h>
#include <linux/uuid.h>
#include <linux/nospec.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>

#define IPMI_DRIVER_VERSION "39.2"

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user);
static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
				   struct ipmi_user *user);
static int ipmi_init_msghandler(void);
static void smi_work(struct work_struct *t);
static void handle_new_recv_msgs(struct ipmi_smi *intf);
static void need_waiter(struct ipmi_smi *intf);
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg);
static void intf_free(struct kref *ref);

static bool initialized;
static bool drvregistered;

static struct timer_list ipmi_timer;

/* Numbers in this enumerator should be mapped to ipmi_panic_event_str */
enum ipmi_panic_event_op {
	IPMI_SEND_PANIC_EVENT_NONE,
	IPMI_SEND_PANIC_EVENT,
	IPMI_SEND_PANIC_EVENT_STRING,
	IPMI_SEND_PANIC_EVENT_MAX
};

/* Indices in this array should be mapped to enum ipmi_panic_event_op */
static const char *const ipmi_panic_event_str[] = { "none", "event", "string", NULL };

#ifdef CONFIG_IPMI_PANIC_STRING
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_STRING
#elif defined(CONFIG_IPMI_PANIC_EVENT)
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT
#else
#define IPMI_PANIC_DEFAULT IPMI_SEND_PANIC_EVENT_NONE
#endif

static enum ipmi_panic_event_op ipmi_send_panic_event = IPMI_PANIC_DEFAULT;

static int panic_op_write_handler(const char *val,
				  const struct kernel_param *kp)
{
	char valcp[16];
	int e;

	strscpy(valcp, val, sizeof(valcp));
	e = match_string(ipmi_panic_event_str, -1, strstrip(valcp));
	if (e < 0)
		return e;

	ipmi_send_panic_event = e;
	return 0;
}

static int panic_op_read_handler(char *buffer, const struct kernel_param *kp)
{
	const char *event_str;

	if (ipmi_send_panic_event >= IPMI_SEND_PANIC_EVENT_MAX)
		event_str = "???";
	else
		event_str = ipmi_panic_event_str[ipmi_send_panic_event];

	return sprintf(buffer, "%s\n", event_str);
}

static const struct kernel_param_ops panic_op_ops = {
	.set = panic_op_write_handler,
	.get = panic_op_read_handler
};
module_param_cb(panic_op, &panic_op_ops, NULL, 0600);
MODULE_PARM_DESC(panic_op, "Sets if the IPMI driver will attempt to store panic information in the event log in the event of a panic. Set to 'none' for no, 'event' for a single event, or 'string' for a generic event and the panic string in IPMI OEM events.");
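
/*
 * A usage sketch (assuming the module is loaded under its mainline
 * name, ipmi_msghandler): the parameter can be read and changed at
 * runtime through sysfs, e.g.
 *
 *	echo string > /sys/module/ipmi_msghandler/parameters/panic_op
 *	cat /sys/module/ipmi_msghandler/parameters/panic_op
 *
 * or set at load time with "modprobe ipmi_msghandler panic_op=event".
 */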


#define MAX_EVENTS_IN_QUEUE 25

/* Remain in auto-maintenance mode for this amount of time (in ms). */
static unsigned long maintenance_mode_timeout_ms = 30000;
module_param(maintenance_mode_timeout_ms, ulong, 0644);
MODULE_PARM_DESC(maintenance_mode_timeout_ms,
		 "The time (milliseconds) after the last maintenance message that the connection stays in maintenance mode.");

/*
 * Don't let a message sit in a queue forever, always time it with at
 * least the max message timer.  This is in milliseconds.
 */
#define MAX_MSG_TIMEOUT 60000

/*
 * Timeout times below are in milliseconds, and are done off a 1
 * second timer.  So setting the value to 1000 would mean anything
 * between 0 and 1000ms.  So really the only reasonable minimum
 * setting is 2000ms, which is between 1 and 2 seconds.
 */

/* The default timeout for message retries. */
static unsigned long default_retry_ms = 2000;
module_param(default_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_retry_ms,
		 "The time (milliseconds) between retry sends");

/* The default timeout for maintenance mode message retries. */
static unsigned long default_maintenance_retry_ms = 3000;
module_param(default_maintenance_retry_ms, ulong, 0644);
MODULE_PARM_DESC(default_maintenance_retry_ms,
		 "The time (milliseconds) between retry sends in maintenance mode");

/* The default maximum number of retries */
static unsigned int default_max_retries = 4;
module_param(default_max_retries, uint, 0644);
MODULE_PARM_DESC(default_max_retries,
		 "The default maximum number of times to retry sends");

/* The default maximum number of users that may register. */
static unsigned int max_users = 30;
module_param(max_users, uint, 0644);
MODULE_PARM_DESC(max_users,
		 "The most users that may use the IPMI stack at one time.");

/* The default maximum number of messages a user may have outstanding. */
static unsigned int max_msgs_per_user = 100;
module_param(max_msgs_per_user, uint, 0644);
MODULE_PARM_DESC(max_msgs_per_user,
		 "The most messages a user may have outstanding.");

/* Call every ~1000 ms. */
#define IPMI_TIMEOUT_TIME 1000

/* How many jiffies does it take to get to the timeout time. */
#define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000)

/*
 * Request events from the queue every second (this is the number of
 * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
 * future, IPMI will add a way to know immediately if an event is in
 * the queue and this silliness can go away.
 */
#define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME))

/* How long should we cache dynamic device IDs? */
#define IPMI_DYN_DEV_ID_EXPIRY (10 * HZ)

/*
 * The main "user" data structure.
 */
struct ipmi_user {
	struct list_head link;

	struct kref refcount;
	refcount_t destroyed;

	/* The upper layer that handles receive messages. */
	const struct ipmi_user_hndl *handler;
	void *handler_data;

	/* The interface this user is bound to. */
	struct ipmi_smi *intf;

	/* Does this interface receive IPMI events? */
	bool gets_events;

	atomic_t nr_msgs;
};

struct cmd_rcvr {
	struct list_head link;

	struct ipmi_user *user;
	unsigned char netfn;
	unsigned char cmd;
	unsigned int chans;

	/*
	 * This is used to form a linked list during mass deletion.
	 * Since this is in an RCU list, we cannot use the link above
	 * or change any data until the RCU period completes.  So we
	 * use this next variable during mass deletion so we can have
	 * a list and don't have to wait and restart the search on
	 * every individual deletion of a command.
	 */
	struct cmd_rcvr *next;
};

struct seq_table {
	unsigned int inuse : 1;
	unsigned int broadcast : 1;

	unsigned long timeout;
	unsigned long orig_timeout;
	unsigned int retries_left;

	/*
	 * To verify on an incoming send message response that this is
	 * the message that the response is for, we keep a sequence id
	 * and increment it every time we send a message.
	 */
	long seqid;

	/*
	 * This is held so we can properly respond to the message on a
	 * timeout, and it is used to hold the temporary data for
	 * retransmission, too.
	 */
	struct ipmi_recv_msg *recv_msg;
};

/*
 * Store the information in a msgid (long) to allow us to find a
 * sequence table entry from the msgid.
 */
#define STORE_SEQ_IN_MSGID(seq, seqid) \
	((((seq) & 0x3f) << 26) | ((seqid) & 0x3ffffff))

#define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \
	do { \
		seq = (((msgid) >> 26) & 0x3f); \
		seqid = ((msgid) & 0x3ffffff); \
	} while (0)

#define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3ffffff)
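
/*
 * A worked example of the encoding above: with seq = 5 and a table
 * seqid of 100 (0x64),
 *
 *	STORE_SEQ_IN_MSGID(5, 100) == (5 << 26) | 0x64 == 0x14000064
 *
 * and GET_SEQ_FROM_MSGID(0x14000064, seq, seqid) recovers seq == 5
 * and seqid == 100.  Only the low 6 bits of seq and the low 26 bits
 * of seqid survive the round trip, which is why NEXT_SEQID() wraps
 * at 0x3ffffff.
 */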

#define IPMI_MAX_CHANNELS 16
struct ipmi_channel {
	unsigned char medium;
	unsigned char protocol;
};

struct ipmi_channel_set {
	struct ipmi_channel c[IPMI_MAX_CHANNELS];
};

struct ipmi_my_addrinfo {
	/*
	 * My slave address.  This is initialized to IPMI_BMC_SLAVE_ADDR,
	 * but may be changed by the user.
	 */
	unsigned char address;

	/*
	 * My LUN.  This should generally stay the SMS LUN, but just in
	 * case...
	 */
	unsigned char lun;
};

/*
 * Note that the product id, manufacturer id, guid, and device id are
 * immutable in this structure, so dyn_mutex is not required for
 * accessing those.  If those change on a BMC, a new BMC is allocated.
 */
struct bmc_device {
	struct platform_device pdev;
	struct list_head intfs; /* Interfaces on this BMC. */
	struct ipmi_device_id id;
	struct ipmi_device_id fetch_id;
	int dyn_id_set;
	unsigned long dyn_id_expiry;
	struct mutex dyn_mutex; /* Protects id, intfs, & dyn* */
	guid_t guid;
	guid_t fetch_guid;
	int dyn_guid_set;
	struct kref usecount;
	struct work_struct remove_work;
	unsigned char cc; /* completion code */
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)

static struct workqueue_struct *bmc_remove_work_wq;

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid);

/*
 * Various statistics for IPMI, these index stats[] in the ipmi_smi
 * structure.
 */
enum ipmi_stat_indexes {
	/* Commands we got from the user that were invalid. */
	IPMI_STAT_sent_invalid_commands = 0,

	/* Commands we sent to the MC. */
	IPMI_STAT_sent_local_commands,

	/* Responses from the MC that were delivered to a user. */
	IPMI_STAT_handled_local_responses,

	/* Responses from the MC that were not delivered to a user. */
	IPMI_STAT_unhandled_local_responses,

	/* Commands we sent out to the IPMB bus. */
	IPMI_STAT_sent_ipmb_commands,

	/* Commands sent on the IPMB that had errors on the SEND CMD */
	IPMI_STAT_sent_ipmb_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_ipmb_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_ipmb_commands,

	/*
	 * This is like above, but for broadcasts.  Broadcasts are
	 * *not* included in the above count (they are expected to
	 * time out).
	 */
	IPMI_STAT_timed_out_ipmb_broadcasts,

	/* Responses I have sent to the IPMB bus. */
	IPMI_STAT_sent_ipmb_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_ipmb_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_ipmb_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_ipmb_responses,

	/* Commands we sent out on the LAN. */
	IPMI_STAT_sent_lan_commands,

	/* Commands sent on the LAN that had errors on the SEND CMD */
	IPMI_STAT_sent_lan_command_errs,

	/* Each retransmit increments this count. */
	IPMI_STAT_retransmitted_lan_commands,

	/*
	 * When a message times out (runs out of retransmits) this is
	 * incremented.
	 */
	IPMI_STAT_timed_out_lan_commands,

	/* Responses I have sent on the LAN. */
	IPMI_STAT_sent_lan_responses,

	/* The response was delivered to the user. */
	IPMI_STAT_handled_lan_responses,

	/* The response had invalid data in it. */
	IPMI_STAT_invalid_lan_responses,

	/* The response didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_lan_responses,

	/* The command was delivered to the user. */
	IPMI_STAT_handled_commands,

	/* The command had invalid data in it. */
	IPMI_STAT_invalid_commands,

	/* The command didn't have anyone waiting for it. */
	IPMI_STAT_unhandled_commands,

	/* Invalid data in an event. */
	IPMI_STAT_invalid_events,

	/* Events that were received with the proper format. */
	IPMI_STAT_events,

	/* Retransmissions on IPMB that failed. */
	IPMI_STAT_dropped_rexmit_ipmb_commands,

	/* Retransmissions on LAN that failed. */
	IPMI_STAT_dropped_rexmit_lan_commands,

	/* This *must* remain last, add new values above this. */
	IPMI_NUM_STATS
};


#define IPMI_IPMB_NUM_SEQ 64
struct ipmi_smi {
	struct module *owner;

	/* What interface number are we? */
	int intf_num;

	struct kref refcount;

	/* Set when the interface is being unregistered. */
	bool in_shutdown;

	/* Used for a list of interfaces. */
	struct list_head link;

	/*
	 * The list of upper layers that are using me.
	 */
	struct list_head users;
	struct mutex users_mutex;
	atomic_t nr_users;
	struct device_attribute nr_users_devattr;
	struct device_attribute nr_msgs_devattr;
	struct device_attribute maintenance_mode_devattr;


	/* Used for wake ups at startup. */
	wait_queue_head_t waitq;

	/*
	 * Prevents the interface from being unregistered when the
	 * interface is used by being looked up through the BMC
	 * structure.
	 */
	struct mutex bmc_reg_mutex;

	struct bmc_device tmp_bmc;
	struct bmc_device *bmc;
	bool bmc_registered;
	struct list_head bmc_link;
	char *my_dev_name;
	bool in_bmc_register; /* Handle recursive situations.  Yuck. */
	struct work_struct bmc_reg_work;

	const struct ipmi_smi_handlers *handlers;
	void *send_info;

	/* Driver-model device for the system interface. */
	struct device *si_dev;

	/*
	 * A table of sequence numbers for this interface.  We use the
	 * sequence numbers for IPMB messages that go out of the
	 * interface to match them up with their responses.  A routine
	 * is called periodically to time the items in this list.
	 */
	struct mutex seq_lock;
	struct seq_table seq_table[IPMI_IPMB_NUM_SEQ];
	int curr_seq;

	/*
	 * Messages queued for delivery to the user.
	 */
	struct mutex user_msgs_mutex;
	struct list_head user_msgs;

	/*
	 * Messages queued for processing.  If processing fails (out
	 * of memory for instance), they will stay in here to be
	 * processed later in a periodic timer interrupt.  The
	 * workqueue is for handling received messages directly from
	 * the handler.
	 */
	spinlock_t waiting_rcv_msgs_lock;
	struct list_head waiting_rcv_msgs;
	atomic_t watchdog_pretimeouts_to_deliver;
	struct work_struct smi_work;

	spinlock_t xmit_msgs_lock;
	struct list_head xmit_msgs;
	struct ipmi_smi_msg *curr_msg;
	struct list_head hp_xmit_msgs;

	/*
	 * The list of command receivers that are registered for commands
	 * on this interface.
	 */
	struct mutex cmd_rcvrs_mutex;
	struct list_head cmd_rcvrs;

	/*
	 * Events that were queued because no one was there to receive
	 * them.
	 */
	struct mutex events_mutex; /* For dealing with event stuff. */
	struct list_head waiting_events;
	unsigned int waiting_events_count; /* How many events in queue? */
	char event_msg_printed;

	/* How many users are waiting for events? */
	atomic_t event_waiters;
	unsigned int ticks_to_req_ev;

	spinlock_t watch_lock; /* For dealing with watch stuff below. */

	/* How many users are waiting for commands? */
	unsigned int command_waiters;

	/* How many users are waiting for watchdogs? */
	unsigned int watchdog_waiters;

	/* How many users are waiting for message responses? */
	unsigned int response_waiters;

	/*
	 * Tells what the lower layer has last been asked to watch for,
	 * messages and/or watchdogs.  Protected by watch_lock.
	 */
	unsigned int last_watch_mask;

	/*
	 * The event receiver for my BMC, only really used at panic
	 * shutdown as a place to store this.
	 */
	unsigned char event_receiver;
	unsigned char event_receiver_lun;
	unsigned char local_sel_device;
	unsigned char local_event_generator;

	/* For handling of maintenance mode. */
	int maintenance_mode;

#define IPMI_MAINTENANCE_MODE_STATE_OFF 0
#define IPMI_MAINTENANCE_MODE_STATE_FIRMWARE 1
#define IPMI_MAINTENANCE_MODE_STATE_RESET 2
	int maintenance_mode_state;
	int auto_maintenance_timeout;
	spinlock_t maintenance_mode_lock; /* Used in a timer... */

	/*
	 * If we are doing maintenance on something on IPMB, extend
	 * the timeout time to avoid timeouts writing firmware and
	 * such.
	 */
	int ipmb_maintenance_mode_timeout;

	/*
	 * A cheap hack, if this is non-null and a message to an
	 * interface comes in with a NULL user, call this routine with
	 * it.  Note that the message will still be freed by the
	 * caller.  This only works on the system interface.
	 *
	 * Protected by bmc_reg_mutex.
	 */
	void (*null_user_handler)(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg);

	/*
	 * When we are scanning the channels for an SMI, this will
	 * tell which channel we are scanning.
	 */
	int curr_channel;

	/* Channel information */
	struct ipmi_channel_set *channel_list;
	unsigned int curr_working_cset; /* First index into the following. */
	struct ipmi_channel_set wchannels[2];
	struct ipmi_my_addrinfo addrinfo[IPMI_MAX_CHANNELS];
	bool channels_ready;

	atomic_t stats[IPMI_NUM_STATS];

	/*
	 * A duplicate of the run_to_completion flag in the smb_info,
	 * smi_info, and ipmi_serial_info structures.  Used to decrease
	 * the number of parameters passed by "low" level IPMI code.
	 */
	int run_to_completion;
};
#define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev)

static void __get_guid(struct ipmi_smi *intf);
static void __ipmi_bmc_unregister(struct ipmi_smi *intf);
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num);
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id);

static void free_ipmi_user(struct kref *ref)
{
	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
	struct module *owner;

	owner = user->intf->owner;
	kref_put(&user->intf->refcount, intf_free);
	module_put(owner);
	vfree(user);
}

static void release_ipmi_user(struct ipmi_user *user)
{
	kref_put(&user->refcount, free_ipmi_user);
}

static struct ipmi_user *acquire_ipmi_user(struct ipmi_user *user)
{
	if (!kref_get_unless_zero(&user->refcount))
		return NULL;
	return user;
}
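
/*
 * The typical calling pattern for the pair above (it is the pattern
 * the exported functions below follow): acquire_ipmi_user() only
 * takes a reference if the user is still live, so the caller must
 * check for NULL and balance the reference with release_ipmi_user():
 *
 *	user = acquire_ipmi_user(user);
 *	if (!user)
 *		return -ENODEV;
 *	...use user and user->intf...
 *	release_ipmi_user(user);
 */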

/*
 * The driver model view of the IPMI messaging driver.
 */
static struct platform_driver ipmidriver = {
	.driver = {
		.name = "ipmi",
		.bus = &platform_bus_type
	}
};
/*
 * This mutex keeps us from adding the same BMC twice.
 */
static DEFINE_MUTEX(ipmidriver_mutex);

static LIST_HEAD(ipmi_interfaces);
static DEFINE_MUTEX(ipmi_interfaces_mutex);

/*
 * List of watchers that want to know when smi's are added and deleted.
 */
static LIST_HEAD(smi_watchers);
static DEFINE_MUTEX(smi_watchers_mutex);

#define ipmi_inc_stat(intf, stat) \
	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
#define ipmi_get_stat(intf, stat) \
	((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
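
/*
 * The token pasting above means, for example, that
 * ipmi_inc_stat(intf, sent_ipmb_commands) expands to
 * atomic_inc(&intf->stats[IPMI_STAT_sent_ipmb_commands]).
 */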

static const char * const addr_src_to_str[] = {
	"invalid", "hotmod", "hardcoded", "SPMI", "ACPI", "SMBIOS", "PCI",
	"device-tree", "platform"
};

const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
{
	if (src >= SI_LAST)
		src = 0; /* Invalid */
	return addr_src_to_str[src];
}
EXPORT_SYMBOL(ipmi_addr_src_to_str);

static int is_lan_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_LAN_ADDR_TYPE;
}

static int is_ipmb_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_ADDR_TYPE;
}

static int is_ipmb_bcast_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE;
}

static int is_ipmb_direct_addr(struct ipmi_addr *addr)
{
	return addr->addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE;
}

static void free_recv_msg_list(struct list_head *q)
{
	struct ipmi_recv_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
}

static void free_smi_msg_list(struct list_head *q)
{
	struct ipmi_smi_msg *msg, *msg2;

	list_for_each_entry_safe(msg, msg2, q, link) {
		list_del(&msg->link);
		ipmi_free_smi_msg(msg);
	}
}

static void intf_free(struct kref *ref)
{
	struct ipmi_smi *intf = container_of(ref, struct ipmi_smi, refcount);
	int i;
	struct cmd_rcvr *rcvr, *rcvr2;

	free_smi_msg_list(&intf->waiting_rcv_msgs);
	free_recv_msg_list(&intf->waiting_events);

	/*
	 * Wholesale remove all the entries from the list in the
	 * interface.  No need for locks, this is single-threaded.
	 */
	list_for_each_entry_safe(rcvr, rcvr2, &intf->cmd_rcvrs, link)
		kfree(rcvr);

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if ((intf->seq_table[i].inuse)
		    && (intf->seq_table[i].recv_msg))
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
	}

	kfree(intf);
}

int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher)
{
	struct ipmi_smi *intf;
	unsigned int count = 0, i;
	int *interfaces = NULL;
	struct device **devices = NULL;
	int rv = 0;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&smi_watchers_mutex);

	list_add(&watcher->link, &smi_watchers);

	/*
	 * Build an array of ipmi interfaces and fill it in, and
	 * another array of the devices.  We can't call the callback
	 * with ipmi_interfaces_mutex held.  smi_watchers_mutex will
	 * keep things in order for the user.
	 */
	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link)
		count++;
	if (count > 0) {
		interfaces = kmalloc_array(count, sizeof(*interfaces),
					   GFP_KERNEL);
		if (!interfaces) {
			rv = -ENOMEM;
		} else {
			devices = kmalloc_array(count, sizeof(*devices),
						GFP_KERNEL);
			if (!devices) {
				kfree(interfaces);
				interfaces = NULL;
				rv = -ENOMEM;
			}
		}
		count = 0;
	}
	if (interfaces) {
		list_for_each_entry(intf, &ipmi_interfaces, link) {
			int intf_num = READ_ONCE(intf->intf_num);

			if (intf_num == -1)
				continue;
			devices[count] = intf->si_dev;
			interfaces[count++] = intf_num;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (interfaces) {
		for (i = 0; i < count; i++)
			watcher->new_smi(interfaces[i], devices[i]);
		kfree(interfaces);
		kfree(devices);
	}

	mutex_unlock(&smi_watchers_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_smi_watcher_register);

int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher)
{
	mutex_lock(&smi_watchers_mutex);
	list_del(&watcher->link);
	mutex_unlock(&smi_watchers_mutex);
	return 0;
}
EXPORT_SYMBOL(ipmi_smi_watcher_unregister);
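
/*
 * A minimal watcher sketch (the my_* names are hypothetical; see
 * struct ipmi_smi_watcher in <linux/ipmi.h> for the full interface):
 *
 *	static void my_new_smi(int if_num, struct device *dev) { ... }
 *	static void my_smi_gone(int if_num) { ... }
 *
 *	static struct ipmi_smi_watcher my_watcher = {
 *		.owner = THIS_MODULE,
 *		.new_smi = my_new_smi,
 *		.smi_gone = my_smi_gone,
 *	};
 *
 *	rv = ipmi_smi_watcher_register(&my_watcher);
 */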

static void
call_smi_watchers(int i, struct device *dev)
{
	struct ipmi_smi_watcher *w;

	list_for_each_entry(w, &smi_watchers, link) {
		if (try_module_get(w->owner)) {
			w->new_smi(i, dev);
			module_put(w->owner);
		}
	}
}

static int
ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2)
{
	if (addr1->addr_type != addr2->addr_type)
		return 0;

	if (addr1->channel != addr2->channel)
		return 0;

	if (addr1->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		struct ipmi_system_interface_addr *smi_addr1
			= (struct ipmi_system_interface_addr *) addr1;
		struct ipmi_system_interface_addr *smi_addr2
			= (struct ipmi_system_interface_addr *) addr2;
		return (smi_addr1->lun == smi_addr2->lun);
	}

	if (is_ipmb_addr(addr1) || is_ipmb_bcast_addr(addr1)) {
		struct ipmi_ipmb_addr *ipmb_addr1
			= (struct ipmi_ipmb_addr *) addr1;
		struct ipmi_ipmb_addr *ipmb_addr2
			= (struct ipmi_ipmb_addr *) addr2;

		return ((ipmb_addr1->slave_addr == ipmb_addr2->slave_addr)
			&& (ipmb_addr1->lun == ipmb_addr2->lun));
	}

	if (is_ipmb_direct_addr(addr1)) {
		struct ipmi_ipmb_direct_addr *daddr1
			= (struct ipmi_ipmb_direct_addr *) addr1;
		struct ipmi_ipmb_direct_addr *daddr2
			= (struct ipmi_ipmb_direct_addr *) addr2;

		return daddr1->slave_addr == daddr2->slave_addr &&
			daddr1->rq_lun == daddr2->rq_lun &&
			daddr1->rs_lun == daddr2->rs_lun;
	}

	if (is_lan_addr(addr1)) {
		struct ipmi_lan_addr *lan_addr1
			= (struct ipmi_lan_addr *) addr1;
		struct ipmi_lan_addr *lan_addr2
			= (struct ipmi_lan_addr *) addr2;

		return ((lan_addr1->remote_SWID == lan_addr2->remote_SWID)
			&& (lan_addr1->local_SWID == lan_addr2->local_SWID)
			&& (lan_addr1->session_handle
			    == lan_addr2->session_handle)
			&& (lan_addr1->lun == lan_addr2->lun));
	}

	return 1;
}

int ipmi_validate_addr(struct ipmi_addr *addr, int len)
{
	if (len < sizeof(struct ipmi_system_interface_addr))
		return -EINVAL;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		if (addr->channel != IPMI_BMC_CHANNEL)
			return -EINVAL;
		return 0;
	}

	if ((addr->channel == IPMI_BMC_CHANNEL)
	    || (addr->channel >= IPMI_MAX_CHANNELS)
	    || (addr->channel < 0))
		return -EINVAL;

	if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		if (len < sizeof(struct ipmi_ipmb_addr))
			return -EINVAL;
		return 0;
	}

	if (is_ipmb_direct_addr(addr)) {
		struct ipmi_ipmb_direct_addr *daddr = (void *) addr;

		if (addr->channel != 0)
			return -EINVAL;
		if (len < sizeof(struct ipmi_ipmb_direct_addr))
			return -EINVAL;

		if (daddr->slave_addr & 0x01)
			return -EINVAL;
		if (daddr->rq_lun >= 4)
			return -EINVAL;
		if (daddr->rs_lun >= 4)
			return -EINVAL;
		return 0;
	}

	if (is_lan_addr(addr)) {
		if (len < sizeof(struct ipmi_lan_addr))
			return -EINVAL;
		return 0;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(ipmi_validate_addr);

unsigned int ipmi_addr_length(int addr_type)
{
	if (addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
		return sizeof(struct ipmi_system_interface_addr);

	if ((addr_type == IPMI_IPMB_ADDR_TYPE)
	    || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE))
		return sizeof(struct ipmi_ipmb_addr);

	if (addr_type == IPMI_IPMB_DIRECT_ADDR_TYPE)
		return sizeof(struct ipmi_ipmb_direct_addr);

	if (addr_type == IPMI_LAN_ADDR_TYPE)
		return sizeof(struct ipmi_lan_addr);

	return 0;
}
EXPORT_SYMBOL(ipmi_addr_length);

static int deliver_response(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;

	if (!msg->user) {
		/* Special handling for NULL users. */
		if (intf->null_user_handler) {
			intf->null_user_handler(intf, msg);
		} else {
			/* No handler, so give up. */
			rv = -EINVAL;
		}
		ipmi_free_recv_msg(msg);
	} else if (oops_in_progress) {
		/*
		 * If we are running in the panic context, calling the
		 * receive handler doesn't have much meaning and has a
		 * deadlock risk, so simply skip it in that case.
		 */
		ipmi_free_recv_msg(msg);
	} else {
		/*
		 * Deliver it in smi_work.  The message will hold a
		 * refcount to the user.
		 */
		mutex_lock(&intf->user_msgs_mutex);
		list_add_tail(&msg->link, &intf->user_msgs);
		mutex_unlock(&intf->user_msgs_mutex);
		queue_work(system_wq, &intf->smi_work);
	}

	return rv;
}

static void deliver_local_response(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if (deliver_response(intf, msg))
		ipmi_inc_stat(intf, unhandled_local_responses);
	else
		ipmi_inc_stat(intf, handled_local_responses);
}

static void deliver_err_response(struct ipmi_smi *intf,
				 struct ipmi_recv_msg *msg, int err)
{
	msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	msg->msg_data[0] = err;
	msg->msg.netfn |= 1; /* Convert to a response. */
	msg->msg.data_len = 1;
	msg->msg.data = msg->msg_data;
	deliver_local_response(intf, msg);
}

static void smi_add_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters++;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters++;

	if ((intf->last_watch_mask & flags) != flags) {
		intf->last_watch_mask |= flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

static void smi_remove_watch(struct ipmi_smi *intf, unsigned int flags)
{
	unsigned long iflags;

	if (!intf->handlers->set_need_watch)
		return;

	spin_lock_irqsave(&intf->watch_lock, iflags);
	if (flags & IPMI_WATCH_MASK_CHECK_MESSAGES)
		intf->response_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_WATCHDOG)
		intf->watchdog_waiters--;

	if (flags & IPMI_WATCH_MASK_CHECK_COMMANDS)
		intf->command_waiters--;

	flags = 0;
	if (intf->response_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_MESSAGES;
	if (intf->watchdog_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_WATCHDOG;
	if (intf->command_waiters)
		flags |= IPMI_WATCH_MASK_CHECK_COMMANDS;

	if (intf->last_watch_mask != flags) {
		intf->last_watch_mask = flags;
		intf->handlers->set_need_watch(intf->send_info,
					       intf->last_watch_mask);
	}
	spin_unlock_irqrestore(&intf->watch_lock, iflags);
}

/*
 * Find the next sequence number not being used and add the given
 * message with the given timeout to the sequence table.  This must be
 * called with the interface's seq_lock held.
 */
static int intf_next_seq(struct ipmi_smi *intf,
			 struct ipmi_recv_msg *recv_msg,
			 unsigned long timeout,
			 int retries,
			 int broadcast,
			 unsigned char *seq,
			 long *seqid)
{
	int rv = 0;
	unsigned int i;

	if (timeout == 0)
		timeout = default_retry_ms;
	if (retries < 0)
		retries = default_max_retries;

	for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq;
	     i = (i+1)%IPMI_IPMB_NUM_SEQ) {
		if (!intf->seq_table[i].inuse)
			break;
	}

	if (!intf->seq_table[i].inuse) {
		intf->seq_table[i].recv_msg = recv_msg;

		/*
		 * Start with the maximum timeout, when the send response
		 * comes in we will start the real timer.
		 */
		intf->seq_table[i].timeout = MAX_MSG_TIMEOUT;
		intf->seq_table[i].orig_timeout = timeout;
		intf->seq_table[i].retries_left = retries;
		intf->seq_table[i].broadcast = broadcast;
		intf->seq_table[i].inuse = 1;
		intf->seq_table[i].seqid = NEXT_SEQID(intf->seq_table[i].seqid);
		*seq = i;
		*seqid = intf->seq_table[i].seqid;
		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		need_waiter(intf);
	} else {
		rv = -EAGAIN;
	}

	return rv;
}

/*
 * Return the receive message for the given sequence number and
 * release the sequence number so it can be reused.  Some other data
 * is passed in to be sure the message matches up correctly (to help
 * guard against messages coming in after their timeout and the
 * sequence number being reused).
 */
static int intf_find_seq(struct ipmi_smi *intf,
			 unsigned char seq,
			 short channel,
			 unsigned char cmd,
			 unsigned char netfn,
			 struct ipmi_addr *addr,
			 struct ipmi_recv_msg **recv_msg)
{
	int rv = -ENODEV;

	if (seq >= IPMI_IPMB_NUM_SEQ)
		return -EINVAL;

	mutex_lock(&intf->seq_lock);
	if (intf->seq_table[seq].inuse) {
		struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg;

		if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd)
		    && (msg->msg.netfn == netfn)
		    && (ipmi_addr_equal(addr, &msg->addr))) {
			*recv_msg = msg;
			intf->seq_table[seq].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			rv = 0;
		}
	}
	mutex_unlock(&intf->seq_lock);

	return rv;
}


/* Start the timer for a specific sequence table entry. */
static int intf_start_seq_timer(struct ipmi_smi *intf,
				long msgid)
{
	int rv = -ENODEV;
	unsigned char seq;
	unsigned long seqid;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	mutex_lock(&intf->seq_lock);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];
		ent->timeout = ent->orig_timeout;
		rv = 0;
	}
	mutex_unlock(&intf->seq_lock);

	return rv;
}

/* Got an error for the send message for a specific sequence number. */
static int intf_err_seq(struct ipmi_smi *intf,
			long msgid,
			unsigned int err)
{
	int rv = -ENODEV;
	unsigned char seq;
	unsigned long seqid;
	struct ipmi_recv_msg *msg = NULL;


	GET_SEQ_FROM_MSGID(msgid, seq, seqid);

	mutex_lock(&intf->seq_lock);
	/*
	 * We do this verification because the user can be deleted
	 * while a message is outstanding.
	 */
	if ((intf->seq_table[seq].inuse)
	    && (intf->seq_table[seq].seqid == seqid)) {
		struct seq_table *ent = &intf->seq_table[seq];

		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		rv = 0;
	}
	mutex_unlock(&intf->seq_lock);

	if (msg)
		deliver_err_response(intf, msg, err);

	return rv;
}

int ipmi_create_user(unsigned int if_num,
		     const struct ipmi_user_hndl *handler,
		     void *handler_data,
		     struct ipmi_user **user)
{
	struct ipmi_user *new_user = NULL;
	int rv = 0;
	struct ipmi_smi *intf;

	/*
	 * There is no module usecount here, because it's not
	 * required.  Since this can only be used by and called from
	 * other modules, they will implicitly use this module, and
	 * thus this can't be removed unless the other modules are
	 * removed.
	 */

	if (handler == NULL)
		return -EINVAL;

	/*
	 * Make sure the driver is actually initialized, this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num)
			goto found;
	}
	/* Not found, return an error */
	rv = -EINVAL;
	goto out_unlock;

found:
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_unlock;
	}

	if (atomic_add_return(1, &intf->nr_users) > max_users) {
		rv = -EBUSY;
		goto out_kfree;
	}

	new_user = vzalloc(sizeof(*new_user));
	if (!new_user) {
		rv = -ENOMEM;
		goto out_kfree;
	}

	if (!try_module_get(intf->owner)) {
		rv = -ENODEV;
		goto out_kfree;
	}

	/* Note that each existing user holds a refcount to the interface. */
	kref_get(&intf->refcount);

	atomic_set(&new_user->nr_msgs, 0);
	kref_init(&new_user->refcount);
	refcount_set(&new_user->destroyed, 1);
	kref_get(&new_user->refcount); /* Destroy owns a refcount. */
	new_user->handler = handler;
	new_user->handler_data = handler_data;
	new_user->intf = intf;
	new_user->gets_events = false;

	mutex_lock(&intf->users_mutex);
	mutex_lock(&intf->seq_lock);
	list_add(&new_user->link, &intf->users);
	mutex_unlock(&intf->seq_lock);
	mutex_unlock(&intf->users_mutex);

	if (handler->ipmi_watchdog_pretimeout)
		/* User wants pretimeouts, so make sure to watch for them. */
		smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

out_kfree:
	if (rv) {
		atomic_dec(&intf->nr_users);
		vfree(new_user);
	} else {
		*user = new_user;
	}
out_unlock:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}
EXPORT_SYMBOL(ipmi_create_user);
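
/*
 * A minimal user sketch (the my_* names are hypothetical; see struct
 * ipmi_user_hndl in <linux/ipmi.h> for the full handler interface).
 * The receive handler owns the message and must eventually free it:
 *
 *	static void my_recv(struct ipmi_recv_msg *msg, void *data)
 *	{
 *		...consume msg...
 *		ipmi_free_recv_msg(msg);
 *	}
 *
 *	static const struct ipmi_user_hndl my_hndl = {
 *		.ipmi_recv_hndl = my_recv,
 *	};
 *
 *	rv = ipmi_create_user(0, &my_hndl, NULL, &user);
 */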

int ipmi_get_smi_info(int if_num, struct ipmi_smi_info *data)
{
	int rv = -EINVAL;
	struct ipmi_smi *intf;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (intf->intf_num == if_num) {
			if (!intf->handlers->get_smi_info)
				rv = -ENOTTY;
			else
				rv = intf->handlers->get_smi_info(intf->send_info, data);
			break;
		}
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_smi_info);

/* Must be called with intf->users_mutex held. */
static void _ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;
	int i;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	struct ipmi_recv_msg *msg, *msg2;

	if (!refcount_dec_if_one(&user->destroyed))
		return;

	if (user->handler->shutdown)
		user->handler->shutdown(user->handler_data);

	if (user->handler->ipmi_watchdog_pretimeout)
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_WATCHDOG);

	if (user->gets_events)
		atomic_dec(&intf->event_waiters);

	/* Remove the user from the interface's list and sequence table. */
	list_del(&user->link);
	atomic_dec(&intf->nr_users);

	mutex_lock(&intf->seq_lock);
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		if (intf->seq_table[i].inuse
		    && (intf->seq_table[i].recv_msg->user == user)) {
			intf->seq_table[i].inuse = 0;
			smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
			ipmi_free_recv_msg(intf->seq_table[i].recv_msg);
		}
	}
	mutex_unlock(&intf->seq_lock);

	/*
	 * Remove the user from the command receiver's table.  First
	 * we build a list of everything (not using the standard link,
	 * since other things may be using it till we do
	 * synchronize_rcu()) then free everything in that list.
	 */
	mutex_lock(&intf->cmd_rcvrs_mutex);
	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if (rcvr->user == user) {
			list_del_rcu(&rcvr->link);
			rcvr->next = rcvrs;
			rcvrs = rcvr;
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	while (rcvrs) {
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	mutex_lock(&intf->user_msgs_mutex);
	list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
		if (msg->user != user)
			continue;
		list_del(&msg->link);
		ipmi_free_recv_msg(msg);
	}
	mutex_unlock(&intf->user_msgs_mutex);

	release_ipmi_user(user);
}

void ipmi_destroy_user(struct ipmi_user *user)
{
	struct ipmi_smi *intf = user->intf;

	mutex_lock(&intf->users_mutex);
	_ipmi_destroy_user(user);
	mutex_unlock(&intf->users_mutex);

	kref_put(&user->refcount, free_ipmi_user);
}
EXPORT_SYMBOL(ipmi_destroy_user);

int ipmi_get_version(struct ipmi_user *user,
		     unsigned char *major,
		     unsigned char *minor)
{
	struct ipmi_device_id id;
	int rv;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = bmc_get_device_id(user->intf, NULL, &id, NULL, NULL);
	if (!rv) {
		*major = ipmi_version_major(&id);
		*minor = ipmi_version_minor(&id);
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_version);

int ipmi_set_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].address = address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_address);

int ipmi_get_my_address(struct ipmi_user *user,
			unsigned int channel,
			unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].address;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_address);

int ipmi_set_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char LUN)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		user->intf->addrinfo[channel].lun = LUN & 0x3;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_my_LUN);

int ipmi_get_my_LUN(struct ipmi_user *user,
		    unsigned int channel,
		    unsigned char *address)
{
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	if (channel >= IPMI_MAX_CHANNELS) {
		rv = -EINVAL;
	} else {
		channel = array_index_nospec(channel, IPMI_MAX_CHANNELS);
		*address = user->intf->addrinfo[channel].lun;
	}
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_get_my_LUN);

int ipmi_get_maintenance_mode(struct ipmi_user *user)
{
	int mode;
	unsigned long flags;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&user->intf->maintenance_mode_lock, flags);
	mode = user->intf->maintenance_mode;
	spin_unlock_irqrestore(&user->intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return mode;
}
EXPORT_SYMBOL(ipmi_get_maintenance_mode);

static void maintenance_mode_update(struct ipmi_smi *intf)
{
	if (intf->handlers->set_maintenance_mode)
		/*
		 * Lower level drivers only care about firmware mode
		 * as it affects their timing.  They don't care about
		 * reset, which disables all commands for a while.
		 */
		intf->handlers->set_maintenance_mode(
			intf->send_info,
			(intf->maintenance_mode_state ==
			 IPMI_MAINTENANCE_MODE_STATE_FIRMWARE));
}

int ipmi_set_maintenance_mode(struct ipmi_user *user, int mode)
{
	int rv = 0;
	unsigned long flags;
	struct ipmi_smi *intf = user->intf;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
	if (intf->maintenance_mode != mode) {
		switch (mode) {
		case IPMI_MAINTENANCE_MODE_AUTO:
			/* Just leave it alone. */
			break;

		case IPMI_MAINTENANCE_MODE_OFF:
			intf->maintenance_mode_state =
				IPMI_MAINTENANCE_MODE_STATE_OFF;
			break;

		case IPMI_MAINTENANCE_MODE_ON:
			intf->maintenance_mode_state =
				IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
			break;

		default:
			rv = -EINVAL;
			goto out_unlock;
		}
		intf->maintenance_mode = mode;

		maintenance_mode_update(intf);
	}
out_unlock:
	spin_unlock_irqrestore(&intf->maintenance_mode_lock, flags);
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_set_maintenance_mode);

int ipmi_set_gets_events(struct ipmi_user *user, bool val)
{
	struct ipmi_smi *intf = user->intf;
	struct ipmi_recv_msg *msg, *msg2;
	struct list_head msgs;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);
	if (user->gets_events == val)
		goto out;

	user->gets_events = val;

	if (val) {
		if (atomic_inc_return(&intf->event_waiters) == 1)
			need_waiter(intf);
	} else {
		atomic_dec(&intf->event_waiters);
	}

	/* Deliver any queued events. */
	while (user->gets_events && !list_empty(&intf->waiting_events)) {
		list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link)
			list_move_tail(&msg->link, &msgs);
		intf->waiting_events_count = 0;
		if (intf->event_msg_printed) {
			dev_warn(intf->si_dev, "Event queue no longer full\n");
			intf->event_msg_printed = 0;
		}

		list_for_each_entry_safe(msg, msg2, &msgs, link) {
			ipmi_set_recv_msg_user(msg, user);
			deliver_local_response(intf, msg);
		}
	}

out:
	mutex_unlock(&intf->events_mutex);
	release_ipmi_user(user);

	return 0;
}
EXPORT_SYMBOL(ipmi_set_gets_events);

static struct cmd_rcvr *find_cmd_rcvr(struct ipmi_smi *intf,
				      unsigned char netfn,
				      unsigned char cmd,
				      unsigned char chan)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & (1 << chan)))
			return rcvr;
	}
	return NULL;
}

static int is_cmd_rcvr_exclusive(struct ipmi_smi *intf,
				 unsigned char netfn,
				 unsigned char cmd,
				 unsigned int chans)
{
	struct cmd_rcvr *rcvr;

	list_for_each_entry_rcu(rcvr, &intf->cmd_rcvrs, link,
				lockdep_is_held(&intf->cmd_rcvrs_mutex)) {
		if ((rcvr->netfn == netfn) && (rcvr->cmd == cmd)
		    && (rcvr->chans & chans))
			return 0;
	}
	return 1;
}

int ipmi_register_for_cmd(struct ipmi_user *user,
			  unsigned char netfn,
			  unsigned char cmd,
			  unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	int rv = 0;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rcvr = kmalloc(sizeof(*rcvr), GFP_KERNEL);
	if (!rcvr) {
		rv = -ENOMEM;
		goto out_release;
	}
	rcvr->cmd = cmd;
	rcvr->netfn = netfn;
	rcvr->chans = chans;
	rcvr->user = user;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	/* Make sure the command/netfn is not already registered. */
	if (!is_cmd_rcvr_exclusive(intf, netfn, cmd, chans)) {
		rv = -EBUSY;
		goto out_unlock;
	}

	smi_add_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);

	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);

out_unlock:
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	if (rv)
		kfree(rcvr);
out_release:
	release_ipmi_user(user);

	return rv;
}
EXPORT_SYMBOL(ipmi_register_for_cmd);

int ipmi_unregister_for_cmd(struct ipmi_user *user,
			    unsigned char netfn,
			    unsigned char cmd,
			    unsigned int chans)
{
	struct ipmi_smi *intf = user->intf;
	struct cmd_rcvr *rcvr;
	struct cmd_rcvr *rcvrs = NULL;
	int i, rv = -ENOENT;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	mutex_lock(&intf->cmd_rcvrs_mutex);
	for (i = 0; i < IPMI_NUM_CHANNELS; i++) {
		if (((1 << i) & chans) == 0)
			continue;
		rcvr = find_cmd_rcvr(intf, netfn, cmd, i);
		if (rcvr == NULL)
			continue;
		if (rcvr->user == user) {
			rv = 0;
			rcvr->chans &= ~chans;
			if (rcvr->chans == 0) {
				list_del_rcu(&rcvr->link);
				rcvr->next = rcvrs;
				rcvrs = rcvr;
			}
		}
	}
	mutex_unlock(&intf->cmd_rcvrs_mutex);
	synchronize_rcu();
	release_ipmi_user(user);
	while (rcvrs) {
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_COMMANDS);
		rcvr = rcvrs;
		rcvrs = rcvr->next;
		kfree(rcvr);
	}

	return rv;
}
EXPORT_SYMBOL(ipmi_unregister_for_cmd);

unsigned char
ipmb_checksum(unsigned char *data, int size)
{
	unsigned char csum = 0;

	for (; size > 0; size--, data++)
		csum += *data;

	return -csum;
}
EXPORT_SYMBOL(ipmb_checksum);
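
/*
 * A worked example of the 2's complement checksum above: for the two
 * header bytes { 0x20, 0x06 } the sum is 0x26, so the function
 * returns -0x26 == 0xda, and the receiver's verification
 * (0x20 + 0x06 + 0xda) & 0xff == 0 passes.
 */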

static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg,
				   struct kernel_ipmi_msg *msg,
				   struct ipmi_ipmb_addr *ipmb_addr,
				   long msgid,
				   unsigned char ipmb_seq,
				   int broadcast,
				   unsigned char source_address,
				   unsigned char source_lun)
{
	int i = broadcast;

	/* Format the IPMB header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = ipmb_addr->channel;
	if (broadcast)
		smi_msg->data[3] = 0;
	smi_msg->data[i+3] = ipmb_addr->slave_addr;
	smi_msg->data[i+4] = (msg->netfn << 2) | (ipmb_addr->lun & 0x3);
	smi_msg->data[i+5] = ipmb_checksum(&smi_msg->data[i + 3], 2);
	smi_msg->data[i+6] = source_address;
	smi_msg->data[i+7] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[i+8] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[i + 9], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 9;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[i+smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[i + 6], smi_msg->data_size - 6);

	/*
	 * Add on the checksum size and the offset from the
	 * broadcast.
	 */
	smi_msg->data_size += 1 + i;

	smi_msg->msgid = msgid;
}
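
/*
 * For reference, the Send Message framing produced above for the
 * non-broadcast case (i == 0):
 *
 *	data[0]  IPMI_NETFN_APP_REQUEST << 2
 *	data[1]  IPMI_SEND_MSG_CMD
 *	data[2]  channel
 *	data[3]  rsSA (destination slave address)
 *	data[4]  netfn << 2 | rsLUN
 *	data[5]  checksum over data[3..4]
 *	data[6]  rqSA (source address)
 *	data[7]  rqSeq << 2 | rqLUN
 *	data[8]  cmd
 *	data[9+] message data, then a trailing checksum over data[6..]
 *
 * A broadcast stores a leading zero at data[3] and shifts the rest
 * of the IPMB frame down by one.
 */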

static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_lan_addr *lan_addr,
				  long msgid,
				  unsigned char ipmb_seq,
				  unsigned char source_lun)
{
	/* Format the LAN header data. */
	smi_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_msg->data[1] = IPMI_SEND_MSG_CMD;
	smi_msg->data[2] = lan_addr->channel;
	smi_msg->data[3] = lan_addr->session_handle;
	smi_msg->data[4] = lan_addr->remote_SWID;
	smi_msg->data[5] = (msg->netfn << 2) | (lan_addr->lun & 0x3);
	smi_msg->data[6] = ipmb_checksum(&smi_msg->data[4], 2);
	smi_msg->data[7] = lan_addr->local_SWID;
	smi_msg->data[8] = (ipmb_seq << 2) | source_lun;
	smi_msg->data[9] = msg->cmd;

	/* Now tack on the data to the message. */
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[10], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 10;

	/* Now calculate the checksum and tack it on. */
	smi_msg->data[smi_msg->data_size]
		= ipmb_checksum(&smi_msg->data[7], smi_msg->data_size - 7);

	/*
	 * Add on the checksum size.  Unlike the IPMB case, there is
	 * no broadcast offset here.
	 */
	smi_msg->data_size += 1;

	smi_msg->msgid = msgid;
}

static struct ipmi_smi_msg *smi_add_send_msg(struct ipmi_smi *intf,
					     struct ipmi_smi_msg *smi_msg,
					     int priority)
{
	if (intf->curr_msg) {
		if (priority > 0)
			list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
		else
			list_add_tail(&smi_msg->link, &intf->xmit_msgs);
		smi_msg = NULL;
	} else {
		intf->curr_msg = smi_msg;
	}

	return smi_msg;
}

static void smi_send(struct ipmi_smi *intf,
		     const struct ipmi_smi_handlers *handlers,
		     struct ipmi_smi_msg *smi_msg, int priority)
{
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	unsigned long flags = 0;

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	smi_msg = smi_add_send_msg(intf, smi_msg, priority);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (smi_msg)
		handlers->sender(intf->send_info, smi_msg);
}

static bool is_maintenance_mode_cmd(struct kernel_ipmi_msg *msg)
{
	return (((msg->netfn == IPMI_NETFN_APP_REQUEST)
		 && ((msg->cmd == IPMI_COLD_RESET_CMD)
		     || (msg->cmd == IPMI_WARM_RESET_CMD)))
		|| (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST));
}

static int i_ipmi_req_sysintf(struct ipmi_smi *intf,
			      struct ipmi_addr *addr,
			      long msgid,
			      struct kernel_ipmi_msg *msg,
			      struct ipmi_smi_msg *smi_msg,
			      struct ipmi_recv_msg *recv_msg,
			      int retries,
			      unsigned int retry_time_ms)
{
	struct ipmi_system_interface_addr *smi_addr;

	if (msg->netfn & 1)
		/* Responses are not allowed to the SMI. */
		return -EINVAL;

	smi_addr = (struct ipmi_system_interface_addr *) addr;
	if (smi_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, smi_addr, sizeof(*smi_addr));

	if ((msg->netfn == IPMI_NETFN_APP_REQUEST)
	    && ((msg->cmd == IPMI_SEND_MSG_CMD)
		|| (msg->cmd == IPMI_GET_MSG_CMD)
		|| (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) {
		/*
		 * We don't let the user do these, since we manage
		 * the sequence numbers.
		 */
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (is_maintenance_mode_cmd(msg)) {
		unsigned long flags;
		int newst;

		if (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)
			newst = IPMI_MAINTENANCE_MODE_STATE_FIRMWARE;
		else
			newst = IPMI_MAINTENANCE_MODE_STATE_RESET;

		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		intf->auto_maintenance_timeout = maintenance_mode_timeout_ms;
		if (!intf->maintenance_mode
		    && intf->maintenance_mode_state < newst) {
			intf->maintenance_mode_state = newst;
			maintenance_mode_update(intf);
			mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	if (msg->data_len + 2 > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	smi_msg->data[0] = (msg->netfn << 2) | (smi_addr->lun & 0x3);
	smi_msg->data[1] = msg->cmd;
	smi_msg->msgid = msgid;
	smi_msg->recv_msg = recv_msg;
	if (msg->data_len > 0)
		memcpy(&smi_msg->data[2], msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 2;
	ipmi_inc_stat(intf, sent_local_commands);

	return 0;
}

static int i_ipmi_req_ipmb(struct ipmi_smi *intf,
			   struct ipmi_addr *addr,
			   long msgid,
			   struct kernel_ipmi_msg *msg,
			   struct ipmi_smi_msg *smi_msg,
			   struct ipmi_recv_msg *recv_msg,
			   unsigned char source_address,
			   unsigned char source_lun,
			   int retries,
			   unsigned int retry_time_ms)
{
	struct ipmi_ipmb_addr *ipmb_addr;
	unsigned char ipmb_seq;
	long seqid;
	int broadcast = 0;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if (chans[addr->channel].medium != IPMI_CHANNEL_MEDIUM_IPMB) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) {
		/*
		 * Broadcasts add a zero at the beginning of the
		 * message, but are otherwise the same as an IPMB
		 * address.
		 */
		addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		broadcast = 1;
		retries = 0; /* Don't retry broadcasts. */
	}

	/*
	 * 9 for the header and 1 for the checksum, plus
	 * possibly one for the broadcast.
	 */
	if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	ipmb_addr = (struct ipmi_ipmb_addr *) addr;
	if (ipmb_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_ipmb_responses);
		format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid,
				msgid, broadcast,
				source_address, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->recv_msg = recv_msg;
	} else {
		mutex_lock(&intf->seq_lock);

		if (is_maintenance_mode_cmd(msg))
			intf->ipmb_maintenance_mode_timeout =
				maintenance_mode_timeout_ms;

		if (intf->ipmb_maintenance_mode_timeout && retry_time_ms == 0)
			/* Different default in maintenance mode */
			retry_time_ms = default_maintenance_retry_ms;

		/*
		 * Create a sequence number with a 1 second
		 * timeout and 4 retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   broadcast,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_ipmb_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_ipmb_msg(smi_msg, msg, ipmb_addr,
				STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
				ipmb_seq, broadcast,
				source_address, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		mutex_unlock(&intf->seq_lock);
	}

	return rv;
}
2108
2109
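/*
 * Format a message for a directly addressed IPMB device.  Only
 * usable if the low-level interface reports the
 * IPMI_SMI_CAN_HANDLE_IPMB_DIRECT capability.
 */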
static int i_ipmi_req_ipmb_direct(struct ipmi_smi *intf,
				  struct ipmi_addr *addr,
				  long msgid,
				  struct kernel_ipmi_msg *msg,
				  struct ipmi_smi_msg *smi_msg,
				  struct ipmi_recv_msg *recv_msg,
				  unsigned char source_lun)
{
	struct ipmi_ipmb_direct_addr *daddr;
	bool is_cmd = !(recv_msg->msg.netfn & 0x1);

	if (!(intf->handlers->flags & IPMI_SMI_CAN_HANDLE_IPMB_DIRECT))
		return -EAFNOSUPPORT;

	/* Responses must have a completion code. */
	if (!is_cmd && msg->data_len < 1) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	if ((msg->data_len + 4) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	daddr = (struct ipmi_ipmb_direct_addr *) addr;
	if (daddr->rq_lun > 3 || daddr->rs_lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	smi_msg->type = IPMI_SMI_MSG_TYPE_IPMB_DIRECT;
	smi_msg->msgid = msgid;

	if (is_cmd) {
		smi_msg->data[0] = msg->netfn << 2 | daddr->rs_lun;
		smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rq_lun;
	} else {
		smi_msg->data[0] = msg->netfn << 2 | daddr->rq_lun;
		smi_msg->data[2] = recv_msg->msgid << 2 | daddr->rs_lun;
	}
	smi_msg->data[1] = daddr->slave_addr;
	smi_msg->data[3] = msg->cmd;

	memcpy(smi_msg->data + 4, msg->data, msg->data_len);
	smi_msg->data_size = msg->data_len + 4;

	smi_msg->recv_msg = recv_msg;

	return 0;
}

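/*
 * Format a message for a LAN channel.  This parallels the IPMB path
 * above: commands get a sequence number for response matching and
 * retransmission, responses reuse the user's msgid.
 */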
static int i_ipmi_req_lan(struct ipmi_smi *intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  struct ipmi_smi_msg *smi_msg,
			  struct ipmi_recv_msg *recv_msg,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	struct ipmi_lan_addr *lan_addr;
	unsigned char ipmb_seq;
	long seqid;
	struct ipmi_channel *chans;
	int rv = 0;

	if (addr->channel >= IPMI_MAX_CHANNELS) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	chans = READ_ONCE(intf->channel_list)->c;

	if ((chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_8023LAN)
			&& (chans[addr->channel].medium
				!= IPMI_CHANNEL_MEDIUM_ASYNC)) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	/* 11 for the header and 1 for the checksum. */
	if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EMSGSIZE;
	}

	lan_addr = (struct ipmi_lan_addr *) addr;
	if (lan_addr->lun > 3) {
		ipmi_inc_stat(intf, sent_invalid_commands);
		return -EINVAL;
	}

	memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr));

	if (recv_msg->msg.netfn & 0x1) {
		/*
		 * It's a response, so use the user's sequence
		 * from msgid.
		 */
		ipmi_inc_stat(intf, sent_lan_responses);
		format_lan_msg(smi_msg, msg, lan_addr, msgid,
			       msgid, source_lun);

		/*
		 * Save the receive message so we can use it
		 * to deliver the response.
		 */
		smi_msg->recv_msg = recv_msg;
	} else {
		mutex_lock(&intf->seq_lock);

		/*
		 * Allocate a sequence number with the given
		 * timeout and retries.
		 */
		rv = intf_next_seq(intf,
				   recv_msg,
				   retry_time_ms,
				   retries,
				   0,
				   &ipmb_seq,
				   &seqid);
		if (rv)
			/*
			 * We have used up all the sequence numbers,
			 * probably, so abort.
			 */
			goto out_err;

		ipmi_inc_stat(intf, sent_lan_commands);

		/*
		 * Store the sequence number in the message,
		 * so that when the send message response
		 * comes back we can start the timer.
		 */
		format_lan_msg(smi_msg, msg, lan_addr,
			       STORE_SEQ_IN_MSGID(ipmb_seq, seqid),
			       ipmb_seq, source_lun);

		/*
		 * Copy the message into the recv message data, so we
		 * can retransmit it later if necessary.
		 */
		memcpy(recv_msg->msg_data, smi_msg->data,
		       smi_msg->data_size);
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg.data_len = smi_msg->data_size;

		/*
		 * We don't unlock until here, because we need
		 * to copy the completed message into the
		 * recv_msg before we release the lock.
		 * Otherwise, race conditions may bite us.  I
		 * know that's pretty paranoid, but I prefer
		 * to be correct.
		 */
out_err:
		mutex_unlock(&intf->seq_lock);
	}

	return rv;
}

/*
 * Separate from ipmi_request so that the user does not have to be
 * supplied in certain circumstances (mainly at panic time).  If
 * messages are supplied, they will be freed, even if an error
 * occurs.
 */
static int i_ipmi_request(struct ipmi_user *user,
			  struct ipmi_smi *intf,
			  struct ipmi_addr *addr,
			  long msgid,
			  struct kernel_ipmi_msg *msg,
			  void *user_msg_data,
			  void *supplied_smi,
			  struct ipmi_recv_msg *supplied_recv,
			  int priority,
			  unsigned char source_address,
			  unsigned char source_lun,
			  int retries,
			  unsigned int retry_time_ms)
{
	struct ipmi_smi_msg *smi_msg;
	struct ipmi_recv_msg *recv_msg;
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	int rv = 0;

	if (supplied_recv) {
		recv_msg = supplied_recv;
		recv_msg->user = user;
		if (user)
			atomic_inc(&user->nr_msgs);
	} else {
		recv_msg = ipmi_alloc_recv_msg(user);
		if (IS_ERR(recv_msg))
			return PTR_ERR(recv_msg);
	}
	recv_msg->user_msg_data = user_msg_data;

	if (supplied_smi)
		smi_msg = supplied_smi;
	else {
		smi_msg = ipmi_alloc_smi_msg();
		if (smi_msg == NULL) {
			if (!supplied_recv)
				ipmi_free_recv_msg(recv_msg);
			return -ENOMEM;
		}
	}

	if (!run_to_completion)
		mutex_lock(&intf->users_mutex);
	if (intf->maintenance_mode_state == IPMI_MAINTENANCE_MODE_STATE_RESET) {
		/* No messages while the BMC is in reset. */
		rv = -EBUSY;
		goto out_err;
	}
	if (intf->in_shutdown) {
		rv = -ENODEV;
		goto out_err;
	}

	recv_msg->msgid = msgid;
	/*
	 * Store the message to send in the receive message so timeout
	 * responses can get the proper response data.
	 */
	recv_msg->msg = *msg;

	if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
		rv = i_ipmi_req_sysintf(intf, addr, msgid, msg, smi_msg,
					recv_msg, retries, retry_time_ms);
	} else if (is_ipmb_addr(addr) || is_ipmb_bcast_addr(addr)) {
		rv = i_ipmi_req_ipmb(intf, addr, msgid, msg, smi_msg, recv_msg,
				     source_address, source_lun,
				     retries, retry_time_ms);
	} else if (is_ipmb_direct_addr(addr)) {
		rv = i_ipmi_req_ipmb_direct(intf, addr, msgid, msg, smi_msg,
					    recv_msg, source_lun);
	} else if (is_lan_addr(addr)) {
		rv = i_ipmi_req_lan(intf, addr, msgid, msg, smi_msg, recv_msg,
				    source_lun, retries, retry_time_ms);
	} else {
		/* Unknown address type. */
		ipmi_inc_stat(intf, sent_invalid_commands);
		rv = -EINVAL;
	}

	if (rv) {
out_err:
		if (!supplied_smi)
			ipmi_free_smi_msg(smi_msg);
		if (!supplied_recv)
			ipmi_free_recv_msg(recv_msg);
	} else {
		dev_dbg(intf->si_dev, "Send: %*ph\n",
			smi_msg->data_size, smi_msg->data);

		smi_send(intf, intf->handlers, smi_msg, priority);
	}
	if (!run_to_completion)
		mutex_unlock(&intf->users_mutex);

	return rv;
}

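/*
 * Validate the channel in an address and look up the default source
 * address and LUN configured for that channel on this interface.
 */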
static int check_addr(struct ipmi_smi *intf,
		      struct ipmi_addr *addr,
		      unsigned char *saddr,
		      unsigned char *lun)
{
	if (addr->channel >= IPMI_MAX_CHANNELS)
		return -EINVAL;
	addr->channel = array_index_nospec(addr->channel, IPMI_MAX_CHANNELS);
	*lun = intf->addrinfo[addr->channel].lun;
	*saddr = intf->addrinfo[addr->channel].address;
	return 0;
}

int ipmi_request_settime(struct ipmi_user *user,
			 struct ipmi_addr *addr,
			 long msgid,
			 struct kernel_ipmi_msg *msg,
			 void *user_msg_data,
			 int priority,
			 int retries,
			 unsigned int retry_time_ms)
{
	unsigned char saddr = 0, lun = 0;
	int rv;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    NULL, NULL,
				    priority,
				    saddr,
				    lun,
				    retries,
				    retry_time_ms);

	release_ipmi_user(user);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_settime);

int ipmi_request_supply_msgs(struct ipmi_user *user,
			     struct ipmi_addr *addr,
			     long msgid,
			     struct kernel_ipmi_msg *msg,
			     void *user_msg_data,
			     void *supplied_smi,
			     struct ipmi_recv_msg *supplied_recv,
			     int priority)
{
	unsigned char saddr = 0, lun = 0;
	int rv;

	if (!user)
		return -EINVAL;

	user = acquire_ipmi_user(user);
	if (!user)
		return -ENODEV;

	rv = check_addr(user->intf, addr, &saddr, &lun);
	if (!rv)
		rv = i_ipmi_request(user,
				    user->intf,
				    addr,
				    msgid,
				    msg,
				    user_msg_data,
				    supplied_smi,
				    supplied_recv,
				    priority,
				    saddr,
				    lun,
				    -1, 0);

	release_ipmi_user(user);
	return rv;
}
EXPORT_SYMBOL(ipmi_request_supply_msgs);

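/*
 * Completion handler for the internal Get Device ID request sent by
 * __get_device_id() below; it runs as the interface's
 * null_user_handler and parks the parsed result in
 * intf->bmc->fetch_id.
 */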
static void bmc_device_id_handler(struct ipmi_smi *intf,
				  struct ipmi_recv_msg *msg)
{
	int rv;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_ID_CMD)) {
		dev_warn(intf->si_dev,
			 "invalid device_id msg: addr_type=%d netfn=%x cmd=%x\n",
			 msg->addr.addr_type, msg->msg.netfn, msg->msg.cmd);
		return;
	}

	if (msg->msg.data[0]) {
		dev_warn(intf->si_dev, "device id fetch failed: 0x%2.2x\n",
			 msg->msg.data[0]);
		intf->bmc->dyn_id_set = 0;
		goto out;
	}

	rv = ipmi_demangle_device_id(msg->msg.netfn, msg->msg.cmd,
			msg->msg.data, msg->msg.data_len, &intf->bmc->fetch_id);
	if (rv) {
		dev_warn(intf->si_dev, "device id demangle failed: %d\n", rv);
		/* Record the completion code on an error. */
		intf->bmc->cc = msg->msg.data[0];
		intf->bmc->dyn_id_set = 0;
	} else {
		/*
		 * Make sure the id data is available before setting
		 * dyn_id_set.
		 */
		smp_wmb();
		intf->bmc->dyn_id_set = 1;
	}
out:
	wake_up(&intf->waitq);
}

static int
send_get_device_id_cmd(struct ipmi_smi *intf)
{
	struct ipmi_system_interface_addr si;
	struct kernel_ipmi_msg msg;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;

	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

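/*
 * Synchronously fetch the device ID, retrying up to
 * GET_DEVICE_ID_MAX_RETRY times if the BMC returns an error
 * completion code.  Sleeps waiting for the response, so it cannot
 * be called from atomic context.
 */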
static int __get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc)
{
	int rv;
	unsigned int retry_count = 0;

	intf->null_user_handler = bmc_device_id_handler;

retry:
	bmc->cc = 0;
	bmc->dyn_id_set = 2;

	rv = send_get_device_id_cmd(intf);
	if (rv)
		goto out_reset_handler;

	wait_event(intf->waitq, bmc->dyn_id_set != 2);

	if (!bmc->dyn_id_set) {
		if (bmc->cc != IPMI_CC_NO_ERROR &&
		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
			msleep(500);
			dev_warn(intf->si_dev,
				 "BMC returned 0x%2.2x, retry get bmc device id\n",
				 bmc->cc);
			goto retry;
		}

		rv = -EIO; /* Something went wrong in the fetch. */
	}

	/* dyn_id_set makes the id data available. */
	smp_rmb();

out_reset_handler:
	intf->null_user_handler = NULL;

	return rv;
}

/*
 * Fetch the device id for the bmc/interface.  You must pass in either
 * bmc or intf; this code will get the other one.  If the data has
 * been recently fetched, this will just use the cached data.  Otherwise
 * it will run a new fetch.
 *
 * Except for the first time this is called (in ipmi_add_smi()),
 * this will always return good data.
 */
static int __bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			       struct ipmi_device_id *id,
			       bool *guid_set, guid_t *guid, int intf_num)
{
	int rv = 0;
	int prev_dyn_id_set, prev_guid_set;
	bool intf_set = intf != NULL;

	if (!intf) {
		mutex_lock(&bmc->dyn_mutex);
retry_bmc_lock:
		if (list_empty(&bmc->intfs)) {
			mutex_unlock(&bmc->dyn_mutex);
			return -ENOENT;
		}
		intf = list_first_entry(&bmc->intfs, struct ipmi_smi,
					bmc_link);
		kref_get(&intf->refcount);
		mutex_unlock(&bmc->dyn_mutex);
		mutex_lock(&intf->bmc_reg_mutex);
		mutex_lock(&bmc->dyn_mutex);
		if (intf != list_first_entry(&bmc->intfs, struct ipmi_smi,
					     bmc_link)) {
			mutex_unlock(&intf->bmc_reg_mutex);
			kref_put(&intf->refcount, intf_free);
			goto retry_bmc_lock;
		}
	} else {
		mutex_lock(&intf->bmc_reg_mutex);
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		kref_get(&intf->refcount);
	}

	/* If we have a valid and current ID, just return that. */
	if (intf->in_bmc_register ||
	    (bmc->dyn_id_set && time_is_after_jiffies(bmc->dyn_id_expiry)))
		goto out_noprocessing;

	/* Don't allow sysfs access when in maintenance mode. */
	if (intf->maintenance_mode_state) {
		rv = -EBUSY;
		goto out_noprocessing;
	}

	prev_guid_set = bmc->dyn_guid_set;
	__get_guid(intf);

	prev_dyn_id_set = bmc->dyn_id_set;
	rv = __get_device_id(intf, bmc);
	if (rv)
		goto out;

	/*
	 * The guid, device id, manufacturer id, and product id should
	 * not change on a BMC.  If it does we have to do some dancing.
	 */
	if (!intf->bmc_registered
	    || (!prev_guid_set && bmc->dyn_guid_set)
	    || (!prev_dyn_id_set && bmc->dyn_id_set)
	    || (prev_guid_set && bmc->dyn_guid_set
		&& !guid_equal(&bmc->guid, &bmc->fetch_guid))
	    || bmc->id.device_id != bmc->fetch_id.device_id
	    || bmc->id.manufacturer_id != bmc->fetch_id.manufacturer_id
	    || bmc->id.product_id != bmc->fetch_id.product_id) {
		struct ipmi_device_id id = bmc->fetch_id;
		int guid_set = bmc->dyn_guid_set;
		guid_t guid;

		guid = bmc->fetch_guid;
		mutex_unlock(&bmc->dyn_mutex);

		__ipmi_bmc_unregister(intf);
		/* Fill in the temporary BMC for good measure. */
		intf->bmc->id = id;
		intf->bmc->dyn_guid_set = guid_set;
		intf->bmc->guid = guid;
		if (__ipmi_bmc_register(intf, &id, guid_set, &guid, intf_num))
			need_waiter(intf); /* Retry later on an error. */
		else
			__scan_channels(intf, &id);

		if (!intf_set) {
			/*
			 * We weren't given the interface on the
			 * command line, so restart the operation on
			 * the next interface for the BMC.
			 */
			mutex_unlock(&intf->bmc_reg_mutex);
			mutex_lock(&bmc->dyn_mutex);
			goto retry_bmc_lock;
		}

		/* We have a new BMC, set it up. */
		bmc = intf->bmc;
		mutex_lock(&bmc->dyn_mutex);
		goto out_noprocessing;
	} else if (memcmp(&bmc->fetch_id, &bmc->id, sizeof(bmc->id)))
		/* Version info changed, scan the channels again. */
		__scan_channels(intf, &bmc->fetch_id);

	bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

out:
	if (rv && prev_dyn_id_set) {
		rv = 0; /* Ignore failures if we have previous data. */
		bmc->dyn_id_set = prev_dyn_id_set;
	}
	if (!rv) {
		bmc->id = bmc->fetch_id;
		if (bmc->dyn_guid_set)
			bmc->guid = bmc->fetch_guid;
		else if (prev_guid_set)
			/*
			 * The guid used to be valid and it failed to fetch,
			 * just use the cached value.
			 */
			bmc->dyn_guid_set = prev_guid_set;
	}
out_noprocessing:
	if (!rv) {
		if (id)
			*id = bmc->id;

		if (guid_set)
			*guid_set = bmc->dyn_guid_set;

		if (guid && bmc->dyn_guid_set)
			*guid = bmc->guid;
	}

	mutex_unlock(&bmc->dyn_mutex);
	mutex_unlock(&intf->bmc_reg_mutex);

	kref_put(&intf->refcount, intf_free);
	return rv;
}

static int bmc_get_device_id(struct ipmi_smi *intf, struct bmc_device *bmc,
			     struct ipmi_device_id *id,
			     bool *guid_set, guid_t *guid)
{
	return __bmc_get_device_id(intf, bmc, id, guid_set, guid, -1);
}

static ssize_t device_id_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u\n", id.device_id);
}
static DEVICE_ATTR_RO(device_id);

static ssize_t provides_device_sdrs_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u\n", (id.device_revision & 0x80) >> 7);
}
static DEVICE_ATTR_RO(provides_device_sdrs);

static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u\n", id.device_revision & 0x0F);
}
static DEVICE_ATTR_RO(revision);

static ssize_t firmware_revision_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u.%x\n", id.firmware_revision_1,
			  id.firmware_revision_2);
}
static DEVICE_ATTR_RO(firmware_revision);

static ssize_t ipmi_version_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "%u.%u\n",
			  ipmi_version_major(&id),
			  ipmi_version_minor(&id));
}
static DEVICE_ATTR_RO(ipmi_version);

static ssize_t add_dev_support_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "0x%02x\n", id.additional_device_support);
}
static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
		   NULL);

static ssize_t manufacturer_id_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "0x%6.6x\n", id.manufacturer_id);
}
static DEVICE_ATTR_RO(manufacturer_id);

static ssize_t product_id_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "0x%4.4x\n", id.product_id);
}
static DEVICE_ATTR_RO(product_id);

static ssize_t aux_firmware_rev_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	struct ipmi_device_id id;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
	if (rv)
		return rv;

	return sysfs_emit(buf, "0x%02x 0x%02x 0x%02x 0x%02x\n",
			  id.aux_firmware_revision[3],
			  id.aux_firmware_revision[2],
			  id.aux_firmware_revision[1],
			  id.aux_firmware_revision[0]);
}
static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);

static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct bmc_device *bmc = to_bmc_device(dev);
	bool guid_set;
	guid_t guid;
	int rv;

	rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, &guid);
	if (rv)
		return rv;
	if (!guid_set)
		return -ENOENT;

	return sysfs_emit(buf, "%pUl\n", &guid);
}
static DEVICE_ATTR_RO(guid);

static struct attribute *bmc_dev_attrs[] = {
	&dev_attr_device_id.attr,
	&dev_attr_provides_device_sdrs.attr,
	&dev_attr_revision.attr,
	&dev_attr_firmware_revision.attr,
	&dev_attr_ipmi_version.attr,
	&dev_attr_additional_device_support.attr,
	&dev_attr_manufacturer_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_aux_firmware_revision.attr,
	&dev_attr_guid.attr,
	NULL
};

static umode_t bmc_dev_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	struct bmc_device *bmc = to_bmc_device(dev);
	umode_t mode = attr->mode;
	int rv;

	if (attr == &dev_attr_aux_firmware_revision.attr) {
		struct ipmi_device_id id;

		rv = bmc_get_device_id(NULL, bmc, &id, NULL, NULL);
		return (!rv && id.aux_firmware_revision_set) ? mode : 0;
	}
	if (attr == &dev_attr_guid.attr) {
		bool guid_set;

		rv = bmc_get_device_id(NULL, bmc, NULL, &guid_set, NULL);
		return (!rv && guid_set) ? mode : 0;
	}
	return mode;
}

static const struct attribute_group bmc_dev_attr_group = {
	.attrs = bmc_dev_attrs,
	.is_visible = bmc_dev_attr_is_visible,
};

static const struct attribute_group *bmc_dev_attr_groups[] = {
	&bmc_dev_attr_group,
	NULL
};

static const struct device_type bmc_device_type = {
	.groups = bmc_dev_attr_groups,
};

static int __find_bmc_guid(struct device *dev, const void *data)
{
	const guid_t *guid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = bmc->dyn_guid_set && guid_equal(&bmc->guid, guid);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
					     guid_t *guid)
{
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}

struct prod_dev_id {
	unsigned int product_id;
	unsigned char device_id;
};

static int __find_bmc_prod_dev_id(struct device *dev, const void *data)
{
	const struct prod_dev_id *cid = data;
	struct bmc_device *bmc;
	int rv;

	if (dev->type != &bmc_device_type)
		return 0;

	bmc = to_bmc_device(dev);
	rv = (bmc->id.product_id == cid->product_id
	      && bmc->id.device_id == cid->device_id);
	if (rv)
		rv = kref_get_unless_zero(&bmc->usecount);
	return rv;
}

/*
 * Returns with the bmc's usecount incremented, if it is non-NULL.
 */
static struct bmc_device *ipmi_find_bmc_prod_dev_id(
	struct device_driver *drv,
	unsigned int product_id, unsigned char device_id)
{
	struct prod_dev_id id = {
		.product_id = product_id,
		.device_id = device_id,
	};
	struct device *dev;
	struct bmc_device *bmc = NULL;

	dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
	if (dev) {
		bmc = to_bmc_device(dev);
		put_device(dev);
	}
	return bmc;
}

static DEFINE_IDA(ipmi_bmc_ida);

static void
release_bmc_device(struct device *dev)
{
	kfree(to_bmc_device(dev));
}

static void cleanup_bmc_work(struct work_struct *work)
{
	struct bmc_device *bmc = container_of(work, struct bmc_device,
					      remove_work);
	int id = bmc->pdev.id; /* Unregister overwrites id */

	platform_device_unregister(&bmc->pdev);
	ida_free(&ipmi_bmc_ida, id);
}

static void
cleanup_bmc_device(struct kref *ref)
{
	struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);

	/*
	 * Remove the platform device in a work queue to avoid issues
	 * with removing the device attributes while reading a device
	 * attribute.
	 */
	queue_work(bmc_remove_work_wq, &bmc->remove_work);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static void __ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	struct bmc_device *bmc = intf->bmc;

	if (!intf->bmc_registered)
		return;

	sysfs_remove_link(&intf->si_dev->kobj, "bmc");
	sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	intf->bmc_registered = false;
}

static void ipmi_bmc_unregister(struct ipmi_smi *intf)
{
	mutex_lock(&intf->bmc_reg_mutex);
	__ipmi_bmc_unregister(intf);
	mutex_unlock(&intf->bmc_reg_mutex);
}

/*
 * Must be called with intf->bmc_reg_mutex held.
 */
static int __ipmi_bmc_register(struct ipmi_smi *intf,
			       struct ipmi_device_id *id,
			       bool guid_set, guid_t *guid, int intf_num)
{
	int rv;
	struct bmc_device *bmc;
	struct bmc_device *old_bmc;

	/*
	 * platform_device_register() can cause bmc_reg_mutex to
	 * be claimed because of the is_visible functions of
	 * the attributes.  Eliminate possible recursion and
	 * release the lock.
	 */
	intf->in_bmc_register = true;
	mutex_unlock(&intf->bmc_reg_mutex);

	/*
	 * Try to find a bmc_device struct that already represents
	 * the interfaced BMC.
	 */
	mutex_lock(&ipmidriver_mutex);
	if (guid_set)
		old_bmc = ipmi_find_bmc_guid(&ipmidriver.driver, guid);
	else
		old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
						    id->product_id,
						    id->device_id);

	/*
	 * If a matching bmc_device already exists, reuse it;
	 * otherwise allocate and register a new BMC device.
	 */
	if (old_bmc) {
		bmc = old_bmc;
		/*
		 * Note: old_bmc already has usecount incremented by
		 * the BMC find functions.
		 */
		intf->bmc = old_bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		dev_info(intf->si_dev,
			 "interfacing existing BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	} else {
		bmc = kzalloc(sizeof(*bmc), GFP_KERNEL);
		if (!bmc) {
			rv = -ENOMEM;
			goto out;
		}
		INIT_LIST_HEAD(&bmc->intfs);
		mutex_init(&bmc->dyn_mutex);
		INIT_WORK(&bmc->remove_work, cleanup_bmc_work);

		bmc->id = *id;
		bmc->dyn_id_set = 1;
		bmc->dyn_guid_set = guid_set;
		bmc->guid = *guid;
		bmc->dyn_id_expiry = jiffies + IPMI_DYN_DEV_ID_EXPIRY;

		bmc->pdev.name = "ipmi_bmc";

		rv = ida_alloc(&ipmi_bmc_ida, GFP_KERNEL);
		if (rv < 0) {
			kfree(bmc);
			goto out;
		}

		bmc->pdev.dev.driver = &ipmidriver.driver;
		bmc->pdev.id = rv;
		bmc->pdev.dev.release = release_bmc_device;
		bmc->pdev.dev.type = &bmc_device_type;
		kref_init(&bmc->usecount);

		intf->bmc = bmc;
		mutex_lock(&bmc->dyn_mutex);
		list_add_tail(&intf->bmc_link, &bmc->intfs);
		mutex_unlock(&bmc->dyn_mutex);

		rv = platform_device_register(&bmc->pdev);
		if (rv) {
			dev_err(intf->si_dev,
				"Unable to register bmc device: %d\n",
				rv);
			goto out_list_del;
		}

		dev_info(intf->si_dev,
			 "Found new BMC (man_id: 0x%6.6x, prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n",
			 bmc->id.manufacturer_id,
			 bmc->id.product_id,
			 bmc->id.device_id);
	}

	/*
	 * Create symlinks from the system interface device to the
	 * bmc device and back.
	 */
	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
	if (rv) {
		dev_err(intf->si_dev, "Unable to create bmc symlink: %d\n", rv);
		goto out_put_bmc;
	}

	if (intf_num == -1)
		intf_num = intf->intf_num;
	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", intf_num);
	if (!intf->my_dev_name) {
		rv = -ENOMEM;
		dev_err(intf->si_dev, "Unable to allocate link from BMC: %d\n",
			rv);
		goto out_unlink1;
	}

	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
			       intf->my_dev_name);
	if (rv) {
		dev_err(intf->si_dev, "Unable to create symlink to bmc: %d\n",
			rv);
		goto out_free_my_dev_name;
	}

	intf->bmc_registered = true;

out:
	mutex_unlock(&ipmidriver_mutex);
	mutex_lock(&intf->bmc_reg_mutex);
	intf->in_bmc_register = false;
	return rv;

out_free_my_dev_name:
	kfree(intf->my_dev_name);
	intf->my_dev_name = NULL;

out_unlink1:
	sysfs_remove_link(&intf->si_dev->kobj, "bmc");

out_put_bmc:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	kref_put(&bmc->usecount, cleanup_bmc_device);
	goto out;

out_list_del:
	mutex_lock(&bmc->dyn_mutex);
	list_del(&intf->bmc_link);
	mutex_unlock(&bmc->dyn_mutex);
	intf->bmc = &intf->tmp_bmc;
	put_device(&bmc->pdev.dev);
	goto out;
}

static int
send_guid_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg msg;
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_GUID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

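/*
 * Completion handler for the Get Device GUID request sent by
 * __get_guid() below.  The response must carry a completion code
 * plus UUID_SIZE bytes of GUID; anything shorter means the BMC has
 * no usable GUID.
 */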
static void guid_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	struct bmc_device *bmc = intf->bmc;

	if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE)
	    || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD))
		/* Not for me */
		return;

	if (msg->msg.data[0] != 0) {
		/* Error from getting the GUID, the BMC doesn't have one. */
		bmc->dyn_guid_set = 0;
		goto out;
	}

	if (msg->msg.data_len < UUID_SIZE + 1) {
		bmc->dyn_guid_set = 0;
		dev_warn(intf->si_dev,
			 "The GUID response from the BMC was too short, it was %d but should have been %d.  Assuming GUID is not available.\n",
			 msg->msg.data_len, UUID_SIZE + 1);
		goto out;
	}

	import_guid(&bmc->fetch_guid, msg->msg.data + 1);
	/*
	 * Make sure the guid data is available before setting
	 * dyn_guid_set.
	 */
	smp_wmb();
	bmc->dyn_guid_set = 1;
out:
	wake_up(&intf->waitq);
}

static void __get_guid(struct ipmi_smi *intf)
{
	int rv;
	struct bmc_device *bmc = intf->bmc;

	bmc->dyn_guid_set = 2;
	intf->null_user_handler = guid_handler;
	rv = send_guid_cmd(intf, 0);
	if (rv)
		/* Send failed, no GUID available. */
		bmc->dyn_guid_set = 0;
	else
		wait_event(intf->waitq, bmc->dyn_guid_set != 2);

	/* dyn_guid_set makes the guid data available. */
	smp_rmb();

	intf->null_user_handler = NULL;
}

static int
send_channel_info_cmd(struct ipmi_smi *intf, int chan)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[1];
	struct ipmi_system_interface_addr si;

	si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si.channel = IPMI_BMC_CHANNEL;
	si.lun = 0;

	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_CHANNEL_INFO_CMD;
	msg.data = data;
	msg.data_len = 1;
	data[0] = chan;
	return i_ipmi_request(NULL,
			      intf,
			      (struct ipmi_addr *) &si,
			      0,
			      &msg,
			      intf,
			      NULL,
			      NULL,
			      0,
			      intf->addrinfo[0].address,
			      intf->addrinfo[0].lun,
			      -1, 0);
}

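/*
 * State machine for scanning the channels.  Each Get Channel Info
 * response records the medium/protocol for intf->curr_channel and
 * kicks off the query for the next channel, until IPMI_MAX_CHANNELS
 * have been scanned or an error ends the scan early.
 */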
static void
channel_handler(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	int rv = 0;
	int ch;
	unsigned int set = intf->curr_working_cset;
	struct ipmi_channel *chans;

	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) {
		/* It's the one we want */
		if (msg->msg.data[0] != 0) {
			/* Got an error from the channel, just go on. */
			if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) {
				/*
				 * If the MC does not support this
				 * command, that is legal.  We just
				 * assume it has one IPMB at channel
				 * zero.
				 */
				intf->wchannels[set].c[0].medium
					= IPMI_CHANNEL_MEDIUM_IPMB;
				intf->wchannels[set].c[0].protocol
					= IPMI_CHANNEL_PROTOCOL_IPMB;

				intf->channel_list = intf->wchannels + set;
				intf->channels_ready = true;
				wake_up(&intf->waitq);
				goto out;
			}
			goto next_channel;
		}
		if (msg->msg.data_len < 4) {
			/* Message not big enough, just go on. */
			goto next_channel;
		}
		ch = intf->curr_channel;
		chans = intf->wchannels[set].c;
		chans[ch].medium = msg->msg.data[2] & 0x7f;
		chans[ch].protocol = msg->msg.data[3] & 0x1f;

next_channel:
		intf->curr_channel++;
		if (intf->curr_channel >= IPMI_MAX_CHANNELS) {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		} else {
			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			rv = send_channel_info_cmd(intf, intf->curr_channel);
		}

		if (rv) {
			/* Got an error somehow, just give up. */
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel %d: %d\n",
				 intf->curr_channel, rv);

			intf->channel_list = intf->wchannels + set;
			intf->channels_ready = true;
			wake_up(&intf->waitq);
		}
	}
out:
	return;
}

/*
 * Must be holding intf->bmc_reg_mutex to call this.
 */
static int __scan_channels(struct ipmi_smi *intf, struct ipmi_device_id *id)
{
	int rv;

	if (ipmi_version_major(id) > 1
			|| (ipmi_version_major(id) == 1
			    && ipmi_version_minor(id) >= 5)) {
		unsigned int set;

		/*
		 * Start scanning the channels to see what is
		 * available.
		 */
		set = !intf->curr_working_cset;
		intf->curr_working_cset = set;
		memset(&intf->wchannels[set], 0,
		       sizeof(struct ipmi_channel_set));

		intf->null_user_handler = channel_handler;
		intf->curr_channel = 0;
		rv = send_channel_info_cmd(intf, 0);
		if (rv) {
			dev_warn(intf->si_dev,
				 "Error sending channel information for channel 0, %d\n",
				 rv);
			intf->null_user_handler = NULL;
			return -EIO;
		}

		/* Wait for the channel info to be read. */
		wait_event(intf->waitq, intf->channels_ready);
		intf->null_user_handler = NULL;
	} else {
		unsigned int set = intf->curr_working_cset;

		/* Assume a single IPMB channel at zero. */
		intf->wchannels[set].c[0].medium = IPMI_CHANNEL_MEDIUM_IPMB;
		intf->wchannels[set].c[0].protocol = IPMI_CHANNEL_PROTOCOL_IPMB;
		intf->channel_list = intf->wchannels + set;
		intf->channels_ready = true;
	}

	return 0;
}

static void ipmi_poll(struct ipmi_smi *intf)
{
	if (intf->handlers->poll)
		intf->handlers->poll(intf->send_info);
	/* In case something came in */
	handle_new_recv_msgs(intf);
}

void ipmi_poll_interface(struct ipmi_user *user)
{
	ipmi_poll(user->intf);
}
EXPORT_SYMBOL(ipmi_poll_interface);

static ssize_t nr_users_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct ipmi_smi *intf = container_of(attr,
					     struct ipmi_smi, nr_users_devattr);

	return sysfs_emit(buf, "%d\n", atomic_read(&intf->nr_users));
}
static DEVICE_ATTR_RO(nr_users);

static ssize_t nr_msgs_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	struct ipmi_smi *intf = container_of(attr,
					     struct ipmi_smi, nr_msgs_devattr);
	struct ipmi_user *user;
	unsigned int count = 0;

	mutex_lock(&intf->users_mutex);
	list_for_each_entry(user, &intf->users, link)
		count += atomic_read(&user->nr_msgs);
	mutex_unlock(&intf->users_mutex);

	return sysfs_emit(buf, "%u\n", count);
}
static DEVICE_ATTR_RO(nr_msgs);

static ssize_t maintenance_mode_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ipmi_smi *intf = container_of(attr,
					     struct ipmi_smi,
					     maintenance_mode_devattr);

	return sysfs_emit(buf, "%u %d\n", intf->maintenance_mode_state,
			  intf->auto_maintenance_timeout);
}
static DEVICE_ATTR_RO(maintenance_mode);

static void redo_bmc_reg(struct work_struct *work)
{
	struct ipmi_smi *intf = container_of(work, struct ipmi_smi,
					     bmc_reg_work);

	if (!intf->in_shutdown)
		bmc_get_device_id(intf, NULL, NULL, NULL, NULL);

	kref_put(&intf->refcount, intf_free);
}

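/*
 * Register a new low-level (SMI) interface with the message handler.
 * Called by interface drivers once their transport is ready; on
 * success the interface is live, has been assigned the lowest free
 * interface number, and all registered watchers have been notified.
 */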
int ipmi_add_smi(struct module *owner,
		 const struct ipmi_smi_handlers *handlers,
		 void *send_info,
		 struct device *si_dev,
		 unsigned char slave_addr)
{
	int i, j;
	int rv;
	struct ipmi_smi *intf, *tintf;
	struct list_head *link;
	struct ipmi_device_id id;

	/*
	 * Make sure the driver is actually initialized; this handles
	 * problems with initialization order.
	 */
	rv = ipmi_init_msghandler();
	if (rv)
		return rv;

	intf = kzalloc(sizeof(*intf), GFP_KERNEL);
	if (!intf)
		return -ENOMEM;

	intf->owner = owner;
	intf->bmc = &intf->tmp_bmc;
	INIT_LIST_HEAD(&intf->bmc->intfs);
	mutex_init(&intf->bmc->dyn_mutex);
	INIT_LIST_HEAD(&intf->bmc_link);
	mutex_init(&intf->bmc_reg_mutex);
	intf->intf_num = -1; /* Mark it invalid for now. */
	kref_init(&intf->refcount);
	INIT_WORK(&intf->bmc_reg_work, redo_bmc_reg);
	intf->si_dev = si_dev;
	for (j = 0; j < IPMI_MAX_CHANNELS; j++) {
		intf->addrinfo[j].address = IPMI_BMC_SLAVE_ADDR;
		intf->addrinfo[j].lun = 2;
	}
	if (slave_addr != 0)
		intf->addrinfo[0].address = slave_addr;
	INIT_LIST_HEAD(&intf->user_msgs);
	mutex_init(&intf->user_msgs_mutex);
	INIT_LIST_HEAD(&intf->users);
	mutex_init(&intf->users_mutex);
	atomic_set(&intf->nr_users, 0);
	intf->handlers = handlers;
	intf->send_info = send_info;
	mutex_init(&intf->seq_lock);
	for (j = 0; j < IPMI_IPMB_NUM_SEQ; j++) {
		intf->seq_table[j].inuse = 0;
		intf->seq_table[j].seqid = 0;
	}
	intf->curr_seq = 0;
	spin_lock_init(&intf->waiting_rcv_msgs_lock);
	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
	INIT_WORK(&intf->smi_work, smi_work);
	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
	spin_lock_init(&intf->xmit_msgs_lock);
	INIT_LIST_HEAD(&intf->xmit_msgs);
	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
	mutex_init(&intf->events_mutex);
	spin_lock_init(&intf->watch_lock);
	atomic_set(&intf->event_waiters, 0);
	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
	INIT_LIST_HEAD(&intf->waiting_events);
	intf->waiting_events_count = 0;
	mutex_init(&intf->cmd_rcvrs_mutex);
	spin_lock_init(&intf->maintenance_mode_lock);
	INIT_LIST_HEAD(&intf->cmd_rcvrs);
	init_waitqueue_head(&intf->waitq);
	for (i = 0; i < IPMI_NUM_STATS; i++)
		atomic_set(&intf->stats[i], 0);

	/*
	 * Grab the watchers mutex so we can deliver the new interface
	 * without races.
	 */
	mutex_lock(&smi_watchers_mutex);
	mutex_lock(&ipmi_interfaces_mutex);
	/* Look for a hole in the numbers. */
	i = 0;
	link = &ipmi_interfaces;
	list_for_each_entry(tintf, &ipmi_interfaces, link) {
		if (tintf->intf_num != i) {
			link = &tintf->link;
			break;
		}
		i++;
	}
	/* Add the new interface in numeric order. */
	if (i == 0)
		list_add(&intf->link, &ipmi_interfaces);
	else
		list_add_tail(&intf->link, link);

	rv = handlers->start_processing(send_info, intf);
	if (rv)
		goto out_err;

	rv = __bmc_get_device_id(intf, NULL, &id, NULL, NULL, i);
	if (rv) {
		dev_err(si_dev, "Unable to get the device id: %d\n", rv);
		goto out_err_started;
	}

	mutex_lock(&intf->bmc_reg_mutex);
	rv = __scan_channels(intf, &id);
	mutex_unlock(&intf->bmc_reg_mutex);
	if (rv)
		goto out_err_bmc_reg;

	intf->nr_users_devattr = dev_attr_nr_users;
	sysfs_attr_init(&intf->nr_users_devattr.attr);
	rv = device_create_file(intf->si_dev, &intf->nr_users_devattr);
	if (rv)
		goto out_err_bmc_reg;

	intf->nr_msgs_devattr = dev_attr_nr_msgs;
	sysfs_attr_init(&intf->nr_msgs_devattr.attr);
	rv = device_create_file(intf->si_dev, &intf->nr_msgs_devattr);
	if (rv) {
		device_remove_file(intf->si_dev, &intf->nr_users_devattr);
		goto out_err_bmc_reg;
	}

	intf->maintenance_mode_devattr = dev_attr_maintenance_mode;
	sysfs_attr_init(&intf->maintenance_mode_devattr.attr);
	rv = device_create_file(intf->si_dev, &intf->maintenance_mode_devattr);
	if (rv) {
		device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
		device_remove_file(intf->si_dev, &intf->nr_users_devattr);
		goto out_err_bmc_reg;
	}

	intf->intf_num = i;
	mutex_unlock(&ipmi_interfaces_mutex);

	/* After this point the interface is legal to use. */
	call_smi_watchers(i, intf->si_dev);

	mutex_unlock(&smi_watchers_mutex);

	return 0;

out_err_bmc_reg:
	ipmi_bmc_unregister(intf);
out_err_started:
	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);
out_err:
	list_del(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);
	mutex_unlock(&smi_watchers_mutex);
	kref_put(&intf->refcount, intf_free);

	return rv;
}
EXPORT_SYMBOL(ipmi_add_smi);

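/*
 * Synthesize an error response for a message that can no longer be
 * sent (used when tearing an interface down) and run it through the
 * normal receive path.
 */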
static void deliver_smi_err_response(struct ipmi_smi *intf,
				     struct ipmi_smi_msg *msg,
				     unsigned char err)
{
	int rv;

	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = err;
	msg->rsp_size = 3;

	/* This will never requeue, but it may ask us to free the message. */
	rv = handle_one_recv_msg(intf, msg);
	if (rv == 0)
		ipmi_free_smi_msg(msg);
}

static void cleanup_smi_msgs(struct ipmi_smi *intf)
{
	int i;
	struct seq_table *ent;
	struct ipmi_smi_msg *msg;
	struct list_head *entry;
	struct list_head tmplist;

	/* Clear out our transmit queues and hold the messages. */
	INIT_LIST_HEAD(&tmplist);
	list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
	list_splice_tail(&intf->xmit_msgs, &tmplist);

	/* Current message first, to preserve order */
	while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
		/* Wait for the message to clear out. */
		schedule_timeout(1);
	}

	/* No need for locks, the interface is down. */

	/*
	 * Return errors for all pending messages in queue and in the
	 * tables waiting for remote responses.
	 */
	while (!list_empty(&tmplist)) {
		entry = tmplist.next;
		list_del(entry);
		msg = list_entry(entry, struct ipmi_smi_msg, link);
		deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
	}

	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
		ent = &intf->seq_table[i];
		if (!ent->inuse)
			continue;
		deliver_err_response(intf, ent->recv_msg, IPMI_ERR_UNSPECIFIED);
	}
}

void ipmi_unregister_smi(struct ipmi_smi *intf)
{
	struct ipmi_smi_watcher *w;
	int intf_num;

	if (!intf)
		return;

	intf_num = intf->intf_num;
	mutex_lock(&ipmi_interfaces_mutex);
	cancel_work_sync(&intf->smi_work);
	/* smi_work() can no longer be in progress after this. */

	intf->intf_num = -1;
	intf->in_shutdown = true;
	list_del(&intf->link);
	mutex_unlock(&ipmi_interfaces_mutex);

	/*
	 * At this point no users can be added to the interface and no
	 * new messages can be sent.
	 */

	if (intf->handlers->shutdown)
		intf->handlers->shutdown(intf->send_info);

	device_remove_file(intf->si_dev, &intf->maintenance_mode_devattr);
	device_remove_file(intf->si_dev, &intf->nr_msgs_devattr);
	device_remove_file(intf->si_dev, &intf->nr_users_devattr);

	/*
	 * Call all the watcher interfaces to tell them that
	 * an interface is going away.
	 */
	mutex_lock(&smi_watchers_mutex);
	list_for_each_entry(w, &smi_watchers, link)
		w->smi_gone(intf_num);
	mutex_unlock(&smi_watchers_mutex);

	mutex_lock(&intf->users_mutex);
	while (!list_empty(&intf->users)) {
		struct ipmi_user *user = list_first_entry(&intf->users,
						struct ipmi_user, link);

		_ipmi_destroy_user(user);
	}
	mutex_unlock(&intf->users_mutex);

	cleanup_smi_msgs(intf);

	ipmi_bmc_unregister(intf);

	kref_put(&intf->refcount, intf_free);
}
EXPORT_SYMBOL(ipmi_unregister_smi);

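/*
 * Handlers for IPMB traffic read back with a Get Message command.
 * The rsp buffer layout used below: rsp[2] is the completion code,
 * rsp[3] the channel, rsp[4] netfn/LUN, rsp[6] the responder's
 * address, rsp[7] rqSeq/LUN, rsp[8] the command, rsp[9..] the data,
 * and the final byte a checksum.
 */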
static int handle_ipmb_get_msg_rsp(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct ipmi_ipmb_addr ipmb_addr;
	struct ipmi_recv_msg *recv_msg;

	/*
	 * This is 11, not 10, because the response must contain a
	 * completion code.
	 */
	if (msg->rsp_size < 11) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_ipmb_responses);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	ipmb_addr.addr_type = IPMI_IPMB_ADDR_TYPE;
	ipmb_addr.slave_addr = msg->rsp[6];
	ipmb_addr.channel = msg->rsp[3] & 0x0f;
	ipmb_addr.lun = msg->rsp[7] & 3;

	/*
	 * It's a response from a remote entity.  Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[7] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[8],
			  (msg->rsp[4] >> 2) & (~1),
			  (struct ipmi_addr *) &ipmb_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[9], msg->rsp_size - 9);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[4] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 10;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_ipmb_responses);
	else
		ipmi_inc_stat(intf, handled_ipmb_responses);

	return 0;
}

static int handle_ipmb_get_msg_cmd(struct ipmi_smi *intf,
				   struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_ipmb_addr *ipmb_addr;
	struct ipmi_recv_msg *recv_msg = NULL;

	if (msg->rsp_size < 10) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[4] >> 2;
	cmd = msg->rsp[8];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		recv_msg = ipmi_alloc_recv_msg(user);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = msg->rsp[3];
		msg->data[3] = msg->rsp[6];
		msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3);
		msg->data[5] = ipmb_checksum(&msg->data[3], 2);
		msg->data[6] = intf->addrinfo[msg->rsp[3] & 0xf].address;
		/* rqseq/lun */
		msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3);
		msg->data[8] = msg->rsp[8]; /* cmd */
		msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[10] = ipmb_checksum(&msg->data[6], 4);
		msg->data_size = 11;

		dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
			msg->data_size, msg->data);

		smi_send(intf, intf->handlers, msg, 0);
		/*
		 * We used the message, so return the value that
		 * causes it to not be freed or queued.
		 */
		rv = -1;
	} else if (!IS_ERR(recv_msg)) {
		/* Extract the source address from the data. */
		ipmb_addr = (struct ipmi_ipmb_addr *) &recv_msg->addr;
		ipmb_addr->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb_addr->slave_addr = msg->rsp[6];
		ipmb_addr->lun = msg->rsp[7] & 3;
		ipmb_addr->channel = msg->rsp[3] & 0xf;

		/*
		 * Extract the rest of the message information
		 * from the IPMB header.
		 */
		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
		recv_msg->msgid = msg->rsp[7] >> 2;
		recv_msg->msg.netfn = msg->rsp[4] >> 2;
		recv_msg->msg.cmd = msg->rsp[8];
		recv_msg->msg.data = recv_msg->msg_data;

		/*
		 * We chop off 10, not 9 bytes because the checksum
		 * at the end also needs to be removed.
		 */
		recv_msg->msg.data_len = msg->rsp_size - 10;
		memcpy(recv_msg->msg_data, &msg->rsp[9],
		       msg->rsp_size - 10);
		if (deliver_response(intf, recv_msg))
			ipmi_inc_stat(intf, unhandled_commands);
		else
			ipmi_inc_stat(intf, handled_commands);
	} else {
		/*
		 * We couldn't allocate memory for the message, so
		 * requeue it for handling later.
		 */
		rv = 1;
	}

	return rv;
}

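/*
 * Directly received IPMB command: rsp[0] holds netfn/rsLUN, rsp[1]
 * the source address, rsp[2] rqSeq/rqLUN, rsp[3] the command, and
 * rsp[4..] the data.
 */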
static int handle_ipmb_direct_rcv_cmd(struct ipmi_smi *intf,
				      struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	struct ipmi_user *user = NULL;
	struct ipmi_ipmb_direct_addr *daddr;
	struct ipmi_recv_msg *recv_msg = NULL;
	unsigned char netfn = msg->rsp[0] >> 2;
	unsigned char cmd = msg->rsp[3];

	rcu_read_lock();
	/* We always use channel 0 for direct messages. */
	rcvr = find_cmd_rcvr(intf, netfn, cmd, 0);
	if (rcvr) {
		user = rcvr->user;
		recv_msg = ipmi_alloc_recv_msg(user);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, deliver an error response. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (netfn + 1) << 2;
		msg->data[0] |= msg->rsp[2] & 0x3; /* rqLUN */
		msg->data[1] = msg->rsp[1]; /* Addr */
		msg->data[2] = msg->rsp[2] & ~0x3; /* rqSeq */
		msg->data[2] |= msg->rsp[0] & 0x3; /* rsLUN */
		msg->data[3] = cmd;
		msg->data[4] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data_size = 5;

		smi_send(intf, intf->handlers, msg, 0);
		/*
		 * We used the message, so return the value that
		 * causes it to not be freed or queued.
		 */
		rv = -1;
	} else if (!IS_ERR(recv_msg)) {
		/* Extract the source address from the data. */
		daddr = (struct ipmi_ipmb_direct_addr *)&recv_msg->addr;
		daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
		daddr->channel = 0;
		daddr->slave_addr = msg->rsp[1];
		daddr->rs_lun = msg->rsp[0] & 3;
		daddr->rq_lun = msg->rsp[2] & 3;

		/*
		 * Extract the rest of the message information
		 * from the IPMB header.
		 */
		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
		recv_msg->msgid = (msg->rsp[2] >> 2);
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[3];
		recv_msg->msg.data = recv_msg->msg_data;

		recv_msg->msg.data_len = msg->rsp_size - 4;
		memcpy(recv_msg->msg_data, msg->rsp + 4,
		       msg->rsp_size - 4);
		if (deliver_response(intf, recv_msg))
			ipmi_inc_stat(intf, unhandled_commands);
		else
			ipmi_inc_stat(intf, handled_commands);
	} else {
		/*
		 * We couldn't allocate memory for the message, so
		 * requeue it for handling later.
		 */
		rv = 1;
	}

	return rv;
}

static int handle_ipmb_direct_rcv_rsp(struct ipmi_smi *intf,
4064
struct ipmi_smi_msg *msg)
4065
{
4066
struct ipmi_recv_msg *recv_msg;
4067
struct ipmi_ipmb_direct_addr *daddr;
4068
4069
recv_msg = msg->recv_msg;
4070
if (recv_msg == NULL) {
4071
dev_warn(intf->si_dev,
4072
"IPMI direct message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
4073
return 0;
4074
}
4075
4076
recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
4077
recv_msg->msgid = msg->msgid;
4078
daddr = (struct ipmi_ipmb_direct_addr *) &recv_msg->addr;
4079
daddr->addr_type = IPMI_IPMB_DIRECT_ADDR_TYPE;
4080
daddr->channel = 0;
4081
daddr->slave_addr = msg->rsp[1];
4082
daddr->rq_lun = msg->rsp[0] & 3;
4083
daddr->rs_lun = msg->rsp[2] & 3;
4084
recv_msg->msg.netfn = msg->rsp[0] >> 2;
4085
recv_msg->msg.cmd = msg->rsp[3];
4086
memcpy(recv_msg->msg_data, &msg->rsp[4], msg->rsp_size - 4);
4087
recv_msg->msg.data = recv_msg->msg_data;
4088
recv_msg->msg.data_len = msg->rsp_size - 4;
4089
deliver_local_response(intf, recv_msg);
4090
4091
return 0;
4092
}
4093
4094
static int handle_lan_get_msg_rsp(struct ipmi_smi *intf,
4095
struct ipmi_smi_msg *msg)
4096
{
4097
struct ipmi_lan_addr lan_addr;
4098
struct ipmi_recv_msg *recv_msg;
4099
4100
4101
/*
4102
* This is 13, not 12, because the response must contain a
4103
* completion code.
4104
*/
4105
if (msg->rsp_size < 13) {
4106
/* Message not big enough, just ignore it. */
4107
ipmi_inc_stat(intf, invalid_lan_responses);
4108
return 0;
4109
}
4110
4111
if (msg->rsp[2] != 0) {
4112
/* An error getting the response, just ignore it. */
4113
return 0;
4114
}
4115
4116
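	/*
	 * Pull the source address out of the Get Message response:
	 * rsp[3] holds the channel (low nibble) and privilege level
	 * (high nibble), rsp[4] the session handle, rsp[5] and rsp[8]
	 * the local and remote software IDs, and rsp[9] the LUN.
	 */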
	lan_addr.addr_type = IPMI_LAN_ADDR_TYPE;
	lan_addr.session_handle = msg->rsp[4];
	lan_addr.remote_SWID = msg->rsp[8];
	lan_addr.local_SWID = msg->rsp[5];
	lan_addr.channel = msg->rsp[3] & 0x0f;
	lan_addr.privilege = msg->rsp[3] >> 4;
	lan_addr.lun = msg->rsp[9] & 3;

	/*
	 * It's a response from a remote entity. Look up the sequence
	 * number and handle the response.
	 */
	if (intf_find_seq(intf,
			  msg->rsp[9] >> 2,
			  msg->rsp[3] & 0x0f,
			  msg->rsp[10],
			  (msg->rsp[6] >> 2) & (~1),
			  (struct ipmi_addr *) &lan_addr,
			  &recv_msg)) {
		/*
		 * We were unable to find the sequence number,
		 * so just nuke the message.
		 */
		ipmi_inc_stat(intf, unhandled_lan_responses);
		return 0;
	}

	memcpy(recv_msg->msg_data, &msg->rsp[11], msg->rsp_size - 11);
	/*
	 * The other fields matched, so no need to set them, except
	 * for netfn, which needs to be the response that was
	 * returned, not the request value.
	 */
	recv_msg->msg.netfn = msg->rsp[6] >> 2;
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 12;
	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	if (deliver_response(intf, recv_msg))
		ipmi_inc_stat(intf, unhandled_lan_responses);
	else
		ipmi_inc_stat(intf, handled_lan_responses);

	return 0;
}

static int handle_lan_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_lan_addr *lan_addr;
	struct ipmi_recv_msg *recv_msg = NULL;

	if (msg->rsp_size < 12) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	netfn = msg->rsp[6] >> 2;
	cmd = msg->rsp[10];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		recv_msg = ipmi_alloc_recv_msg(user);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up and return an error. */
		ipmi_inc_stat(intf, unhandled_commands);

		msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
		msg->data[1] = IPMI_SEND_MSG_CMD;
		msg->data[2] = chan;
		msg->data[3] = msg->rsp[4]; /* handle */
		msg->data[4] = msg->rsp[8]; /* rsSWID */
		msg->data[5] = ((netfn + 1) << 2) | (msg->rsp[9] & 0x3);
		msg->data[6] = ipmb_checksum(&msg->data[3], 3);
		msg->data[7] = msg->rsp[5]; /* rqSWID */
		/* rqseq/lun */
		msg->data[8] = (msg->rsp[9] & 0xfc) | (msg->rsp[6] & 0x3);
		msg->data[9] = cmd;
		msg->data[10] = IPMI_INVALID_CMD_COMPLETION_CODE;
		msg->data[11] = ipmb_checksum(&msg->data[7], 4);
		msg->data_size = 12;

		dev_dbg(intf->si_dev, "Invalid command: %*ph\n",
			msg->data_size, msg->data);

		smi_send(intf, intf->handlers, msg, 0);
		/*
		 * We used the message, so return the value that
		 * causes it to not be freed or queued.
		 */
		rv = -1;
	} else if (!IS_ERR(recv_msg)) {
		/* Extract the source address from the data. */
		lan_addr = (struct ipmi_lan_addr *) &recv_msg->addr;
		lan_addr->addr_type = IPMI_LAN_ADDR_TYPE;
		lan_addr->session_handle = msg->rsp[4];
		lan_addr->remote_SWID = msg->rsp[8];
		lan_addr->local_SWID = msg->rsp[5];
		lan_addr->lun = msg->rsp[9] & 3;
		lan_addr->channel = msg->rsp[3] & 0xf;
		lan_addr->privilege = msg->rsp[3] >> 4;

		/*
		 * Extract the rest of the message information
		 * from the IPMB header.
		 */
		recv_msg->recv_type = IPMI_CMD_RECV_TYPE;
		recv_msg->msgid = msg->rsp[9] >> 2;
		recv_msg->msg.netfn = msg->rsp[6] >> 2;
		recv_msg->msg.cmd = msg->rsp[10];
		recv_msg->msg.data = recv_msg->msg_data;

		/*
		 * We chop off 12, not 11 bytes because the checksum
		 * at the end also needs to be removed.
		 */
		recv_msg->msg.data_len = msg->rsp_size - 12;
		memcpy(recv_msg->msg_data, &msg->rsp[11],
		       msg->rsp_size - 12);
		if (deliver_response(intf, recv_msg))
			ipmi_inc_stat(intf, unhandled_commands);
		else
			ipmi_inc_stat(intf, handled_commands);
	} else {
		/*
		 * We couldn't allocate memory for the message, so
		 * requeue it for handling later.
		 */
		rv = 1;
	}

	return rv;
}

/*
 * This routine will handle "Get Message" command responses with
 * channels that use an OEM Medium. The message format belongs to
 * the OEM. See IPMI 2.0 specification, Chapter 6 and
 * Chapter 22, sections 22.6 and 22.24 for more details.
 */
static int handle_oem_get_msg_cmd(struct ipmi_smi *intf,
				  struct ipmi_smi_msg *msg)
{
	struct cmd_rcvr *rcvr;
	int rv = 0;
	unsigned char netfn;
	unsigned char cmd;
	unsigned char chan;
	struct ipmi_user *user = NULL;
	struct ipmi_system_interface_addr *smi_addr;
	struct ipmi_recv_msg *recv_msg = NULL;

	/*
	 * We expect the OEM SW to perform error checking
	 * so we just do some basic sanity checks
	 */
	if (msg->rsp_size < 4) {
		/* Message not big enough, just ignore it. */
		ipmi_inc_stat(intf, invalid_commands);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the response, just ignore it. */
		return 0;
	}

	/*
	 * This is an OEM message, so the OEM needs to know how to
	 * handle the message. We do no interpretation.
	 */
	netfn = msg->rsp[0] >> 2;
	cmd = msg->rsp[1];
	chan = msg->rsp[3] & 0xf;

	rcu_read_lock();
	rcvr = find_cmd_rcvr(intf, netfn, cmd, chan);
	if (rcvr) {
		user = rcvr->user;
		recv_msg = ipmi_alloc_recv_msg(user);
	}
	rcu_read_unlock();

	if (user == NULL) {
		/* We didn't find a user, just give up. */
		ipmi_inc_stat(intf, unhandled_commands);

		/*
		 * Don't do anything with these messages, just allow
		 * them to be freed.
		 */

		rv = 0;
	} else if (!IS_ERR(recv_msg)) {
		/*
		 * OEM Messages are expected to be delivered via
		 * the system interface to SMS software. We might
		 * need to visit this again depending on OEM
		 * requirements
		 */
		smi_addr = ((struct ipmi_system_interface_addr *)
			    &recv_msg->addr);
		smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		smi_addr->channel = IPMI_BMC_CHANNEL;
		smi_addr->lun = msg->rsp[0] & 3;

		recv_msg->user_msg_data = NULL;
		recv_msg->recv_type = IPMI_OEM_RECV_TYPE;
		recv_msg->msg.netfn = msg->rsp[0] >> 2;
		recv_msg->msg.cmd = msg->rsp[1];
		recv_msg->msg.data = recv_msg->msg_data;

		/*
		 * The message starts at byte 4 which follows the
		 * Channel Byte in the "GET MESSAGE" command
		 */
		recv_msg->msg.data_len = msg->rsp_size - 4;
		memcpy(recv_msg->msg_data, &msg->rsp[4],
		       msg->rsp_size - 4);
		if (deliver_response(intf, recv_msg))
			ipmi_inc_stat(intf, unhandled_commands);
		else
			ipmi_inc_stat(intf, handled_commands);
	} else {
		/*
		 * We couldn't allocate memory for the message, so
		 * requeue it for handling later.
		 */
		rv = 1;
	}

	return rv;
}

static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg,
				     struct ipmi_smi_msg *msg)
{
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg->msgid = 0;
	smi_addr = (struct ipmi_system_interface_addr *) &recv_msg->addr;
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->recv_type = IPMI_ASYNC_EVENT_RECV_TYPE;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[3], msg->rsp_size - 3);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 3;
}

static int handle_read_event_rsp(struct ipmi_smi *intf,
				 struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg, *recv_msg2;
	struct list_head msgs;
	struct ipmi_user *user;
	int rv = 0, deliver_count = 0;

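	/*
	 * A Read Event Message Buffer response carries a 16-byte event
	 * after the 3-byte response header, so anything shorter than 19
	 * bytes cannot be a valid event.
	 */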
	if (msg->rsp_size < 19) {
		/* Message is too small to be an IPMB event. */
		ipmi_inc_stat(intf, invalid_events);
		return 0;
	}

	if (msg->rsp[2] != 0) {
		/* An error getting the event, just ignore it. */
		return 0;
	}

	INIT_LIST_HEAD(&msgs);

	mutex_lock(&intf->events_mutex);

	ipmi_inc_stat(intf, events);

	/*
	 * Allocate and fill in one message for every user that is
	 * getting events.
	 */
	mutex_lock(&intf->users_mutex);
	list_for_each_entry(user, &intf->users, link) {
		if (!user->gets_events)
			continue;

		recv_msg = ipmi_alloc_recv_msg(user);
		if (IS_ERR(recv_msg)) {
			mutex_unlock(&intf->users_mutex);
			list_for_each_entry_safe(recv_msg, recv_msg2, &msgs,
						 link) {
				user = recv_msg->user;
				list_del(&recv_msg->link);
				ipmi_free_recv_msg(recv_msg);
				kref_put(&user->refcount, free_ipmi_user);
			}
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		deliver_count++;

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &msgs);
	}
	mutex_unlock(&intf->users_mutex);

	if (deliver_count) {
		/* Now deliver all the messages. */
		list_for_each_entry_safe(recv_msg, recv_msg2, &msgs, link) {
			list_del(&recv_msg->link);
			deliver_local_response(intf, recv_msg);
		}
	} else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) {
		/*
		 * No one to receive the message, so put it in the queue
		 * if there aren't already too many things in the queue.
		 */
		recv_msg = ipmi_alloc_recv_msg(NULL);
		if (IS_ERR(recv_msg)) {
			/*
			 * We couldn't allocate memory for the
			 * message, so requeue it for handling
			 * later.
			 */
			rv = 1;
			goto out;
		}

		copy_event_into_recv_msg(recv_msg, msg);
		list_add_tail(&recv_msg->link, &intf->waiting_events);
		intf->waiting_events_count++;
	} else if (!intf->event_msg_printed) {
		/*
		 * There are too many things in the queue, discard this
		 * message.
		 */
		dev_warn(intf->si_dev,
			 "Event queue full, discarding incoming events\n");
		intf->event_msg_printed = 1;
	}

 out:
	mutex_unlock(&intf->events_mutex);

	return rv;
}

static int handle_bmc_rsp(struct ipmi_smi *intf,
			  struct ipmi_smi_msg *msg)
{
	struct ipmi_recv_msg *recv_msg;
	struct ipmi_system_interface_addr *smi_addr;

	recv_msg = msg->recv_msg;
	if (recv_msg == NULL) {
		dev_warn(intf->si_dev,
			 "IPMI SMI message received with no owner. This could be because of a malformed message, or because of a hardware error. Contact your hardware vendor for assistance.\n");
		return 0;
	}

	recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE;
	recv_msg->msgid = msg->msgid;
	smi_addr = ((struct ipmi_system_interface_addr *)
		    &recv_msg->addr);
	smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	smi_addr->channel = IPMI_BMC_CHANNEL;
	smi_addr->lun = msg->rsp[0] & 3;
	recv_msg->msg.netfn = msg->rsp[0] >> 2;
	recv_msg->msg.cmd = msg->rsp[1];
	memcpy(recv_msg->msg_data, &msg->rsp[2], msg->rsp_size - 2);
	recv_msg->msg.data = recv_msg->msg_data;
	recv_msg->msg.data_len = msg->rsp_size - 2;
	deliver_local_response(intf, recv_msg);

	return 0;
}

/*
 * Handle a received message. Return 1 if the message should be requeued,
 * 0 if the message should be freed, or -1 if the message should not
 * be freed or requeued.
 */
static int handle_one_recv_msg(struct ipmi_smi *intf,
			       struct ipmi_smi_msg *msg)
{
	int requeue = 0;
	int chan;
	unsigned char cc;
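	/* Even netfns are commands/requests, odd netfns are responses. */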
	bool is_cmd = !((msg->rsp[0] >> 2) & 1);

	dev_dbg(intf->si_dev, "Recv: %*ph\n", msg->rsp_size, msg->rsp);

	if (msg->rsp_size < 2) {
		/* Message is too small to be correct. */
		dev_warn_ratelimited(intf->si_dev,
				     "BMC returned too small a message for netfn %x cmd %x, got %d bytes\n",
				     (msg->data[0] >> 2) | 1,
				     msg->data[1], msg->rsp_size);

return_unspecified:
		/* Generate an error response for the message. */
		msg->rsp[0] = msg->data[0] | (1 << 2);
		msg->rsp[1] = msg->data[1];
		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
		msg->rsp_size = 3;
	} else if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
		/* commands must have at least 4 bytes, responses 5. */
		if (is_cmd && (msg->rsp_size < 4)) {
			ipmi_inc_stat(intf, invalid_commands);
			goto out;
		}
		if (!is_cmd && (msg->rsp_size < 5)) {
			ipmi_inc_stat(intf, invalid_ipmb_responses);
			/* Construct a valid error response. */
			msg->rsp[0] = msg->data[0] & 0xfc; /* NetFN */
			msg->rsp[0] |= (1 << 2); /* Make it a response */
			msg->rsp[0] |= msg->data[2] & 3; /* rqLUN */
			msg->rsp[1] = msg->data[1]; /* Addr */
			msg->rsp[2] = msg->data[2] & 0xfc; /* rqSeq */
			msg->rsp[2] |= msg->data[0] & 0x3; /* rsLUN */
			msg->rsp[3] = msg->data[3]; /* Cmd */
			msg->rsp[4] = IPMI_ERR_UNSPECIFIED;
			msg->rsp_size = 5;
		}
	} else if ((msg->data_size >= 2)
		   && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
		   && (msg->data[1] == IPMI_SEND_MSG_CMD)
		   && (msg->recv_msg == NULL)) {

		if (intf->in_shutdown || intf->run_to_completion)
			goto out;

		/*
		 * This is the local response to a command send, start
		 * the timer for these. The recv_msg will not be
		 * NULL if this is a response send, and we will let
		 * response sends just go through.
		 */

		/*
		 * Check for errors, if we get certain errors (ones
		 * that mean basically we can try again later), we
		 * ignore them and start the timer. Otherwise we
		 * report the error immediately.
		 */
		if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0)
		    && (msg->rsp[2] != IPMI_NODE_BUSY_ERR)
		    && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR)
		    && (msg->rsp[2] != IPMI_BUS_ERR)
		    && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) {
			int ch = msg->rsp[3] & 0xf;
			struct ipmi_channel *chans;

			/* Got an error sending the message, handle it. */

			chans = READ_ONCE(intf->channel_list)->c;
			if ((chans[ch].medium == IPMI_CHANNEL_MEDIUM_8023LAN)
			    || (chans[ch].medium == IPMI_CHANNEL_MEDIUM_ASYNC))
				ipmi_inc_stat(intf, sent_lan_command_errs);
			else
				ipmi_inc_stat(intf, sent_ipmb_command_errs);
			intf_err_seq(intf, msg->msgid, msg->rsp[2]);
		} else
			/* The message was sent, start the timer. */
			intf_start_seq_timer(intf, msg->msgid);
		requeue = 0;
		goto out;
	} else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))
		   || (msg->rsp[1] != msg->data[1])) {
		/*
		 * The NetFN and Command in the response are not even
		 * marginally correct.
		 */
		dev_warn_ratelimited(intf->si_dev,
				     "BMC returned incorrect response, expected netfn %x cmd %x, got netfn %x cmd %x\n",
				     (msg->data[0] >> 2) | 1, msg->data[1],
				     msg->rsp[0] >> 2, msg->rsp[1]);

		goto return_unspecified;
	}

	if (msg->type == IPMI_SMI_MSG_TYPE_IPMB_DIRECT) {
		if ((msg->data[0] >> 2) & 1) {
			/* It's a response to a sent response. */
			chan = 0;
			cc = msg->rsp[4];
			goto process_response_response;
		}
		if (is_cmd)
			requeue = handle_ipmb_direct_rcv_cmd(intf, msg);
		else
			requeue = handle_ipmb_direct_rcv_rsp(intf, msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_SEND_MSG_CMD)
		   && (msg->recv_msg != NULL)) {
		/*
		 * It's a response to a response we sent. For this we
		 * deliver a send message response to the user.
		 */
		struct ipmi_recv_msg *recv_msg;

		if (intf->run_to_completion)
			goto out;

		chan = msg->data[2] & 0x0f;
		if (chan >= IPMI_MAX_CHANNELS)
			/* Invalid channel number */
			goto out;
		cc = msg->rsp[2];

process_response_response:
		recv_msg = msg->recv_msg;

		requeue = 0;
		if (!recv_msg)
			goto out;

		recv_msg->recv_type = IPMI_RESPONSE_RESPONSE_TYPE;
		recv_msg->msg.data = recv_msg->msg_data;
		recv_msg->msg_data[0] = cc;
		recv_msg->msg.data_len = 1;
		deliver_local_response(intf, recv_msg);
	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_GET_MSG_CMD)) {
		struct ipmi_channel *chans;

		if (intf->run_to_completion)
			goto out;

		/* It's from the receive queue. */
		chan = msg->rsp[3] & 0xf;
		if (chan >= IPMI_MAX_CHANNELS) {
			/* Invalid channel number */
			requeue = 0;
			goto out;
		}

		/*
		 * We need to make sure the channels have been initialized.
		 * The channel_handler routine will set the "curr_channel"
		 * equal to or greater than IPMI_MAX_CHANNELS when all the
		 * channels for this interface have been initialized.
		 */
		if (!intf->channels_ready) {
			requeue = 0; /* Throw the message away */
			goto out;
		}

		chans = READ_ONCE(intf->channel_list)->c;

		switch (chans[chan].medium) {
		case IPMI_CHANNEL_MEDIUM_IPMB:
			if (msg->rsp[4] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_ipmb_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity. Handle that.
				 */
				requeue = handle_ipmb_get_msg_cmd(intf, msg);
			}
			break;

		case IPMI_CHANNEL_MEDIUM_8023LAN:
		case IPMI_CHANNEL_MEDIUM_ASYNC:
			if (msg->rsp[6] & 0x04) {
				/*
				 * It's a response, so find the
				 * requesting message and send it up.
				 */
				requeue = handle_lan_get_msg_rsp(intf, msg);
			} else {
				/*
				 * It's a command to the SMS from some other
				 * entity. Handle that.
				 */
				requeue = handle_lan_get_msg_cmd(intf, msg);
			}
			break;

		default:
			/*
			 * Check for OEM Channels. Clients had better
			 * register for these commands.
			 */
			if ((chans[chan].medium >= IPMI_CHANNEL_MEDIUM_OEM_MIN)
			    && (chans[chan].medium
				<= IPMI_CHANNEL_MEDIUM_OEM_MAX)) {
				requeue = handle_oem_get_msg_cmd(intf, msg);
			} else {
				/*
				 * We don't handle the channel type, so just
				 * free the message.
				 */
				requeue = 0;
			}
		}

	} else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2))
		   && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) {
		/* It's an asynchronous event. */
		if (intf->run_to_completion)
			goto out;

		requeue = handle_read_event_rsp(intf, msg);
	} else {
		/* It's a response from the local BMC. */
		requeue = handle_bmc_rsp(intf, msg);
	}

 out:
	return requeue;
}

/*
 * If there are messages in the queue or pretimeouts, handle them.
 */
static void handle_new_recv_msgs(struct ipmi_smi *intf)
{
	struct ipmi_smi_msg *smi_msg;
	unsigned long flags = 0;
	int rv;
	int run_to_completion = READ_ONCE(intf->run_to_completion);

	/* See if any waiting messages need to be processed. */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		list_del(&smi_msg->link);
		if (!run_to_completion)
			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
					       flags);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (!run_to_completion)
			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
		if (rv > 0) {
			/*
			 * To preserve message order, quit if we
			 * can't handle a message. Add the message
			 * back at the head, this is safe because this
			 * workqueue is the only thing that pulls the
			 * messages.
			 */
			list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
			break;
		} else {
			if (rv == 0)
				/* Message handled */
				ipmi_free_smi_msg(smi_msg);
			/* If rv < 0, fatal error, del but don't free. */
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
}

static void smi_work(struct work_struct *t)
{
	unsigned long flags = 0; /* keep us warning-free. */
	struct ipmi_smi *intf = from_work(intf, t, smi_work);
	int run_to_completion = READ_ONCE(intf->run_to_completion);
	struct ipmi_smi_msg *newmsg = NULL;
	struct ipmi_recv_msg *msg, *msg2;
	int cc;

	/*
	 * Start the next message if available.
	 *
	 * Do this here, not in the actual receiver, because we may deadlock
	 * because the lower layer is allowed to hold locks while calling
	 * message delivery.
	 */
restart:
	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (intf->curr_msg == NULL && !intf->in_shutdown) {
		struct list_head *entry = NULL;

		/* Pick the high priority queue first. */
		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;

		if (entry) {
			list_del(entry);
			newmsg = list_entry(entry, struct ipmi_smi_msg, link);
			intf->curr_msg = newmsg;
		}
	}
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (newmsg) {
		cc = intf->handlers->sender(intf->send_info, newmsg);
		if (cc) {
			if (newmsg->recv_msg)
				deliver_err_response(intf,
						     newmsg->recv_msg, cc);
			else
				ipmi_free_smi_msg(newmsg);
			goto restart;
		}
	}

	handle_new_recv_msgs(intf);

	/* Nothing below applies during panic time. */
	if (run_to_completion)
		return;

	/*
	 * If the pretimeout count is non-zero, decrement one from it and
	 * deliver pretimeouts to all the users.
	 */
	if (atomic_add_unless(&intf->watchdog_pretimeouts_to_deliver, -1, 0)) {
		struct ipmi_user *user;

		mutex_lock(&intf->users_mutex);
		list_for_each_entry(user, &intf->users, link) {
			if (user->handler->ipmi_watchdog_pretimeout)
				user->handler->ipmi_watchdog_pretimeout(
					user->handler_data);
		}
		mutex_unlock(&intf->users_mutex);
	}

	/*
	 * Freeing the message can cause a user to be released, which
	 * can then cause the interface to be freed. Make sure that
	 * doesn't happen until we are ready.
	 */
	kref_get(&intf->refcount);

	mutex_lock(&intf->user_msgs_mutex);
	list_for_each_entry_safe(msg, msg2, &intf->user_msgs, link) {
		struct ipmi_user *user = msg->user;

		list_del(&msg->link);

		if (refcount_read(&user->destroyed) == 0)
			ipmi_free_recv_msg(msg);
		else
			user->handler->ipmi_recv_hndl(msg, user->handler_data);
	}
	mutex_unlock(&intf->user_msgs_mutex);

	kref_put(&intf->refcount, intf_free);
}

/* Handle a new message from the lower layer. */
void ipmi_smi_msg_received(struct ipmi_smi *intf,
			   struct ipmi_smi_msg *msg)
{
	unsigned long flags = 0; /* keep us warning-free. */
	int run_to_completion = READ_ONCE(intf->run_to_completion);

	/*
	 * To preserve message order, we keep a queue and deliver from
	 * a workqueue.
	 */
	if (!run_to_completion)
		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
	list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
				       flags);

	if (!run_to_completion)
		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	/*
	 * We can get an asynchronous event or receive message in addition
	 * to commands we send.
	 */
	if (msg == intf->curr_msg)
		intf->curr_msg = NULL;
	if (!run_to_completion)
		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);

	if (run_to_completion)
		smi_work(&intf->smi_work);
	else
		queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_msg_received);

void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
{
	if (intf->in_shutdown)
		return;

	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
	queue_work(system_wq, &intf->smi_work);
}
EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);

static struct ipmi_smi_msg *
smi_from_recv_msg(struct ipmi_smi *intf, struct ipmi_recv_msg *recv_msg,
		  unsigned char seq, long seqid)
{
	struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg();
	if (!smi_msg)
		/*
		 * If we can't allocate the message, then just return, we
		 * get 4 retries, so this should be ok.
		 */
		return NULL;

	memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len);
	smi_msg->data_size = recv_msg->msg.data_len;
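	/*
	 * Pack the sequence info into the msgid so the eventual response
	 * can be matched back to this request by intf_find_seq().
	 */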
	smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid);

	dev_dbg(intf->si_dev, "Resend: %*ph\n",
		smi_msg->data_size, smi_msg->data);

	return smi_msg;
}

static void check_msg_timeout(struct ipmi_smi *intf, struct seq_table *ent,
			      struct list_head *timeouts,
			      unsigned long timeout_period,
			      int slot, bool *need_timer)
{
	struct ipmi_recv_msg *msg;

	if (intf->in_shutdown)
		return;

	if (!ent->inuse)
		return;

	if (timeout_period < ent->timeout) {
		ent->timeout -= timeout_period;
		*need_timer = true;
		return;
	}

	if (ent->retries_left == 0) {
		/* The message has used all its retries. */
		ent->inuse = 0;
		smi_remove_watch(intf, IPMI_WATCH_MASK_CHECK_MESSAGES);
		msg = ent->recv_msg;
		list_add_tail(&msg->link, timeouts);
		if (ent->broadcast)
			ipmi_inc_stat(intf, timed_out_ipmb_broadcasts);
		else if (is_lan_addr(&ent->recv_msg->addr))
			ipmi_inc_stat(intf, timed_out_lan_commands);
		else
			ipmi_inc_stat(intf, timed_out_ipmb_commands);
	} else {
		struct ipmi_smi_msg *smi_msg;
		/* More retries, send again. */

		*need_timer = true;

		/*
		 * Start with the max timer, set to normal timer after
		 * the message is sent.
		 */
		ent->timeout = MAX_MSG_TIMEOUT;
		ent->retries_left--;
		smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot,
					    ent->seqid);
		if (!smi_msg) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      dropped_rexmit_lan_commands);
			else
				ipmi_inc_stat(intf,
					      dropped_rexmit_ipmb_commands);
			return;
		}

		mutex_unlock(&intf->seq_lock);

		/*
		 * Send the new message. We send with a zero
		 * priority. It timed out, I doubt time is that
		 * critical now, and high priority messages are really
		 * only for messages to the local MC, which don't get
		 * resent.
		 */
		if (intf->handlers) {
			if (is_lan_addr(&ent->recv_msg->addr))
				ipmi_inc_stat(intf,
					      retransmitted_lan_commands);
			else
				ipmi_inc_stat(intf,
					      retransmitted_ipmb_commands);

			smi_send(intf, intf->handlers, smi_msg, 0);
		} else
			ipmi_free_smi_msg(smi_msg);

		mutex_lock(&intf->seq_lock);
	}
}

static bool ipmi_timeout_handler(struct ipmi_smi *intf,
				 unsigned long timeout_period)
{
	struct list_head timeouts;
	struct ipmi_recv_msg *msg, *msg2;
	unsigned long flags;
	int i;
	bool need_timer = false;

	if (!intf->bmc_registered) {
		kref_get(&intf->refcount);
		if (!schedule_work(&intf->bmc_reg_work)) {
			kref_put(&intf->refcount, intf_free);
			need_timer = true;
		}
	}

	/*
	 * Go through the seq table and find any messages that
	 * have timed out, putting them in the timeouts
	 * list.
	 */
	INIT_LIST_HEAD(&timeouts);
	mutex_lock(&intf->seq_lock);
	if (intf->ipmb_maintenance_mode_timeout) {
		if (intf->ipmb_maintenance_mode_timeout <= timeout_period)
			intf->ipmb_maintenance_mode_timeout = 0;
		else
			intf->ipmb_maintenance_mode_timeout -= timeout_period;
	}
	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
		check_msg_timeout(intf, &intf->seq_table[i],
				  &timeouts, timeout_period, i,
				  &need_timer);
	mutex_unlock(&intf->seq_lock);

	list_for_each_entry_safe(msg, msg2, &timeouts, link)
		deliver_err_response(intf, msg, IPMI_TIMEOUT_COMPLETION_CODE);

	/*
	 * Maintenance mode handling. Check the timeout
	 * optimistically before we claim the lock. It may
	 * mean a timeout gets missed occasionally, but that
	 * only means the timeout gets extended by one period
	 * in that case. No big deal, and it avoids the lock
	 * most of the time.
	 */
	if (intf->auto_maintenance_timeout > 0) {
		spin_lock_irqsave(&intf->maintenance_mode_lock, flags);
		if (intf->auto_maintenance_timeout > 0) {
			intf->auto_maintenance_timeout
				-= timeout_period;
			if (!intf->maintenance_mode
			    && (intf->auto_maintenance_timeout <= 0)) {
				intf->maintenance_mode_state =
					IPMI_MAINTENANCE_MODE_STATE_OFF;
				intf->auto_maintenance_timeout = 0;
				maintenance_mode_update(intf);
			}
		}
		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
				       flags);
	}

	queue_work(system_wq, &intf->smi_work);

	return need_timer;
}

static void ipmi_request_event(struct ipmi_smi *intf)
{
	/* No event requests when in maintenance mode. */
	if (intf->maintenance_mode_state)
		return;

	if (!intf->in_shutdown)
		intf->handlers->request_events(intf->send_info);
}

static atomic_t stop_operation;

static void ipmi_timeout_work(struct work_struct *work)
{
	struct ipmi_smi *intf;
	bool need_timer = false;

	if (atomic_read(&stop_operation))
		return;

	mutex_lock(&ipmi_interfaces_mutex);
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (atomic_read(&intf->event_waiters)) {
			intf->ticks_to_req_ev--;
			if (intf->ticks_to_req_ev == 0) {
				ipmi_request_event(intf);
				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
			}
			need_timer = true;
		}
		if (intf->maintenance_mode_state)
			need_timer = true;

		need_timer |= ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
	}
	mutex_unlock(&ipmi_interfaces_mutex);

	if (need_timer)
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static DECLARE_WORK(ipmi_timer_work, ipmi_timeout_work);

static void ipmi_timeout(struct timer_list *unused)
{
	if (atomic_read(&stop_operation))
		return;

	queue_work(system_wq, &ipmi_timer_work);
}

static void need_waiter(struct ipmi_smi *intf)
{
	/* Racy, but worst case we start the timer twice. */
	if (!timer_pending(&ipmi_timer))
		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
}

static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);

static void free_smi_msg(struct ipmi_smi_msg *msg)
{
	atomic_dec(&smi_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}

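/*
 * A minimal usage sketch for the allocator below (hypothetical caller,
 * error handling elided):
 *
 *	struct ipmi_smi_msg *m = ipmi_alloc_smi_msg();
 *
 *	if (m) {
 *		m->data[0] = ...;	// fill in the request bytes
 *		m->data_size = ...;
 *		// hand the message to the interface's send path
 *	}
 */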
struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{
	struct ipmi_smi_msg *rv;
	rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
	if (rv) {
		rv->done = free_smi_msg;
		rv->recv_msg = NULL;
		rv->type = IPMI_SMI_MSG_TYPE_NORMAL;
		atomic_inc(&smi_msg_inuse_count);
	}
	return rv;
}
EXPORT_SYMBOL(ipmi_alloc_smi_msg);

static void free_recv_msg(struct ipmi_recv_msg *msg)
{
	atomic_dec(&recv_msg_inuse_count);
	/* Try to keep as much stuff out of the panic path as possible. */
	if (!oops_in_progress)
		kfree(msg);
}

static struct ipmi_recv_msg *ipmi_alloc_recv_msg(struct ipmi_user *user)
{
	struct ipmi_recv_msg *rv;

	if (user) {
		if (atomic_add_return(1, &user->nr_msgs) > max_msgs_per_user) {
			atomic_dec(&user->nr_msgs);
			return ERR_PTR(-EBUSY);
		}
	}

	rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
	if (!rv) {
		if (user)
			atomic_dec(&user->nr_msgs);
		return ERR_PTR(-ENOMEM);
	}

	rv->user = user;
	rv->done = free_recv_msg;
	if (user)
		kref_get(&user->refcount);
	atomic_inc(&recv_msg_inuse_count);
	return rv;
}

void ipmi_free_recv_msg(struct ipmi_recv_msg *msg)
{
	if (msg->user && !oops_in_progress) {
		atomic_dec(&msg->user->nr_msgs);
		kref_put(&msg->user->refcount, free_ipmi_user);
	}
	msg->done(msg);
}
EXPORT_SYMBOL(ipmi_free_recv_msg);

static void ipmi_set_recv_msg_user(struct ipmi_recv_msg *msg,
				   struct ipmi_user *user)
{
	WARN_ON_ONCE(msg->user); /* User should not be set. */
	msg->user = user;
	atomic_inc(&user->nr_msgs);
	kref_get(&user->refcount);
}

static atomic_t panic_done_count = ATOMIC_INIT(0);

static void dummy_smi_done_handler(struct ipmi_smi_msg *msg)
{
	atomic_dec(&panic_done_count);
}

static void dummy_recv_done_handler(struct ipmi_recv_msg *msg)
{
	atomic_dec(&panic_done_count);
}

/*
 * Inside a panic, send a message and wait for a response.
 */
static void _ipmi_panic_request_and_wait(struct ipmi_smi *intf,
					 struct ipmi_addr *addr,
					 struct kernel_ipmi_msg *msg)
{
	struct ipmi_smi_msg smi_msg;
	struct ipmi_recv_msg recv_msg;
	int rv;

	smi_msg.done = dummy_smi_done_handler;
	recv_msg.done = dummy_recv_done_handler;
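	/*
	 * Two completions are expected, one for the SMI message and one
	 * for the receive message; the dummy done handlers decrement the
	 * count, and the poll loop below spins until both have run.
	 */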
	atomic_add(2, &panic_done_count);
	rv = i_ipmi_request(NULL,
			    intf,
			    addr,
			    0,
			    msg,
			    intf,
			    &smi_msg,
			    &recv_msg,
			    0,
			    intf->addrinfo[0].address,
			    intf->addrinfo[0].lun,
			    0, 1); /* Don't retry, and don't wait. */
	if (rv)
		atomic_sub(2, &panic_done_count);
	else if (intf->handlers->flush_messages)
		intf->handlers->flush_messages(intf->send_info);

	while (atomic_read(&panic_done_count) != 0)
		ipmi_poll(intf);
}

void ipmi_panic_request_and_wait(struct ipmi_user *user,
				 struct ipmi_addr *addr,
				 struct kernel_ipmi_msg *msg)
{
	user->intf->run_to_completion = 1;
	_ipmi_panic_request_and_wait(user->intf, addr, msg);
}
EXPORT_SYMBOL(ipmi_panic_request_and_wait);

static void event_receiver_fetcher(struct ipmi_smi *intf,
				   struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/* A get event receiver command, save it. */
		intf->event_receiver = msg->msg.data[1];
		intf->event_receiver_lun = msg->msg.data[2] & 0x3;
	}
}

static void device_id_fetcher(struct ipmi_smi *intf, struct ipmi_recv_msg *msg)
{
	if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE)
	    && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE)
	    && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD)
	    && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) {
		/*
		 * A get device id command, save if we are an event
		 * receiver or generator.
		 */
		intf->local_sel_device = (msg->msg.data[6] >> 2) & 1;
		intf->local_event_generator = (msg->msg.data[6] >> 5) & 1;
	}
}

static void send_panic_events(struct ipmi_smi *intf, char *str)
{
	struct kernel_ipmi_msg msg;
	unsigned char data[16];
	struct ipmi_system_interface_addr *si;
	struct ipmi_addr addr;
	char *p = str;
	struct ipmi_ipmb_addr *ipmb;
	int j;

	if (ipmi_send_panic_event == IPMI_SEND_PANIC_EVENT_NONE)
		return;

	si = (struct ipmi_system_interface_addr *) &addr;
	si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
	si->channel = IPMI_BMC_CHANNEL;
	si->lun = 0;

	/* Fill in an event telling that we have failed. */
	msg.netfn = 0x04; /* Sensor or Event. */
	msg.cmd = 2; /* Platform event command. */
	msg.data = data;
	msg.data_len = 8;
	data[0] = 0x41; /* Kernel generator ID, IPMI table 5-4 */
	data[1] = 0x03; /* This is for IPMI 1.0. */
	data[2] = 0x20; /* OS Critical Stop, IPMI table 36-3 */
	data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */
	data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */

	/*
	 * Put a few breadcrumbs in. Hopefully later we can add more things
	 * to make the panic events more useful.
	 */
	if (str) {
		data[3] = str[0];
		data[6] = str[1];
		data[7] = str[2];
	}

	/* Send the event announcing the panic. */
	_ipmi_panic_request_and_wait(intf, &addr, &msg);

	/*
	 * On every interface, dump a bunch of OEM events holding the
	 * string.
	 */
	if (ipmi_send_panic_event != IPMI_SEND_PANIC_EVENT_STRING || !str)
		return;

	/*
	 * intf_num is used as a marker to tell if the
	 * interface is valid. Thus we need a read barrier to
	 * make sure data fetched before checking intf_num
	 * won't be used.
	 */
	smp_rmb();

	/*
	 * First job here is to figure out where to send the
	 * OEM events. There's no way in IPMI to send OEM
	 * events using an event send command, so we have to
	 * find the SEL to put them in and stick them in
	 * there.
	 */

	/* Get capabilities from the get device id. */
	intf->local_sel_device = 0;
	intf->local_event_generator = 0;
	intf->event_receiver = 0;

	/* Request the device info from the local MC. */
	msg.netfn = IPMI_NETFN_APP_REQUEST;
	msg.cmd = IPMI_GET_DEVICE_ID_CMD;
	msg.data = NULL;
	msg.data_len = 0;
	intf->null_user_handler = device_id_fetcher;
	_ipmi_panic_request_and_wait(intf, &addr, &msg);

	if (intf->local_event_generator) {
		/* Request the event receiver from the local MC. */
		msg.netfn = IPMI_NETFN_SENSOR_EVENT_REQUEST;
		msg.cmd = IPMI_GET_EVENT_RECEIVER_CMD;
		msg.data = NULL;
		msg.data_len = 0;
		intf->null_user_handler = event_receiver_fetcher;
		_ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
	intf->null_user_handler = NULL;

	/*
	 * Validate the event receiver. The low bit must not
	 * be 1 (it must be a valid IPMB address), it cannot
	 * be zero, and it must not be my address.
	 */
	if (((intf->event_receiver & 1) == 0)
	    && (intf->event_receiver != 0)
	    && (intf->event_receiver != intf->addrinfo[0].address)) {
		/*
		 * The event receiver is valid, send an IPMB
		 * message.
		 */
		ipmb = (struct ipmi_ipmb_addr *) &addr;
		ipmb->addr_type = IPMI_IPMB_ADDR_TYPE;
		ipmb->channel = 0; /* FIXME - is this right? */
		ipmb->lun = intf->event_receiver_lun;
		ipmb->slave_addr = intf->event_receiver;
	} else if (intf->local_sel_device) {
		/*
		 * The event receiver was not valid (or was
		 * me), but I am an SEL device, just dump it
		 * in my SEL.
		 */
		si = (struct ipmi_system_interface_addr *) &addr;
		si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
		si->channel = IPMI_BMC_CHANNEL;
		si->lun = 0;
	} else
		return; /* Nowhere to send the event. */

	msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */
	msg.cmd = IPMI_ADD_SEL_ENTRY_CMD;
	msg.data = data;
	msg.data_len = 16;

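	/*
	 * Each 16-byte OEM SEL record has a 5-byte header, so the panic
	 * string goes out in 11-byte chunks tagged with a sequence number.
	 */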
	j = 0;
	while (*p) {
		int size = strnlen(p, 11);

		data[0] = 0;
		data[1] = 0;
		data[2] = 0xf0; /* OEM event without timestamp. */
		data[3] = intf->addrinfo[0].address;
		data[4] = j++; /* sequence # */

		memcpy_and_pad(data+5, 11, p, size, '\0');
		p += size;

		_ipmi_panic_request_and_wait(intf, &addr, &msg);
	}
}

static int has_panicked;

static int panic_event(struct notifier_block *this,
		       unsigned long event,
		       void *ptr)
{
	struct ipmi_smi *intf;
	struct ipmi_user *user;

	if (has_panicked)
		return NOTIFY_DONE;
	has_panicked = 1;

	/* For every registered interface, set it to run to completion. */
	list_for_each_entry(intf, &ipmi_interfaces, link) {
		if (!intf->handlers || intf->intf_num == -1)
			/* Interface is not ready. */
			continue;

		if (!intf->handlers->poll)
			continue;

		/*
		 * If we were interrupted while locking xmit_msgs_lock or
		 * waiting_rcv_msgs_lock, the corresponding list may be
		 * corrupted. In this case, drop the items on the list
		 * for safety.
		 */
		if (!spin_trylock(&intf->xmit_msgs_lock)) {
			INIT_LIST_HEAD(&intf->xmit_msgs);
			INIT_LIST_HEAD(&intf->hp_xmit_msgs);
		} else
			spin_unlock(&intf->xmit_msgs_lock);

		if (!spin_trylock(&intf->waiting_rcv_msgs_lock))
			INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
		else
			spin_unlock(&intf->waiting_rcv_msgs_lock);

		intf->run_to_completion = 1;
		if (intf->handlers->set_run_to_completion)
			intf->handlers->set_run_to_completion(intf->send_info,
							      1);

		list_for_each_entry(user, &intf->users, link) {
			if (user->handler->ipmi_panic_handler)
				user->handler->ipmi_panic_handler(
					user->handler_data);
		}

		send_panic_events(intf, ptr);
	}

	return NOTIFY_DONE;
}

/* Must be called with ipmi_interfaces_mutex held. */
static int ipmi_register_driver(void)
{
	int rv;

	if (drvregistered)
		return 0;

	rv = driver_register(&ipmidriver.driver);
	if (rv)
		pr_err("Could not register IPMI driver\n");
	else
		drvregistered = true;
	return rv;
}

static struct notifier_block panic_block = {
	.notifier_call = panic_event,
	.next = NULL,
	.priority = 200 /* priority: INT_MAX >= x >= 0 */
};

static int ipmi_init_msghandler(void)
{
	int rv;

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	if (rv)
		goto out;
	if (initialized)
		goto out;

	bmc_remove_work_wq = create_singlethread_workqueue("ipmi-msghandler-remove-wq");
	if (!bmc_remove_work_wq) {
		pr_err("unable to create ipmi-msghandler-remove-wq workqueue");
		rv = -ENOMEM;
		goto out;
	}

	timer_setup(&ipmi_timer, ipmi_timeout, 0);
	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);

	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);

	initialized = true;

 out:
	mutex_unlock(&ipmi_interfaces_mutex);
	return rv;
}

static int __init ipmi_init_msghandler_mod(void)
{
	int rv;

	pr_info("version " IPMI_DRIVER_VERSION "\n");

	mutex_lock(&ipmi_interfaces_mutex);
	rv = ipmi_register_driver();
	mutex_unlock(&ipmi_interfaces_mutex);

	return rv;
}

static void __exit cleanup_ipmi(void)
{
	int count;

	if (initialized) {
		destroy_workqueue(bmc_remove_work_wq);

		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &panic_block);

		/*
		 * This can't be called if any interfaces exist, so no worry
		 * about shutting down the interfaces.
		 */

		/*
		 * Tell the timer to stop, then wait for it to stop. This
		 * avoids problems with race conditions removing the timer
		 * here.
		 */
		atomic_set(&stop_operation, 1);
		timer_delete_sync(&ipmi_timer);
		cancel_work_sync(&ipmi_timer_work);

		initialized = false;

		/* Check for buffer leaks. */
		count = atomic_read(&smi_msg_inuse_count);
		if (count != 0)
			pr_warn("SMI message count %d at exit\n", count);
		count = atomic_read(&recv_msg_inuse_count);
		if (count != 0)
			pr_warn("recv message count %d at exit\n", count);
	}
	if (drvregistered)
		driver_unregister(&ipmidriver.driver);
}
module_exit(cleanup_ipmi);

module_init(ipmi_init_msghandler_mod);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <[email protected]>");
MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface.");
MODULE_VERSION(IPMI_DRIVER_VERSION);
MODULE_SOFTDEP("post: ipmi_devintf");