GitHub Repository: torvalds/linux
Path: blob/master/drivers/char/ipmi/ipmi_si_intf.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * ipmi_si.c
 *
 * The interface to the IPMI driver for the system interfaces (KCS, SMIC,
 * BT).
 *
 * Author: MontaVista Software, Inc.
 *         Corey Minyard <[email protected]>
 *         [email protected]
 *
 * Copyright 2002 MontaVista Software Inc.
 * Copyright 2006 IBM Corp., Christian Krafft <[email protected]>
 */

/*
 * This file holds the "policy" for the interface to the SMI state
 * machine.  It does the configuration, handles timers and interrupts,
 * and drives the real SMI state machine.
 */

#define pr_fmt(fmt) "ipmi_si: " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si.h"
#include "ipmi_si_sm.h"
#include <linux/string.h>
#include <linux/ctype.h>

/* Measure times between events in the driver. */
#undef DEBUG_TIMING

/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC	10000
#define SI_USEC_PER_JIFFY	(1000000/HZ)
#define SI_TIMEOUT_JIFFIES	(SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC	250 /* .25ms when the SM requests a
				       short timeout */
#define SI_TIMEOUT_HOSED	(HZ) /* 1 second when in hosed state. */
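
/*
 * A quick sanity check on the arithmetic above: with HZ=1000,
 * SI_USEC_PER_JIFFY is 1000, so SI_TIMEOUT_JIFFIES is 10000/1000 = 10
 * jiffies (10 ms).  With HZ=250 it is 10000/4000 = 2 jiffies (8 ms),
 * the closest the timer can get to the intended 10 ms period.
 */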

enum si_intf_state {
	SI_NORMAL,
	SI_GETTING_FLAGS,
	SI_GETTING_EVENTS,
	SI_CLEARING_FLAGS,
	SI_GETTING_MESSAGES,
	SI_CHECKING_ENABLES,
	SI_SETTING_ENABLES,
	SI_HOSED
	/* FIXME - add watchdog stuff. */
};

/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1

/* 'invalid' to allow a firmware-specified interface to be disabled */
const char *const si_to_str[] = { "invalid", "kcs", "smic", "bt", NULL };

const struct ipmi_match_info ipmi_kcs_si_info = { .type = SI_KCS };
const struct ipmi_match_info ipmi_smic_si_info = { .type = SI_SMIC };
const struct ipmi_match_info ipmi_bt_si_info = { .type = SI_BT };

static bool initialized;

/*
 * Indexes into stats[] in smi_info below.
 */
enum si_stat_indexes {
	/*
	 * Number of times the driver requested a timer while an operation
	 * was in progress.
	 */
	SI_STAT_short_timeouts = 0,

	/*
	 * Number of times the driver requested a timer while nothing was in
	 * progress.
	 */
	SI_STAT_long_timeouts,

	/* Number of times the interface was idle while being polled. */
	SI_STAT_idles,

	/* Number of interrupts the driver handled. */
	SI_STAT_interrupts,

	/* Number of times the driver got an ATTN from the hardware. */
	SI_STAT_attentions,

	/* Number of times the driver requested flags from the hardware. */
	SI_STAT_flag_fetches,

	/* Number of times the hardware didn't follow the state machine. */
	SI_STAT_hosed_count,

	/* Number of completed messages. */
	SI_STAT_complete_transactions,

	/* Number of IPMI events received from the hardware. */
	SI_STAT_events,

	/* Number of watchdog pretimeouts. */
	SI_STAT_watchdog_pretimeouts,

	/* Number of asynchronous messages received. */
	SI_STAT_incoming_messages,


	/* This *must* remain last, add new values above this. */
	SI_NUM_STATS
};

struct smi_info {
	int si_num;
	struct ipmi_smi *intf;
	struct si_sm_data *si_sm;
	const struct si_sm_handlers *handlers;
	spinlock_t si_lock;
	struct ipmi_smi_msg *waiting_msg;
	struct ipmi_smi_msg *curr_msg;
	enum si_intf_state si_state;

	/*
	 * Used to handle the various types of I/O that can occur with
	 * IPMI
	 */
	struct si_sm_io io;

	/*
	 * Per-OEM handler, called from handle_flags().  Returns 1
	 * when handle_flags() needs to be re-run or 0 indicating it
	 * set si_state itself.
	 */
	int (*oem_data_avail_handler)(struct smi_info *smi_info);

	/*
	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
	 * is set to hold the flags until we are done handling everything
	 * from the flags.
	 */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL		0x20
#define OEM1_DATA_AVAIL		0x40
#define OEM2_DATA_AVAIL		0x80
#define OEM_DATA_AVAIL		(OEM0_DATA_AVAIL | \
				 OEM1_DATA_AVAIL | \
				 OEM2_DATA_AVAIL)
	unsigned char msg_flags;

	/* Does the BMC have an event buffer? */
	bool has_event_buffer;

	/*
	 * If set to true, this will request events the next time the
	 * state machine is idle.
	 */
	atomic_t req_events;

	/*
	 * If true, run the state machine to completion on every send
	 * call.  Generally used after a panic to make sure stuff goes
	 * out.
	 */
	bool run_to_completion;

	/* The timer for this si. */
	struct timer_list si_timer;

	/* This flag is set if the timer can be started. */
	bool timer_can_start;

	/* This flag is set if the timer is running (timer_pending() isn't enough). */
	bool timer_running;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long last_timeout_jiffies;

	/* Are we waiting for events, pretimeouts, or received msgs? */
	atomic_t need_watch;

	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory.  Once that situation clears up, it will re-enable
	 * interrupts.
	 */
	bool interrupt_disabled;

	/*
	 * Does the BMC support events?
	 */
	bool supports_event_msg_buff;

	/*
	 * Can we disable interrupts via the global enables receive irq
	 * bit?  There are currently two forms of brokenness: some
	 * systems cannot disable the bit (which is technically within
	 * the spec but a bad idea), and some systems have the bit
	 * forced to zero even though interrupts work (which is
	 * clearly outside the spec).  The next bool tells which form
	 * of brokenness is present.
	 */
	bool cannot_disable_irq;

	/*
	 * Some systems are broken and cannot set the irq enable
	 * bit, even if they support interrupts.
	 */
	bool irq_enable_broken;

	/* Is the driver in maintenance mode? */
	bool in_maintenance_mode;

	/*
	 * Did we get an attention that we did not handle?
	 */
	bool got_attn;

	/* From the get device id response... */
	struct ipmi_device_id device_id;

	/* Have we added the device group to the device? */
	bool dev_group_added;

	/* Counters and things for the proc filesystem. */
	atomic_t stats[SI_NUM_STATS];

	struct task_struct *thread;

	struct list_head link;
};

#define smi_inc_stat(smi, stat) \
	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
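
/*
 * The token pasting above means, e.g., smi_inc_stat(smi, attentions)
 * expands to atomic_inc(&smi->stats[SI_STAT_attentions]), so the stat
 * argument must be the suffix of one of the si_stat_indexes entries.
 */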

#define IPMI_MAX_INTFS 4
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;

static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;

static bool unload_when_empty = true;

static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);

#ifdef DEBUG_TIMING
void debug_timestamp(struct smi_info *smi_info, char *msg)
{
	struct timespec64 t;

	ktime_get_ts64(&t);
	dev_dbg(smi_info->io.dev, "**%s: %lld.%9.9ld\n",
		msg, t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(smi_info, x)
#endif

static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer. */
	ipmi_smi_msg_received(smi_info->intf, msg);
}

static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
		cCode = IPMI_ERR_UNSPECIFIED;
	/* else use it as is */

	/*
	 * Make it a response.  OR-ing 4 into the netfn/LUN byte sets
	 * bit 2, i.e. bit 0 of the netfn stored in bits 7..2, turning
	 * the even request netfn into the corresponding odd response
	 * netfn.
	 */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = cCode;
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}

static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
	int rv;

	if (!smi_info->waiting_msg) {
		smi_info->curr_msg = NULL;
		rv = SI_SM_IDLE;
	} else {
		int err;

		smi_info->curr_msg = smi_info->waiting_msg;
		smi_info->waiting_msg = NULL;
		debug_timestamp(smi_info, "Start2");
		err = atomic_notifier_call_chain(&xaction_notifier_list,
						 0, smi_info);
		if (err & NOTIFY_STOP_MASK) {
			rv = SI_SM_CALL_WITHOUT_DELAY;
			goto out;
		}
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		if (err)
			return_hosed_msg(smi_info, err);

		rv = SI_SM_CALL_WITHOUT_DELAY;
	}
out:
	return rv;
}

static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
	if (!smi_info->timer_can_start)
		return;
	smi_info->last_timeout_jiffies = jiffies;
	mod_timer(&smi_info->si_timer, new_val);
	smi_info->timer_running = true;
}

/*
 * Start a new message and (re)start the timer and thread.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
{
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}
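
/*
 * The helpers below all build raw requests in the same form the state
 * machines expect: msg[0] is the netfn shifted left by 2 (LUN 0 in the
 * low bits) and msg[1] is the command, followed by any command data.
 */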

static void start_check_enables(struct smi_info *smi_info)
{
	unsigned char msg[2];

	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_CHECKING_ENABLES;
}

static void start_clear_flags(struct smi_info *smi_info)
{
	unsigned char msg[3];

	/* Make sure the watchdog pre-timeout flag is not set at startup. */
	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
	msg[2] = WDT_PRE_TIMEOUT_INT;

	start_new_msg(smi_info, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;
}

static void start_get_flags(struct smi_info *smi_info)
{
	unsigned char msg[2];

	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
	msg[1] = IPMI_GET_MSG_FLAGS_CMD;

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_GETTING_FLAGS;
}

static void start_getting_msg_queue(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_MESSAGES;
}

static void start_getting_events(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
	smi_info->curr_msg->data_size = 2;

	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_EVENTS;
}

/*
 * When we have a situation where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
 * polled until we can allocate some memory.  Once we have some
 * memory, we will re-enable the interrupt.
 *
 * Note that we cannot just use disable_irq(), since the interrupt may
 * be shared.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = true;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if ((smi_info->io.irq) && (smi_info->interrupt_disabled)) {
		smi_info->interrupt_disabled = false;
		start_check_enables(smi_info);
		return true;
	}
	return false;
}

/*
 * Allocate a message.  If unable to allocate, start the interrupt
 * disable process and return NULL.  If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}
	return msg;
}

static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}

/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)

static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}
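
/*
 * For example, with a working irq, event buffer support, and neither
 * brokenness flag set, current_global_enables() returns all three bits
 * in GLOBAL_ENABLES_MASK and sets *irq_on; with no irq (and no
 * brokenness), only IPMI_BMC_EVT_MSG_BUFF (when supported) remains.
 */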

static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);

	irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if ((bool)irqstate == irq_on)
		return;

	if (irq_on)
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	else
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
}

static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp(smi_info, "Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn_ratelimited(smi_info->io.dev,
				"Error clearing flags: %2.2x\n", msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting message, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn_ratelimited(smi_info->io.dev,
				"Couldn't get irq info: %x,\n"
				"Maybe ok, but ipmi might run very slowly.\n",
				msg[2]);
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_info->type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn_ratelimited(smi_info->io.dev,
				"Could not set the global enables: 0x%x.\n",
				msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	case SI_HOSED: /* Shouldn't happen. */
		break;
	}
}

/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	if (smi_info->si_state == SI_HOSED)
		/* Just in case, hosed state is only left from the timeout. */
		return SI_SM_HOSED;

	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg, because that
		 * releases the lock.  We just disable operations for
		 * a while and retry in hosed state.
		 */
		smi_info->si_state = SI_HOSED;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_BUS_ERR);
		}
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_HOSED);
		goto out;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got an attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			start_get_flags(smi_info);
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disabled and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* OK if it fails, the timer will just go off. */
		if (timer_delete(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}

static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		if (smi_info->thread)
			wake_up_process(smi_info->thread);

		start_next_msg(smi_info);
		smi_event_handler(smi_info, 0);
	}
}

static void flush_messages(void *send_info)
{
	struct smi_info *smi_info = send_info;
	enum si_sm_result result;

	/*
	 * Currently, this function is called only in run-to-completion
	 * mode.  This means we are single-threaded, no need for locks.
	 */
	result = smi_event_handler(smi_info, 0);
	while (result != SI_SM_IDLE && result != SI_SM_HOSED) {
		udelay(SI_SHORT_TIMEOUT_USEC);
		result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
	}
}

static int sender(void *send_info, struct ipmi_smi_msg *msg)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	debug_timestamp(smi_info, "Enqueue");

	if (smi_info->si_state == SI_HOSED)
		return IPMI_BUS_ERR;

	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it.  Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;
		return IPMI_CC_NO_ERROR;
	}

	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problem.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
	return IPMI_CC_NO_ERROR;
}

static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
	struct smi_info *smi_info = send_info;

	smi_info->run_to_completion = i_run_to_completion;
	if (i_run_to_completion)
		flush_messages(smi_info);
}

/*
 * Use -1 as a special constant to tell that we are spinning in kipmid
 * looking for something and not delaying between checks.
 */
#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
					 const struct smi_info *smi_info,
					 ktime_t *busy_until)
{
	unsigned int max_busy_us = 0;

	if (smi_info->si_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->si_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		*busy_until = IPMI_TIME_NOT_BUSY;
	else if (*busy_until == IPMI_TIME_NOT_BUSY) {
		*busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
	} else {
		if (unlikely(ktime_get() > *busy_until)) {
			*busy_until = IPMI_TIME_NOT_BUSY;
			return false;
		}
	}
	return true;
}
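
/*
 * In other words: the first SI_SM_CALL_WITH_DELAY result opens a busy
 * window of kipmid_max_busy_us microseconds; further calls return true
 * (keep spinning) until ktime_get() passes the window's end, at which
 * point the thread is told to sleep instead.
 */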


/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard.  This is only enabled for systems
 * that are not BT and do not have interrupts.  It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled).  See the paragraph on kipmid_max_busy_us in
 * Documentation/driver-api/ipmi.rst for details.
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	ktime_t busy_until = IPMI_TIME_NOT_BUSY;

	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;

		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);

		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler sees idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			; /* do nothing */
		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
			/*
			 * In maintenance mode we run as fast as
			 * possible to allow firmware updates to
			 * complete as fast as possible, but normally
			 * don't bang on the scheduler.
			 */
			if (smi_info->in_maintenance_mode)
				schedule();
			else
				usleep_range(100, 200);
		} else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
		} else {
			schedule_timeout_interruptible(1);
		}
	}
	return 0;
}


static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool run_to_completion = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);
	if (!run_to_completion)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!run_to_completion)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (!smi_info->has_event_buffer)
		return;

	atomic_set(&smi_info->req_events, 1);
}

static void set_need_watch(void *send_info, unsigned int watch_mask)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;
	int enable;

	enable = !!watch_mask;

	atomic_set(&smi_info->need_watch, enable);
	spin_lock_irqsave(&smi_info->si_lock, flags);
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}

static void smi_timeout(struct timer_list *t)
{
	struct smi_info *smi_info = timer_container_of(smi_info, t,
						       si_timer);
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;
	long time_diff;
	long timeout;

	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp(smi_info, "Timer");

	if (smi_info->si_state == SI_HOSED)
		/* Try something to see if the BMC is now operational. */
		start_get_flags(smi_info);

	jiffies_now = jiffies;
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);

	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
	} else if (smi_result == SI_SM_CALL_WITH_DELAY) {
		/*
		 * If the state machine asks for a short delay, then shorten
		 * the timer timeout.
		 */
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}

	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
	else
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}

irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;

	if (smi_info->io.si_info->type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);

	spin_lock_irqsave(&(smi_info->si_lock), flags);

	smi_inc_stat(smi_info, interrupts);

	debug_timestamp(smi_info, "Interrupt");

	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}

static int smi_start_processing(void *send_info,
				struct ipmi_smi *intf)
{
	struct smi_info *new_smi = send_info;
	int enable = 0;

	new_smi->intf = intf;

	/* Set up the timer that drives the interface. */
	timer_setup(&new_smi->si_timer, smi_timeout, 0);
	new_smi->timer_can_start = true;
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->io.irq_setup) {
		new_smi->io.irq_handler_data = new_smi;
		new_smi->io.irq_setup(&new_smi->io);
	}

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->si_num < num_force_kipmid)
		enable = force_kipmid[new_smi->si_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if (new_smi->io.si_info->type != SI_BT && !new_smi->io.irq)
		enable = 1;

	if (enable) {
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->si_num);
		if (IS_ERR(new_smi->thread)) {
			dev_notice(new_smi->io.dev,
				   "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;
		}
	}

	return 0;
}

static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
	struct smi_info *smi = send_info;

	data->addr_src = smi->io.addr_source;
	data->dev = smi->io.dev;
	data->addr_info = smi->io.addr_info;
	get_device(smi->io.dev);

	return 0;
}

static void set_maintenance_mode(void *send_info, bool enable)
{
	struct smi_info *smi_info = send_info;

	if (!enable)
		atomic_set(&smi_info->req_events, 0);
	smi_info->in_maintenance_mode = enable;
}

static void shutdown_smi(void *send_info);
static const struct ipmi_smi_handlers handlers = {
	.owner = THIS_MODULE,
	.start_processing = smi_start_processing,
	.shutdown = shutdown_smi,
	.get_smi_info = get_smi_info,
	.sender = sender,
	.request_events = request_events,
	.set_need_watch = set_need_watch,
	.set_maintenance_mode = set_maintenance_mode,
	.set_run_to_completion = set_run_to_completion,
	.flush_messages = flush_messages,
	.poll = poll,
};

static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */

static const char * const addr_space_to_str[] = { "i/o", "mem" };

module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid,
		 "Force the kipmi daemon to be enabled (1) or disabled (0).  Normally the IPMI driver auto-detects this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty,
		 "Unload the module if no interfaces are specified or found, default is 1.  Setting to 0 is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before sleeping.  0 (default) means to wait forever.  Set to 100-500 if kipmid is using up a lot of CPU time.");

void ipmi_irq_finish_setup(struct si_sm_io *io)
{
	if (io->si_info->type == SI_BT)
		/* Enable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG,
			    IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}

void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
	if (io->si_info->type == SI_BT)
		/* Disable the interrupt in the BT interface. */
		io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}

static void std_irq_cleanup(struct si_sm_io *io)
{
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);
}

int ipmi_std_irq_setup(struct si_sm_io *io)
{
	int rv;

	if (!io->irq)
		return 0;

	rv = request_irq(io->irq,
			 ipmi_si_irq_handler,
			 IRQF_SHARED,
			 SI_DEVICE_NAME,
			 io->irq_handler_data);
	if (rv) {
		dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
			 SI_DEVICE_NAME, io->irq);
		io->irq = 0;
	} else {
		io->irq_cleanup = std_irq_cleanup;
		ipmi_irq_finish_setup(io);
		dev_info(io->dev, "Using irq %d\n", io->irq);
	}

	return rv;
}

static int wait_for_msg_done(struct smi_info *smi_info)
{
	enum si_sm_result smi_result;

	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		} else
			break;
	}
	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
		 */
		return -ENODEV;

	return 0;
}

static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char msg[2];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;
	unsigned int retry_count = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;

retry:
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv)
		goto out;

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	/* Check and record info from the get device id, in case we need it. */
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
				     resp + 2, resp_len - 2,
				     &smi_info->device_id);
	if (rv) {
		/* record completion code */
		unsigned char cc = *(resp + 2);

		if (cc != IPMI_CC_NO_ERROR &&
		    ++retry_count <= GET_DEVICE_ID_MAX_RETRY) {
			dev_warn_ratelimited(smi_info->io.dev,
				"BMC returned 0x%2.2x, retry get bmc device id\n",
				cc);
			goto retry;
		}
	}

out:
	kfree(resp);
	return rv;
}

static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);
		rv = -EINVAL;
		goto out;
	} else {
		*enables = resp[3];
	}

out:
	kfree(resp);
	return rv;
}

/*
 * Returns 1 if it gets an error from the command.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = enables;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		rv = 1;

out:
	kfree(resp);
	return rv;
}

/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */
			return;

		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when clearing the receive irq bit means
		 * clearing the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}

/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;

	if (!smi_info->io.irq)
		return;

	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}

	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}

	if (rv) {
		/*
		 * An error when setting the receive irq bit means
		 * setting the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}

static int try_enable_event_buffer(struct smi_info *smi_info)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;
		goto out;
	}

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from set global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		pr_warn("Invalid return from set global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
		 */
		rv = -ENOENT;
	else
		smi_info->supports_event_msg_buff = true;

out:
	kfree(resp);
	return rv;
}

#define IPMI_SI_ATTR(name) \
static ssize_t name##_show(struct device *dev,			\
			   struct device_attribute *attr,	\
			   char *buf)				\
{								\
	struct smi_info *smi_info = dev_get_drvdata(dev);	\
								\
	return sysfs_emit(buf, "%u\n", smi_get_stat(smi_info, name)); \
}								\
static DEVICE_ATTR_RO(name)
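
/*
 * For instance, IPMI_SI_ATTR(idles) below expands to an idles_show()
 * function reading stats[SI_STAT_idles] plus a read-only device
 * attribute named dev_attr_idles, which is what the attribute array
 * further down references.
 */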
1633
1634
static ssize_t type_show(struct device *dev,
1635
struct device_attribute *attr,
1636
char *buf)
1637
{
1638
struct smi_info *smi_info = dev_get_drvdata(dev);
1639
1640
return sysfs_emit(buf, "%s\n", si_to_str[smi_info->io.si_info->type]);
1641
}
1642
static DEVICE_ATTR_RO(type);
1643
1644
static ssize_t interrupts_enabled_show(struct device *dev,
1645
struct device_attribute *attr,
1646
char *buf)
1647
{
1648
struct smi_info *smi_info = dev_get_drvdata(dev);
1649
int enabled = smi_info->io.irq && !smi_info->interrupt_disabled;
1650
1651
return sysfs_emit(buf, "%d\n", enabled);
1652
}
1653
static DEVICE_ATTR_RO(interrupts_enabled);
1654
1655
IPMI_SI_ATTR(short_timeouts);
1656
IPMI_SI_ATTR(long_timeouts);
1657
IPMI_SI_ATTR(idles);
1658
IPMI_SI_ATTR(interrupts);
1659
IPMI_SI_ATTR(attentions);
1660
IPMI_SI_ATTR(flag_fetches);
1661
IPMI_SI_ATTR(hosed_count);
1662
IPMI_SI_ATTR(complete_transactions);
1663
IPMI_SI_ATTR(events);
1664
IPMI_SI_ATTR(watchdog_pretimeouts);
1665
IPMI_SI_ATTR(incoming_messages);
1666
1667
static ssize_t params_show(struct device *dev,
1668
struct device_attribute *attr,
1669
char *buf)
1670
{
1671
struct smi_info *smi_info = dev_get_drvdata(dev);
1672
1673
return sysfs_emit(buf,
1674
"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
1675
si_to_str[smi_info->io.si_info->type],
1676
addr_space_to_str[smi_info->io.addr_space],
1677
smi_info->io.addr_data,
1678
smi_info->io.regspacing,
1679
smi_info->io.regsize,
1680
smi_info->io.regshift,
1681
smi_info->io.irq,
1682
smi_info->io.slave_addr);
1683
}
1684
static DEVICE_ATTR_RO(params);
1685
1686
static struct attribute *ipmi_si_dev_attrs[] = {
1687
&dev_attr_type.attr,
1688
&dev_attr_interrupts_enabled.attr,
1689
&dev_attr_short_timeouts.attr,
1690
&dev_attr_long_timeouts.attr,
1691
&dev_attr_idles.attr,
1692
&dev_attr_interrupts.attr,
1693
&dev_attr_attentions.attr,
1694
&dev_attr_flag_fetches.attr,
1695
&dev_attr_hosed_count.attr,
1696
&dev_attr_complete_transactions.attr,
1697
&dev_attr_events.attr,
1698
&dev_attr_watchdog_pretimeouts.attr,
1699
&dev_attr_incoming_messages.attr,
1700
&dev_attr_params.attr,
1701
NULL
1702
};
1703
1704
static const struct attribute_group ipmi_si_dev_attr_group = {
1705
.attrs = ipmi_si_dev_attrs,
1706
};
1707
1708
/*
1709
* oem_data_avail_to_receive_msg_avail
1710
* @info - smi_info structure with msg_flags set
1711
*
1712
* Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
1713
* Returns 1 indicating need to re-run handle_flags().
1714
*/
1715
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
1716
{
1717
smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
1718
RECEIVE_MSG_AVAIL);
1719
return 1;
1720
}
1721
1722
/*
1723
* setup_dell_poweredge_oem_data_handler
1724
* @info - smi_info.device_id must be populated
1725
*
1726
* Systems that match, but have firmware version < 1.40 may assert
1727
* OEM0_DATA_AVAIL on their own, without being told via Set Flags that
1728
* it's safe to do so. Such systems will de-assert OEM1_DATA_AVAIL
1729
* upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
1730
* as RECEIVE_MSG_AVAIL instead.
1731
*
1732
* As Dell has no plans to release IPMI 1.5 firmware that *ever*
1733
* assert the OEM[012] bits, and if it did, the driver would have to
1734
* change to handle that properly, we don't actually check for the
1735
* firmware version.
1736
* Device ID = 0x20 BMC on PowerEdge 8G servers
1737
* Device Revision = 0x80
1738
* Firmware Revision1 = 0x01 BMC version 1.40
1739
* Firmware Revision2 = 0x40 BCD encoded
1740
* IPMI Version = 0x51 IPMI 1.5
1741
* Manufacturer ID = A2 02 00 Dell IANA
1742
*
1743
* Additionally, PowerEdge systems with IPMI < 1.5 may also assert
1744
* OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
1745
*
1746
*/
1747
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20
1748
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
1749
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
1750
#define DELL_IANA_MFR_ID 0x0002a2
1751
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
1752
{
1753
struct ipmi_device_id *id = &smi_info->device_id;
1754
if (id->manufacturer_id == DELL_IANA_MFR_ID) {
1755
if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
1756
id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
1757
id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
1758
smi_info->oem_data_avail_handler =
1759
oem_data_avail_to_receive_msg_avail;
1760
} else if (ipmi_version_major(id) < 1 ||
1761
(ipmi_version_major(id) == 1 &&
1762
ipmi_version_minor(id) < 5)) {
1763
smi_info->oem_data_avail_handler =
1764
oem_data_avail_to_receive_msg_avail;
1765
}
1766
}
1767
}
1768
1769
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
1770
static void return_hosed_msg_badsize(struct smi_info *smi_info)
1771
{
1772
struct ipmi_smi_msg *msg = smi_info->curr_msg;
1773
1774
/* Make it a response */
1775
msg->rsp[0] = msg->data[0] | 4;
1776
msg->rsp[1] = msg->data[1];
1777
msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
1778
msg->rsp_size = 3;
1779
smi_info->curr_msg = NULL;
1780
deliver_recv_msg(smi_info, msg);
1781
}
1782
1783
/*
1784
* dell_poweredge_bt_xaction_handler
1785
* @info - smi_info.device_id must be populated
1786
*
1787
* Dell PowerEdge servers with the BT interface (x6xx and 1750) will
1788
* not respond to a Get SDR command if the length of the data
1789
* requested is exactly 0x3A, which leads to command timeouts and no
1790
* data returned. This intercepts such commands, and causes userspace
1791
* callers to try again with a different-sized buffer, which succeeds.
1792
*/
1793
1794
#define STORAGE_NETFN 0x0A
1795
#define STORAGE_CMD_GET_SDR 0x23
1796
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
1797
unsigned long unused,
1798
void *in)
1799
{
1800
struct smi_info *smi_info = in;
1801
unsigned char *data = smi_info->curr_msg->data;
1802
unsigned int size = smi_info->curr_msg->data_size;
1803
if (size >= 8 &&
1804
(data[0]>>2) == STORAGE_NETFN &&
1805
data[1] == STORAGE_CMD_GET_SDR &&
1806
data[7] == 0x3A) {
1807
return_hosed_msg_badsize(smi_info);
1808
return NOTIFY_STOP;
1809
}
1810
return NOTIFY_DONE;
1811
}
1812
1813
static struct notifier_block dell_poweredge_bt_xaction_notifier = {
1814
.notifier_call = dell_poweredge_bt_xaction_handler,
1815
};
1816
1817
/*
1818
* setup_dell_poweredge_bt_xaction_handler
1819
* @info - smi_info.device_id must be filled in already
1820
*
1821
* Fills in smi_info.device_id.start_transaction_pre_hook
1822
* when we know what function to use there.
1823
*/
1824
static void
1825
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
1826
{
1827
struct ipmi_device_id *id = &smi_info->device_id;
1828
if (id->manufacturer_id == DELL_IANA_MFR_ID &&
1829
smi_info->io.si_info->type == SI_BT)
1830
register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
1831
}
1832
1833
/*
1834
* setup_oem_data_handler
1835
* @info - smi_info.device_id must be filled in already
1836
*
1837
* Fills in smi_info.device_id.oem_data_available_handler
1838
* when we know what function to use there.
1839
*/
1840
1841
static void setup_oem_data_handler(struct smi_info *smi_info)
1842
{
1843
setup_dell_poweredge_oem_data_handler(smi_info);
1844
}
1845
1846
static void setup_xaction_handlers(struct smi_info *smi_info)
1847
{
1848
setup_dell_poweredge_bt_xaction_handler(smi_info);
1849
}
1850
1851
static void check_for_broken_irqs(struct smi_info *smi_info)
1852
{
1853
check_clr_rcv_irq(smi_info);
1854
check_set_rcv_irq(smi_info);
1855
}
1856
1857
static inline void stop_timer_and_thread(struct smi_info *smi_info)
1858
{
1859
if (smi_info->thread != NULL) {
1860
kthread_stop(smi_info->thread);
1861
smi_info->thread = NULL;
1862
}
1863
1864
smi_info->timer_can_start = false;
1865
timer_delete_sync(&smi_info->si_timer);
1866
}
1867
1868
static struct smi_info *find_dup_si(struct smi_info *info)
1869
{
1870
struct smi_info *e;
1871
1872
list_for_each_entry(e, &smi_infos, link) {
1873
if (e->io.addr_space != info->io.addr_space)
1874
continue;
1875
if (e->io.addr_data == info->io.addr_data) {
1876
/*
1877
* This is a cheap hack, ACPI doesn't have a defined
1878
* slave address but SMBIOS does. Pick it up from
1879
* any source that has it available.
1880
*/
1881
if (info->io.slave_addr && !e->io.slave_addr)
1882
e->io.slave_addr = info->io.slave_addr;
1883
return e;
1884
}
1885
}
1886
1887
return NULL;
1888
}
1889
1890
int ipmi_si_add_smi(struct si_sm_io *io)
{
	int rv = 0;
	struct smi_info *new_smi, *dup;

	/*
	 * If the user gave us a hard-coded device at the same
	 * address, they presumably want us to use it and not what is
	 * in the firmware.
	 */
	if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
	    ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
		dev_info(io->dev,
			 "Hard-coded device at this address already exists\n");
		return -ENODEV;
	}

	if (!io->io_setup) {
		if (IS_ENABLED(CONFIG_HAS_IOPORT) &&
		    io->addr_space == IPMI_IO_ADDR_SPACE) {
			io->io_setup = ipmi_si_port_setup;
		} else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
			io->io_setup = ipmi_si_mem_setup;
		} else {
			return -EINVAL;
		}
	}

	new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
	if (!new_smi)
		return -ENOMEM;
	spin_lock_init(&new_smi->si_lock);

	new_smi->io = *io;

	mutex_lock(&smi_infos_lock);
	dup = find_dup_si(new_smi);
	if (dup) {
		if (new_smi->io.addr_source == SI_ACPI &&
		    dup->io.addr_source == SI_SMBIOS) {
			/* We prefer ACPI over SMBIOS. */
			dev_info(dup->io.dev,
				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
				 si_to_str[new_smi->io.si_info->type]);
			cleanup_one_si(dup);
		} else {
			dev_info(new_smi->io.dev,
				 "%s-specified %s state machine: duplicate\n",
				 ipmi_addr_src_to_str(new_smi->io.addr_source),
				 si_to_str[new_smi->io.si_info->type]);
			rv = -EBUSY;
			kfree(new_smi);
			goto out_err;
		}
	}

	pr_info("Adding %s-specified %s state machine\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_info->type]);

	list_add_tail(&new_smi->link, &smi_infos);

	if (initialized)
		rv = try_smi_init(new_smi);
out_err:
	mutex_unlock(&smi_infos_lock);
	return rv;
}

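/*
 * Illustrative sketch, not part of this driver: a discovery backend
 * (platform, PCI, etc.) registers an interface it has found roughly
 * like this.  The pdev variable and the specific values here are
 * hypothetical; the standard KCS data port is used as the example
 * address.
 *
 *	struct si_sm_io io;
 *	int rv;
 *
 *	memset(&io, 0, sizeof(io));
 *	io.addr_source = SI_PLATFORM;
 *	io.si_info = &ipmi_kcs_si_info;
 *	io.addr_space = IPMI_IO_ADDR_SPACE;
 *	io.addr_data = 0xca2;
 *	io.regspacing = 1;
 *	io.regsize = 1;
 *	io.dev = &pdev->dev;
 *	rv = ipmi_si_add_smi(&io);	(returns -EBUSY on a duplicate)
 */
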
/*
 * Try to start up an interface.  Must be called with smi_infos_lock
 * held, primarily to keep smi_num consistent; we only want to do
 * these one at a time.
 */
static int try_smi_init(struct smi_info *new_smi)
{
	int rv = 0;
	int i;

	pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_info->type],
		addr_space_to_str[new_smi->io.addr_space],
		new_smi->io.addr_data,
		new_smi->io.slave_addr, new_smi->io.irq);

	switch (new_smi->io.si_info->type) {
	case SI_KCS:
		new_smi->handlers = &kcs_smi_handlers;
		break;

	case SI_SMIC:
		new_smi->handlers = &smic_smi_handlers;
		break;

	case SI_BT:
		new_smi->handlers = &bt_smi_handlers;
		break;

	default:
		/* No support for anything else yet. */
		rv = -EIO;
		goto out_err;
	}

	new_smi->si_num = smi_num;

	/* Do this early so it's available for logs. */
	if (!new_smi->io.dev) {
		pr_err("IPMI interface added with no device\n");
		rv = -EIO;
		goto out_err;
	}

	/* Allocate the state machine's data and initialize it. */
	new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
	if (!new_smi->si_sm) {
		rv = -ENOMEM;
		goto out_err;
	}
	new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
							   &new_smi->io);

	/* Now that we know the I/O size, we can set up the I/O. */
	rv = new_smi->io.io_setup(&new_smi->io);
	if (rv) {
		dev_err(new_smi->io.dev, "Could not set up I/O space\n");
		goto out_err;
	}

	/* Do low-level detection first. */
	if (new_smi->handlers->detect(new_smi->si_sm)) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"Interface detection failed\n");
		rv = -ENODEV;
		goto out_err;
	}

	/*
	 * Attempt a get device id command.  If it fails, we probably
	 * don't have a BMC here.
	 */
	rv = try_get_dev_id(new_smi);
	if (rv) {
		if (new_smi->io.addr_source)
			dev_err(new_smi->io.dev,
				"There appears to be no BMC at this location\n");
		goto out_err;
	}

	setup_oem_data_handler(new_smi);
	setup_xaction_handlers(new_smi);
	check_for_broken_irqs(new_smi);

	new_smi->waiting_msg = NULL;
	new_smi->curr_msg = NULL;
	atomic_set(&new_smi->req_events, 0);
	new_smi->run_to_completion = false;
	for (i = 0; i < SI_NUM_STATS; i++)
		atomic_set(&new_smi->stats[i], 0);

	new_smi->interrupt_disabled = true;
	atomic_set(&new_smi->need_watch, 0);

	rv = try_enable_event_buffer(new_smi);
	if (rv == 0)
		new_smi->has_event_buffer = true;

	/*
	 * Start clearing the flags before we enable interrupts or the
	 * timer to avoid racing with the timer.
	 */
	start_clear_flags(new_smi);

	/*
	 * IRQ is defined to be set when non-zero.  req_events will
	 * cause a global flags check that will enable interrupts.
	 */
	if (new_smi->io.irq) {
		new_smi->interrupt_disabled = false;
		atomic_set(&new_smi->req_events, 1);
	}

	dev_set_drvdata(new_smi->io.dev, new_smi);
	rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to add device attributes: error %d\n",
			rv);
		goto out_err;
	}
	new_smi->dev_group_added = true;

	rv = ipmi_register_smi(&handlers,
			       new_smi,
			       new_smi->io.dev,
			       new_smi->io.slave_addr);
	if (rv) {
		dev_err(new_smi->io.dev,
			"Unable to register device: error %d\n",
			rv);
		goto out_err;
	}

	/* Don't increment till we know we have succeeded. */
	smi_num++;

	dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
		 si_to_str[new_smi->io.si_info->type]);

	WARN_ON(new_smi->io.dev->init_name != NULL);

out_err:
	if (rv && new_smi->io.io_cleanup) {
		new_smi->io.io_cleanup(&new_smi->io);
		new_smi->io.io_cleanup = NULL;
	}

	if (rv && new_smi->si_sm) {
		kfree(new_smi->si_sm);
		new_smi->si_sm = NULL;
	}

	return rv;
}

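/*
 * Summary of the bring-up sequence above, for reference:
 *
 *   1. pick the state machine handlers for the interface type
 *   2. allocate and init the state machine, then map the I/O
 *   3. detect the hardware and issue Get Device ID to confirm a BMC
 *   4. install OEM quirks and check for broken IRQ handling
 *   5. clear pending flags, enable IRQs if present, and register
 *      with the IPMI core
 *
 * On any failure the out_err path unwinds the I/O mapping and the
 * state machine; anything registered with the core is torn down
 * later through shutdown_smi() below.
 */
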
/*
 * Devices in the same address space at the same address are the same.
 */
static bool __init ipmi_smi_info_same(struct smi_info *e1, struct smi_info *e2)
{
	return (e1->io.addr_space == e2->io.addr_space &&
		e1->io.addr_data == e2->io.addr_data);
}

static int __init init_ipmi_si(void)
{
	struct smi_info *e, *e2;

	if (initialized)
		return 0;

	ipmi_hardcode_init();

	pr_info("IPMI System Interface driver\n");

	ipmi_si_platform_init();

	ipmi_si_pci_init();

	ipmi_si_ls2k_init();

	ipmi_si_parisc_init();

	mutex_lock(&smi_infos_lock);

	/*
	 * Scan through all the devices.  We prefer devices with
	 * interrupts, so go through those first in case there are any
	 * duplicates that don't have the interrupt set.
	 */
	list_for_each_entry(e, &smi_infos, link) {
		bool dup = false;

		/* Register ones with interrupts first. */
		if (!e->io.irq)
			continue;

		/*
		 * Go through the ones we have already seen to see if this
		 * is a dup.
		 */
		list_for_each_entry(e2, &smi_infos, link) {
			if (e2 == e)
				break;
			if (e2->io.irq && ipmi_smi_info_same(e, e2)) {
				dup = true;
				break;
			}
		}
		if (!dup)
			try_smi_init(e);
	}

	/*
	 * Now try devices without interrupts.
	 */
	list_for_each_entry(e, &smi_infos, link) {
		bool dup = false;

		if (e->io.irq)
			continue;

		/*
		 * Go through the ones we have already seen to see if
		 * this is a dup.  We have already looked at the ones
		 * with interrupts.
		 */
		list_for_each_entry(e2, &smi_infos, link) {
			if (!e2->io.irq)
				continue;
			if (ipmi_smi_info_same(e, e2)) {
				dup = true;
				break;
			}
		}
		list_for_each_entry(e2, &smi_infos, link) {
			if (e2 == e)
				break;
			if (ipmi_smi_info_same(e, e2)) {
				dup = true;
				break;
			}
		}
		if (!dup)
			try_smi_init(e);
	}

	initialized = true;
	mutex_unlock(&smi_infos_lock);

	mutex_lock(&smi_infos_lock);
	if (unload_when_empty && list_empty(&smi_infos)) {
		mutex_unlock(&smi_infos_lock);
		cleanup_ipmi_si();
		pr_warn("Unable to find any System Interface(s)\n");
		return -ENODEV;
	} else {
		mutex_unlock(&smi_infos_lock);
		return 0;
	}
}
module_init(init_ipmi_si);

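/*
 * Example of the two-pass scan above (hypothetical values): if ACPI
 * reports a KCS interface at 0xca2 with IRQ 10 and SMBIOS reports the
 * same address with no IRQ, the first pass initializes the ACPI entry
 * and the second pass drops the SMBIOS one as a duplicate, so the
 * interrupt-capable description wins.
 */
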
static void wait_msg_processed(struct smi_info *smi_info)
{
	unsigned long jiffies_now;
	long time_diff;

	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		jiffies_now = jiffies;
		time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
			     * SI_USEC_PER_JIFFY);
		smi_event_handler(smi_info, time_diff);
		schedule_timeout_uninterruptible(1);
	}
}

static void shutdown_smi(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (smi_info->dev_group_added) {
		device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
		smi_info->dev_group_added = false;
	}
	if (smi_info->io.dev)
		dev_set_drvdata(smi_info->io.dev, NULL);

	/*
	 * Make sure that interrupts, the timer and the thread are
	 * stopped and will not run again.
	 */
	smi_info->interrupt_disabled = true;
	if (smi_info->io.irq_cleanup) {
		smi_info->io.irq_cleanup(&smi_info->io);
		smi_info->io.irq_cleanup = NULL;
	}
	stop_timer_and_thread(smi_info);

	/*
	 * Wait until we know that we are out of any interrupt
	 * handlers that might have been running before we freed the
	 * interrupt.
	 */
	synchronize_rcu();

	/*
	 * Timeouts are stopped; now make sure the interrupts are off
	 * in the BMC.  Note that timers and CPU interrupts are off,
	 * so no need for locks.
	 */
	wait_msg_processed(smi_info);

	if (smi_info->handlers)
		disable_si_irq(smi_info);

	wait_msg_processed(smi_info);

	if (smi_info->handlers)
		smi_info->handlers->cleanup(smi_info->si_sm);

	if (smi_info->io.io_cleanup) {
		smi_info->io.io_cleanup(&smi_info->io);
		smi_info->io.io_cleanup = NULL;
	}

	kfree(smi_info->si_sm);
	smi_info->si_sm = NULL;

	smi_info->intf = NULL;
}

/*
 * Must be called with smi_infos_lock held, to serialize the
 * smi_info->intf check.
 */
static void cleanup_one_si(struct smi_info *smi_info)
{
	if (!smi_info)
		return;

	list_del(&smi_info->link);
	ipmi_unregister_smi(smi_info->intf);
	kfree(smi_info);
}

void ipmi_si_remove_by_dev(struct device *dev)
{
	struct smi_info *e;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.dev == dev) {
			cleanup_one_si(e);
			break;
		}
	}
	mutex_unlock(&smi_infos_lock);
}

struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
				      unsigned long addr)
{
	struct smi_info *e, *tmp_e;
	struct device *dev = NULL;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link) {
		if (e->io.addr_space != addr_space)
			continue;
		if (e->io.si_info->type != si_type)
			continue;
		if (e->io.addr_data == addr) {
			dev = get_device(e->io.dev);
			cleanup_one_si(e);
		}
	}
	mutex_unlock(&smi_infos_lock);

	return dev;
}

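/*
 * Illustrative use (hypothetical caller, e.g. a hot-remove path): the
 * device returned above carries a reference taken with get_device()
 * that the caller must drop once it is done with it.
 *
 *	struct device *dev;
 *
 *	dev = ipmi_si_remove_by_data(IPMI_IO_ADDR_SPACE, SI_KCS, 0xca2);
 *	if (dev)
 *		put_device(dev);
 */
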
static void cleanup_ipmi_si(void)
{
	struct smi_info *e, *tmp_e;

	if (!initialized)
		return;

	ipmi_si_pci_shutdown();

	ipmi_si_ls2k_shutdown();

	ipmi_si_parisc_shutdown();

	ipmi_si_platform_shutdown();

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp_e, &smi_infos, link)
		cleanup_one_si(e);
	mutex_unlock(&smi_infos_lock);

	ipmi_si_hardcode_exit();
	ipmi_si_hotmod_exit();
}
module_exit(cleanup_ipmi_si);

MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <[email protected]>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces.");