Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/arch/powerpc/platforms/pseries/papr-hvpipe.c
29274 views
1
// SPDX-License-Identifier: GPL-2.0-only
2
3
#define pr_fmt(fmt) "papr-hvpipe: " fmt
4
5
#include <linux/module.h>
6
#include <linux/kernel.h>
7
#include <linux/types.h>
8
#include <linux/delay.h>
9
#include <linux/anon_inodes.h>
10
#include <linux/miscdevice.h>
11
#include <linux/file.h>
12
#include <linux/fs.h>
13
#include <linux/poll.h>
14
#include <linux/of.h>
15
#include <asm/machdep.h>
16
#include <asm/rtas.h>
17
#include <asm/rtas-work-area.h>
18
#include <asm/papr-sysparm.h>
19
#include <uapi/asm/papr-hvpipe.h>
20
#include "pseries.h"
21
#include "papr-hvpipe.h"
22
23
/* Protects hvpipe_src_list and each source's hvpipe_status. */
static DEFINE_SPINLOCK(hvpipe_src_list_lock);
/* Sources (one per HMC) currently opened by user space. */
static LIST_HEAD(hvpipe_src_list);

/* Buffer handed to check-exception RTAS to retrieve the HVPIPE event log. */
static unsigned char hvpipe_ras_buf[RTAS_ERROR_LOG_MAX];
/* Ordered workqueue + work item used to drain payloads no FD is waiting on. */
static struct workqueue_struct *papr_hvpipe_wq;
static struct work_struct *papr_hvpipe_work;
/* RTAS check-exception token used by the event IRQ handler. */
static int hvpipe_check_exception_token;
/* False while the feature is unavailable (e.g. across migration suspend). */
static bool hvpipe_feature;
31
32
/*
33
* New PowerPC FW provides support for partitions and various
34
* sources (Ex: remote hardware management console (HMC)) to
35
* exchange information through an inband hypervisor channel
36
* called HVPIPE. Only HMCs are supported right now and
37
* partitions can communicate with multiple HMCs and each
38
* source represented by source ID.
39
*
40
* FW introduces send HVPIPE and recv HVPIPE RTAS calls for
41
* partitions to send and receive payloads respectively.
42
*
43
 * These RTAS functions have the following requirements /
 * limitations:
45
* - One hvpipe per partition for all sources.
46
* - Assume the return status of send HVPIPE as delivered to source
47
* - Assume the return status of recv HVPIPE as ACK to source
48
* - Generates HVPIPE event message when the payload is ready
49
 * for the partition. The hypervisor will not deliver another
 * event until the partition reads the previous payload, which
 * means the pipe is blocked for all sources.
52
*
53
* Linux implementation:
54
* Follow the similar interfaces that the OS has for other RTAS calls.
55
* ex: /dev/papr-indices, /dev/papr-vpd, etc.
56
* - /dev/papr-hvpipe is available for the user space.
57
* - devfd = open("/dev/papr-hvpipe", ..)
58
* - fd = ioctl(fd,HVPIPE_IOC_CREATE_HANDLE,&srcID)-for each source
59
* - write(fd, buf, size) --> Issue send HVPIPE RTAS call and
60
* returns size for success or the corresponding error for RTAS
61
* return code for failure.
62
* - poll(fd,..) -> wakeup FD if the payload is available to read.
63
* HVPIPE event message handler wakeup FD based on source ID in
64
* the event message
65
* - read(fd, buf, size) --> Issue recv HVPIPE RTAS call and
66
* returns size for success or the corresponding error for RTAS
67
* return code for failure.
68
*/
69
70
/*
71
* ibm,receive-hvpipe-msg RTAS call.
72
* @area: Caller-provided work area buffer for results.
73
* @srcID: Source ID returned by the RTAS call.
74
* @bytesw: Bytes written by RTAS call to @area.
75
*/
76
static int rtas_ibm_receive_hvpipe_msg(struct rtas_work_area *area,
77
u32 *srcID, u32 *bytesw)
78
{
79
const s32 token = rtas_function_token(RTAS_FN_IBM_RECEIVE_HVPIPE_MSG);
80
u32 rets[2];
81
s32 fwrc;
82
int ret;
83
84
if (token == RTAS_UNKNOWN_SERVICE)
85
return -ENOENT;
86
87
do {
88
fwrc = rtas_call(token, 2, 3, rets,
89
rtas_work_area_phys(area),
90
rtas_work_area_size(area));
91
92
} while (rtas_busy_delay(fwrc));
93
94
switch (fwrc) {
95
case RTAS_SUCCESS:
96
*srcID = rets[0];
97
*bytesw = rets[1];
98
ret = 0;
99
break;
100
case RTAS_HARDWARE_ERROR:
101
ret = -EIO;
102
break;
103
case RTAS_INVALID_PARAMETER:
104
ret = -EINVAL;
105
break;
106
case RTAS_FUNC_NOT_SUPPORTED:
107
ret = -EOPNOTSUPP;
108
break;
109
default:
110
ret = -EIO;
111
pr_err_ratelimited("unexpected ibm,receive-hvpipe-msg status %d\n", fwrc);
112
break;
113
}
114
115
return ret;
116
}
117
118
/*
119
* ibm,send-hvpipe-msg RTAS call
120
* @area: Caller-provided work area buffer to send.
121
* @srcID: Target source for the send pipe message.
122
*/
123
static int rtas_ibm_send_hvpipe_msg(struct rtas_work_area *area, u32 srcID)
124
{
125
const s32 token = rtas_function_token(RTAS_FN_IBM_SEND_HVPIPE_MSG);
126
s32 fwrc;
127
int ret;
128
129
if (token == RTAS_UNKNOWN_SERVICE)
130
return -ENOENT;
131
132
do {
133
fwrc = rtas_call(token, 2, 1, NULL, srcID,
134
rtas_work_area_phys(area));
135
136
} while (rtas_busy_delay(fwrc));
137
138
switch (fwrc) {
139
case RTAS_SUCCESS:
140
ret = 0;
141
break;
142
case RTAS_HARDWARE_ERROR:
143
ret = -EIO;
144
break;
145
case RTAS_INVALID_PARAMETER:
146
ret = -EINVAL;
147
break;
148
case RTAS_HVPIPE_CLOSED:
149
ret = -EPIPE;
150
break;
151
case RTAS_FUNC_NOT_SUPPORTED:
152
ret = -EOPNOTSUPP;
153
break;
154
default:
155
ret = -EIO;
156
pr_err_ratelimited("unexpected ibm,receive-hvpipe-msg status %d\n", fwrc);
157
break;
158
}
159
160
return ret;
161
}
162
163
/*
 * Look up an open source by ID on the global source list.
 * Caller must hold hvpipe_src_list_lock. Returns NULL when no
 * process has a handle open for @srcID.
 */
static struct hvpipe_source_info *hvpipe_find_source(u32 srcID)
{
	struct hvpipe_source_info *cur;

	list_for_each_entry(cur, &hvpipe_src_list, list) {
		if (cur->srcID == srcID)
			return cur;
	}

	return NULL;
}
173
174
/*
175
* This work function collects receive buffer with recv HVPIPE
176
* RTAS call. Called from read()
177
* @buf: User specified buffer to copy the payload that returned
178
* from recv HVPIPE RTAS.
179
* @size: Size of buffer user passed.
180
*/
181
static int hvpipe_rtas_recv_msg(char __user *buf, int size)
182
{
183
struct rtas_work_area *work_area;
184
u32 srcID, bytes_written;
185
int ret;
186
187
work_area = rtas_work_area_alloc(SZ_4K);
188
if (!work_area) {
189
pr_err("Could not allocate RTAS buffer for recv pipe\n");
190
return -ENOMEM;
191
}
192
193
ret = rtas_ibm_receive_hvpipe_msg(work_area, &srcID,
194
&bytes_written);
195
if (!ret) {
196
/*
197
* Recv HVPIPE RTAS is successful.
198
* When releasing FD or no one is waiting on the
199
* specific source, issue recv HVPIPE RTAS call
200
* so that pipe is not blocked - this func is called
201
* with NULL buf.
202
*/
203
if (buf) {
204
if (size < bytes_written) {
205
pr_err("Received the payload size = %d, but the buffer size = %d\n",
206
bytes_written, size);
207
bytes_written = size;
208
}
209
ret = copy_to_user(buf,
210
rtas_work_area_raw_buf(work_area),
211
bytes_written);
212
if (!ret)
213
ret = bytes_written;
214
}
215
} else {
216
pr_err("ibm,receive-hvpipe-msg failed with %d\n",
217
ret);
218
}
219
220
rtas_work_area_free(work_area);
221
return ret;
222
}
223
224
/*
225
* papr_hvpipe_handle_write - Issue send HVPIPE RTAS and return
226
* the size (payload + HVPIPE_HDR_LEN) for RTAS success.
227
* Otherwise returns the status of RTAS to the user space
228
*/
229
static ssize_t papr_hvpipe_handle_write(struct file *file,
230
const char __user *buf, size_t size, loff_t *off)
231
{
232
struct hvpipe_source_info *src_info = file->private_data;
233
struct rtas_work_area *work_area, *work_buf;
234
unsigned long ret, len;
235
__be64 *area_be;
236
237
/*
238
* Return -ENXIO during migration
239
*/
240
if (!hvpipe_feature)
241
return -ENXIO;
242
243
if (!src_info)
244
return -EIO;
245
246
/*
247
* Send HVPIPE RTAS is used to send payload to the specific
248
* source with the input parameters source ID and the payload
249
* as buffer list. Each entry in the buffer list contains
250
* address/length pair of the buffer.
251
*
252
* The buffer list format is as follows:
253
*
254
* Header (length of address/length pairs and the header length)
255
* Address of 4K buffer 1
256
* Length of 4K buffer 1 used
257
* ...
258
* Address of 4K buffer n
259
* Length of 4K buffer n used
260
*
261
* See PAPR 7.3.32.2 ibm,send-hvpipe-msg
262
*
263
* Even though can support max 1MB payload, the hypervisor
264
* supports only 4048 bytes payload at present and also
265
* just one address/length entry.
266
*
267
* writev() interface can be added in future when the
268
* hypervisor supports multiple buffer list entries.
269
*/
270
/* HVPIPE_MAX_WRITE_BUFFER_SIZE = 4048 bytes */
271
if ((size > (HVPIPE_HDR_LEN + HVPIPE_MAX_WRITE_BUFFER_SIZE)) ||
272
(size <= HVPIPE_HDR_LEN))
273
return -EINVAL;
274
275
/*
276
* The length of (address + length) pair + the length of header
277
*/
278
len = (2 * sizeof(u64)) + sizeof(u64);
279
size -= HVPIPE_HDR_LEN;
280
buf += HVPIPE_HDR_LEN;
281
mutex_lock(&rtas_ibm_send_hvpipe_msg_lock);
282
work_area = rtas_work_area_alloc(SZ_4K);
283
if (!work_area) {
284
ret = -ENOMEM;
285
goto out;
286
}
287
area_be = (__be64 *)rtas_work_area_raw_buf(work_area);
288
/* header */
289
area_be[0] = cpu_to_be64(len);
290
291
work_buf = rtas_work_area_alloc(SZ_4K);
292
if (!work_buf) {
293
ret = -ENOMEM;
294
goto out_work;
295
}
296
/* First buffer address */
297
area_be[1] = cpu_to_be64(rtas_work_area_phys(work_buf));
298
/* First buffer address length */
299
area_be[2] = cpu_to_be64(size);
300
301
if (!copy_from_user(rtas_work_area_raw_buf(work_buf), buf, size)) {
302
ret = rtas_ibm_send_hvpipe_msg(work_area, src_info->srcID);
303
if (!ret)
304
ret = size + HVPIPE_HDR_LEN;
305
} else
306
ret = -EPERM;
307
308
rtas_work_area_free(work_buf);
309
out_work:
310
rtas_work_area_free(work_area);
311
out:
312
mutex_unlock(&rtas_ibm_send_hvpipe_msg_lock);
313
return ret;
314
}
315
316
/*
317
* papr_hvpipe_handle_read - If the payload for the specific
318
* source is pending in the hypervisor, issue recv HVPIPE RTAS
319
* and return the payload to the user space.
320
*
321
* When the payload is available for the partition, the
322
* hypervisor notifies HVPIPE event with the source ID
323
* and the event handler wakeup FD(s) that are waiting.
324
*/
325
static ssize_t papr_hvpipe_handle_read(struct file *file,
326
char __user *buf, size_t size, loff_t *off)
327
{
328
329
struct hvpipe_source_info *src_info = file->private_data;
330
struct papr_hvpipe_hdr hdr;
331
long ret;
332
333
/*
334
* Return -ENXIO during migration
335
*/
336
if (!hvpipe_feature)
337
return -ENXIO;
338
339
if (!src_info)
340
return -EIO;
341
342
/*
343
* Max payload is 4048 (HVPIPE_MAX_WRITE_BUFFER_SIZE)
344
*/
345
if ((size > (HVPIPE_HDR_LEN + HVPIPE_MAX_WRITE_BUFFER_SIZE)) ||
346
(size < HVPIPE_HDR_LEN))
347
return -EINVAL;
348
349
/*
350
* Payload is not available to receive or source pipe
351
* is not closed.
352
*/
353
if (!src_info->hvpipe_status)
354
return 0;
355
356
hdr.version = 0;
357
hdr.flags = 0;
358
359
/*
360
* In case if the hvpipe has payload and also the
361
* hypervisor closed the pipe to the source, retrieve
362
* the payload and return to the user space first and
363
* then notify the userspace about the hvpipe close in
364
* next read().
365
*/
366
if (src_info->hvpipe_status & HVPIPE_MSG_AVAILABLE)
367
hdr.flags = HVPIPE_MSG_AVAILABLE;
368
else if (src_info->hvpipe_status & HVPIPE_LOST_CONNECTION)
369
hdr.flags = HVPIPE_LOST_CONNECTION;
370
else
371
/*
372
* Should not be here without one of the above
373
* flags set
374
*/
375
return -EIO;
376
377
ret = copy_to_user(buf, &hdr, HVPIPE_HDR_LEN);
378
if (ret)
379
return ret;
380
381
/*
382
* Message event has payload, so get the payload with
383
* recv HVPIPE RTAS.
384
*/
385
if (hdr.flags & HVPIPE_MSG_AVAILABLE) {
386
ret = hvpipe_rtas_recv_msg(buf + HVPIPE_HDR_LEN,
387
size - HVPIPE_HDR_LEN);
388
if (ret > 0) {
389
src_info->hvpipe_status &= ~HVPIPE_MSG_AVAILABLE;
390
ret += HVPIPE_HDR_LEN;
391
}
392
} else if (hdr.flags & HVPIPE_LOST_CONNECTION) {
393
/*
394
* Hypervisor is closing the pipe for the specific
395
* source. So notify user space.
396
*/
397
src_info->hvpipe_status &= ~HVPIPE_LOST_CONNECTION;
398
ret = HVPIPE_HDR_LEN;
399
}
400
401
return ret;
402
}
403
404
/*
405
* The user space waits for the payload to receive.
406
* The hypervisor sends HVPIPE event message to the partition
407
* when the payload is available. The event handler wakeup FD
408
* depends on the source ID in the message event.
409
*/
410
static __poll_t papr_hvpipe_handle_poll(struct file *filp,
411
struct poll_table_struct *wait)
412
{
413
struct hvpipe_source_info *src_info = filp->private_data;
414
415
/*
416
* HVPIPE is disabled during SUSPEND and enabled after migration.
417
* So return POLLRDHUP during migration
418
*/
419
if (!hvpipe_feature)
420
return POLLRDHUP;
421
422
if (!src_info)
423
return POLLNVAL;
424
425
/*
426
* If hvpipe already has pending payload, return so that
427
* the user space can issue read().
428
*/
429
if (src_info->hvpipe_status)
430
return POLLIN | POLLRDNORM;
431
432
/*
433
* Wait for the message event
434
* hvpipe_event_interrupt() wakes up this wait_queue
435
*/
436
poll_wait(filp, &src_info->recv_wqh, wait);
437
if (src_info->hvpipe_status)
438
return POLLIN | POLLRDNORM;
439
440
return 0;
441
}
442
443
/*
 * Close a per-source handle FD: unlink the source from the global
 * list and, if a payload is still pending for it, read and discard
 * that payload so the single per-partition pipe is not left blocked.
 */
static int papr_hvpipe_handle_release(struct inode *inode,
		struct file *file)
{
	struct hvpipe_source_info *src_info;

	/*
	 * Hold the lock, remove source from src_list, reset the
	 * hvpipe status and release the lock to prevent any race
	 * with message event IRQ.
	 */
	spin_lock(&hvpipe_src_list_lock);
	src_info = file->private_data;
	list_del(&src_info->list);
	file->private_data = NULL;
	/*
	 * If the pipe for this specific source has any pending
	 * payload, issue recv HVPIPE RTAS so that pipe will not
	 * be blocked.
	 */
	if (src_info->hvpipe_status & HVPIPE_MSG_AVAILABLE) {
		src_info->hvpipe_status = 0;
		spin_unlock(&hvpipe_src_list_lock);
		/* Drain outside the spinlock: the RTAS call may busy-delay. */
		hvpipe_rtas_recv_msg(NULL, 0);
	} else
		spin_unlock(&hvpipe_src_list_lock);

	kfree(src_info);
	return 0;
}
472
473
/* fops for the per-source anon-inode FDs handed out by the ioctl. */
static const struct file_operations papr_hvpipe_handle_ops = {
	.read = papr_hvpipe_handle_read,
	.write = papr_hvpipe_handle_write,
	.release = papr_hvpipe_handle_release,
	.poll = papr_hvpipe_handle_poll,
};
479
480
/*
 * Create a per-source handle: allocate the source bookkeeping,
 * reserve an fd, back it with an anon inode using
 * papr_hvpipe_handle_ops, and publish the source on the global
 * list. Only one process may hold a handle per source ID.
 * Returns the new fd or a negative errno.
 */
static int papr_hvpipe_dev_create_handle(u32 srcID)
{
	struct hvpipe_source_info *src_info;
	struct file *file;
	long err;
	int fd;

	spin_lock(&hvpipe_src_list_lock);
	/*
	 * Do not allow more than one process communicates with
	 * each source.
	 */
	src_info = hvpipe_find_source(srcID);
	if (src_info) {
		spin_unlock(&hvpipe_src_list_lock);
		pr_err("pid(%d) is already using the source(%d)\n",
				src_info->tsk->pid, srcID);
		return -EALREADY;
	}
	spin_unlock(&hvpipe_src_list_lock);

	src_info = kzalloc(sizeof(*src_info), GFP_KERNEL_ACCOUNT);
	if (!src_info)
		return -ENOMEM;

	src_info->srcID = srcID;
	src_info->tsk = current;
	init_waitqueue_head(&src_info->recv_wqh);

	fd = get_unused_fd_flags(O_RDONLY | O_CLOEXEC);
	if (fd < 0) {
		err = fd;
		goto free_buf;
	}

	file = anon_inode_getfile("[papr-hvpipe]",
			&papr_hvpipe_handle_ops, (void *)src_info,
			O_RDWR);
	if (IS_ERR(file)) {
		err = PTR_ERR(file);
		goto free_fd;
	}

	spin_lock(&hvpipe_src_list_lock);
	/*
	 * If two processes are executing ioctl() for the same
	 * source ID concurrently, prevent the second process to
	 * acquire FD.
	 */
	if (hvpipe_find_source(srcID)) {
		spin_unlock(&hvpipe_src_list_lock);
		err = -EALREADY;
		goto free_file;
	}
	/* Publish the source; the event IRQ handler can now find it. */
	list_add(&src_info->list, &hvpipe_src_list);
	spin_unlock(&hvpipe_src_list_lock);

	/* fd_install() is the point of no return: the FD is now live. */
	fd_install(fd, file);
	return fd;

free_file:
	/* fput() releases src_info via ->release(), so don't kfree it here. */
	fput(file);
free_fd:
	put_unused_fd(fd);
free_buf:
	kfree(src_info);
	return err;
}
548
549
/*
550
* Top-level ioctl handler for /dev/papr_hvpipe
551
*
552
* Use separate FD for each source (exa :HMC). So ioctl is called
553
* with source ID which returns FD.
554
*/
555
static long papr_hvpipe_dev_ioctl(struct file *filp, unsigned int ioctl,
556
unsigned long arg)
557
{
558
u32 __user *argp = (void __user *)arg;
559
u32 srcID;
560
long ret;
561
562
/*
563
* Return -ENXIO during migration
564
*/
565
if (!hvpipe_feature)
566
return -ENXIO;
567
568
if (get_user(srcID, argp))
569
return -EFAULT;
570
571
/*
572
* Support only HMC source right now
573
*/
574
if (!(srcID & HVPIPE_HMC_ID_MASK))
575
return -EINVAL;
576
577
switch (ioctl) {
578
case PAPR_HVPIPE_IOC_CREATE_HANDLE:
579
ret = papr_hvpipe_dev_create_handle(srcID);
580
break;
581
default:
582
ret = -ENOIOCTLCMD;
583
break;
584
}
585
586
return ret;
587
}
588
589
/*
 * papr_hvpipe_work_fn - called to issue recv HVPIPE RTAS for
 * sources that are not monitored by user space so that pipe
 * will not be blocked.
 */
static void papr_hvpipe_work_fn(struct work_struct *work)
{
	/* NULL buffer: read and discard the pending payload. */
	hvpipe_rtas_recv_msg(NULL, 0);
}
598
599
/*
 * HVPIPE event message IRQ handler.
 * The hypervisor sends event IRQ if the partition has payload
 * and generates another event only after payload is read with
 * recv HVPIPE RTAS.
 */
static irqreturn_t hvpipe_event_interrupt(int irq, void *dev_id)
{
	struct hvpipe_event_buf *hvpipe_event;
	struct pseries_errorlog *pseries_log;
	struct hvpipe_source_info *src_info;
	struct rtas_error_log *elog;
	int rc;

	/* Pull the event error log into hvpipe_ras_buf via check-exception. */
	rc = rtas_call(hvpipe_check_exception_token, 6, 1, NULL,
			RTAS_VECTOR_EXTERNAL_INTERRUPT, virq_to_hw(irq),
			RTAS_HVPIPE_MSG_EVENTS, 1, __pa(&hvpipe_ras_buf),
			rtas_get_error_log_max());

	if (rc != 0) {
		pr_err_ratelimited("unexpected hvpipe-event-notification failed %d\n", rc);
		return IRQ_HANDLED;
	}

	elog = (struct rtas_error_log *)hvpipe_ras_buf;
	if (unlikely(rtas_error_type(elog) != RTAS_TYPE_HVPIPE)) {
		pr_warn_ratelimited("Unexpected event type %d\n",
				rtas_error_type(elog));
		return IRQ_HANDLED;
	}

	pseries_log = get_pseries_errorlog(elog,
			PSERIES_ELOG_SECT_ID_HVPIPE_EVENT);
	/*
	 * NOTE(review): get_pseries_errorlog() can return NULL when the
	 * section is absent - confirm the hypervisor always includes the
	 * HVPIPE event section before relying on this dereference.
	 */
	hvpipe_event = (struct hvpipe_event_buf *)pseries_log->data;

	/*
	 * The hypervisor notifies partition when the payload is
	 * available to read with recv HVPIPE RTAS and it will not
	 * notify another event for any source until the previous
	 * payload is read. Means the pipe is blocked in the
	 * hypervisor until the payload is read.
	 *
	 * If the source is ready to accept payload and wakeup the
	 * corresponding FD. Hold lock and update hvpipe_status
	 * and this lock is needed in case the user space process
	 * is in release FD instead of poll() so that release()
	 * reads the payload to unblock pipe before closing FD.
	 *
	 * otherwise (means no other user process waiting for the
	 * payload, issue recv HVPIPE RTAS (papr_hvpipe_work_fn())
	 * to unblock pipe.
	 */
	spin_lock(&hvpipe_src_list_lock);
	src_info = hvpipe_find_source(be32_to_cpu(hvpipe_event->srcID));
	if (src_info) {
		u32 flags = 0;

		/* Lost connection takes precedence over message available. */
		if (hvpipe_event->event_type & HVPIPE_LOST_CONNECTION)
			flags = HVPIPE_LOST_CONNECTION;
		else if (hvpipe_event->event_type & HVPIPE_MSG_AVAILABLE)
			flags = HVPIPE_MSG_AVAILABLE;

		src_info->hvpipe_status |= flags;
		/* Wake any reader sleeping in poll() on this source. */
		wake_up(&src_info->recv_wqh);
		spin_unlock(&hvpipe_src_list_lock);
	} else {
		spin_unlock(&hvpipe_src_list_lock);
		/*
		 * user space is not waiting on this source. So
		 * execute receive pipe RTAS so that pipe will not
		 * be blocked.
		 */
		if (hvpipe_event->event_type & HVPIPE_MSG_AVAILABLE)
			queue_work(papr_hvpipe_wq, papr_hvpipe_work);
	}

	return IRQ_HANDLED;
}
677
678
/*
679
* Enable hvpipe by system parameter set with parameter
680
* token = 64 and with 1 byte buffer data:
681
* 0 = hvpipe not in use/disable
682
* 1 = hvpipe in use/enable
683
*/
684
static int set_hvpipe_sys_param(u8 val)
685
{
686
struct papr_sysparm_buf *buf;
687
int ret;
688
689
buf = papr_sysparm_buf_alloc();
690
if (!buf)
691
return -ENOMEM;
692
693
buf->len = cpu_to_be16(1);
694
buf->val[0] = val;
695
ret = papr_sysparm_set(PAPR_SYSPARM_HVPIPE_ENABLE, buf);
696
if (ret)
697
pr_err("Can not enable hvpipe %d\n", ret);
698
699
papr_sysparm_buf_free(buf);
700
701
return ret;
702
}
703
704
/*
 * Look up the check-exception RTAS token and wire the
 * ibm,hvpipe-msg-events event source to hvpipe_event_interrupt().
 * Returns 0 on success, -ENODEV when firmware support is missing.
 */
static int __init enable_hvpipe_IRQ(void)
{
	struct device_node *np;

	hvpipe_check_exception_token = rtas_function_token(RTAS_FN_CHECK_EXCEPTION);
	if (hvpipe_check_exception_token == RTAS_UNKNOWN_SERVICE)
		return -ENODEV;

	/* hvpipe events */
	np = of_find_node_by_path("/event-sources/ibm,hvpipe-msg-events");
	if (!np) {
		pr_err("Can not enable hvpipe event IRQ\n");
		return -ENODEV;
	}

	request_event_sources_irqs(np, hvpipe_event_interrupt,
			"HPIPE_EVENT");
	of_node_put(np);

	return 0;
}
725
726
void hvpipe_migration_handler(int action)
727
{
728
pr_info("hvpipe migration event %d\n", action);
729
730
/*
731
* HVPIPE is not used (Failed to create /dev/papr-hvpipe).
732
* So nothing to do for migration.
733
*/
734
if (!papr_hvpipe_work)
735
return;
736
737
switch (action) {
738
case HVPIPE_SUSPEND:
739
if (hvpipe_feature) {
740
/*
741
* Disable hvpipe_feature to the user space.
742
* It will be enabled with RESUME event.
743
*/
744
hvpipe_feature = false;
745
/*
746
* set system parameter hvpipe 'disable'
747
*/
748
set_hvpipe_sys_param(0);
749
}
750
break;
751
case HVPIPE_RESUME:
752
/*
753
* set system parameter hvpipe 'enable'
754
*/
755
if (!set_hvpipe_sys_param(1))
756
hvpipe_feature = true;
757
else
758
pr_err("hvpipe is not enabled after migration\n");
759
760
break;
761
}
762
}
763
764
/* fops for the top-level /dev/papr-hvpipe device: ioctl only. */
static const struct file_operations papr_hvpipe_ops = {
	.unlocked_ioctl = papr_hvpipe_dev_ioctl,
};

/* Dynamic-minor misc device exposing /dev/papr-hvpipe. */
static struct miscdevice papr_hvpipe_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "papr-hvpipe",
	.fops = &papr_hvpipe_ops,
};
773
774
/*
 * Module init: verify firmware support, set up the work machinery
 * used to drain unclaimed payloads, enable the event IRQ, turn the
 * hvpipe system parameter on and register /dev/papr-hvpipe.
 */
static int __init papr_hvpipe_init(void)
{
	int ret;

	if (!of_find_property(rtas.dev, "ibm,hypervisor-pipe-capable",
		NULL))
		return -ENODEV;

	if (!rtas_function_implemented(RTAS_FN_IBM_SEND_HVPIPE_MSG) ||
		!rtas_function_implemented(RTAS_FN_IBM_RECEIVE_HVPIPE_MSG))
		return -ENODEV;

	/*
	 * Initcall context may sleep; there is no need to dip into
	 * atomic reserves, so use GFP_KERNEL (was GFP_ATOMIC).
	 */
	papr_hvpipe_work = kzalloc(sizeof(struct work_struct), GFP_KERNEL);
	if (!papr_hvpipe_work)
		return -ENOMEM;

	INIT_WORK(papr_hvpipe_work, papr_hvpipe_work_fn);

	/* Ordered: at most one drain runs at a time. */
	papr_hvpipe_wq = alloc_ordered_workqueue("papr hvpipe workqueue", 0);
	if (!papr_hvpipe_wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = enable_hvpipe_IRQ();
	if (!ret) {
		ret = set_hvpipe_sys_param(1);
		if (!ret)
			ret = misc_register(&papr_hvpipe_dev);
	}

	if (!ret) {
		pr_info("hvpipe feature is enabled\n");
		hvpipe_feature = true;
		return 0;
	}

	pr_err("hvpipe feature is not enabled %d\n", ret);
	destroy_workqueue(papr_hvpipe_wq);
out:
	kfree(papr_hvpipe_work);
	/* NULL signals hvpipe_migration_handler() that hvpipe is unused. */
	papr_hvpipe_work = NULL;
	return ret;
}
machine_device_initcall(pseries, papr_hvpipe_init);
819
820