1
// SPDX-License-Identifier: GPL-2.0-or-later
2
/*
3
* Network block device - make block devices work over TCP
4
*
5
* Note that you can not swap over this thing, yet. Seems to work but
6
* deadlocks sometimes - you can not swap over TCP in general.
7
*
8
* Copyright 1997-2000, 2008 Pavel Machek <[email protected]>
9
* Parts copyright 2001 Steven Whitehouse <[email protected]>
10
*
11
* (part of code stolen from loop.c)
12
*/
13
14
#define pr_fmt(fmt) "nbd: " fmt
15
16
#include <linux/major.h>
17
18
#include <linux/blkdev.h>
19
#include <linux/module.h>
20
#include <linux/init.h>
21
#include <linux/sched.h>
22
#include <linux/sched/mm.h>
23
#include <linux/fs.h>
24
#include <linux/bio.h>
25
#include <linux/stat.h>
26
#include <linux/errno.h>
27
#include <linux/file.h>
28
#include <linux/ioctl.h>
29
#include <linux/mutex.h>
30
#include <linux/compiler.h>
31
#include <linux/completion.h>
32
#include <linux/err.h>
33
#include <linux/kernel.h>
34
#include <linux/slab.h>
35
#include <net/sock.h>
36
#include <linux/net.h>
37
#include <linux/kthread.h>
38
#include <linux/types.h>
39
#include <linux/debugfs.h>
40
#include <linux/blk-mq.h>
41
42
#include <linux/uaccess.h>
43
#include <asm/types.h>
44
45
#include <linux/nbd.h>
46
#include <linux/nbd-netlink.h>
47
#include <net/genetlink.h>
48
49
#define CREATE_TRACE_POINTS
50
#include <trace/events/nbd.h>
51
52
static DEFINE_IDR(nbd_index_idr);
53
static DEFINE_MUTEX(nbd_index_mutex);
54
static struct workqueue_struct *nbd_del_wq;
55
static int nbd_total_devices = 0;
56
57
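/*
* Per-connection state: the socket itself, a tx_lock serializing sends,
* bookkeeping for a partially sent request (pending/sent), a dead flag,
* a fallback index used when this connection dies, a cookie bumped on
* every reconnect, and a work item that finishes partial sends.
*/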
struct nbd_sock {
58
struct socket *sock;
59
struct mutex tx_lock;
60
struct request *pending;
61
int sent;
62
bool dead;
63
int fallback_index;
64
int cookie;
65
struct work_struct work;
66
};
67
68
struct recv_thread_args {
69
struct work_struct work;
70
struct nbd_device *nbd;
71
struct nbd_sock *nsock;
72
int index;
73
};
74
75
struct link_dead_args {
76
struct work_struct work;
77
int index;
78
};
79
80
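/* Runtime flags, stored in nbd_config->runtime_flags. */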
#define NBD_RT_TIMEDOUT 0
81
#define NBD_RT_DISCONNECT_REQUESTED 1
82
#define NBD_RT_DISCONNECTED 2
83
#define NBD_RT_HAS_PID_FILE 3
84
#define NBD_RT_HAS_CONFIG_REF 4
85
#define NBD_RT_BOUND 5
86
#define NBD_RT_DISCONNECT_ON_CLOSE 6
87
#define NBD_RT_HAS_BACKEND_FILE 7
88
89
#define NBD_DESTROY_ON_DISCONNECT 0
90
#define NBD_DISCONNECT_REQUESTED 1
91
92
struct nbd_config {
93
u32 flags;
94
unsigned long runtime_flags;
95
u64 dead_conn_timeout;
96
97
struct nbd_sock **socks;
98
int num_connections;
99
atomic_t live_connections;
100
wait_queue_head_t conn_wait;
101
102
atomic_t recv_threads;
103
wait_queue_head_t recv_wq;
104
unsigned int blksize_bits;
105
loff_t bytesize;
106
#if IS_ENABLED(CONFIG_DEBUG_FS)
107
struct dentry *dbg_dir;
108
#endif
109
};
110
111
static inline unsigned int nbd_blksize(struct nbd_config *config)
112
{
113
return 1u << config->blksize_bits;
114
}
115
116
struct nbd_device {
117
struct blk_mq_tag_set tag_set;
118
119
int index;
120
refcount_t config_refs;
121
refcount_t refs;
122
struct nbd_config *config;
123
struct mutex config_lock;
124
struct gendisk *disk;
125
struct workqueue_struct *recv_workq;
126
struct work_struct remove_work;
127
128
struct list_head list;
129
struct task_struct *task_setup;
130
131
unsigned long flags;
132
pid_t pid; /* pid of nbd-client, if attached */
133
134
char *backend;
135
};
136
137
#define NBD_CMD_REQUEUED 1
138
/*
139
* This flag will be set if nbd_queue_rq() succeeds, and will be checked and
140
* cleared in completion. Both setting and clearing of the flag are protected
141
* by cmd->lock.
142
*/
143
#define NBD_CMD_INFLIGHT 2
144
145
/* Just part of request header or data payload is sent successfully */
146
#define NBD_CMD_PARTIAL_SEND 3
147
148
struct nbd_cmd {
149
struct nbd_device *nbd;
150
struct mutex lock;
151
int index;
152
int cookie;
153
int retries;
154
blk_status_t status;
155
unsigned long flags;
156
u32 cmd_cookie;
157
};
158
159
#if IS_ENABLED(CONFIG_DEBUG_FS)
160
static struct dentry *nbd_dbg_dir;
161
#endif
162
163
#define nbd_name(nbd) ((nbd)->disk->disk_name)
164
165
#define NBD_DEF_BLKSIZE_BITS 10
166
167
static unsigned int nbds_max = 16;
168
static int max_part = 16;
169
static int part_shift;
170
171
static int nbd_dev_dbg_init(struct nbd_device *nbd);
172
static void nbd_dev_dbg_close(struct nbd_device *nbd);
173
static void nbd_config_put(struct nbd_device *nbd);
174
static void nbd_connect_reply(struct genl_info *info, int index);
175
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
176
static void nbd_dead_link_work(struct work_struct *work);
177
static void nbd_disconnect_and_put(struct nbd_device *nbd);
178
179
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
180
{
181
return disk_to_dev(nbd->disk);
182
}
183
184
static void nbd_requeue_cmd(struct nbd_cmd *cmd)
185
{
186
struct request *req = blk_mq_rq_from_pdu(cmd);
187
188
lockdep_assert_held(&cmd->lock);
189
190
/*
191
* Clear INFLIGHT flag so that this cmd won't be completed in
192
* normal completion path
193
*
194
* INFLIGHT flag will be set when the cmd is queued to nbd next
195
* time.
196
*/
197
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
198
199
if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
200
blk_mq_requeue_request(req, true);
201
}
202
203
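/*
* The 64-bit handle sent on the wire encodes the per-command cookie in the
* upper 32 bits and the blk-mq unique tag in the lower 32 bits, so a reply
* can be matched back to its request and stale replies can be detected.
*/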
#define NBD_COOKIE_BITS 32
204
205
static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
206
{
207
struct request *req = blk_mq_rq_from_pdu(cmd);
208
u32 tag = blk_mq_unique_tag(req);
209
u64 cookie = cmd->cmd_cookie;
210
211
return (cookie << NBD_COOKIE_BITS) | tag;
212
}
213
214
static u32 nbd_handle_to_tag(u64 handle)
215
{
216
return (u32)handle;
217
}
218
219
static u32 nbd_handle_to_cookie(u64 handle)
220
{
221
return (u32)(handle >> NBD_COOKIE_BITS);
222
}
223
224
static const char *nbdcmd_to_ascii(int cmd)
225
{
226
switch (cmd) {
227
case NBD_CMD_READ: return "read";
228
case NBD_CMD_WRITE: return "write";
229
case NBD_CMD_DISC: return "disconnect";
230
case NBD_CMD_FLUSH: return "flush";
231
case NBD_CMD_TRIM: return "trim/discard";
232
}
233
return "invalid";
234
}
235
236
static ssize_t pid_show(struct device *dev,
237
struct device_attribute *attr, char *buf)
238
{
239
struct gendisk *disk = dev_to_disk(dev);
240
struct nbd_device *nbd = disk->private_data;
241
242
return sprintf(buf, "%d\n", nbd->pid);
243
}
244
245
static const struct device_attribute pid_attr = {
246
.attr = { .name = "pid", .mode = 0444},
247
.show = pid_show,
248
};
249
250
static ssize_t backend_show(struct device *dev,
251
struct device_attribute *attr, char *buf)
252
{
253
struct gendisk *disk = dev_to_disk(dev);
254
struct nbd_device *nbd = disk->private_data;
255
256
return sprintf(buf, "%s\n", nbd->backend ?: "");
257
}
258
259
static const struct device_attribute backend_attr = {
260
.attr = { .name = "backend", .mode = 0444},
261
.show = backend_show,
262
};
263
264
static void nbd_dev_remove(struct nbd_device *nbd)
265
{
266
struct gendisk *disk = nbd->disk;
267
268
del_gendisk(disk);
269
blk_mq_free_tag_set(&nbd->tag_set);
270
271
/*
272
* Remove from idr after del_gendisk() completes, so if the same ID is
273
* reused, the following add_disk() will succeed.
274
*/
275
mutex_lock(&nbd_index_mutex);
276
idr_remove(&nbd_index_idr, nbd->index);
277
mutex_unlock(&nbd_index_mutex);
278
destroy_workqueue(nbd->recv_workq);
279
put_disk(disk);
280
}
281
282
static void nbd_dev_remove_work(struct work_struct *work)
283
{
284
nbd_dev_remove(container_of(work, struct nbd_device, remove_work));
285
}
286
287
static void nbd_put(struct nbd_device *nbd)
288
{
289
if (!refcount_dec_and_test(&nbd->refs))
290
return;
291
292
/* Call del_gendisk() asynchronously to prevent deadlock */
293
if (test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
294
queue_work(nbd_del_wq, &nbd->remove_work);
295
else
296
nbd_dev_remove(nbd);
297
}
298
299
static int nbd_disconnected(struct nbd_config *config)
300
{
301
return test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags) ||
302
test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
303
}
304
305
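/*
* Mark a connection dead: optionally queue the dead-link work, shut the
* socket down, and if this was the last live connection note whether a
* user-requested disconnect has completed. Any partially sent request
* state on this socket is discarded.
*/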
static void nbd_mark_nsock_dead(struct nbd_device *nbd, struct nbd_sock *nsock,
306
int notify)
307
{
308
if (!nsock->dead && notify && !nbd_disconnected(nbd->config)) {
309
struct link_dead_args *args;
310
args = kmalloc(sizeof(struct link_dead_args), GFP_NOIO);
311
if (args) {
312
INIT_WORK(&args->work, nbd_dead_link_work);
313
args->index = nbd->index;
314
queue_work(system_percpu_wq, &args->work);
315
}
316
}
317
if (!nsock->dead) {
318
kernel_sock_shutdown(nsock->sock, SHUT_RDWR);
319
if (atomic_dec_return(&nbd->config->live_connections) == 0) {
320
if (test_and_clear_bit(NBD_RT_DISCONNECT_REQUESTED,
321
&nbd->config->runtime_flags)) {
322
set_bit(NBD_RT_DISCONNECTED,
323
&nbd->config->runtime_flags);
324
dev_info(nbd_to_dev(nbd),
325
"Disconnected due to user request.\n");
326
}
327
}
328
}
329
nsock->dead = true;
330
nsock->pending = NULL;
331
nsock->sent = 0;
332
}
333
334
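/*
* Apply a new size and block size: validate the block size, record both in
* the config and, if a client is attached, update the queue limits
* (discard, flush/FUA, write-zeroes, rotational) and the disk capacity.
*/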
static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize, loff_t blksize)
335
{
336
struct queue_limits lim;
337
int error;
338
339
if (!blksize)
340
blksize = 1u << NBD_DEF_BLKSIZE_BITS;
341
342
if (blk_validate_block_size(blksize))
343
return -EINVAL;
344
345
if (bytesize < 0)
346
return -EINVAL;
347
348
nbd->config->bytesize = bytesize;
349
nbd->config->blksize_bits = __ffs(blksize);
350
351
if (!nbd->pid)
352
return 0;
353
354
lim = queue_limits_start_update(nbd->disk->queue);
355
if (nbd->config->flags & NBD_FLAG_SEND_TRIM)
356
lim.max_hw_discard_sectors = UINT_MAX >> SECTOR_SHIFT;
357
else
358
lim.max_hw_discard_sectors = 0;
359
if (!(nbd->config->flags & NBD_FLAG_SEND_FLUSH)) {
360
lim.features &= ~(BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA);
361
} else if (nbd->config->flags & NBD_FLAG_SEND_FUA) {
362
lim.features |= BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA;
363
} else {
364
lim.features |= BLK_FEAT_WRITE_CACHE;
365
lim.features &= ~BLK_FEAT_FUA;
366
}
367
if (nbd->config->flags & NBD_FLAG_ROTATIONAL)
368
lim.features |= BLK_FEAT_ROTATIONAL;
369
if (nbd->config->flags & NBD_FLAG_SEND_WRITE_ZEROES)
370
lim.max_write_zeroes_sectors = UINT_MAX >> SECTOR_SHIFT;
371
372
lim.logical_block_size = blksize;
373
lim.physical_block_size = blksize;
374
error = queue_limits_commit_update_frozen(nbd->disk->queue, &lim);
375
if (error)
376
return error;
377
378
if (max_part)
379
set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
380
if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
381
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
382
return 0;
383
}
384
385
static void nbd_complete_rq(struct request *req)
386
{
387
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
388
389
dev_dbg(nbd_to_dev(cmd->nbd), "request %p: %s\n", req,
390
cmd->status ? "failed" : "done");
391
392
blk_mq_end_request(req, cmd->status);
393
}
394
395
/*
396
* Forcibly shutdown the socket causing all listeners to error
397
*/
398
static void sock_shutdown(struct nbd_device *nbd)
399
{
400
struct nbd_config *config = nbd->config;
401
int i;
402
403
if (config->num_connections == 0)
404
return;
405
if (test_and_set_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
406
return;
407
408
for (i = 0; i < config->num_connections; i++) {
409
struct nbd_sock *nsock = config->socks[i];
410
mutex_lock(&nsock->tx_lock);
411
nbd_mark_nsock_dead(nbd, nsock, 0);
412
mutex_unlock(&nsock->tx_lock);
413
}
414
dev_warn(disk_to_dev(nbd->disk), "shutting down sockets\n");
415
}
416
417
static u32 req_to_nbd_cmd_type(struct request *req)
418
{
419
switch (req_op(req)) {
420
case REQ_OP_DISCARD:
421
return NBD_CMD_TRIM;
422
case REQ_OP_FLUSH:
423
return NBD_CMD_FLUSH;
424
case REQ_OP_WRITE:
425
return NBD_CMD_WRITE;
426
case REQ_OP_READ:
427
return NBD_CMD_READ;
428
case REQ_OP_WRITE_ZEROES:
429
return NBD_CMD_WRITE_ZEROES;
430
default:
431
return U32_MAX;
432
}
433
}
434
435
static struct nbd_config *nbd_get_config_unlocked(struct nbd_device *nbd)
436
{
437
if (refcount_inc_not_zero(&nbd->config_refs)) {
438
/*
439
* Add smp_mb__after_atomic to ensure that reading nbd->config_refs
440
* and reading nbd->config is ordered. The pair is the barrier in
441
* nbd_alloc_and_init_config(), so that nbd->config_refs cannot be observed as set
442
* before nbd->config.
443
*/
444
smp_mb__after_atomic();
445
return nbd->config;
446
}
447
448
return NULL;
449
}
450
451
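/*
* blk-mq timeout handler: retry on another connection when possible, reset
* the timer for partial sends or when userspace disabled the timeout, and
* otherwise shut the device down and fail the request.
*/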
static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req)
452
{
453
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
454
struct nbd_device *nbd = cmd->nbd;
455
struct nbd_config *config;
456
457
if (!mutex_trylock(&cmd->lock))
458
return BLK_EH_RESET_TIMER;
459
460
/* partial send is handled in nbd_sock's work function */
461
if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)) {
462
mutex_unlock(&cmd->lock);
463
return BLK_EH_RESET_TIMER;
464
}
465
466
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
467
mutex_unlock(&cmd->lock);
468
return BLK_EH_DONE;
469
}
470
471
config = nbd_get_config_unlocked(nbd);
472
if (!config) {
473
cmd->status = BLK_STS_TIMEOUT;
474
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
475
mutex_unlock(&cmd->lock);
476
goto done;
477
}
478
479
if (config->num_connections > 1 ||
480
(config->num_connections == 1 && nbd->tag_set.timeout)) {
481
dev_err_ratelimited(nbd_to_dev(nbd),
482
"Connection timed out, retrying (%d/%d alive)\n",
483
atomic_read(&config->live_connections),
484
config->num_connections);
485
/*
486
* Hooray we have more connections, requeue this IO, the submit
487
* path will put it on a real connection. Or if only one
488
* connection is configured, the submit path will wait until
489
* a new connection is reconfigured or until the dead connection timeout expires.
490
*/
491
if (config->socks) {
492
if (cmd->index < config->num_connections) {
493
struct nbd_sock *nsock =
494
config->socks[cmd->index];
495
mutex_lock(&nsock->tx_lock);
496
/* We can have multiple outstanding requests, so
497
* we don't want to mark the nsock dead if we've
498
* already reconnected with a new socket, so
499
* only mark it dead if it's the same socket we
500
* were sent out on.
501
*/
502
if (cmd->cookie == nsock->cookie)
503
nbd_mark_nsock_dead(nbd, nsock, 1);
504
mutex_unlock(&nsock->tx_lock);
505
}
506
nbd_requeue_cmd(cmd);
507
mutex_unlock(&cmd->lock);
508
nbd_config_put(nbd);
509
return BLK_EH_DONE;
510
}
511
}
512
513
if (!nbd->tag_set.timeout) {
514
/*
515
* Userspace sets timeout=0 to disable socket disconnection,
516
* so just warn and reset the timer.
517
*/
518
struct nbd_sock *nsock = config->socks[cmd->index];
519
cmd->retries++;
520
dev_info(nbd_to_dev(nbd), "Possible stuck request %p: control (%s@%llu,%uB). Runtime %u seconds\n",
521
req, nbdcmd_to_ascii(req_to_nbd_cmd_type(req)),
522
(unsigned long long)blk_rq_pos(req) << 9,
523
blk_rq_bytes(req), (req->timeout / HZ) * cmd->retries);
524
525
mutex_lock(&nsock->tx_lock);
526
if (cmd->cookie != nsock->cookie) {
527
nbd_requeue_cmd(cmd);
528
mutex_unlock(&nsock->tx_lock);
529
mutex_unlock(&cmd->lock);
530
nbd_config_put(nbd);
531
return BLK_EH_DONE;
532
}
533
mutex_unlock(&nsock->tx_lock);
534
mutex_unlock(&cmd->lock);
535
nbd_config_put(nbd);
536
return BLK_EH_RESET_TIMER;
537
}
538
539
dev_err_ratelimited(nbd_to_dev(nbd), "Connection timed out\n");
540
set_bit(NBD_RT_TIMEDOUT, &config->runtime_flags);
541
cmd->status = BLK_STS_IOERR;
542
__clear_bit(NBD_CMD_INFLIGHT, &cmd->flags);
543
mutex_unlock(&cmd->lock);
544
sock_shutdown(nbd);
545
nbd_config_put(nbd);
546
done:
547
blk_mq_complete_request(req);
548
return BLK_EH_DONE;
549
}
550
551
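/*
* Core send/receive loop over one socket. Memory reclaim is disabled
* around the transfer and MSG_NOSIGNAL is set so a dead peer produces
* -EPIPE instead of SIGPIPE; loops until the iterator is drained or an
* error occurs.
*/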
static int __sock_xmit(struct nbd_device *nbd, struct socket *sock, int send,
552
struct iov_iter *iter, int msg_flags, int *sent)
553
{
554
int result;
555
struct msghdr msg = {};
556
unsigned int noreclaim_flag;
557
558
if (unlikely(!sock)) {
559
dev_err_ratelimited(disk_to_dev(nbd->disk),
560
"Attempted %s on closed socket in sock_xmit\n",
561
(send ? "send" : "recv"));
562
return -EINVAL;
563
}
564
565
msg.msg_iter = *iter;
566
567
noreclaim_flag = memalloc_noreclaim_save();
568
do {
569
sock->sk->sk_allocation = GFP_NOIO | __GFP_MEMALLOC;
570
sock->sk->sk_use_task_frag = false;
571
msg.msg_flags = msg_flags | MSG_NOSIGNAL;
572
573
if (send)
574
result = sock_sendmsg(sock, &msg);
575
else
576
result = sock_recvmsg(sock, &msg, msg.msg_flags);
577
578
if (result <= 0) {
579
if (result == 0)
580
result = -EPIPE; /* short read */
581
break;
582
}
583
if (sent)
584
*sent += result;
585
} while (msg_data_left(&msg));
586
587
memalloc_noreclaim_restore(noreclaim_flag);
588
589
return result;
590
}
591
592
/*
593
* Send or receive packet. Return a positive value on success and
594
* negative value on failure, and never return 0.
595
*/
596
static int sock_xmit(struct nbd_device *nbd, int index, int send,
597
struct iov_iter *iter, int msg_flags, int *sent)
598
{
599
struct nbd_config *config = nbd->config;
600
struct socket *sock = config->socks[index]->sock;
601
602
return __sock_xmit(nbd, sock, send, iter, msg_flags, sent);
603
}
604
605
/*
606
* Different settings for sk->sk_sndtimeo can result in different return values
607
* if there is a signal pending when we enter sendmsg, because reasons?
608
*/
609
static inline int was_interrupted(int result)
610
{
611
return result == -ERESTARTSYS || result == -EINTR;
612
}
613
614
/*
615
* We've already sent header or part of data payload, have no choice but
616
* to set pending and schedule it in work.
617
*
618
* And we have to return BLK_STS_OK to block core, otherwise this same
619
* request may be re-dispatched with a different tag, but our header has
620
* already been sent out with the old tag, which would confuse reply handling.
621
*/
622
static void nbd_sched_pending_work(struct nbd_device *nbd,
623
struct nbd_sock *nsock,
624
struct nbd_cmd *cmd, int sent)
625
{
626
struct request *req = blk_mq_rq_from_pdu(cmd);
627
628
/* pending work should be scheduled only once */
629
WARN_ON_ONCE(test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags));
630
631
nsock->pending = req;
632
nsock->sent = sent;
633
set_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
634
refcount_inc(&nbd->config_refs);
635
schedule_work(&nsock->work);
636
}
637
638
/*
639
* Returns BLK_STS_RESOURCE if the caller should retry after a delay.
640
* Returns BLK_STS_IOERR if sending failed.
641
*/
642
static blk_status_t nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd,
643
int index)
644
{
645
struct request *req = blk_mq_rq_from_pdu(cmd);
646
struct nbd_config *config = nbd->config;
647
struct nbd_sock *nsock = config->socks[index];
648
int result;
649
struct nbd_request request = {.magic = htonl(NBD_REQUEST_MAGIC)};
650
struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
651
struct iov_iter from;
652
struct bio *bio;
653
u64 handle;
654
u32 type;
655
u32 nbd_cmd_flags = 0;
656
int sent = nsock->sent, skip = 0;
657
658
lockdep_assert_held(&cmd->lock);
659
lockdep_assert_held(&nsock->tx_lock);
660
661
iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
662
663
type = req_to_nbd_cmd_type(req);
664
if (type == U32_MAX)
665
return BLK_STS_IOERR;
666
667
if (rq_data_dir(req) == WRITE &&
668
(config->flags & NBD_FLAG_READ_ONLY)) {
669
dev_err_ratelimited(disk_to_dev(nbd->disk),
670
"Write on read-only\n");
671
return BLK_STS_IOERR;
672
}
673
674
if (req->cmd_flags & REQ_FUA)
675
nbd_cmd_flags |= NBD_CMD_FLAG_FUA;
676
if ((req->cmd_flags & REQ_NOUNMAP) && (type == NBD_CMD_WRITE_ZEROES))
677
nbd_cmd_flags |= NBD_CMD_FLAG_NO_HOLE;
678
679
/* We did a partial send previously, and we at least sent the whole
680
* request struct, so just go and send the rest of the pages in the
681
* request.
682
*/
683
if (sent) {
684
if (sent >= sizeof(request)) {
685
skip = sent - sizeof(request);
686
687
/* initialize handle for tracing purposes */
688
handle = nbd_cmd_handle(cmd);
689
690
goto send_pages;
691
}
692
iov_iter_advance(&from, sent);
693
} else {
694
cmd->cmd_cookie++;
695
}
696
cmd->index = index;
697
cmd->cookie = nsock->cookie;
698
cmd->retries = 0;
699
request.type = htonl(type | nbd_cmd_flags);
700
if (type != NBD_CMD_FLUSH) {
701
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
702
request.len = htonl(blk_rq_bytes(req));
703
}
704
handle = nbd_cmd_handle(cmd);
705
request.cookie = cpu_to_be64(handle);
706
707
trace_nbd_send_request(&request, nbd->index, blk_mq_rq_from_pdu(cmd));
708
709
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
710
req, nbdcmd_to_ascii(type),
711
(unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req));
712
result = sock_xmit(nbd, index, 1, &from,
713
(type == NBD_CMD_WRITE) ? MSG_MORE : 0, &sent);
714
trace_nbd_header_sent(req, handle);
715
if (result < 0) {
716
if (was_interrupted(result)) {
717
/* If we haven't sent anything we can just return BUSY,
718
* however if we have sent something we need to make
719
* sure we only allow this req to be sent until we are
720
* completely done.
721
*/
722
if (sent) {
723
nbd_sched_pending_work(nbd, nsock, cmd, sent);
724
return BLK_STS_OK;
725
}
726
set_bit(NBD_CMD_REQUEUED, &cmd->flags);
727
return BLK_STS_RESOURCE;
728
}
729
dev_err_ratelimited(disk_to_dev(nbd->disk),
730
"Send control failed (result %d)\n", result);
731
goto requeue;
732
}
733
send_pages:
734
if (type != NBD_CMD_WRITE)
735
goto out;
736
737
bio = req->bio;
738
while (bio) {
739
struct bio *next = bio->bi_next;
740
struct bvec_iter iter;
741
struct bio_vec bvec;
742
743
bio_for_each_segment(bvec, bio, iter) {
744
bool is_last = !next && bio_iter_last(bvec, iter);
745
int flags = is_last ? 0 : MSG_MORE;
746
747
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
748
req, bvec.bv_len);
749
iov_iter_bvec(&from, ITER_SOURCE, &bvec, 1, bvec.bv_len);
750
if (skip) {
751
if (skip >= iov_iter_count(&from)) {
752
skip -= iov_iter_count(&from);
753
continue;
754
}
755
iov_iter_advance(&from, skip);
756
skip = 0;
757
}
758
result = sock_xmit(nbd, index, 1, &from, flags, &sent);
759
if (result < 0) {
760
if (was_interrupted(result)) {
761
nbd_sched_pending_work(nbd, nsock, cmd, sent);
762
return BLK_STS_OK;
763
}
764
dev_err(disk_to_dev(nbd->disk),
765
"Send data failed (result %d)\n",
766
result);
767
goto requeue;
768
}
769
/*
770
* The completion might already have come in,
771
* so break for the last one instead of letting
772
* the iterator do it. This prevents use-after-free
773
* of the bio.
774
*/
775
if (is_last)
776
break;
777
}
778
bio = next;
779
}
780
out:
781
trace_nbd_payload_sent(req, handle);
782
nsock->pending = NULL;
783
nsock->sent = 0;
784
__set_bit(NBD_CMD_INFLIGHT, &cmd->flags);
785
return BLK_STS_OK;
786
787
requeue:
788
/*
* Can't requeue in case we are dealing with a partial send.
*
* We must run from the pending work function.
*/
793
if (test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags))
794
return BLK_STS_OK;
795
796
/* retry on a different socket */
797
dev_err_ratelimited(disk_to_dev(nbd->disk),
798
"Request send failed, requeueing\n");
799
nbd_mark_nsock_dead(nbd, nsock, 1);
800
nbd_requeue_cmd(cmd);
801
return BLK_STS_OK;
802
}
803
804
/* handle partial sending */
805
static void nbd_pending_cmd_work(struct work_struct *work)
806
{
807
struct nbd_sock *nsock = container_of(work, struct nbd_sock, work);
808
struct request *req = nsock->pending;
809
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
810
struct nbd_device *nbd = cmd->nbd;
811
unsigned long deadline = READ_ONCE(req->deadline);
812
unsigned int wait_ms = 2;
813
814
mutex_lock(&cmd->lock);
815
816
WARN_ON_ONCE(test_bit(NBD_CMD_REQUEUED, &cmd->flags));
817
if (WARN_ON_ONCE(!test_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags)))
818
goto out;
819
820
mutex_lock(&nsock->tx_lock);
821
while (true) {
822
nbd_send_cmd(nbd, cmd, cmd->index);
823
if (!nsock->pending)
824
break;
825
826
/* don't bother timeout handler for partial sending */
827
if (READ_ONCE(jiffies) + msecs_to_jiffies(wait_ms) >= deadline) {
828
cmd->status = BLK_STS_IOERR;
829
blk_mq_complete_request(req);
830
break;
831
}
832
msleep(wait_ms);
833
wait_ms *= 2;
834
}
835
mutex_unlock(&nsock->tx_lock);
836
clear_bit(NBD_CMD_PARTIAL_SEND, &cmd->flags);
837
out:
838
mutex_unlock(&cmd->lock);
839
nbd_config_put(nbd);
840
}
841
842
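/* Read one reply header from the server and sanity-check its magic. */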
static int nbd_read_reply(struct nbd_device *nbd, struct socket *sock,
843
struct nbd_reply *reply)
844
{
845
struct kvec iov = {.iov_base = reply, .iov_len = sizeof(*reply)};
846
struct iov_iter to;
847
int result;
848
849
reply->magic = 0;
850
iov_iter_kvec(&to, ITER_DEST, &iov, 1, sizeof(*reply));
851
result = __sock_xmit(nbd, sock, 0, &to, MSG_WAITALL, NULL);
852
if (result < 0) {
853
if (!nbd_disconnected(nbd->config))
854
dev_err(disk_to_dev(nbd->disk),
855
"Receive control failed (result %d)\n", result);
856
return result;
857
}
858
859
if (ntohl(reply->magic) != NBD_REPLY_MAGIC) {
860
dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n",
861
(unsigned long)ntohl(reply->magic));
862
return -EPROTO;
863
}
864
865
return 0;
866
}
867
868
/* NULL returned = something went wrong, inform userspace */
869
static struct nbd_cmd *nbd_handle_reply(struct nbd_device *nbd, int index,
870
struct nbd_reply *reply)
871
{
872
int result;
873
struct nbd_cmd *cmd;
874
struct request *req = NULL;
875
u64 handle;
876
u16 hwq;
877
u32 tag;
878
int ret = 0;
879
880
handle = be64_to_cpu(reply->cookie);
881
tag = nbd_handle_to_tag(handle);
882
hwq = blk_mq_unique_tag_to_hwq(tag);
883
if (hwq < nbd->tag_set.nr_hw_queues)
884
req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
885
blk_mq_unique_tag_to_tag(tag));
886
if (!req || !blk_mq_request_started(req)) {
887
dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n",
888
tag, req);
889
return ERR_PTR(-ENOENT);
890
}
891
trace_nbd_header_received(req, handle);
892
cmd = blk_mq_rq_to_pdu(req);
893
894
mutex_lock(&cmd->lock);
895
if (!test_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
896
dev_err(disk_to_dev(nbd->disk), "Suspicious reply %d (status %u flags %lu)",
897
tag, cmd->status, cmd->flags);
898
ret = -ENOENT;
899
goto out;
900
}
901
if (cmd->index != index) {
902
dev_err(disk_to_dev(nbd->disk), "Unexpected reply %d from different sock %d (expected %d)",
903
tag, index, cmd->index);
904
ret = -ENOENT;
905
goto out;
906
}
907
if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
908
dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
909
req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
910
ret = -ENOENT;
911
goto out;
912
}
913
if (cmd->status != BLK_STS_OK) {
914
dev_err(disk_to_dev(nbd->disk), "Command already handled %p\n",
915
req);
916
ret = -ENOENT;
917
goto out;
918
}
919
if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
920
dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
921
req);
922
ret = -ENOENT;
923
goto out;
924
}
925
if (ntohl(reply->error)) {
926
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
927
ntohl(reply->error));
928
cmd->status = BLK_STS_IOERR;
929
goto out;
930
}
931
932
dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
933
if (rq_data_dir(req) != WRITE) {
934
struct req_iterator iter;
935
struct bio_vec bvec;
936
struct iov_iter to;
937
938
rq_for_each_segment(bvec, req, iter) {
939
iov_iter_bvec(&to, ITER_DEST, &bvec, 1, bvec.bv_len);
940
result = sock_xmit(nbd, index, 0, &to, MSG_WAITALL, NULL);
941
if (result < 0) {
942
dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
943
result);
944
/*
945
* If we've disconnected, we need to make sure we
946
* complete this request, otherwise error out
947
* and let the timeout stuff handle resubmitting
948
* this request onto another connection.
949
*/
950
if (nbd_disconnected(nbd->config)) {
951
cmd->status = BLK_STS_IOERR;
952
goto out;
953
}
954
ret = -EIO;
955
goto out;
956
}
957
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
958
req, bvec.bv_len);
959
}
960
}
961
out:
962
trace_nbd_payload_received(req, handle);
963
mutex_unlock(&cmd->lock);
964
return ret ? ERR_PTR(ret) : cmd;
965
}
966
967
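/*
* Per-connection receive worker: read replies and complete the matching
* requests until the socket errors out, then mark the connection dead and
* drop the config reference taken when the worker was queued.
*/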
static void recv_work(struct work_struct *work)
968
{
969
struct recv_thread_args *args = container_of(work,
970
struct recv_thread_args,
971
work);
972
struct nbd_device *nbd = args->nbd;
973
struct nbd_config *config = nbd->config;
974
struct request_queue *q = nbd->disk->queue;
975
struct nbd_sock *nsock = args->nsock;
976
struct nbd_cmd *cmd;
977
struct request *rq;
978
979
while (1) {
980
struct nbd_reply reply;
981
982
if (nbd_read_reply(nbd, nsock->sock, &reply))
983
break;
984
985
/*
986
* Grab .q_usage_counter so request pool won't go away, then no
987
* request use-after-free is possible during nbd_handle_reply().
988
* If queue is frozen, there won't be any inflight requests, we
989
* need not handle the incoming garbage message.
990
*/
991
if (!percpu_ref_tryget(&q->q_usage_counter)) {
992
dev_err(disk_to_dev(nbd->disk), "%s: no io inflight\n",
993
__func__);
994
break;
995
}
996
997
cmd = nbd_handle_reply(nbd, args->index, &reply);
998
if (IS_ERR(cmd)) {
999
percpu_ref_put(&q->q_usage_counter);
1000
break;
1001
}
1002
1003
rq = blk_mq_rq_from_pdu(cmd);
1004
if (likely(!blk_should_fake_timeout(rq->q))) {
1005
bool complete;
1006
1007
mutex_lock(&cmd->lock);
1008
complete = __test_and_clear_bit(NBD_CMD_INFLIGHT,
1009
&cmd->flags);
1010
mutex_unlock(&cmd->lock);
1011
if (complete)
1012
blk_mq_complete_request(rq);
1013
}
1014
percpu_ref_put(&q->q_usage_counter);
1015
}
1016
1017
mutex_lock(&nsock->tx_lock);
1018
nbd_mark_nsock_dead(nbd, nsock, 1);
1019
mutex_unlock(&nsock->tx_lock);
1020
1021
nbd_config_put(nbd);
1022
atomic_dec(&config->recv_threads);
1023
wake_up(&config->recv_wq);
1024
kfree(args);
1025
}
1026
1027
static bool nbd_clear_req(struct request *req, void *data)
1028
{
1029
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req);
1030
1031
/* don't abort an already-completed request */
1032
if (blk_mq_request_completed(req))
1033
return true;
1034
1035
mutex_lock(&cmd->lock);
1036
if (!__test_and_clear_bit(NBD_CMD_INFLIGHT, &cmd->flags)) {
1037
mutex_unlock(&cmd->lock);
1038
return true;
1039
}
1040
cmd->status = BLK_STS_IOERR;
1041
mutex_unlock(&cmd->lock);
1042
1043
blk_mq_complete_request(req);
1044
return true;
1045
}
1046
1047
static void nbd_clear_que(struct nbd_device *nbd)
1048
{
1049
blk_mq_quiesce_queue(nbd->disk->queue);
1050
blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL);
1051
blk_mq_unquiesce_queue(nbd->disk->queue);
1052
dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n");
1053
}
1054
1055
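/*
* Pick another live connection to resend on when the current one is dead;
* returns the new index, or -1 if no live connection exists.
*/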
static int find_fallback(struct nbd_device *nbd, int index)
1056
{
1057
struct nbd_config *config = nbd->config;
1058
int new_index = -1;
1059
struct nbd_sock *nsock = config->socks[index];
1060
int fallback = nsock->fallback_index;
1061
1062
if (test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags))
1063
return new_index;
1064
1065
if (config->num_connections <= 1) {
1066
dev_err_ratelimited(disk_to_dev(nbd->disk),
1067
"Dead connection, failed to find a fallback\n");
1068
return new_index;
1069
}
1070
1071
if (fallback >= 0 && fallback < config->num_connections &&
1072
!config->socks[fallback]->dead)
1073
return fallback;
1074
1075
if (nsock->fallback_index < 0 ||
1076
nsock->fallback_index >= config->num_connections ||
1077
config->socks[nsock->fallback_index]->dead) {
1078
int i;
1079
for (i = 0; i < config->num_connections; i++) {
1080
if (i == index)
1081
continue;
1082
if (!config->socks[i]->dead) {
1083
new_index = i;
1084
break;
1085
}
1086
}
1087
nsock->fallback_index = new_index;
1088
if (new_index < 0) {
1089
dev_err_ratelimited(disk_to_dev(nbd->disk),
1090
"Dead connection, failed to find a fallback\n");
1091
return new_index;
1092
}
1093
}
1094
new_index = nsock->fallback_index;
1095
return new_index;
1096
}
1097
1098
static int wait_for_reconnect(struct nbd_device *nbd)
1099
{
1100
struct nbd_config *config = nbd->config;
1101
if (!config->dead_conn_timeout)
1102
return 0;
1103
1104
if (!wait_event_timeout(config->conn_wait,
1105
test_bit(NBD_RT_DISCONNECTED,
1106
&config->runtime_flags) ||
1107
atomic_read(&config->live_connections) > 0,
1108
config->dead_conn_timeout))
1109
return 0;
1110
1111
return !test_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1112
}
1113
1114
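/*
* Resolve the target connection for a request (falling back or waiting for
* a reconnect when it is dead) and hand the request to nbd_send_cmd()
* under the socket's tx_lock.
*/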
static blk_status_t nbd_handle_cmd(struct nbd_cmd *cmd, int index)
1115
{
1116
struct request *req = blk_mq_rq_from_pdu(cmd);
1117
struct nbd_device *nbd = cmd->nbd;
1118
struct nbd_config *config;
1119
struct nbd_sock *nsock;
1120
blk_status_t ret;
1121
1122
lockdep_assert_held(&cmd->lock);
1123
1124
config = nbd_get_config_unlocked(nbd);
1125
if (!config) {
1126
dev_err_ratelimited(disk_to_dev(nbd->disk),
1127
"Socks array is empty\n");
1128
return BLK_STS_IOERR;
1129
}
1130
1131
if (index >= config->num_connections) {
1132
dev_err_ratelimited(disk_to_dev(nbd->disk),
1133
"Attempted send on invalid socket\n");
1134
nbd_config_put(nbd);
1135
return BLK_STS_IOERR;
1136
}
1137
cmd->status = BLK_STS_OK;
1138
again:
1139
nsock = config->socks[index];
1140
mutex_lock(&nsock->tx_lock);
1141
if (nsock->dead) {
1142
int old_index = index;
1143
index = find_fallback(nbd, index);
1144
mutex_unlock(&nsock->tx_lock);
1145
if (index < 0) {
1146
if (wait_for_reconnect(nbd)) {
1147
index = old_index;
1148
goto again;
1149
}
1150
/* All the sockets should already be down at this point,
1151
* we just want to make sure that DISCONNECTED is set so
1152
* any requests that come in that were queued waiting
1153
* for the reconnect timer don't trigger the timer again
1154
* and instead just error out.
1155
*/
1156
sock_shutdown(nbd);
1157
nbd_config_put(nbd);
1158
return BLK_STS_IOERR;
1159
}
1160
goto again;
1161
}
1162
1163
/* Handle the case that we have a pending request that was partially
1164
* transmitted that _has_ to be serviced first. We need to call requeue
1165
* here so that it gets put _after_ the request that is already on the
1166
* dispatch list.
1167
*/
1168
blk_mq_start_request(req);
1169
if (unlikely(nsock->pending && nsock->pending != req)) {
1170
nbd_requeue_cmd(cmd);
1171
ret = BLK_STS_OK;
1172
goto out;
1173
}
1174
ret = nbd_send_cmd(nbd, cmd, index);
1175
out:
1176
mutex_unlock(&nsock->tx_lock);
1177
nbd_config_put(nbd);
1178
return ret;
1179
}
1180
1181
static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
1182
const struct blk_mq_queue_data *bd)
1183
{
1184
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
1185
blk_status_t ret;
1186
1187
/*
1188
* Since we look at the bio's to send the request over the network we
1189
* need to make sure the completion work doesn't mark this request done
1190
* before we are done doing our send. This keeps us from dereferencing
1191
* freed data if we have particularly fast completions (ie we get the
1192
* completion before we exit sock_xmit on the last bvec) or in the case
1193
* that the server is misbehaving (or there was an error) before we're
1194
* done sending everything over the wire.
1195
*/
1196
mutex_lock(&cmd->lock);
1197
clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
1198
1199
/* We can be called directly from the user space process, which means we
1200
* could possibly have signals pending so our sendmsg will fail. In
1201
* this case we need to return that we are busy, otherwise error out as
1202
* appropriate.
1203
*/
1204
ret = nbd_handle_cmd(cmd, hctx->queue_num);
1205
mutex_unlock(&cmd->lock);
1206
1207
return ret;
1208
}
1209
1210
static struct socket *nbd_get_socket(struct nbd_device *nbd, unsigned long fd,
1211
int *err)
1212
{
1213
struct socket *sock;
1214
1215
*err = 0;
1216
sock = sockfd_lookup(fd, err);
1217
if (!sock)
1218
return NULL;
1219
1220
if (!sk_is_tcp(sock->sk) &&
1221
!sk_is_stream_unix(sock->sk)) {
1222
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: should be TCP or UNIX.\n");
1223
*err = -EINVAL;
1224
sockfd_put(sock);
1225
return NULL;
1226
}
1227
1228
if (sock->ops->shutdown == sock_no_shutdown) {
1229
dev_err(disk_to_dev(nbd->disk), "Unsupported socket: shutdown callout must be supported.\n");
1230
*err = -EINVAL;
1231
sockfd_put(sock);
1232
return NULL;
1233
}
1234
1235
return sock;
1236
}
1237
1238
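/*
* Take a socket fd from userspace, validate it, and append a new nbd_sock
* to the config; the queue is frozen while the ->socks array is resized so
* no request observes it mid-update.
*/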
static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
1239
bool netlink)
1240
{
1241
struct nbd_config *config = nbd->config;
1242
struct socket *sock;
1243
struct nbd_sock **socks;
1244
struct nbd_sock *nsock;
1245
unsigned int memflags;
1246
int err;
1247
1248
/* Arg will be cast to int, check it to avoid overflow */
1249
if (arg > INT_MAX)
1250
return -EINVAL;
1251
sock = nbd_get_socket(nbd, arg, &err);
1252
if (!sock)
1253
return err;
1254
1255
/*
1256
* We need to make sure we don't get any errant requests while we're
1257
* reallocating the ->socks array.
1258
*/
1259
memflags = blk_mq_freeze_queue(nbd->disk->queue);
1260
1261
if (!netlink && !nbd->task_setup &&
1262
!test_bit(NBD_RT_BOUND, &config->runtime_flags))
1263
nbd->task_setup = current;
1264
1265
if (!netlink &&
1266
(nbd->task_setup != current ||
1267
test_bit(NBD_RT_BOUND, &config->runtime_flags))) {
1268
dev_err(disk_to_dev(nbd->disk),
1269
"Device being setup by another task");
1270
err = -EBUSY;
1271
goto put_socket;
1272
}
1273
1274
nsock = kzalloc(sizeof(*nsock), GFP_KERNEL);
1275
if (!nsock) {
1276
err = -ENOMEM;
1277
goto put_socket;
1278
}
1279
1280
socks = krealloc(config->socks, (config->num_connections + 1) *
1281
sizeof(struct nbd_sock *), GFP_KERNEL);
1282
if (!socks) {
1283
kfree(nsock);
1284
err = -ENOMEM;
1285
goto put_socket;
1286
}
1287
1288
config->socks = socks;
1289
1290
nsock->fallback_index = -1;
1291
nsock->dead = false;
1292
mutex_init(&nsock->tx_lock);
1293
nsock->sock = sock;
1294
nsock->pending = NULL;
1295
nsock->sent = 0;
1296
nsock->cookie = 0;
1297
INIT_WORK(&nsock->work, nbd_pending_cmd_work);
1298
socks[config->num_connections++] = nsock;
1299
atomic_inc(&config->live_connections);
1300
blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
1301
1302
return 0;
1303
1304
put_socket:
1305
blk_mq_unfreeze_queue(nbd->disk->queue, memflags);
1306
sockfd_put(sock);
1307
return err;
1308
}
1309
1310
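/*
* Replace the first dead connection with a freshly supplied socket, bump
* its cookie so stale replies are rejected, and start a new receive worker
* for it.
*/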
static int nbd_reconnect_socket(struct nbd_device *nbd, unsigned long arg)
1311
{
1312
struct nbd_config *config = nbd->config;
1313
struct socket *sock, *old;
1314
struct recv_thread_args *args;
1315
int i;
1316
int err;
1317
1318
sock = nbd_get_socket(nbd, arg, &err);
1319
if (!sock)
1320
return err;
1321
1322
args = kzalloc(sizeof(*args), GFP_KERNEL);
1323
if (!args) {
1324
sockfd_put(sock);
1325
return -ENOMEM;
1326
}
1327
1328
for (i = 0; i < config->num_connections; i++) {
1329
struct nbd_sock *nsock = config->socks[i];
1330
1331
if (!nsock->dead)
1332
continue;
1333
1334
mutex_lock(&nsock->tx_lock);
1335
if (!nsock->dead) {
1336
mutex_unlock(&nsock->tx_lock);
1337
continue;
1338
}
1339
sk_set_memalloc(sock->sk);
1340
if (nbd->tag_set.timeout)
1341
sock->sk->sk_sndtimeo = nbd->tag_set.timeout;
1342
atomic_inc(&config->recv_threads);
1343
refcount_inc(&nbd->config_refs);
1344
old = nsock->sock;
1345
nsock->fallback_index = -1;
1346
nsock->sock = sock;
1347
nsock->dead = false;
1348
INIT_WORK(&args->work, recv_work);
1349
args->index = i;
1350
args->nbd = nbd;
1351
args->nsock = nsock;
1352
nsock->cookie++;
1353
mutex_unlock(&nsock->tx_lock);
1354
sockfd_put(old);
1355
1356
clear_bit(NBD_RT_DISCONNECTED, &config->runtime_flags);
1357
1358
/* We take the tx_mutex in an error path in the recv_work, so we
1359
* need to queue_work outside of the tx_mutex.
1360
*/
1361
queue_work(nbd->recv_workq, &args->work);
1362
1363
atomic_inc(&config->live_connections);
1364
wake_up(&config->conn_wait);
1365
return 0;
1366
}
1367
sockfd_put(sock);
1368
kfree(args);
1369
return -ENOSPC;
1370
}
1371
1372
static void nbd_bdev_reset(struct nbd_device *nbd)
1373
{
1374
if (disk_openers(nbd->disk) > 1)
1375
return;
1376
set_capacity(nbd->disk, 0);
1377
}
1378
1379
static void nbd_parse_flags(struct nbd_device *nbd)
1380
{
1381
if (nbd->config->flags & NBD_FLAG_READ_ONLY)
1382
set_disk_ro(nbd->disk, true);
1383
else
1384
set_disk_ro(nbd->disk, false);
1385
}
1386
1387
static void send_disconnects(struct nbd_device *nbd)
1388
{
1389
struct nbd_config *config = nbd->config;
1390
struct nbd_request request = {
1391
.magic = htonl(NBD_REQUEST_MAGIC),
1392
.type = htonl(NBD_CMD_DISC),
1393
};
1394
struct kvec iov = {.iov_base = &request, .iov_len = sizeof(request)};
1395
struct iov_iter from;
1396
int i, ret;
1397
1398
for (i = 0; i < config->num_connections; i++) {
1399
struct nbd_sock *nsock = config->socks[i];
1400
1401
iov_iter_kvec(&from, ITER_SOURCE, &iov, 1, sizeof(request));
1402
mutex_lock(&nsock->tx_lock);
1403
ret = sock_xmit(nbd, i, 1, &from, 0, NULL);
1404
if (ret < 0)
1405
dev_err(disk_to_dev(nbd->disk),
1406
"Send disconnect failed %d\n", ret);
1407
mutex_unlock(&nsock->tx_lock);
1408
}
1409
}
1410
1411
static int nbd_disconnect(struct nbd_device *nbd)
1412
{
1413
struct nbd_config *config = nbd->config;
1414
1415
dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n");
1416
set_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags);
1417
set_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags);
1418
send_disconnects(nbd);
1419
return 0;
1420
}
1421
1422
static void nbd_clear_sock(struct nbd_device *nbd)
1423
{
1424
sock_shutdown(nbd);
1425
nbd_clear_que(nbd);
1426
nbd->task_setup = NULL;
1427
}
1428
1429
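/*
* Drop a config reference; the final put tears the device down: debugfs and
* sysfs files are removed, sockets are shut down and released, the config
* is freed, and the module and device references taken when the config was
* set up are dropped.
*/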
static void nbd_config_put(struct nbd_device *nbd)
1430
{
1431
if (refcount_dec_and_mutex_lock(&nbd->config_refs,
1432
&nbd->config_lock)) {
1433
struct nbd_config *config = nbd->config;
1434
nbd_dev_dbg_close(nbd);
1435
invalidate_disk(nbd->disk);
1436
if (nbd->config->bytesize)
1437
kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
1438
if (test_and_clear_bit(NBD_RT_HAS_PID_FILE,
1439
&config->runtime_flags))
1440
device_remove_file(disk_to_dev(nbd->disk), &pid_attr);
1441
nbd->pid = 0;
1442
if (test_and_clear_bit(NBD_RT_HAS_BACKEND_FILE,
1443
&config->runtime_flags)) {
1444
device_remove_file(disk_to_dev(nbd->disk), &backend_attr);
1445
kfree(nbd->backend);
1446
nbd->backend = NULL;
1447
}
1448
nbd_clear_sock(nbd);
1449
if (config->num_connections) {
1450
int i;
1451
for (i = 0; i < config->num_connections; i++) {
1452
sockfd_put(config->socks[i]->sock);
1453
kfree(config->socks[i]);
1454
}
1455
kfree(config->socks);
1456
}
1457
kfree(nbd->config);
1458
nbd->config = NULL;
1459
1460
nbd->tag_set.timeout = 0;
1461
1462
mutex_unlock(&nbd->config_lock);
1463
nbd_put(nbd);
1464
module_put(THIS_MODULE);
1465
}
1466
}
1467
1468
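/*
* Start the device once sockets are configured: size nr_hw_queues to the
* number of connections, record the client pid, create the pid sysfs file
* and debugfs entries, and queue one receive worker per connection.
*/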
static int nbd_start_device(struct nbd_device *nbd)
1469
{
1470
struct nbd_config *config = nbd->config;
1471
int num_connections = config->num_connections;
1472
int error = 0, i;
1473
1474
if (nbd->pid)
1475
return -EBUSY;
1476
if (!config->socks)
1477
return -EINVAL;
1478
if (num_connections > 1 &&
1479
!(config->flags & NBD_FLAG_CAN_MULTI_CONN)) {
1480
dev_err(disk_to_dev(nbd->disk), "server does not support multiple connections per device.\n");
1481
return -EINVAL;
1482
}
1483
1484
retry:
1485
mutex_unlock(&nbd->config_lock);
1486
blk_mq_update_nr_hw_queues(&nbd->tag_set, num_connections);
1487
mutex_lock(&nbd->config_lock);
1488
1489
/* if another code path updated nr_hw_queues, retry until it succeeds */
1490
if (num_connections != config->num_connections) {
1491
num_connections = config->num_connections;
1492
goto retry;
1493
}
1494
1495
nbd->pid = task_pid_nr(current);
1496
1497
nbd_parse_flags(nbd);
1498
1499
error = device_create_file(disk_to_dev(nbd->disk), &pid_attr);
1500
if (error) {
1501
dev_err(disk_to_dev(nbd->disk), "device_create_file failed for pid!\n");
1502
return error;
1503
}
1504
set_bit(NBD_RT_HAS_PID_FILE, &config->runtime_flags);
1505
1506
nbd_dev_dbg_init(nbd);
1507
for (i = 0; i < num_connections; i++) {
1508
struct recv_thread_args *args;
1509
1510
args = kzalloc(sizeof(*args), GFP_KERNEL);
1511
if (!args) {
1512
sock_shutdown(nbd);
1513
/*
1514
* If num_connections is m (m > 2) and the first n (1 < n < m)
* kzalloc() calls succeeded but allocation n + 1 failed, we still
* have n recv threads running. Flush the workqueue here so those
* threads cannot drop the last config_refs and try to destroy
* the workqueue from inside the workqueue.
1520
*/
1521
if (i)
1522
flush_workqueue(nbd->recv_workq);
1523
return -ENOMEM;
1524
}
1525
sk_set_memalloc(config->socks[i]->sock->sk);
1526
if (nbd->tag_set.timeout)
1527
config->socks[i]->sock->sk->sk_sndtimeo =
1528
nbd->tag_set.timeout;
1529
atomic_inc(&config->recv_threads);
1530
refcount_inc(&nbd->config_refs);
1531
INIT_WORK(&args->work, recv_work);
1532
args->nbd = nbd;
1533
args->nsock = config->socks[i];
1534
args->index = i;
1535
queue_work(nbd->recv_workq, &args->work);
1536
}
1537
return nbd_set_size(nbd, config->bytesize, nbd_blksize(config));
1538
}
1539
1540
static int nbd_start_device_ioctl(struct nbd_device *nbd)
1541
{
1542
struct nbd_config *config = nbd->config;
1543
int ret;
1544
1545
ret = nbd_start_device(nbd);
1546
if (ret)
1547
return ret;
1548
1549
if (max_part)
1550
set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
1551
mutex_unlock(&nbd->config_lock);
1552
ret = wait_event_interruptible(config->recv_wq,
1553
atomic_read(&config->recv_threads) == 0);
1554
if (ret) {
1555
sock_shutdown(nbd);
1556
nbd_clear_que(nbd);
1557
}
1558
1559
flush_workqueue(nbd->recv_workq);
1560
mutex_lock(&nbd->config_lock);
1561
nbd_bdev_reset(nbd);
1562
/* user requested, ignore socket errors */
1563
if (test_bit(NBD_RT_DISCONNECT_REQUESTED, &config->runtime_flags))
1564
ret = 0;
1565
if (test_bit(NBD_RT_TIMEDOUT, &config->runtime_flags))
1566
ret = -ETIMEDOUT;
1567
return ret;
1568
}
1569
1570
static void nbd_clear_sock_ioctl(struct nbd_device *nbd)
1571
{
1572
nbd_clear_sock(nbd);
1573
disk_force_media_change(nbd->disk);
1574
nbd_bdev_reset(nbd);
1575
if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
1576
&nbd->config->runtime_flags))
1577
nbd_config_put(nbd);
1578
}
1579
1580
static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
1581
{
1582
nbd->tag_set.timeout = timeout * HZ;
1583
if (timeout)
1584
blk_queue_rq_timeout(nbd->disk->queue, timeout * HZ);
1585
else
1586
blk_queue_rq_timeout(nbd->disk->queue, 30 * HZ);
1587
}
1588
1589
/* Must be called with config_lock held */
1590
static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
1591
unsigned int cmd, unsigned long arg)
1592
{
1593
struct nbd_config *config = nbd->config;
1594
loff_t bytesize;
1595
1596
switch (cmd) {
1597
case NBD_DISCONNECT:
1598
return nbd_disconnect(nbd);
1599
case NBD_CLEAR_SOCK:
1600
nbd_clear_sock_ioctl(nbd);
1601
return 0;
1602
case NBD_SET_SOCK:
1603
return nbd_add_socket(nbd, arg, false);
1604
case NBD_SET_BLKSIZE:
1605
return nbd_set_size(nbd, config->bytesize, arg);
1606
case NBD_SET_SIZE:
1607
return nbd_set_size(nbd, arg, nbd_blksize(config));
1608
case NBD_SET_SIZE_BLOCKS:
1609
if (check_shl_overflow(arg, config->blksize_bits, &bytesize))
1610
return -EINVAL;
1611
return nbd_set_size(nbd, bytesize, nbd_blksize(config));
1612
case NBD_SET_TIMEOUT:
1613
nbd_set_cmd_timeout(nbd, arg);
1614
return 0;
1615
1616
case NBD_SET_FLAGS:
1617
config->flags = arg;
1618
return 0;
1619
case NBD_DO_IT:
1620
return nbd_start_device_ioctl(nbd);
1621
case NBD_CLEAR_QUE:
1622
/*
1623
* This is for compatibility only. The queue is always cleared
1624
* by NBD_DO_IT or NBD_CLEAR_SOCK.
1625
*/
1626
return 0;
1627
case NBD_PRINT_DEBUG:
1628
/*
1629
* For compatibility only, we no longer keep a list of
1630
* outstanding requests.
1631
*/
1632
return 0;
1633
}
1634
return -ENOTTY;
1635
}
1636
1637
static int nbd_ioctl(struct block_device *bdev, blk_mode_t mode,
1638
unsigned int cmd, unsigned long arg)
1639
{
1640
struct nbd_device *nbd = bdev->bd_disk->private_data;
1641
struct nbd_config *config = nbd->config;
1642
int error = -EINVAL;
1643
1644
if (!capable(CAP_SYS_ADMIN))
1645
return -EPERM;
1646
1647
/* The block layer will pass back some non-nbd ioctls in case we have
1648
* special handling for them, but we don't, so just return an error.
1649
*/
1650
if (_IOC_TYPE(cmd) != 0xab)
1651
return -EINVAL;
1652
1653
mutex_lock(&nbd->config_lock);
1654
1655
/* Don't allow ioctl operations on an nbd device that was created with
1656
* netlink, unless it's DISCONNECT or CLEAR_SOCK, which are fine.
1657
*/
1658
if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
1659
(cmd == NBD_DISCONNECT || cmd == NBD_CLEAR_SOCK))
1660
error = __nbd_ioctl(bdev, nbd, cmd, arg);
1661
else
1662
dev_err(nbd_to_dev(nbd), "Cannot use ioctl interface on a netlink controlled device.\n");
1663
mutex_unlock(&nbd->config_lock);
1664
return error;
1665
}
1666
1667
static int nbd_alloc_and_init_config(struct nbd_device *nbd)
1668
{
1669
struct nbd_config *config;
1670
1671
if (WARN_ON(nbd->config))
1672
return -EINVAL;
1673
1674
if (!try_module_get(THIS_MODULE))
1675
return -ENODEV;
1676
1677
config = kzalloc(sizeof(struct nbd_config), GFP_NOFS);
1678
if (!config) {
1679
module_put(THIS_MODULE);
1680
return -ENOMEM;
1681
}
1682
1683
atomic_set(&config->recv_threads, 0);
1684
init_waitqueue_head(&config->recv_wq);
1685
init_waitqueue_head(&config->conn_wait);
1686
config->blksize_bits = NBD_DEF_BLKSIZE_BITS;
1687
atomic_set(&config->live_connections, 0);
1688
1689
nbd->config = config;
1690
/*
1691
* Order refcount_set(&nbd->config_refs, 1) and the nbd->config assignment;
1692
* its pair is the barrier in nbd_get_config_unlocked().
1693
* So nbd_get_config_unlocked() won't see nbd->config as null after
1694
* refcount_inc_not_zero() succeeds.
1695
*/
1696
smp_mb__before_atomic();
1697
refcount_set(&nbd->config_refs, 1);
1698
1699
return 0;
1700
}
1701
1702
static int nbd_open(struct gendisk *disk, blk_mode_t mode)
1703
{
1704
struct nbd_device *nbd;
1705
struct nbd_config *config;
1706
int ret = 0;
1707
1708
mutex_lock(&nbd_index_mutex);
1709
nbd = disk->private_data;
1710
if (!nbd) {
1711
ret = -ENXIO;
1712
goto out;
1713
}
1714
if (!refcount_inc_not_zero(&nbd->refs)) {
1715
ret = -ENXIO;
1716
goto out;
1717
}
1718
1719
config = nbd_get_config_unlocked(nbd);
1720
if (!config) {
1721
mutex_lock(&nbd->config_lock);
1722
if (refcount_inc_not_zero(&nbd->config_refs)) {
1723
mutex_unlock(&nbd->config_lock);
1724
goto out;
1725
}
1726
ret = nbd_alloc_and_init_config(nbd);
1727
if (ret) {
1728
mutex_unlock(&nbd->config_lock);
1729
goto out;
1730
}
1731
1732
refcount_inc(&nbd->refs);
1733
mutex_unlock(&nbd->config_lock);
1734
if (max_part)
1735
set_bit(GD_NEED_PART_SCAN, &disk->state);
1736
} else if (nbd_disconnected(config)) {
1737
if (max_part)
1738
set_bit(GD_NEED_PART_SCAN, &disk->state);
1739
}
1740
out:
1741
mutex_unlock(&nbd_index_mutex);
1742
return ret;
1743
}
1744
1745
static void nbd_release(struct gendisk *disk)
1746
{
1747
struct nbd_device *nbd = disk->private_data;
1748
1749
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
1750
disk_openers(disk) == 0)
1751
nbd_disconnect_and_put(nbd);
1752
1753
nbd_config_put(nbd);
1754
nbd_put(nbd);
1755
}
1756
1757
static void nbd_free_disk(struct gendisk *disk)
1758
{
1759
struct nbd_device *nbd = disk->private_data;
1760
1761
kfree(nbd);
1762
}
1763
1764
static const struct block_device_operations nbd_fops =
1765
{
1766
.owner = THIS_MODULE,
1767
.open = nbd_open,
1768
.release = nbd_release,
1769
.ioctl = nbd_ioctl,
1770
.compat_ioctl = nbd_ioctl,
1771
.free_disk = nbd_free_disk,
1772
};
1773
1774
#if IS_ENABLED(CONFIG_DEBUG_FS)
1775
1776
static int nbd_dbg_tasks_show(struct seq_file *s, void *unused)
1777
{
1778
struct nbd_device *nbd = s->private;
1779
1780
if (nbd->pid)
1781
seq_printf(s, "recv: %d\n", nbd->pid);
1782
1783
return 0;
1784
}
1785
1786
DEFINE_SHOW_ATTRIBUTE(nbd_dbg_tasks);
1787
1788
static int nbd_dbg_flags_show(struct seq_file *s, void *unused)
1789
{
1790
struct nbd_device *nbd = s->private;
1791
u32 flags = nbd->config->flags;
1792
1793
seq_printf(s, "Hex: 0x%08x\n\n", flags);
1794
1795
seq_puts(s, "Known flags:\n");
1796
1797
if (flags & NBD_FLAG_HAS_FLAGS)
1798
seq_puts(s, "NBD_FLAG_HAS_FLAGS\n");
1799
if (flags & NBD_FLAG_READ_ONLY)
1800
seq_puts(s, "NBD_FLAG_READ_ONLY\n");
1801
if (flags & NBD_FLAG_SEND_FLUSH)
1802
seq_puts(s, "NBD_FLAG_SEND_FLUSH\n");
1803
if (flags & NBD_FLAG_SEND_FUA)
1804
seq_puts(s, "NBD_FLAG_SEND_FUA\n");
1805
if (flags & NBD_FLAG_SEND_TRIM)
1806
seq_puts(s, "NBD_FLAG_SEND_TRIM\n");
1807
if (flags & NBD_FLAG_SEND_WRITE_ZEROES)
1808
seq_puts(s, "NBD_FLAG_SEND_WRITE_ZEROES\n");
1809
if (flags & NBD_FLAG_ROTATIONAL)
1810
seq_puts(s, "NBD_FLAG_ROTATIONAL\n");
1811
1812
return 0;
1813
}
1814
1815
DEFINE_SHOW_ATTRIBUTE(nbd_dbg_flags);
1816
1817
static int nbd_dev_dbg_init(struct nbd_device *nbd)
1818
{
1819
struct dentry *dir;
1820
struct nbd_config *config = nbd->config;
1821
1822
if (!nbd_dbg_dir)
1823
return -EIO;
1824
1825
dir = debugfs_create_dir(nbd_name(nbd), nbd_dbg_dir);
1826
if (IS_ERR(dir)) {
1827
dev_err(nbd_to_dev(nbd), "Failed to create debugfs dir for '%s'\n",
1828
nbd_name(nbd));
1829
return -EIO;
1830
}
1831
config->dbg_dir = dir;
1832
1833
debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_fops);
1834
debugfs_create_u64("size_bytes", 0444, dir, &config->bytesize);
1835
debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout);
1836
debugfs_create_u32("blocksize_bits", 0444, dir, &config->blksize_bits);
1837
debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_fops);
1838
1839
return 0;
1840
}
1841
1842
static void nbd_dev_dbg_close(struct nbd_device *nbd)
1843
{
1844
debugfs_remove_recursive(nbd->config->dbg_dir);
1845
}
1846
1847
static int nbd_dbg_init(void)
1848
{
1849
struct dentry *dbg_dir;
1850
1851
dbg_dir = debugfs_create_dir("nbd", NULL);
1852
if (IS_ERR(dbg_dir))
1853
return -EIO;
1854
1855
nbd_dbg_dir = dbg_dir;
1856
1857
return 0;
1858
}
1859
1860
static void nbd_dbg_close(void)
1861
{
1862
debugfs_remove_recursive(nbd_dbg_dir);
1863
}
1864
1865
#else /* IS_ENABLED(CONFIG_DEBUG_FS) */
1866
1867
static int nbd_dev_dbg_init(struct nbd_device *nbd)
1868
{
1869
return 0;
1870
}
1871
1872
static void nbd_dev_dbg_close(struct nbd_device *nbd)
1873
{
1874
}
1875
1876
static int nbd_dbg_init(void)
1877
{
1878
return 0;
1879
}
1880
1881
static void nbd_dbg_close(void)
1882
{
1883
}
1884
1885
#endif
1886
1887
static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
1888
unsigned int hctx_idx, unsigned int numa_node)
1889
{
1890
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
1891
cmd->nbd = set->driver_data;
1892
cmd->flags = 0;
1893
mutex_init(&cmd->lock);
1894
return 0;
1895
}
1896
1897
static const struct blk_mq_ops nbd_mq_ops = {
1898
.queue_rq = nbd_queue_rq,
1899
.complete = nbd_complete_rq,
1900
.init_request = nbd_init_request,
1901
.timeout = nbd_xmit_timeout,
1902
};
1903
1904
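/*
* Allocate and publish a new /dev/nbd<index> device: set up the tag set,
* reserve an index in the IDR (or find a free one when index < 0), create
* the gendisk and its receive workqueue, and only then expose it by taking
* the initial references.
*/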
static struct nbd_device *nbd_dev_add(int index, unsigned int refs)
1905
{
1906
struct queue_limits lim = {
1907
.max_hw_sectors = 65536,
1908
.io_opt = 256 << SECTOR_SHIFT,
1909
.max_segments = USHRT_MAX,
1910
.max_segment_size = UINT_MAX,
1911
};
1912
struct nbd_device *nbd;
1913
struct gendisk *disk;
1914
int err = -ENOMEM;
1915
1916
nbd = kzalloc(sizeof(struct nbd_device), GFP_KERNEL);
1917
if (!nbd)
1918
goto out;
1919
1920
nbd->tag_set.ops = &nbd_mq_ops;
1921
nbd->tag_set.nr_hw_queues = 1;
1922
nbd->tag_set.queue_depth = 128;
1923
nbd->tag_set.numa_node = NUMA_NO_NODE;
1924
nbd->tag_set.cmd_size = sizeof(struct nbd_cmd);
1925
nbd->tag_set.flags = BLK_MQ_F_BLOCKING;
1926
nbd->tag_set.driver_data = nbd;
1927
INIT_WORK(&nbd->remove_work, nbd_dev_remove_work);
1928
nbd->backend = NULL;
1929
1930
err = blk_mq_alloc_tag_set(&nbd->tag_set);
1931
if (err)
1932
goto out_free_nbd;
1933
1934
mutex_lock(&nbd_index_mutex);
1935
if (index >= 0) {
1936
err = idr_alloc(&nbd_index_idr, nbd, index, index + 1,
1937
GFP_KERNEL);
1938
if (err == -ENOSPC)
1939
err = -EEXIST;
1940
} else {
1941
err = idr_alloc(&nbd_index_idr, nbd, 0,
1942
(MINORMASK >> part_shift) + 1, GFP_KERNEL);
1943
if (err >= 0)
1944
index = err;
1945
}
1946
nbd->index = index;
1947
mutex_unlock(&nbd_index_mutex);
1948
if (err < 0)
1949
goto out_free_tags;
1950
1951
disk = blk_mq_alloc_disk(&nbd->tag_set, &lim, NULL);
1952
if (IS_ERR(disk)) {
1953
err = PTR_ERR(disk);
1954
goto out_free_idr;
1955
}
1956
nbd->disk = disk;
1957
1958
nbd->recv_workq = alloc_workqueue("nbd%d-recv",
1959
WQ_MEM_RECLAIM | WQ_HIGHPRI |
1960
WQ_UNBOUND, 0, nbd->index);
1961
if (!nbd->recv_workq) {
1962
dev_err(disk_to_dev(nbd->disk), "Could not allocate knbd recv work queue.\n");
1963
err = -ENOMEM;
1964
goto out_err_disk;
1965
}
1966
1967
mutex_init(&nbd->config_lock);
1968
refcount_set(&nbd->config_refs, 0);
1969
/*
1970
* Start out with a zero references to keep other threads from using
1971
* this device until it is fully initialized.
1972
*/
1973
refcount_set(&nbd->refs, 0);
1974
INIT_LIST_HEAD(&nbd->list);
1975
disk->major = NBD_MAJOR;
1976
disk->first_minor = index << part_shift;
1977
disk->minors = 1 << part_shift;
1978
disk->fops = &nbd_fops;
1979
disk->private_data = nbd;
1980
sprintf(disk->disk_name, "nbd%d", index);
1981
err = add_disk(disk);
1982
if (err)
1983
goto out_free_work;
1984
1985
/*
1986
* Now publish the device.
1987
*/
1988
refcount_set(&nbd->refs, refs);
1989
nbd_total_devices++;
1990
return nbd;
1991
1992
out_free_work:
1993
destroy_workqueue(nbd->recv_workq);
1994
out_err_disk:
1995
put_disk(disk);
1996
out_free_idr:
1997
mutex_lock(&nbd_index_mutex);
1998
idr_remove(&nbd_index_idr, index);
1999
mutex_unlock(&nbd_index_mutex);
2000
out_free_tags:
2001
blk_mq_free_tag_set(&nbd->tag_set);
2002
out_free_nbd:
2003
kfree(nbd);
2004
out:
2005
return ERR_PTR(err);
2006
}
2007
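/*
 * Pick the first device that currently has no configuration attached and is
 * not marked NBD_DESTROY_ON_DISCONNECT, taking a reference on it.  Called
 * with nbd_index_mutex held; returns NULL when every device is busy.
 */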
static struct nbd_device *nbd_find_get_unused(void)
{
	struct nbd_device *nbd;
	int id;

	lockdep_assert_held(&nbd_index_mutex);

	idr_for_each_entry(&nbd_index_idr, nbd, id) {
		if (refcount_read(&nbd->config_refs) ||
		    test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags))
			continue;
		if (refcount_inc_not_zero(&nbd->refs))
			return nbd;
	}

	return NULL;
}

/* Netlink interface. */
static const struct nla_policy nbd_attr_policy[NBD_ATTR_MAX + 1] = {
	[NBD_ATTR_INDEX]		= { .type = NLA_U32 },
	[NBD_ATTR_SIZE_BYTES]		= { .type = NLA_U64 },
	[NBD_ATTR_BLOCK_SIZE_BYTES]	= { .type = NLA_U64 },
	[NBD_ATTR_TIMEOUT]		= { .type = NLA_U64 },
	[NBD_ATTR_SERVER_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_CLIENT_FLAGS]		= { .type = NLA_U64 },
	[NBD_ATTR_SOCKETS]		= { .type = NLA_NESTED },
	[NBD_ATTR_DEAD_CONN_TIMEOUT]	= { .type = NLA_U64 },
	[NBD_ATTR_DEVICE_LIST]		= { .type = NLA_NESTED },
	[NBD_ATTR_BACKEND_IDENTIFIER]	= { .type = NLA_STRING },
};

static const struct nla_policy nbd_sock_policy[NBD_SOCK_MAX + 1] = {
	[NBD_SOCK_FD]			= { .type = NLA_U32 },
};

/* We don't use this right now since we don't parse the incoming list, but we
 * still want it here so userspace knows what to expect.
 */
static const struct nla_policy __attribute__((unused))
nbd_device_policy[NBD_DEVICE_ATTR_MAX + 1] = {
	[NBD_DEVICE_INDEX]		= { .type = NLA_U32 },
	[NBD_DEVICE_CONNECTED]		= { .type = NLA_U8 },
};

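/*
 * Apply NBD_ATTR_SIZE_BYTES and/or NBD_ATTR_BLOCK_SIZE_BYTES from a netlink
 * request; attributes that are absent keep their current values.  For
 * example, a connect message carrying NBD_ATTR_SIZE_BYTES = 1073741824 and
 * NBD_ATTR_BLOCK_SIZE_BYTES = 4096 resizes the device to 1 GiB with a 4 KiB
 * logical block size via nbd_set_size().
 */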
static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
{
	struct nbd_config *config = nbd->config;
	u64 bsize = nbd_blksize(config);
	u64 bytes = config->bytesize;

	if (info->attrs[NBD_ATTR_SIZE_BYTES])
		bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);

	if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
		bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);

	if (bytes != config->bytesize || bsize != nbd_blksize(config))
		return nbd_set_size(nbd, bytes, bsize);
	return 0;
}

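/*
 * NBD_CMD_CONNECT handler.  Looks up the device named by NBD_ATTR_INDEX (or
 * grabs the first unused one when no index is given), allocates a fresh
 * config, applies the size/timeout/flag attributes, adds every socket from
 * the NBD_ATTR_SOCKETS list and finally starts the device.  On success the
 * chosen index is sent back to the caller via nbd_connect_reply().
 */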
static int nbd_genl_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	struct nbd_config *config;
	int index = -1;
	int ret;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (info->attrs[NBD_ATTR_INDEX]) {
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

		/*
		 * A too-large first_minor can cause duplicate creation of
		 * sysfs files/links, since index << part_shift might overflow
		 * and MKDEV() expects first_minor to fit in at most 20 bits.
		 */
		if (index < 0 || index > MINORMASK >> part_shift) {
			pr_err("illegal input index %d\n", index);
			return -EINVAL;
		}
	}
	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SOCKETS)) {
		pr_err("must specify at least one socket\n");
		return -EINVAL;
	}
	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_SIZE_BYTES)) {
		pr_err("must specify a size in bytes for the device\n");
		return -EINVAL;
	}
again:
	mutex_lock(&nbd_index_mutex);
	if (index == -1) {
		nbd = nbd_find_get_unused();
	} else {
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			if ((test_bit(NBD_DESTROY_ON_DISCONNECT, &nbd->flags) &&
			     test_bit(NBD_DISCONNECT_REQUESTED, &nbd->flags)) ||
			    !refcount_inc_not_zero(&nbd->refs)) {
				mutex_unlock(&nbd_index_mutex);
				pr_err("device at index %d is going down\n",
				       index);
				return -EINVAL;
			}
		}
	}
	mutex_unlock(&nbd_index_mutex);

	if (!nbd) {
		nbd = nbd_dev_add(index, 2);
		if (IS_ERR(nbd)) {
			pr_err("failed to add new device\n");
			return PTR_ERR(nbd);
		}
	}

	mutex_lock(&nbd->config_lock);
	if (refcount_read(&nbd->config_refs)) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		if (index == -1)
			goto again;
		pr_err("nbd%d already in use\n", index);
		return -EBUSY;
	}

	ret = nbd_alloc_and_init_config(nbd);
	if (ret) {
		mutex_unlock(&nbd->config_lock);
		nbd_put(nbd);
		pr_err("couldn't allocate config\n");
		return ret;
	}

	config = nbd->config;
	set_bit(NBD_RT_BOUND, &config->runtime_flags);
	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_SERVER_FLAGS])
		config->flags =
			nla_get_u64(info->attrs[NBD_ATTR_SERVER_FLAGS]);
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			/*
			 * We have 1 ref to keep the device around, and then 1
			 * ref for our current operation here, which will be
			 * inherited by the config. If we already have
			 * DESTROY_ON_DISCONNECT set then we know we don't have
			 * that extra ref already held so we don't need the
			 * put_dev.
			 */
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}
		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				pr_err("socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				pr_err("error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_add_socket(nbd, fd, true);
			if (ret)
				goto out;
		}
	}

	if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
		nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
					  GFP_KERNEL);
		if (!nbd->backend) {
			ret = -ENOMEM;
			goto out;
		}
	}
	ret = device_create_file(disk_to_dev(nbd->disk), &backend_attr);
	if (ret) {
		dev_err(disk_to_dev(nbd->disk),
			"device_create_file failed for backend!\n");
		goto out;
	}
	set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags);

	ret = nbd_start_device(nbd);
out:
	mutex_unlock(&nbd->config_lock);
	if (!ret) {
		set_bit(NBD_RT_HAS_CONFIG_REF, &config->runtime_flags);
		refcount_inc(&nbd->config_refs);
		nbd_connect_reply(info, nbd->index);
	}
	nbd_config_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

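/*
 * Tear a device down on behalf of the netlink interface: request the
 * disconnect, shut the sockets down, wake anyone waiting on conn_wait, wait
 * for the receive workqueue to drain so nbd_clear_que() can safely fail the
 * remaining requests, and finally drop the config reference taken when the
 * device was connected (NBD_RT_HAS_CONFIG_REF).
 */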
static void nbd_disconnect_and_put(struct nbd_device *nbd)
{
	mutex_lock(&nbd->config_lock);
	nbd_disconnect(nbd);
	sock_shutdown(nbd);
	wake_up(&nbd->config->conn_wait);
	/*
	 * Make sure the recv thread has finished so that we can safely call
	 * nbd_clear_que() to cancel the in-flight I/Os.
	 */
	flush_workqueue(nbd->recv_workq);
	nbd_clear_que(nbd);
	nbd->task_setup = NULL;
	clear_bit(NBD_RT_BOUND, &nbd->config->runtime_flags);
	mutex_unlock(&nbd->config_lock);

	if (test_and_clear_bit(NBD_RT_HAS_CONFIG_REF,
			       &nbd->config->runtime_flags))
		nbd_config_put(nbd);
}

static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd;
	int index;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
		pr_err("must specify an index to disconnect\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("couldn't find device at index %d\n", index);
		return -EINVAL;
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("device at index %d is going down\n", index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);
	if (!refcount_inc_not_zero(&nbd->config_refs))
		goto put_nbd;
	nbd_disconnect_and_put(nbd);
	nbd_config_put(nbd);
put_nbd:
	nbd_put(nbd);
	return 0;
}

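/*
 * NBD_CMD_RECONFIGURE handler.  Only valid for a device that is already
 * bound and has a client attached (nbd->pid); it can adjust size, timeouts
 * and client flags, and feed replacement sockets to nbd_reconnect_socket().
 * Note that a -ENOSPC result from nbd_reconnect_socket() is deliberately
 * remapped to success below, i.e. "no connection slot to take over" is not
 * treated as an error.
 */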
static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
{
	struct nbd_device *nbd = NULL;
	struct nbd_config *config;
	int index;
	int ret = 0;
	bool put_dev = false;

	if (!netlink_capable(skb, CAP_SYS_ADMIN))
		return -EPERM;

	if (GENL_REQ_ATTR_CHECK(info, NBD_ATTR_INDEX)) {
		pr_err("must specify a device to reconfigure\n");
		return -EINVAL;
	}
	index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);
	mutex_lock(&nbd_index_mutex);
	nbd = idr_find(&nbd_index_idr, index);
	if (!nbd) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("couldn't find a device at index %d\n", index);
		return -EINVAL;
	}
	if (nbd->backend) {
		if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) {
			if (nla_strcmp(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER],
				       nbd->backend)) {
				mutex_unlock(&nbd_index_mutex);
				dev_err(nbd_to_dev(nbd),
					"backend image doesn't match with %s\n",
					nbd->backend);
				return -EINVAL;
			}
		} else {
			mutex_unlock(&nbd_index_mutex);
			dev_err(nbd_to_dev(nbd), "must specify backend\n");
			return -EINVAL;
		}
	}
	if (!refcount_inc_not_zero(&nbd->refs)) {
		mutex_unlock(&nbd_index_mutex);
		pr_err("device at index %d is going down\n", index);
		return -EINVAL;
	}
	mutex_unlock(&nbd_index_mutex);

	config = nbd_get_config_unlocked(nbd);
	if (!config) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		nbd_put(nbd);
		return -EINVAL;
	}

	mutex_lock(&nbd->config_lock);
	if (!test_bit(NBD_RT_BOUND, &config->runtime_flags) ||
	    !nbd->pid) {
		dev_err(nbd_to_dev(nbd),
			"not configured, cannot reconfigure\n");
		ret = -EINVAL;
		goto out;
	}

	ret = nbd_genl_size_set(info, nbd);
	if (ret)
		goto out;

	if (info->attrs[NBD_ATTR_TIMEOUT])
		nbd_set_cmd_timeout(nbd,
				    nla_get_u64(info->attrs[NBD_ATTR_TIMEOUT]));
	if (info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]) {
		config->dead_conn_timeout =
			nla_get_u64(info->attrs[NBD_ATTR_DEAD_CONN_TIMEOUT]);
		config->dead_conn_timeout *= HZ;
	}
	if (info->attrs[NBD_ATTR_CLIENT_FLAGS]) {
		u64 flags = nla_get_u64(info->attrs[NBD_ATTR_CLIENT_FLAGS]);
		if (flags & NBD_CFLAG_DESTROY_ON_DISCONNECT) {
			if (!test_and_set_bit(NBD_DESTROY_ON_DISCONNECT,
					      &nbd->flags))
				put_dev = true;
		} else {
			if (test_and_clear_bit(NBD_DESTROY_ON_DISCONNECT,
					       &nbd->flags))
				refcount_inc(&nbd->refs);
		}

		if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
			set_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				&config->runtime_flags);
		} else {
			clear_bit(NBD_RT_DISCONNECT_ON_CLOSE,
				  &config->runtime_flags);
		}
	}

	if (info->attrs[NBD_ATTR_SOCKETS]) {
		struct nlattr *attr;
		int rem, fd;

		nla_for_each_nested(attr, info->attrs[NBD_ATTR_SOCKETS],
				    rem) {
			struct nlattr *socks[NBD_SOCK_MAX+1];

			if (nla_type(attr) != NBD_SOCK_ITEM) {
				pr_err("socks must be embedded in a SOCK_ITEM attr\n");
				ret = -EINVAL;
				goto out;
			}
			ret = nla_parse_nested_deprecated(socks, NBD_SOCK_MAX,
							  attr,
							  nbd_sock_policy,
							  info->extack);
			if (ret != 0) {
				pr_err("error processing sock list\n");
				ret = -EINVAL;
				goto out;
			}
			if (!socks[NBD_SOCK_FD])
				continue;
			fd = (int)nla_get_u32(socks[NBD_SOCK_FD]);
			ret = nbd_reconnect_socket(nbd, fd);
			if (ret) {
				if (ret == -ENOSPC)
					ret = 0;
				goto out;
			}
			dev_info(nbd_to_dev(nbd), "reconnected socket\n");
		}
	}
out:
	mutex_unlock(&nbd->config_lock);
	nbd_config_put(nbd);
	nbd_put(nbd);
	if (put_dev)
		nbd_put(nbd);
	return ret;
}

static const struct genl_small_ops nbd_connect_genl_ops[] = {
	{
		.cmd		= NBD_CMD_CONNECT,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= nbd_genl_connect,
	},
	{
		.cmd		= NBD_CMD_DISCONNECT,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= nbd_genl_disconnect,
	},
	{
		.cmd		= NBD_CMD_RECONFIGURE,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= nbd_genl_reconfigure,
	},
	{
		.cmd		= NBD_CMD_STATUS,
		.validate	= GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
		.doit		= nbd_genl_status,
	},
};

static const struct genl_multicast_group nbd_mcast_grps[] = {
	{ .name = NBD_GENL_MCAST_GROUP_NAME, },
};

static struct genl_family nbd_genl_family __ro_after_init = {
	.hdrsize	= 0,
	.name		= NBD_GENL_FAMILY_NAME,
	.version	= NBD_GENL_VERSION,
	.module		= THIS_MODULE,
	.small_ops	= nbd_connect_genl_ops,
	.n_small_ops	= ARRAY_SIZE(nbd_connect_genl_ops),
	.resv_start_op	= NBD_CMD_STATUS + 1,
	.maxattr	= NBD_ATTR_MAX,
	.netnsok	= 1,
	.policy		= nbd_attr_policy,
	.mcgrps		= nbd_mcast_grps,
	.n_mcgrps	= ARRAY_SIZE(nbd_mcast_grps),
};
MODULE_ALIAS_GENL_FAMILY(NBD_GENL_FAMILY_NAME);

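/*
 * Illustrative sketch only, not part of this driver: a minimal userspace
 * caller of the generic-netlink family defined above, assuming libnl-genl-3
 * and an already-connected TCP socket fd.  Attribute and command names come
 * from <linux/nbd-netlink.h>; error handling and cleanup are omitted.
 *
 *	#include <netlink/netlink.h>
 *	#include <netlink/genl/genl.h>
 *	#include <netlink/genl/ctrl.h>
 *	#include <linux/nbd-netlink.h>
 *
 *	static int nbd_connect_fd(int sock_fd, uint64_t size_bytes)
 *	{
 *		struct nl_sock *nl = nl_socket_alloc();
 *		struct nlattr *socks, *item;
 *		struct nl_msg *msg;
 *		int family;
 *
 *		genl_connect(nl);
 *		family = genl_ctrl_resolve(nl, NBD_GENL_FAMILY_NAME);
 *
 *		msg = nlmsg_alloc();
 *		genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
 *			    NBD_CMD_CONNECT, NBD_GENL_VERSION);
 *		nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, size_bytes);
 *		socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
 *		item = nla_nest_start(msg, NBD_SOCK_ITEM);
 *		nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
 *		nla_nest_end(msg, item);
 *		nla_nest_end(msg, socks);
 *
 *		return nl_send_auto(nl, msg);
 *	}
 *
 * On success the kernel answers with NBD_ATTR_INDEX, carrying the index of
 * the device that was configured (see nbd_connect_reply() below).
 */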
static int populate_nbd_status(struct nbd_device *nbd, struct sk_buff *reply)
{
	struct nlattr *dev_opt;
	u8 connected = 0;
	int ret;

	/* This is a little racy, but for status it's ok.  We don't take a
	 * ref here because we can't take a ref in the index == -1 case, as
	 * we would need to put under the nbd_index_mutex, which could
	 * deadlock if we are configured to remove ourselves once we're
	 * disconnected.
	 */
	if (refcount_read(&nbd->config_refs))
		connected = 1;
	dev_opt = nla_nest_start_noflag(reply, NBD_DEVICE_ITEM);
	if (!dev_opt)
		return -EMSGSIZE;
	ret = nla_put_u32(reply, NBD_DEVICE_INDEX, nbd->index);
	if (ret)
		return -EMSGSIZE;
	ret = nla_put_u8(reply, NBD_DEVICE_CONNECTED,
			 connected);
	if (ret)
		return -EMSGSIZE;
	nla_nest_end(reply, dev_opt);
	return 0;
}

static int status_cb(int id, void *ptr, void *data)
{
	struct nbd_device *nbd = ptr;
	return populate_nbd_status(nbd, (struct sk_buff *)data);
}

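/*
 * NBD_CMD_STATUS handler.  The reply carries an NBD_ATTR_DEVICE_LIST nest
 * holding one NBD_DEVICE_ITEM per device, each with NBD_DEVICE_INDEX and
 * NBD_DEVICE_CONNECTED.  Without an NBD_ATTR_INDEX attribute every known
 * device is reported, otherwise only the requested one.
 */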
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *dev_list;
	struct sk_buff *reply;
	void *reply_head;
	size_t msg_size;
	int index = -1;
	int ret = -ENOMEM;

	if (info->attrs[NBD_ATTR_INDEX])
		index = nla_get_u32(info->attrs[NBD_ATTR_INDEX]);

	mutex_lock(&nbd_index_mutex);

	msg_size = nla_total_size(nla_attr_size(sizeof(u32)) +
				  nla_attr_size(sizeof(u8)));
	msg_size *= (index == -1) ? nbd_total_devices : 1;

	reply = genlmsg_new(msg_size, GFP_KERNEL);
	if (!reply)
		goto out;
	reply_head = genlmsg_put_reply(reply, info, &nbd_genl_family, 0,
				       NBD_CMD_STATUS);
	if (!reply_head) {
		nlmsg_free(reply);
		goto out;
	}

	dev_list = nla_nest_start_noflag(reply, NBD_ATTR_DEVICE_LIST);
	if (!dev_list) {
		nlmsg_free(reply);
		ret = -EMSGSIZE;
		goto out;
	}

	if (index == -1) {
		ret = idr_for_each(&nbd_index_idr, &status_cb, reply);
		if (ret) {
			nlmsg_free(reply);
			goto out;
		}
	} else {
		struct nbd_device *nbd;
		nbd = idr_find(&nbd_index_idr, index);
		if (nbd) {
			ret = populate_nbd_status(nbd, reply);
			if (ret) {
				nlmsg_free(reply);
				goto out;
			}
		}
	}
	nla_nest_end(reply, dev_list);
	genlmsg_end(reply, reply_head);
	ret = genlmsg_reply(reply, info);
out:
	mutex_unlock(&nbd_index_mutex);
	return ret;
}

static void nbd_connect_reply(struct genl_info *info, int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put_reply(skb, info, &nbd_genl_family, 0,
				     NBD_CMD_CONNECT);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_reply(skb, info);
}

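/*
 * Notify userspace that a connection died: an NBD_CMD_LINK_DEAD message
 * carrying the device index is multicast to the NBD_GENL_MCAST_GROUP_NAME
 * group.  It is sent from nbd_dead_link_work(), which frees its
 * link_dead_args once the message has gone out.
 */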
static void nbd_mcast_index(int index)
{
	struct sk_buff *skb;
	void *msg_head;
	int ret;

	skb = genlmsg_new(nla_total_size(sizeof(u32)), GFP_KERNEL);
	if (!skb)
		return;
	msg_head = genlmsg_put(skb, 0, 0, &nbd_genl_family, 0,
			       NBD_CMD_LINK_DEAD);
	if (!msg_head) {
		nlmsg_free(skb);
		return;
	}
	ret = nla_put_u32(skb, NBD_ATTR_INDEX, index);
	if (ret) {
		nlmsg_free(skb);
		return;
	}
	genlmsg_end(skb, msg_head);
	genlmsg_multicast(&nbd_genl_family, skb, 0, 0, GFP_KERNEL);
}

static void nbd_dead_link_work(struct work_struct *work)
{
	struct link_dead_args *args = container_of(work, struct link_dead_args,
						   work);
	nbd_mcast_index(args->index);
	kfree(args);
}

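/*
 * Worked example of the part_shift arithmetic in nbd_init() below: with the
 * default max_part = 16, fls(16) = 5, so part_shift becomes 5, max_part is
 * rounded to (1 << 5) - 1 = 31, each device spans 1 << 5 = 32 minors and
 * disk->first_minor is index << 5.
 */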
static int __init nbd_init(void)
{
	int i;

	BUILD_BUG_ON(sizeof(struct nbd_request) != 28);

	if (max_part < 0) {
		pr_err("max_part must be >= 0\n");
		return -EINVAL;
	}

	part_shift = 0;
	if (max_part > 0) {
		part_shift = fls(max_part);

		/*
		 * Adjust max_part according to part_shift as it is exported
		 * to user space so that users can know the maximum number of
		 * partitions the kernel should be able to manage.
		 *
		 * Note that -1 is required because partition 0 is reserved
		 * for the whole disk.
		 */
		max_part = (1UL << part_shift) - 1;
	}

	if ((1UL << part_shift) > DISK_MAX_PARTS)
		return -EINVAL;

	if (nbds_max > 1UL << (MINORBITS - part_shift))
		return -EINVAL;

	if (register_blkdev(NBD_MAJOR, "nbd"))
		return -EIO;

	nbd_del_wq = alloc_workqueue("nbd-del", WQ_UNBOUND, 0);
	if (!nbd_del_wq) {
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -ENOMEM;
	}

	if (genl_register_family(&nbd_genl_family)) {
		destroy_workqueue(nbd_del_wq);
		unregister_blkdev(NBD_MAJOR, "nbd");
		return -EINVAL;
	}
	nbd_dbg_init();

	for (i = 0; i < nbds_max; i++)
		nbd_dev_add(i, 1);
	return 0;
}

static int nbd_exit_cb(int id, void *ptr, void *data)
{
	struct list_head *list = (struct list_head *)data;
	struct nbd_device *nbd = ptr;

	/* Skip nbd devices that are being removed asynchronously */
	if (refcount_read(&nbd->refs))
		list_add_tail(&nbd->list, list);

	return 0;
}

static void __exit nbd_cleanup(void)
{
	struct nbd_device *nbd;
	LIST_HEAD(del_list);

	/*
	 * Unregister the netlink interface prior to waiting
	 * for the completion of netlink commands.
	 */
	genl_unregister_family(&nbd_genl_family);

	nbd_dbg_close();

	mutex_lock(&nbd_index_mutex);
	idr_for_each(&nbd_index_idr, &nbd_exit_cb, &del_list);
	mutex_unlock(&nbd_index_mutex);

	while (!list_empty(&del_list)) {
		nbd = list_first_entry(&del_list, struct nbd_device, list);
		list_del_init(&nbd->list);
		if (refcount_read(&nbd->config_refs))
			pr_err("possibly leaking nbd_config (ref %d)\n",
			       refcount_read(&nbd->config_refs));
		if (refcount_read(&nbd->refs) != 1)
			pr_err("possibly leaking a device\n");
		nbd_put(nbd);
	}

	/* Also wait for nbd_dev_remove_work() to complete */
	destroy_workqueue(nbd_del_wq);

	idr_destroy(&nbd_index_idr);
	unregister_blkdev(NBD_MAJOR, "nbd");
}

module_init(nbd_init);
module_exit(nbd_cleanup);

MODULE_DESCRIPTION("Network Block Device");
MODULE_LICENSE("GPL");

module_param(nbds_max, int, 0444);
MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)");
module_param(max_part, int, 0444);
MODULE_PARM_DESC(max_part, "number of partitions per device (default: 16)");