GitHub Repository: torvalds/linux
Path: blob/master/io_uring/cancel.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/namei.h>
#include <linux/nospec.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "filetable.h"
#include "io_uring.h"
#include "tctx.h"
#include "poll.h"
#include "timeout.h"
#include "waitid.h"
#include "futex.h"
#include "cancel.h"

struct io_cancel {
	struct file			*file;
	u64				addr;
	u32				flags;
	s32				fd;
	u8				opcode;
};

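/*
 * Match criteria accepted in sqe->cancel_flags (and in the sync cancel
 * interface below): ALL cancels every matching request instead of stopping
 * at the first one, FD matches on the request's file (FD_FIXED means the fd
 * is an index into the fixed file table), ANY matches any request on the
 * ring, USERDATA forces a user_data match even when FD/OP are also given,
 * and OP matches on the request opcode.
 */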
#define CANCEL_FLAGS	(IORING_ASYNC_CANCEL_ALL | IORING_ASYNC_CANCEL_FD | \
			 IORING_ASYNC_CANCEL_ANY | IORING_ASYNC_CANCEL_FD_FIXED | \
			 IORING_ASYNC_CANCEL_USERDATA | IORING_ASYNC_CANCEL_OP)

/*
 * Returns true if the request matches the criteria outlined by 'cd'.
 */
bool io_cancel_req_match(struct io_kiocb *req, struct io_cancel_data *cd)
{
	bool match_user_data = cd->flags & IORING_ASYNC_CANCEL_USERDATA;

	if (req->ctx != cd->ctx)
		return false;

	if (!(cd->flags & (IORING_ASYNC_CANCEL_FD | IORING_ASYNC_CANCEL_OP)))
		match_user_data = true;

	if (cd->flags & IORING_ASYNC_CANCEL_ANY)
		goto check_seq;
	if (cd->flags & IORING_ASYNC_CANCEL_FD) {
		if (req->file != cd->file)
			return false;
	}
	if (cd->flags & IORING_ASYNC_CANCEL_OP) {
		if (req->opcode != cd->opcode)
			return false;
	}
	if (match_user_data && req->cqe.user_data != cd->data)
		return false;
	if (cd->flags & IORING_ASYNC_CANCEL_ALL) {
check_seq:
		if (io_cancel_match_sequence(req, cd->seq))
			return false;
	}

	return true;
}

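/* io-wq matching callback: test a pending io-wq work item against 'cd' */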
static bool io_cancel_cb(struct io_wq_work *work, void *data)
{
	struct io_kiocb *req = container_of(work, struct io_kiocb, work);
	struct io_cancel_data *cd = data;

	return io_cancel_req_match(req, cd);
}

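/*
 * Try to cancel a matching request queued to this task's io-wq. Returns 0
 * if one was found and canceled, -EALREADY if it is already running, and
 * -ENOENT if nothing matched.
 */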
static int io_async_cancel_one(struct io_uring_task *tctx,
			       struct io_cancel_data *cd)
{
	enum io_wq_cancel cancel_ret;
	int ret = 0;
	bool all;

	if (!tctx || !tctx->io_wq)
		return -ENOENT;

	all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	cancel_ret = io_wq_cancel_cb(tctx->io_wq, io_cancel_cb, cd, all);
	switch (cancel_ret) {
	case IO_WQ_CANCEL_OK:
		ret = 0;
		break;
	case IO_WQ_CANCEL_RUNNING:
		ret = -EALREADY;
		break;
	case IO_WQ_CANCEL_NOTFOUND:
		ret = -ENOENT;
		break;
	}

	return ret;
}

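/*
 * Core cancelation pass: try the io-wq queue first, then poll, waitid,
 * futex, and finally timeouts. An -EALREADY result from io-wq does not end
 * the search, since the request may also have poll armed.
 */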
int io_try_cancel(struct io_uring_task *tctx, struct io_cancel_data *cd,
		  unsigned issue_flags)
{
	struct io_ring_ctx *ctx = cd->ctx;
	int ret;

	WARN_ON_ONCE(!io_wq_current_is_worker() && tctx != current->io_uring);

	ret = io_async_cancel_one(tctx, cd);
	/*
	 * Fall-through even for -EALREADY, as we may have poll armed
	 * that needs unarming.
	 */
	if (!ret)
		return 0;

	ret = io_poll_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_waitid_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	ret = io_futex_cancel(ctx, cd, issue_flags);
	if (ret != -ENOENT)
		return ret;

	spin_lock(&ctx->completion_lock);
	if (!(cd->flags & IORING_ASYNC_CANCEL_FD))
		ret = io_timeout_cancel(ctx, cd);
	spin_unlock(&ctx->completion_lock);
	return ret;
}

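/*
 * Prep for IORING_OP_ASYNC_CANCEL: read the match criteria from the SQE and
 * reject unknown flags as well as FD/OP combined with ANY, which are
 * mutually exclusive.
 */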
int io_async_cancel_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);

	if (unlikely(req->flags & REQ_F_BUFFER_SELECT))
		return -EINVAL;
	if (sqe->off || sqe->splice_fd_in)
		return -EINVAL;

	cancel->addr = READ_ONCE(sqe->addr);
	cancel->flags = READ_ONCE(sqe->cancel_flags);
	if (cancel->flags & ~CANCEL_FLAGS)
		return -EINVAL;
	if (cancel->flags & IORING_ASYNC_CANCEL_FD) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->fd = READ_ONCE(sqe->fd);
	}
	if (cancel->flags & IORING_ASYNC_CANCEL_OP) {
		if (cancel->flags & IORING_ASYNC_CANCEL_ANY)
			return -EINVAL;
		cancel->opcode = READ_ONCE(sqe->len);
	}

	return 0;
}

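/*
 * Run the cancelation against the issuing task's context first, then fall
 * back to every io-wq attached to the ring. With ALL/ANY set, the return
 * value is the number of requests canceled; otherwise it is the result of
 * the first match (or -ENOENT).
 */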
static int __io_async_cancel(struct io_cancel_data *cd,
			     struct io_uring_task *tctx,
			     unsigned int issue_flags)
{
	bool all = cd->flags & (IORING_ASYNC_CANCEL_ALL|IORING_ASYNC_CANCEL_ANY);
	struct io_ring_ctx *ctx = cd->ctx;
	struct io_tctx_node *node;
	int ret, nr = 0;

	do {
		ret = io_try_cancel(tctx, cd, issue_flags);
		if (ret == -ENOENT)
			break;
		if (!all)
			return ret;
		nr++;
	} while (1);

	/* slow path, try all io-wq's */
	io_ring_submit_lock(ctx, issue_flags);
	ret = -ENOENT;
	list_for_each_entry(node, &ctx->tctx_list, ctx_node) {
		ret = io_async_cancel_one(node->task->io_uring, cd);
		if (ret != -ENOENT) {
			if (!all)
				break;
			nr++;
		}
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return all ? nr : ret;
}

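/*
 * Issue IORING_OP_ASYNC_CANCEL: resolve the target file if canceling by fd
 * (fixed or normal), run the cancelation, and post the result as this
 * request's CQE.
 */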
int io_async_cancel(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_cancel *cancel = io_kiocb_to_cmd(req, struct io_cancel);
	struct io_cancel_data cd = {
		.ctx	= req->ctx,
		.data	= cancel->addr,
		.flags	= cancel->flags,
		.opcode	= cancel->opcode,
		.seq	= atomic_inc_return(&req->ctx->cancel_seq),
	};
	struct io_uring_task *tctx = req->tctx;
	int ret;

	if (cd.flags & IORING_ASYNC_CANCEL_FD) {
		if (req->flags & REQ_F_FIXED_FILE ||
		    cd.flags & IORING_ASYNC_CANCEL_FD_FIXED) {
			req->flags |= REQ_F_FIXED_FILE;
			req->file = io_file_get_fixed(req, cancel->fd,
							issue_flags);
		} else {
			req->file = io_file_get_normal(req, cancel->fd);
		}
		if (!req->file) {
			ret = -EBADF;
			goto done;
		}
		cd.file = req->file;
	}

	ret = __io_async_cancel(&cd, tctx, issue_flags);
done:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

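/*
 * One synchronous cancelation attempt. A fixed file must be re-looked-up on
 * every attempt, since the uring_lock is dropped between retries.
 */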
static int __io_sync_cancel(struct io_uring_task *tctx,
			    struct io_cancel_data *cd, int fd)
{
	struct io_ring_ctx *ctx = cd->ctx;

	/* fixed must be grabbed every time since we drop the uring_lock */
	if ((cd->flags & IORING_ASYNC_CANCEL_FD) &&
	    (cd->flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		struct io_rsrc_node *node;

		node = io_rsrc_node_lookup(&ctx->file_table.data, fd);
		if (unlikely(!node))
			return -EBADF;
		cd->file = io_slot_file(node);
		if (!cd->file)
			return -EBADF;
	}

	return __io_async_cancel(cd, tctx, 0);
}

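/*
 * Synchronous cancelation via the io_uring_register() sync cancel
 * interface: copy the criteria from userspace, try to cancel, and if
 * matching requests are still running (-EALREADY), sleep on cq_wait and
 * retry each time a completion arrives, until -ENOENT, a signal, or the
 * optional timeout.
 */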
int io_sync_cancel(struct io_ring_ctx *ctx, void __user *arg)
	__must_hold(&ctx->uring_lock)
{
	struct io_cancel_data cd = {
		.ctx	= ctx,
		.seq	= atomic_inc_return(&ctx->cancel_seq),
	};
	ktime_t timeout = KTIME_MAX;
	struct io_uring_sync_cancel_reg sc;
	struct file *file = NULL;
	DEFINE_WAIT(wait);
	int ret, i;

	if (copy_from_user(&sc, arg, sizeof(sc)))
		return -EFAULT;
	if (sc.flags & ~CANCEL_FLAGS)
		return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad); i++)
		if (sc.pad[i])
			return -EINVAL;
	for (i = 0; i < ARRAY_SIZE(sc.pad2); i++)
		if (sc.pad2[i])
			return -EINVAL;

	cd.data = sc.addr;
	cd.flags = sc.flags;
	cd.opcode = sc.opcode;

	/* we can grab a normal file descriptor upfront */
	if ((cd.flags & IORING_ASYNC_CANCEL_FD) &&
	   !(cd.flags & IORING_ASYNC_CANCEL_FD_FIXED)) {
		file = fget(sc.fd);
		if (!file)
			return -EBADF;
		cd.file = file;
	}

	ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

	/* found something, done! */
	if (ret != -EALREADY)
		goto out;

	if (sc.timeout.tv_sec != -1UL || sc.timeout.tv_nsec != -1UL) {
		struct timespec64 ts = {
			.tv_sec		= sc.timeout.tv_sec,
			.tv_nsec	= sc.timeout.tv_nsec
		};

		timeout = ktime_add_ns(timespec64_to_ktime(ts), ktime_get_ns());
	}

	/*
	 * Keep looking until we get -ENOENT. We'll get woken every time
	 * a request completes and will retry the cancelation.
	 */
	do {
		cd.seq = atomic_inc_return(&ctx->cancel_seq);

		prepare_to_wait(&ctx->cq_wait, &wait, TASK_INTERRUPTIBLE);

		ret = __io_sync_cancel(current->io_uring, &cd, sc.fd);

		mutex_unlock(&ctx->uring_lock);
		if (ret != -EALREADY)
			break;

		ret = io_run_task_work_sig(ctx);
		if (ret < 0)
			break;
		ret = schedule_hrtimeout(&timeout, HRTIMER_MODE_ABS);
		if (!ret) {
			ret = -ETIME;
			break;
		}
		mutex_lock(&ctx->uring_lock);
	} while (1);

	finish_wait(&ctx->cq_wait, &wait);
	mutex_lock(&ctx->uring_lock);

	if (ret == -ENOENT || ret > 0)
		ret = 0;
out:
	if (file)
		fput(file);
	return ret;
}

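/*
 * Cancel every request on 'list' that belongs to the given task context
 * (or all of them if cancel_all), using the opcode-specific 'cancel'
 * callback. Returns true if anything was canceled.
 */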
bool io_cancel_remove_all(struct io_ring_ctx *ctx, struct io_uring_task *tctx,
			  struct hlist_head *list, bool cancel_all,
			  bool (*cancel)(struct io_kiocb *))
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	bool found = false;

	lockdep_assert_held(&ctx->uring_lock);

	hlist_for_each_entry_safe(req, tmp, list, hash_node) {
		if (!io_match_task_safe(req, tctx, cancel_all))
			continue;
		hlist_del_init(&req->hash_node);
		if (cancel(req))
			found = true;
	}

	return found;
}

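/*
 * Cancel requests on 'list' that match 'cd', stopping after the first match
 * unless IORING_ASYNC_CANCEL_ALL is set. Returns the number of requests
 * canceled, or -ENOENT if nothing was canceled.
 */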
int io_cancel_remove(struct io_ring_ctx *ctx, struct io_cancel_data *cd,
		     unsigned int issue_flags, struct hlist_head *list,
		     bool (*cancel)(struct io_kiocb *))
{
	struct hlist_node *tmp;
	struct io_kiocb *req;
	int nr = 0;

	io_ring_submit_lock(ctx, issue_flags);
	hlist_for_each_entry_safe(req, tmp, list, hash_node) {
		if (!io_cancel_req_match(req, cd))
			continue;
		if (cancel(req))
			nr++;
		if (!(cd->flags & IORING_ASYNC_CANCEL_ALL))
			break;
	}
	io_ring_submit_unlock(ctx, issue_flags);
	return nr ?: -ENOENT;
}