GitHub Repository: torvalds/linux
Path: blob/master/io_uring/openclose.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/fsnotify.h>
#include <linux/namei.h>
#include <linux/pipe_fs_i.h>
#include <linux/watch_queue.h>
#include <linux/io_uring.h>

#include <uapi/linux/io_uring.h>

#include "../fs/internal.h"

#include "filetable.h"
#include "io_uring.h"
#include "rsrc.h"
#include "openclose.h"

struct io_open {
	struct file *file;
	int dfd;
	u32 file_slot;
	struct filename *filename;
	struct open_how how;
	unsigned long nofile;
};

struct io_close {
	struct file *file;
	int fd;
	u32 file_slot;
};

struct io_fixed_install {
	struct file *file;
	unsigned int o_flags;
};

static bool io_openat_force_async(struct io_open *open)
{
	/*
	 * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open,
	 * it'll always -EAGAIN. Note that we test for __O_TMPFILE because
	 * O_TMPFILE includes O_DIRECTORY, which isn't a flag we need to force
	 * async for.
	 */
	return open->how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE);
}

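/*
 * Common prep for IORING_OP_OPENAT/IORING_OP_OPENAT2: pull the remaining
 * SQE fields, copy the pathname into kernel memory via getname() so it
 * stays stable if the open later runs async, and reject the invalid
 * fixed-slot + O_CLOEXEC combination.
 */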
static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	const char __user *fname;
	int ret;

	if (unlikely(sqe->buf_index))
		return -EINVAL;
	if (unlikely(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	/* open.how should be already initialised */
	if (!(open->how.flags & O_PATH) && force_o_largefile())
		open->how.flags |= O_LARGEFILE;

	open->dfd = READ_ONCE(sqe->fd);
	fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
	open->filename = getname(fname);
	if (IS_ERR(open->filename)) {
		ret = PTR_ERR(open->filename);
		open->filename = NULL;
		return ret;
	}

	open->file_slot = READ_ONCE(sqe->file_index);
	if (open->file_slot && (open->how.flags & O_CLOEXEC))
		return -EINVAL;

	open->nofile = rlimit(RLIMIT_NOFILE);
	req->flags |= REQ_F_NEED_CLEANUP;
	if (io_openat_force_async(open))
		req->flags |= REQ_F_FORCE_ASYNC;
	return 0;
}

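/*
 * Prep for IORING_OP_OPENAT: flags and mode arrive openat(2)-style and are
 * converted into a struct open_how before the common prep runs.
 */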
int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	u64 mode = READ_ONCE(sqe->len);
	u64 flags = READ_ONCE(sqe->open_flags);

	open->how = build_open_how(flags, mode);
	return __io_openat_prep(req, sqe);
}

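/*
 * Prep for IORING_OP_OPENAT2: the caller passes a struct open_how as with
 * openat2(2). copy_struct_from_user() accepts any size from VER0 up and
 * rejects unknown trailing bits, mirroring the syscall's extensibility
 * rules.
 */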
int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_how __user *how;
	size_t len;
	int ret;

	how = u64_to_user_ptr(READ_ONCE(sqe->addr2));
	len = READ_ONCE(sqe->len);
	if (len < OPEN_HOW_SIZE_VER0)
		return -EINVAL;

	ret = copy_struct_from_user(&open->how, sizeof(open->how), how, len);
	if (ret)
		return ret;

	return __io_openat_prep(req, sqe);
}

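/*
 * Issue path for both open variants. On the nonblocking pass this attempts
 * a dcache-only lookup via LOOKUP_CACHED; an -EAGAIN from that pass is
 * bounced back so io_uring retries from a blocking context, unless the
 * application itself already asked for RESOLVE_CACHED.
 */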
int io_openat2(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);
	struct open_flags op;
	struct file *file;
	bool resolve_nonblock, nonblock_set;
	bool fixed = !!open->file_slot;
	int ret;

	ret = build_open_flags(&open->how, &op);
	if (ret)
		goto err;
	nonblock_set = op.open_flag & O_NONBLOCK;
	resolve_nonblock = open->how.resolve & RESOLVE_CACHED;
	if (issue_flags & IO_URING_F_NONBLOCK) {
		WARN_ON_ONCE(io_openat_force_async(open));
		op.lookup_flags |= LOOKUP_CACHED;
		op.open_flag |= O_NONBLOCK;
	}

	if (!fixed) {
		ret = __get_unused_fd_flags(open->how.flags, open->nofile);
		if (ret < 0)
			goto err;
	}

	file = do_filp_open(open->dfd, open->filename, &op);
	if (IS_ERR(file)) {
		/*
		 * We could hang on to this 'fd' on retrying, but seems like
		 * marginal gain for something that is now known to be a slower
		 * path. So just put it, and we'll get a new one when we retry.
		 */
		if (!fixed)
			put_unused_fd(ret);

		ret = PTR_ERR(file);
		/* only retry if RESOLVE_CACHED wasn't already set by application */
		if (ret == -EAGAIN &&
		    (!resolve_nonblock && (issue_flags & IO_URING_F_NONBLOCK)))
			return -EAGAIN;
		goto err;
	}

	if ((issue_flags & IO_URING_F_NONBLOCK) && !nonblock_set)
		file->f_flags &= ~O_NONBLOCK;

	if (!fixed)
		fd_install(ret, file);
	else
		ret = io_fixed_fd_install(req, issue_flags, file,
					  open->file_slot);
err:
	putname(open->filename);
	req->flags &= ~REQ_F_NEED_CLEANUP;
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

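/*
 * Userspace view, for illustration only (not part of this file): a minimal
 * liburing sketch of driving these handlers; "/tmp/demo" is an arbitrary
 * example path.
 *
 *	struct io_uring ring;
 *	io_uring_queue_init(8, &ring, 0);
 *
 *	struct io_uring_sqe *sqe = io_uring_get_sqe(&ring);
 *	io_uring_prep_openat(sqe, AT_FDCWD, "/tmp/demo", O_RDONLY, 0);
 *	io_uring_submit(&ring);
 *
 *	struct io_uring_cqe *cqe;
 *	io_uring_wait_cqe(&ring, &cqe);
 *	int fd = cqe->res;	<- fd on success, -errno on failure
 *	io_uring_cqe_seen(&ring, cqe);
 */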
int io_openat(struct io_kiocb *req, unsigned int issue_flags)
{
	return io_openat2(req, issue_flags);
}

void io_open_cleanup(struct io_kiocb *req)
{
	struct io_open *open = io_kiocb_to_cmd(req, struct io_open);

	if (open->filename)
		putname(open->filename);
}

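/*
 * Remove the file occupying the given fixed-slot offset, taking the ring
 * submit lock as dictated by issue_flags.
 */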
int __io_close_fixed(struct io_ring_ctx *ctx, unsigned int issue_flags,
		     unsigned int offset)
{
	int ret;

	io_ring_submit_lock(ctx, issue_flags);
	ret = io_fixed_fd_remove(ctx, offset);
	io_ring_submit_unlock(ctx, issue_flags);

	return ret;
}

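/*
 * The SQE encodes the slot as index + 1 so that zero can mean "not a fixed
 * file"; undo that bias here.
 */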
static inline int io_close_fixed(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	return __io_close_fixed(req->ctx, issue_flags, close->file_slot - 1);
}

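/*
 * Prep for IORING_OP_CLOSE: a fixed-slot close and a regular fd close are
 * mutually exclusive.
 */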
int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);

	if (sqe->off || sqe->addr || sqe->len || sqe->rw_flags || sqe->buf_index)
		return -EINVAL;
	if (req->flags & REQ_F_FIXED_FILE)
		return -EBADF;

	close->fd = READ_ONCE(sqe->fd);
	close->file_slot = READ_ONCE(sqe->file_index);
	if (close->file_slot && close->fd)
		return -EINVAL;

	return 0;
}

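/*
 * For reference, the userspace side of this is io_uring_prep_close(sqe, fd)
 * or io_uring_prep_close_direct(sqe, file_index) in liburing.
 */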
int io_close(struct io_kiocb *req, unsigned int issue_flags)
{
	struct files_struct *files = current->files;
	struct io_close *close = io_kiocb_to_cmd(req, struct io_close);
	struct file *file;
	int ret = -EBADF;

	if (close->file_slot) {
		ret = io_close_fixed(req, issue_flags);
		goto err;
	}

	spin_lock(&files->file_lock);
	file = files_lookup_fd_locked(files, close->fd);
	if (!file || io_is_uring_fops(file)) {
		spin_unlock(&files->file_lock);
		goto err;
	}

	/* if the file has a flush method, be safe and punt to async */
	if (file->f_op->flush && (issue_flags & IO_URING_F_NONBLOCK)) {
		spin_unlock(&files->file_lock);
		return -EAGAIN;
	}

	file = file_close_fd_locked(files, close->fd);
	spin_unlock(&files->file_lock);
	if (!file)
		goto err;

	/* No ->flush() or already async, safely close from here */
	ret = filp_close(file, current->files);
err:
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

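/*
 * IORING_OP_FIXED_FD_INSTALL: take a file that exists only as a fixed file
 * in the ring's registered table and install it into the task's normal fd
 * table as well.
 */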
int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_fixed_install *ifi;
	unsigned int flags;

	if (sqe->off || sqe->addr || sqe->len || sqe->buf_index ||
	    sqe->splice_fd_in || sqe->addr3)
		return -EINVAL;

	/* must be a fixed file */
	if (!(req->flags & REQ_F_FIXED_FILE))
		return -EBADF;

	flags = READ_ONCE(sqe->install_fd_flags);
	if (flags & ~IORING_FIXED_FD_NO_CLOEXEC)
		return -EINVAL;

	/* ensure the task's creds are used when installing/receiving fds */
	if (req->flags & REQ_F_CREDS)
		return -EPERM;

	/* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
	ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
	ifi->o_flags = O_CLOEXEC;
	if (flags & IORING_FIXED_FD_NO_CLOEXEC)
		ifi->o_flags = 0;

	return 0;
}

int io_install_fixed_fd(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_fixed_install *ifi;
	int ret;

	ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
	ret = receive_fd(req->file, NULL, ifi->o_flags);
	if (ret < 0)
		req_set_fail(req);
	io_req_set_res(req, ret, 0);
	return IOU_COMPLETE;
}

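/*
 * IORING_OP_PIPE: like pipe2(2), but the two resulting files can be
 * installed either as regular fds or directly into fixed file slots; the
 * resulting fd/slot pair is written to a user-supplied int array.
 */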
struct io_pipe {
	struct file *file;
	int __user *fds;
	int flags;
	int file_slot;
	unsigned long nofile;
};

int io_pipe_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
	struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);

	if (sqe->fd || sqe->off || sqe->addr3)
		return -EINVAL;

	p->fds = u64_to_user_ptr(READ_ONCE(sqe->addr));
	p->flags = READ_ONCE(sqe->pipe_flags);
	if (p->flags & ~(O_CLOEXEC | O_NONBLOCK | O_DIRECT | O_NOTIFICATION_PIPE))
		return -EINVAL;

	p->file_slot = READ_ONCE(sqe->file_index);
	p->nofile = rlimit(RLIMIT_NOFILE);
	return 0;
}

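/*
 * Fixed-slot variant. O_CLOEXEC is rejected here (as in __io_openat_prep)
 * since fixed files never live in the fd table, so the flag would be
 * meaningless for them.
 */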
static int io_pipe_fixed(struct io_kiocb *req, struct file **files,
			 unsigned int issue_flags)
{
	struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
	struct io_ring_ctx *ctx = req->ctx;
	int ret, fds[2] = { -1, -1 };
	int slot = p->file_slot;

	if (p->flags & O_CLOEXEC)
		return -EINVAL;

	io_ring_submit_lock(ctx, issue_flags);

	ret = __io_fixed_fd_install(ctx, files[0], slot);
	if (ret < 0)
		goto err;
	fds[0] = ret;
	files[0] = NULL;

	/*
	 * If a specific slot is given, next one will be used for
	 * the write side.
	 */
	if (slot != IORING_FILE_INDEX_ALLOC)
		slot++;

	ret = __io_fixed_fd_install(ctx, files[1], slot);
	if (ret < 0)
		goto err;
	fds[1] = ret;
	files[1] = NULL;

	io_ring_submit_unlock(ctx, issue_flags);

	if (!copy_to_user(p->fds, fds, sizeof(fds)))
		return 0;

	ret = -EFAULT;
	io_ring_submit_lock(ctx, issue_flags);
err:
	if (fds[0] != -1)
		io_fixed_fd_remove(ctx, fds[0]);
	if (fds[1] != -1)
		io_fixed_fd_remove(ctx, fds[1]);
	io_ring_submit_unlock(ctx, issue_flags);
	return ret;
}

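/*
 * Regular-fd variant: reserve both descriptors up front and only
 * fd_install() them once the fd values have reached userspace, since an
 * installed fd cannot safely be taken back on a late -EFAULT.
 */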
static int io_pipe_fd(struct io_kiocb *req, struct file **files)
{
	struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
	int ret, fds[2] = { -1, -1 };

	ret = __get_unused_fd_flags(p->flags, p->nofile);
	if (ret < 0)
		goto err;
	fds[0] = ret;

	ret = __get_unused_fd_flags(p->flags, p->nofile);
	if (ret < 0)
		goto err;
	fds[1] = ret;

	if (!copy_to_user(p->fds, fds, sizeof(fds))) {
		fd_install(fds[0], files[0]);
		fd_install(fds[1], files[1]);
		return 0;
	}
	ret = -EFAULT;
err:
	if (fds[0] != -1)
		put_unused_fd(fds[0]);
	if (fds[1] != -1)
		put_unused_fd(fds[1]);
	return ret;
}

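/*
 * Issue path: create both pipe files first, then hand them to the fixed or
 * regular install path; on failure, any file those paths did not consume is
 * dropped here.
 */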
int io_pipe(struct io_kiocb *req, unsigned int issue_flags)
{
	struct io_pipe *p = io_kiocb_to_cmd(req, struct io_pipe);
	struct file *files[2];
	int ret;

	ret = create_pipe_files(files, p->flags);
	if (ret)
		return ret;

	if (!!p->file_slot)
		ret = io_pipe_fixed(req, files, issue_flags);
	else
		ret = io_pipe_fd(req, files);

	io_req_set_res(req, ret, 0);
	if (!ret)
		return IOU_COMPLETE;

	req_set_fail(req);
	if (files[0])
		fput(files[0]);
	if (files[1])
		fput(files[1]);
	return ret;
}