// SPDX-License-Identifier: GPL-2.0-or-later1/*2* fs/eventpoll.c (Efficient event retrieval implementation)3* Copyright (C) 2001,...,2009 Davide Libenzi4*5* Davide Libenzi <[email protected]>6*/78#include <linux/init.h>9#include <linux/kernel.h>10#include <linux/sched/signal.h>11#include <linux/fs.h>12#include <linux/file.h>13#include <linux/signal.h>14#include <linux/errno.h>15#include <linux/mm.h>16#include <linux/slab.h>17#include <linux/poll.h>18#include <linux/string.h>19#include <linux/list.h>20#include <linux/hash.h>21#include <linux/spinlock.h>22#include <linux/syscalls.h>23#include <linux/rbtree.h>24#include <linux/wait.h>25#include <linux/eventpoll.h>26#include <linux/mount.h>27#include <linux/bitops.h>28#include <linux/mutex.h>29#include <linux/anon_inodes.h>30#include <linux/device.h>31#include <linux/uaccess.h>32#include <asm/io.h>33#include <asm/mman.h>34#include <linux/atomic.h>35#include <linux/proc_fs.h>36#include <linux/seq_file.h>37#include <linux/compat.h>38#include <linux/rculist.h>39#include <linux/capability.h>40#include <net/busy_poll.h>4142/*43* LOCKING:44* There are three level of locking required by epoll :45*46* 1) epnested_mutex (mutex)47* 2) ep->mtx (mutex)48* 3) ep->lock (spinlock)49*50* The acquire order is the one listed above, from 1 to 3.51* We need a spinlock (ep->lock) because we manipulate objects52* from inside the poll callback, that might be triggered from53* a wake_up() that in turn might be called from IRQ context.54* So we can't sleep inside the poll callback and hence we need55* a spinlock. During the event transfer loop (from kernel to56* user space) we could end up sleeping due a copy_to_user(), so57* we need a lock that will allow us to sleep. This lock is a58* mutex (ep->mtx). It is acquired during the event transfer loop,59* during epoll_ctl(EPOLL_CTL_DEL) and during eventpoll_release_file().60* The epnested_mutex is acquired when inserting an epoll fd onto another61* epoll fd. We do this so that we walk the epoll tree and ensure that this62* insertion does not create a cycle of epoll file descriptors, which63* could lead to deadlock. We need a global mutex to prevent two64* simultaneous inserts (A into B and B into A) from racing and65* constructing a cycle without either insert observing that it is66* going to.67* It is necessary to acquire multiple "ep->mtx"es at once in the68* case when one epoll fd is added to another. In this case, we69* always acquire the locks in the order of nesting (i.e. after70* epoll_ctl(e1, EPOLL_CTL_ADD, e2), e1->mtx will always be acquired71* before e2->mtx). Since we disallow cycles of epoll file72* descriptors, this ensures that the mutexes are well-ordered. In73* order to communicate this nesting to lockdep, when walking a tree74* of epoll file descriptors, we use the current recursion depth as75* the lockdep subkey.76* It is possible to drop the "ep->mtx" and to use the global77* mutex "epnested_mutex" (together with "ep->lock") to have it working,78* but having "ep->mtx" will make the interface more scalable.79* Events that require holding "epnested_mutex" are very rare, while for80* normal operations the epoll private "ep->mtx" will guarantee81* a better scalability.82*/8384/* Epoll private bits inside the event mask */85#define EP_PRIVATE_BITS (EPOLLWAKEUP | EPOLLONESHOT | EPOLLET | EPOLLEXCLUSIVE)8687#define EPOLLINOUT_BITS (EPOLLIN | EPOLLOUT)8889#define EPOLLEXCLUSIVE_OK_BITS (EPOLLINOUT_BITS | EPOLLERR | EPOLLHUP | \90EPOLLWAKEUP | EPOLLET | EPOLLEXCLUSIVE)9192/* Maximum number of nesting allowed inside epoll sets */93#define EP_MAX_NESTS 49495#define EP_MAX_EVENTS (INT_MAX / sizeof(struct epoll_event))9697#define EP_UNACTIVE_PTR ((void *) -1L)9899#define EP_ITEM_COST (sizeof(struct epitem) + sizeof(struct eppoll_entry))100101struct epoll_filefd {102struct file *file;103int fd;104} __packed;105106/* Wait structure used by the poll hooks */107struct eppoll_entry {108/* List header used to link this structure to the "struct epitem" */109struct eppoll_entry *next;110111/* The "base" pointer is set to the container "struct epitem" */112struct epitem *base;113114/*115* Wait queue item that will be linked to the target file wait116* queue head.117*/118wait_queue_entry_t wait;119120/* The wait queue head that linked the "wait" wait queue item */121wait_queue_head_t *whead;122};123124/*125* Each file descriptor added to the eventpoll interface will126* have an entry of this type linked to the "rbr" RB tree.127* Avoid increasing the size of this struct, there can be many thousands128* of these on a server and we do not want this to take another cache line.129*/130struct epitem {131union {132/* RB tree node links this structure to the eventpoll RB tree */133struct rb_node rbn;134/* Used to free the struct epitem */135struct rcu_head rcu;136};137138/* List header used to link this structure to the eventpoll ready list */139struct list_head rdllink;140141/*142* Works together "struct eventpoll"->ovflist in keeping the143* single linked chain of items.144*/145struct epitem *next;146147/* The file descriptor information this item refers to */148struct epoll_filefd ffd;149150/*151* Protected by file->f_lock, true for to-be-released epitem already152* removed from the "struct file" items list; together with153* eventpoll->refcount orchestrates "struct eventpoll" disposal154*/155bool dying;156157/* List containing poll wait queues */158struct eppoll_entry *pwqlist;159160/* The "container" of this item */161struct eventpoll *ep;162163/* List header used to link this item to the "struct file" items list */164struct hlist_node fllink;165166/* wakeup_source used when EPOLLWAKEUP is set */167struct wakeup_source __rcu *ws;168169/* The structure that describe the interested events and the source fd */170struct epoll_event event;171};172173/*174* This structure is stored inside the "private_data" member of the file175* structure and represents the main data structure for the eventpoll176* interface.177*/178struct eventpoll {179/*180* This mutex is used to ensure that files are not removed181* while epoll is using them. This is held during the event182* collection loop, the file cleanup path, the epoll file exit183* code and the ctl operations.184*/185struct mutex mtx;186187/* Wait queue used by sys_epoll_wait() */188wait_queue_head_t wq;189190/* Wait queue used by file->poll() */191wait_queue_head_t poll_wait;192193/* List of ready file descriptors */194struct list_head rdllist;195196/* Lock which protects rdllist and ovflist */197spinlock_t lock;198199/* RB tree root used to store monitored fd structs */200struct rb_root_cached rbr;201202/*203* This is a single linked list that chains all the "struct epitem" that204* happened while transferring ready events to userspace w/out205* holding ->lock.206*/207struct epitem *ovflist;208209/* wakeup_source used when ep_send_events or __ep_eventpoll_poll is running */210struct wakeup_source *ws;211212/* The user that created the eventpoll descriptor */213struct user_struct *user;214215struct file *file;216217/* used to optimize loop detection check */218u64 gen;219struct hlist_head refs;220u8 loop_check_depth;221222/*223* usage count, used together with epitem->dying to224* orchestrate the disposal of this struct225*/226refcount_t refcount;227228#ifdef CONFIG_NET_RX_BUSY_POLL229/* used to track busy poll napi_id */230unsigned int napi_id;231/* busy poll timeout */232u32 busy_poll_usecs;233/* busy poll packet budget */234u16 busy_poll_budget;235bool prefer_busy_poll;236#endif237238#ifdef CONFIG_DEBUG_LOCK_ALLOC239/* tracks wakeup nests for lockdep validation */240u8 nests;241#endif242};243244/* Wrapper struct used by poll queueing */245struct ep_pqueue {246poll_table pt;247struct epitem *epi;248};249250/*251* Configuration options available inside /proc/sys/fs/epoll/252*/253/* Maximum number of epoll watched descriptors, per user */254static long max_user_watches __read_mostly;255256/* Used for cycles detection */257static DEFINE_MUTEX(epnested_mutex);258259static u64 loop_check_gen = 0;260261/* Used to check for epoll file descriptor inclusion loops */262static struct eventpoll *inserting_into;263264/* Slab cache used to allocate "struct epitem" */265static struct kmem_cache *epi_cache __ro_after_init;266267/* Slab cache used to allocate "struct eppoll_entry" */268static struct kmem_cache *pwq_cache __ro_after_init;269270/*271* List of files with newly added links, where we may need to limit the number272* of emanating paths. Protected by the epnested_mutex.273*/274struct epitems_head {275struct hlist_head epitems;276struct epitems_head *next;277};278static struct epitems_head *tfile_check_list = EP_UNACTIVE_PTR;279280static struct kmem_cache *ephead_cache __ro_after_init;281282static inline void free_ephead(struct epitems_head *head)283{284if (head)285kmem_cache_free(ephead_cache, head);286}287288static void list_file(struct file *file)289{290struct epitems_head *head;291292head = container_of(file->f_ep, struct epitems_head, epitems);293if (!head->next) {294head->next = tfile_check_list;295tfile_check_list = head;296}297}298299static void unlist_file(struct epitems_head *head)300{301struct epitems_head *to_free = head;302struct hlist_node *p = rcu_dereference(hlist_first_rcu(&head->epitems));303if (p) {304struct epitem *epi= container_of(p, struct epitem, fllink);305spin_lock(&epi->ffd.file->f_lock);306if (!hlist_empty(&head->epitems))307to_free = NULL;308head->next = NULL;309spin_unlock(&epi->ffd.file->f_lock);310}311free_ephead(to_free);312}313314#ifdef CONFIG_SYSCTL315316#include <linux/sysctl.h>317318static long long_zero;319static long long_max = LONG_MAX;320321static const struct ctl_table epoll_table[] = {322{323.procname = "max_user_watches",324.data = &max_user_watches,325.maxlen = sizeof(max_user_watches),326.mode = 0644,327.proc_handler = proc_doulongvec_minmax,328.extra1 = &long_zero,329.extra2 = &long_max,330},331};332333static void __init epoll_sysctls_init(void)334{335register_sysctl("fs/epoll", epoll_table);336}337#else338#define epoll_sysctls_init() do { } while (0)339#endif /* CONFIG_SYSCTL */340341static const struct file_operations eventpoll_fops;342343static inline int is_file_epoll(struct file *f)344{345return f->f_op == &eventpoll_fops;346}347348/* Setup the structure that is used as key for the RB tree */349static inline void ep_set_ffd(struct epoll_filefd *ffd,350struct file *file, int fd)351{352ffd->file = file;353ffd->fd = fd;354}355356/* Compare RB tree keys */357static inline int ep_cmp_ffd(struct epoll_filefd *p1,358struct epoll_filefd *p2)359{360return (p1->file > p2->file ? +1:361(p1->file < p2->file ? -1 : p1->fd - p2->fd));362}363364/* Tells us if the item is currently linked */365static inline int ep_is_linked(struct epitem *epi)366{367return !list_empty(&epi->rdllink);368}369370static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_entry_t *p)371{372return container_of(p, struct eppoll_entry, wait);373}374375/* Get the "struct epitem" from a wait queue pointer */376static inline struct epitem *ep_item_from_wait(wait_queue_entry_t *p)377{378return container_of(p, struct eppoll_entry, wait)->base;379}380381/**382* ep_events_available - Checks if ready events might be available.383*384* @ep: Pointer to the eventpoll context.385*386* Return: a value different than %zero if ready events are available,387* or %zero otherwise.388*/389static inline int ep_events_available(struct eventpoll *ep)390{391return !list_empty_careful(&ep->rdllist) ||392READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR;393}394395#ifdef CONFIG_NET_RX_BUSY_POLL396/**397* busy_loop_ep_timeout - check if busy poll has timed out. The timeout value398* from the epoll instance ep is preferred, but if it is not set fallback to399* the system-wide global via busy_loop_timeout.400*401* @start_time: The start time used to compute the remaining time until timeout.402* @ep: Pointer to the eventpoll context.403*404* Return: true if the timeout has expired, false otherwise.405*/406static bool busy_loop_ep_timeout(unsigned long start_time,407struct eventpoll *ep)408{409unsigned long bp_usec = READ_ONCE(ep->busy_poll_usecs);410411if (bp_usec) {412unsigned long end_time = start_time + bp_usec;413unsigned long now = busy_loop_current_time();414415return time_after(now, end_time);416} else {417return busy_loop_timeout(start_time);418}419}420421static bool ep_busy_loop_on(struct eventpoll *ep)422{423return !!READ_ONCE(ep->busy_poll_usecs) ||424READ_ONCE(ep->prefer_busy_poll) ||425net_busy_loop_on();426}427428static bool ep_busy_loop_end(void *p, unsigned long start_time)429{430struct eventpoll *ep = p;431432return ep_events_available(ep) || busy_loop_ep_timeout(start_time, ep);433}434435/*436* Busy poll if globally on and supporting sockets found && no events,437* busy loop will return if need_resched or ep_events_available.438*439* we must do our busy polling with irqs enabled440*/441static bool ep_busy_loop(struct eventpoll *ep)442{443unsigned int napi_id = READ_ONCE(ep->napi_id);444u16 budget = READ_ONCE(ep->busy_poll_budget);445bool prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);446447if (!budget)448budget = BUSY_POLL_BUDGET;449450if (napi_id_valid(napi_id) && ep_busy_loop_on(ep)) {451napi_busy_loop(napi_id, ep_busy_loop_end,452ep, prefer_busy_poll, budget);453if (ep_events_available(ep))454return true;455/*456* Busy poll timed out. Drop NAPI ID for now, we can add457* it back in when we have moved a socket with a valid NAPI458* ID onto the ready list.459*/460if (prefer_busy_poll)461napi_resume_irqs(napi_id);462ep->napi_id = 0;463return false;464}465return false;466}467468/*469* Set epoll busy poll NAPI ID from sk.470*/471static inline void ep_set_busy_poll_napi_id(struct epitem *epi)472{473struct eventpoll *ep = epi->ep;474unsigned int napi_id;475struct socket *sock;476struct sock *sk;477478if (!ep_busy_loop_on(ep))479return;480481sock = sock_from_file(epi->ffd.file);482if (!sock)483return;484485sk = sock->sk;486if (!sk)487return;488489napi_id = READ_ONCE(sk->sk_napi_id);490491/* Non-NAPI IDs can be rejected492* or493* Nothing to do if we already have this ID494*/495if (!napi_id_valid(napi_id) || napi_id == ep->napi_id)496return;497498/* record NAPI ID for use in next busy poll */499ep->napi_id = napi_id;500}501502static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,503unsigned long arg)504{505struct eventpoll *ep = file->private_data;506void __user *uarg = (void __user *)arg;507struct epoll_params epoll_params;508509switch (cmd) {510case EPIOCSPARAMS:511if (copy_from_user(&epoll_params, uarg, sizeof(epoll_params)))512return -EFAULT;513514/* pad byte must be zero */515if (epoll_params.__pad)516return -EINVAL;517518if (epoll_params.busy_poll_usecs > S32_MAX)519return -EINVAL;520521if (epoll_params.prefer_busy_poll > 1)522return -EINVAL;523524if (epoll_params.busy_poll_budget > NAPI_POLL_WEIGHT &&525!capable(CAP_NET_ADMIN))526return -EPERM;527528WRITE_ONCE(ep->busy_poll_usecs, epoll_params.busy_poll_usecs);529WRITE_ONCE(ep->busy_poll_budget, epoll_params.busy_poll_budget);530WRITE_ONCE(ep->prefer_busy_poll, epoll_params.prefer_busy_poll);531return 0;532case EPIOCGPARAMS:533memset(&epoll_params, 0, sizeof(epoll_params));534epoll_params.busy_poll_usecs = READ_ONCE(ep->busy_poll_usecs);535epoll_params.busy_poll_budget = READ_ONCE(ep->busy_poll_budget);536epoll_params.prefer_busy_poll = READ_ONCE(ep->prefer_busy_poll);537if (copy_to_user(uarg, &epoll_params, sizeof(epoll_params)))538return -EFAULT;539return 0;540default:541return -ENOIOCTLCMD;542}543}544545static void ep_suspend_napi_irqs(struct eventpoll *ep)546{547unsigned int napi_id = READ_ONCE(ep->napi_id);548549if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))550napi_suspend_irqs(napi_id);551}552553static void ep_resume_napi_irqs(struct eventpoll *ep)554{555unsigned int napi_id = READ_ONCE(ep->napi_id);556557if (napi_id_valid(napi_id) && READ_ONCE(ep->prefer_busy_poll))558napi_resume_irqs(napi_id);559}560561#else562563static inline bool ep_busy_loop(struct eventpoll *ep)564{565return false;566}567568static inline void ep_set_busy_poll_napi_id(struct epitem *epi)569{570}571572static long ep_eventpoll_bp_ioctl(struct file *file, unsigned int cmd,573unsigned long arg)574{575return -EOPNOTSUPP;576}577578static void ep_suspend_napi_irqs(struct eventpoll *ep)579{580}581582static void ep_resume_napi_irqs(struct eventpoll *ep)583{584}585586#endif /* CONFIG_NET_RX_BUSY_POLL */587588/*589* As described in commit 0ccf831cb lockdep: annotate epoll590* the use of wait queues used by epoll is done in a very controlled591* manner. Wake ups can nest inside each other, but are never done592* with the same locking. For example:593*594* dfd = socket(...);595* efd1 = epoll_create();596* efd2 = epoll_create();597* epoll_ctl(efd1, EPOLL_CTL_ADD, dfd, ...);598* epoll_ctl(efd2, EPOLL_CTL_ADD, efd1, ...);599*600* When a packet arrives to the device underneath "dfd", the net code will601* issue a wake_up() on its poll wake list. Epoll (efd1) has installed a602* callback wakeup entry on that queue, and the wake_up() performed by the603* "dfd" net code will end up in ep_poll_callback(). At this point epoll604* (efd1) notices that it may have some event ready, so it needs to wake up605* the waiters on its poll wait list (efd2). So it calls ep_poll_safewake()606* that ends up in another wake_up(), after having checked about the607* recursion constraints. That are, no more than EP_MAX_NESTS, to avoid608* stack blasting.609*610* When CONFIG_DEBUG_LOCK_ALLOC is enabled, make sure lockdep can handle611* this special case of epoll.612*/613#ifdef CONFIG_DEBUG_LOCK_ALLOC614615static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,616unsigned pollflags)617{618struct eventpoll *ep_src;619unsigned long flags;620u8 nests = 0;621622/*623* To set the subclass or nesting level for spin_lock_irqsave_nested()624* it might be natural to create a per-cpu nest count. However, since625* we can recurse on ep->poll_wait.lock, and a non-raw spinlock can626* schedule() in the -rt kernel, the per-cpu variable are no longer627* protected. Thus, we are introducing a per eventpoll nest field.628* If we are not being call from ep_poll_callback(), epi is NULL and629* we are at the first level of nesting, 0. Otherwise, we are being630* called from ep_poll_callback() and if a previous wakeup source is631* not an epoll file itself, we are at depth 1 since the wakeup source632* is depth 0. If the wakeup source is a previous epoll file in the633* wakeup chain then we use its nests value and record ours as634* nests + 1. The previous epoll file nests value is stable since its635* already holding its own poll_wait.lock.636*/637if (epi) {638if ((is_file_epoll(epi->ffd.file))) {639ep_src = epi->ffd.file->private_data;640nests = ep_src->nests;641} else {642nests = 1;643}644}645spin_lock_irqsave_nested(&ep->poll_wait.lock, flags, nests);646ep->nests = nests + 1;647wake_up_locked_poll(&ep->poll_wait, EPOLLIN | pollflags);648ep->nests = 0;649spin_unlock_irqrestore(&ep->poll_wait.lock, flags);650}651652#else653654static void ep_poll_safewake(struct eventpoll *ep, struct epitem *epi,655__poll_t pollflags)656{657wake_up_poll(&ep->poll_wait, EPOLLIN | pollflags);658}659660#endif661662static void ep_remove_wait_queue(struct eppoll_entry *pwq)663{664wait_queue_head_t *whead;665666rcu_read_lock();667/*668* If it is cleared by POLLFREE, it should be rcu-safe.669* If we read NULL we need a barrier paired with670* smp_store_release() in ep_poll_callback(), otherwise671* we rely on whead->lock.672*/673whead = smp_load_acquire(&pwq->whead);674if (whead)675remove_wait_queue(whead, &pwq->wait);676rcu_read_unlock();677}678679/*680* This function unregisters poll callbacks from the associated file681* descriptor. Must be called with "mtx" held.682*/683static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi)684{685struct eppoll_entry **p = &epi->pwqlist;686struct eppoll_entry *pwq;687688while ((pwq = *p) != NULL) {689*p = pwq->next;690ep_remove_wait_queue(pwq);691kmem_cache_free(pwq_cache, pwq);692}693}694695/* call only when ep->mtx is held */696static inline struct wakeup_source *ep_wakeup_source(struct epitem *epi)697{698return rcu_dereference_check(epi->ws, lockdep_is_held(&epi->ep->mtx));699}700701/* call only when ep->mtx is held */702static inline void ep_pm_stay_awake(struct epitem *epi)703{704struct wakeup_source *ws = ep_wakeup_source(epi);705706if (ws)707__pm_stay_awake(ws);708}709710static inline bool ep_has_wakeup_source(struct epitem *epi)711{712return rcu_access_pointer(epi->ws) ? true : false;713}714715/* call when ep->mtx cannot be held (ep_poll_callback) */716static inline void ep_pm_stay_awake_rcu(struct epitem *epi)717{718struct wakeup_source *ws;719720rcu_read_lock();721ws = rcu_dereference(epi->ws);722if (ws)723__pm_stay_awake(ws);724rcu_read_unlock();725}726727728/*729* ep->mutex needs to be held because we could be hit by730* eventpoll_release_file() and epoll_ctl().731*/732static void ep_start_scan(struct eventpoll *ep, struct list_head *txlist)733{734/*735* Steal the ready list, and re-init the original one to the736* empty list. Also, set ep->ovflist to NULL so that events737* happening while looping w/out locks, are not lost. We cannot738* have the poll callback to queue directly on ep->rdllist,739* because we want the "sproc" callback to be able to do it740* in a lockless way.741*/742lockdep_assert_irqs_enabled();743spin_lock_irq(&ep->lock);744list_splice_init(&ep->rdllist, txlist);745WRITE_ONCE(ep->ovflist, NULL);746spin_unlock_irq(&ep->lock);747}748749static void ep_done_scan(struct eventpoll *ep,750struct list_head *txlist)751{752struct epitem *epi, *nepi;753754spin_lock_irq(&ep->lock);755/*756* During the time we spent inside the "sproc" callback, some757* other events might have been queued by the poll callback.758* We re-insert them inside the main ready-list here.759*/760for (nepi = READ_ONCE(ep->ovflist); (epi = nepi) != NULL;761nepi = epi->next, epi->next = EP_UNACTIVE_PTR) {762/*763* We need to check if the item is already in the list.764* During the "sproc" callback execution time, items are765* queued into ->ovflist but the "txlist" might already766* contain them, and the list_splice() below takes care of them.767*/768if (!ep_is_linked(epi)) {769/*770* ->ovflist is LIFO, so we have to reverse it in order771* to keep in FIFO.772*/773list_add(&epi->rdllink, &ep->rdllist);774ep_pm_stay_awake(epi);775}776}777/*778* We need to set back ep->ovflist to EP_UNACTIVE_PTR, so that after779* releasing the lock, events will be queued in the normal way inside780* ep->rdllist.781*/782WRITE_ONCE(ep->ovflist, EP_UNACTIVE_PTR);783784/*785* Quickly re-inject items left on "txlist".786*/787list_splice(txlist, &ep->rdllist);788__pm_relax(ep->ws);789790if (!list_empty(&ep->rdllist)) {791if (waitqueue_active(&ep->wq))792wake_up(&ep->wq);793}794795spin_unlock_irq(&ep->lock);796}797798static void ep_get(struct eventpoll *ep)799{800refcount_inc(&ep->refcount);801}802803/*804* Returns true if the event poll can be disposed805*/806static bool ep_refcount_dec_and_test(struct eventpoll *ep)807{808if (!refcount_dec_and_test(&ep->refcount))809return false;810811WARN_ON_ONCE(!RB_EMPTY_ROOT(&ep->rbr.rb_root));812return true;813}814815static void ep_free(struct eventpoll *ep)816{817ep_resume_napi_irqs(ep);818mutex_destroy(&ep->mtx);819free_uid(ep->user);820wakeup_source_unregister(ep->ws);821kfree(ep);822}823824/*825* Removes a "struct epitem" from the eventpoll RB tree and deallocates826* all the associated resources. Must be called with "mtx" held.827* If the dying flag is set, do the removal only if force is true.828* This prevents ep_clear_and_put() from dropping all the ep references829* while running concurrently with eventpoll_release_file().830* Returns true if the eventpoll can be disposed.831*/832static bool __ep_remove(struct eventpoll *ep, struct epitem *epi, bool force)833{834struct file *file = epi->ffd.file;835struct epitems_head *to_free;836struct hlist_head *head;837838lockdep_assert_irqs_enabled();839840/*841* Removes poll wait queue hooks.842*/843ep_unregister_pollwait(ep, epi);844845/* Remove the current item from the list of epoll hooks */846spin_lock(&file->f_lock);847if (epi->dying && !force) {848spin_unlock(&file->f_lock);849return false;850}851852to_free = NULL;853head = file->f_ep;854if (head->first == &epi->fllink && !epi->fllink.next) {855/* See eventpoll_release() for details. */856WRITE_ONCE(file->f_ep, NULL);857if (!is_file_epoll(file)) {858struct epitems_head *v;859v = container_of(head, struct epitems_head, epitems);860if (!smp_load_acquire(&v->next))861to_free = v;862}863}864hlist_del_rcu(&epi->fllink);865spin_unlock(&file->f_lock);866free_ephead(to_free);867868rb_erase_cached(&epi->rbn, &ep->rbr);869870spin_lock_irq(&ep->lock);871if (ep_is_linked(epi))872list_del_init(&epi->rdllink);873spin_unlock_irq(&ep->lock);874875wakeup_source_unregister(ep_wakeup_source(epi));876/*877* At this point it is safe to free the eventpoll item. Use the union878* field epi->rcu, since we are trying to minimize the size of879* 'struct epitem'. The 'rbn' field is no longer in use. Protected by880* ep->mtx. The rcu read side, reverse_path_check_proc(), does not make881* use of the rbn field.882*/883kfree_rcu(epi, rcu);884885percpu_counter_dec(&ep->user->epoll_watches);886return true;887}888889/*890* ep_remove variant for callers owing an additional reference to the ep891*/892static void ep_remove_safe(struct eventpoll *ep, struct epitem *epi)893{894if (__ep_remove(ep, epi, false))895WARN_ON_ONCE(ep_refcount_dec_and_test(ep));896}897898static void ep_clear_and_put(struct eventpoll *ep)899{900struct rb_node *rbp, *next;901struct epitem *epi;902903/* We need to release all tasks waiting for these file */904if (waitqueue_active(&ep->poll_wait))905ep_poll_safewake(ep, NULL, 0);906907mutex_lock(&ep->mtx);908909/*910* Walks through the whole tree by unregistering poll callbacks.911*/912for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {913epi = rb_entry(rbp, struct epitem, rbn);914915ep_unregister_pollwait(ep, epi);916cond_resched();917}918919/*920* Walks through the whole tree and try to free each "struct epitem".921* Note that ep_remove_safe() will not remove the epitem in case of a922* racing eventpoll_release_file(); the latter will do the removal.923* At this point we are sure no poll callbacks will be lingering around.924* Since we still own a reference to the eventpoll struct, the loop can't925* dispose it.926*/927for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = next) {928next = rb_next(rbp);929epi = rb_entry(rbp, struct epitem, rbn);930ep_remove_safe(ep, epi);931cond_resched();932}933934mutex_unlock(&ep->mtx);935if (ep_refcount_dec_and_test(ep))936ep_free(ep);937}938939static long ep_eventpoll_ioctl(struct file *file, unsigned int cmd,940unsigned long arg)941{942int ret;943944if (!is_file_epoll(file))945return -EINVAL;946947switch (cmd) {948case EPIOCSPARAMS:949case EPIOCGPARAMS:950ret = ep_eventpoll_bp_ioctl(file, cmd, arg);951break;952default:953ret = -EINVAL;954break;955}956957return ret;958}959960static int ep_eventpoll_release(struct inode *inode, struct file *file)961{962struct eventpoll *ep = file->private_data;963964if (ep)965ep_clear_and_put(ep);966967return 0;968}969970static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt, int depth);971972static __poll_t __ep_eventpoll_poll(struct file *file, poll_table *wait, int depth)973{974struct eventpoll *ep = file->private_data;975LIST_HEAD(txlist);976struct epitem *epi, *tmp;977poll_table pt;978__poll_t res = 0;979980init_poll_funcptr(&pt, NULL);981982/* Insert inside our poll wait queue */983poll_wait(file, &ep->poll_wait, wait);984985/*986* Proceed to find out if wanted events are really available inside987* the ready list.988*/989mutex_lock_nested(&ep->mtx, depth);990ep_start_scan(ep, &txlist);991list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {992if (ep_item_poll(epi, &pt, depth + 1)) {993res = EPOLLIN | EPOLLRDNORM;994break;995} else {996/*997* Item has been dropped into the ready list by the poll998* callback, but it's not actually ready, as far as999* caller requested events goes. We can remove it here.1000*/1001__pm_relax(ep_wakeup_source(epi));1002list_del_init(&epi->rdllink);1003}1004}1005ep_done_scan(ep, &txlist);1006mutex_unlock(&ep->mtx);1007return res;1008}10091010/*1011* The ffd.file pointer may be in the process of being torn down due to1012* being closed, but we may not have finished eventpoll_release() yet.1013*1014* Normally, even with the atomic_long_inc_not_zero, the file may have1015* been free'd and then gotten re-allocated to something else (since1016* files are not RCU-delayed, they are SLAB_TYPESAFE_BY_RCU).1017*1018* But for epoll, users hold the ep->mtx mutex, and as such any file in1019* the process of being free'd will block in eventpoll_release_file()1020* and thus the underlying file allocation will not be free'd, and the1021* file re-use cannot happen.1022*1023* For the same reason we can avoid a rcu_read_lock() around the1024* operation - 'ffd.file' cannot go away even if the refcount has1025* reached zero (but we must still not call out to ->poll() functions1026* etc).1027*/1028static struct file *epi_fget(const struct epitem *epi)1029{1030struct file *file;10311032file = epi->ffd.file;1033if (!file_ref_get(&file->f_ref))1034file = NULL;1035return file;1036}10371038/*1039* Differs from ep_eventpoll_poll() in that internal callers already have1040* the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()1041* is correctly annotated.1042*/1043static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,1044int depth)1045{1046struct file *file = epi_fget(epi);1047__poll_t res;10481049/*1050* We could return EPOLLERR | EPOLLHUP or something, but let's1051* treat this more as "file doesn't exist, poll didn't happen".1052*/1053if (!file)1054return 0;10551056pt->_key = epi->event.events;1057if (!is_file_epoll(file))1058res = vfs_poll(file, pt);1059else1060res = __ep_eventpoll_poll(file, pt, depth);1061fput(file);1062return res & epi->event.events;1063}10641065static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)1066{1067return __ep_eventpoll_poll(file, wait, 0);1068}10691070#ifdef CONFIG_PROC_FS1071static void ep_show_fdinfo(struct seq_file *m, struct file *f)1072{1073struct eventpoll *ep = f->private_data;1074struct rb_node *rbp;10751076mutex_lock(&ep->mtx);1077for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {1078struct epitem *epi = rb_entry(rbp, struct epitem, rbn);1079struct inode *inode = file_inode(epi->ffd.file);10801081seq_printf(m, "tfd: %8d events: %8x data: %16llx "1082" pos:%lli ino:%lx sdev:%x\n",1083epi->ffd.fd, epi->event.events,1084(long long)epi->event.data,1085(long long)epi->ffd.file->f_pos,1086inode->i_ino, inode->i_sb->s_dev);1087if (seq_has_overflowed(m))1088break;1089}1090mutex_unlock(&ep->mtx);1091}1092#endif10931094/* File callbacks that implement the eventpoll file behaviour */1095static const struct file_operations eventpoll_fops = {1096#ifdef CONFIG_PROC_FS1097.show_fdinfo = ep_show_fdinfo,1098#endif1099.release = ep_eventpoll_release,1100.poll = ep_eventpoll_poll,1101.llseek = noop_llseek,1102.unlocked_ioctl = ep_eventpoll_ioctl,1103.compat_ioctl = compat_ptr_ioctl,1104};11051106/*1107* This is called from eventpoll_release() to unlink files from the eventpoll1108* interface. We need to have this facility to cleanup correctly files that are1109* closed without being removed from the eventpoll interface.1110*/1111void eventpoll_release_file(struct file *file)1112{1113struct eventpoll *ep;1114struct epitem *epi;1115bool dispose;11161117/*1118* Use the 'dying' flag to prevent a concurrent ep_clear_and_put() from1119* touching the epitems list before eventpoll_release_file() can access1120* the ep->mtx.1121*/1122again:1123spin_lock(&file->f_lock);1124if (file->f_ep && file->f_ep->first) {1125epi = hlist_entry(file->f_ep->first, struct epitem, fllink);1126epi->dying = true;1127spin_unlock(&file->f_lock);11281129/*1130* ep access is safe as we still own a reference to the ep1131* struct1132*/1133ep = epi->ep;1134mutex_lock(&ep->mtx);1135dispose = __ep_remove(ep, epi, true);1136mutex_unlock(&ep->mtx);11371138if (dispose && ep_refcount_dec_and_test(ep))1139ep_free(ep);1140goto again;1141}1142spin_unlock(&file->f_lock);1143}11441145static int ep_alloc(struct eventpoll **pep)1146{1147struct eventpoll *ep;11481149ep = kzalloc(sizeof(*ep), GFP_KERNEL);1150if (unlikely(!ep))1151return -ENOMEM;11521153mutex_init(&ep->mtx);1154spin_lock_init(&ep->lock);1155init_waitqueue_head(&ep->wq);1156init_waitqueue_head(&ep->poll_wait);1157INIT_LIST_HEAD(&ep->rdllist);1158ep->rbr = RB_ROOT_CACHED;1159ep->ovflist = EP_UNACTIVE_PTR;1160ep->user = get_current_user();1161refcount_set(&ep->refcount, 1);11621163*pep = ep;11641165return 0;1166}11671168/*1169* Search the file inside the eventpoll tree. The RB tree operations1170* are protected by the "mtx" mutex, and ep_find() must be called with1171* "mtx" held.1172*/1173static struct epitem *ep_find(struct eventpoll *ep, struct file *file, int fd)1174{1175int kcmp;1176struct rb_node *rbp;1177struct epitem *epi, *epir = NULL;1178struct epoll_filefd ffd;11791180ep_set_ffd(&ffd, file, fd);1181for (rbp = ep->rbr.rb_root.rb_node; rbp; ) {1182epi = rb_entry(rbp, struct epitem, rbn);1183kcmp = ep_cmp_ffd(&ffd, &epi->ffd);1184if (kcmp > 0)1185rbp = rbp->rb_right;1186else if (kcmp < 0)1187rbp = rbp->rb_left;1188else {1189epir = epi;1190break;1191}1192}11931194return epir;1195}11961197#ifdef CONFIG_KCMP1198static struct epitem *ep_find_tfd(struct eventpoll *ep, int tfd, unsigned long toff)1199{1200struct rb_node *rbp;1201struct epitem *epi;12021203for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {1204epi = rb_entry(rbp, struct epitem, rbn);1205if (epi->ffd.fd == tfd) {1206if (toff == 0)1207return epi;1208else1209toff--;1210}1211cond_resched();1212}12131214return NULL;1215}12161217struct file *get_epoll_tfile_raw_ptr(struct file *file, int tfd,1218unsigned long toff)1219{1220struct file *file_raw;1221struct eventpoll *ep;1222struct epitem *epi;12231224if (!is_file_epoll(file))1225return ERR_PTR(-EINVAL);12261227ep = file->private_data;12281229mutex_lock(&ep->mtx);1230epi = ep_find_tfd(ep, tfd, toff);1231if (epi)1232file_raw = epi->ffd.file;1233else1234file_raw = ERR_PTR(-ENOENT);1235mutex_unlock(&ep->mtx);12361237return file_raw;1238}1239#endif /* CONFIG_KCMP */12401241/*1242* This is the callback that is passed to the wait queue wakeup1243* mechanism. It is called by the stored file descriptors when they1244* have events to report.1245*/1246static int ep_poll_callback(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)1247{1248int pwake = 0;1249struct epitem *epi = ep_item_from_wait(wait);1250struct eventpoll *ep = epi->ep;1251__poll_t pollflags = key_to_poll(key);1252unsigned long flags;1253int ewake = 0;12541255spin_lock_irqsave(&ep->lock, flags);12561257ep_set_busy_poll_napi_id(epi);12581259/*1260* If the event mask does not contain any poll(2) event, we consider the1261* descriptor to be disabled. This condition is likely the effect of the1262* EPOLLONESHOT bit that disables the descriptor when an event is received,1263* until the next EPOLL_CTL_MOD will be issued.1264*/1265if (!(epi->event.events & ~EP_PRIVATE_BITS))1266goto out_unlock;12671268/*1269* Check the events coming with the callback. At this stage, not1270* every device reports the events in the "key" parameter of the1271* callback. We need to be able to handle both cases here, hence the1272* test for "key" != NULL before the event match test.1273*/1274if (pollflags && !(pollflags & epi->event.events))1275goto out_unlock;12761277/*1278* If we are transferring events to userspace, we can hold no locks1279* (because we're accessing user memory, and because of linux f_op->poll()1280* semantics). All the events that happen during that period of time are1281* chained in ep->ovflist and requeued later on.1282*/1283if (READ_ONCE(ep->ovflist) != EP_UNACTIVE_PTR) {1284if (epi->next == EP_UNACTIVE_PTR) {1285epi->next = READ_ONCE(ep->ovflist);1286WRITE_ONCE(ep->ovflist, epi);1287ep_pm_stay_awake_rcu(epi);1288}1289} else if (!ep_is_linked(epi)) {1290/* In the usual case, add event to ready list. */1291list_add_tail(&epi->rdllink, &ep->rdllist);1292ep_pm_stay_awake_rcu(epi);1293}12941295/*1296* Wake up ( if active ) both the eventpoll wait list and the ->poll()1297* wait list.1298*/1299if (waitqueue_active(&ep->wq)) {1300if ((epi->event.events & EPOLLEXCLUSIVE) &&1301!(pollflags & POLLFREE)) {1302switch (pollflags & EPOLLINOUT_BITS) {1303case EPOLLIN:1304if (epi->event.events & EPOLLIN)1305ewake = 1;1306break;1307case EPOLLOUT:1308if (epi->event.events & EPOLLOUT)1309ewake = 1;1310break;1311case 0:1312ewake = 1;1313break;1314}1315}1316if (sync)1317wake_up_sync(&ep->wq);1318else1319wake_up(&ep->wq);1320}1321if (waitqueue_active(&ep->poll_wait))1322pwake++;13231324out_unlock:1325spin_unlock_irqrestore(&ep->lock, flags);13261327/* We have to call this outside the lock */1328if (pwake)1329ep_poll_safewake(ep, epi, pollflags & EPOLL_URING_WAKE);13301331if (!(epi->event.events & EPOLLEXCLUSIVE))1332ewake = 1;13331334if (pollflags & POLLFREE) {1335/*1336* If we race with ep_remove_wait_queue() it can miss1337* ->whead = NULL and do another remove_wait_queue() after1338* us, so we can't use __remove_wait_queue().1339*/1340list_del_init(&wait->entry);1341/*1342* ->whead != NULL protects us from the race with1343* ep_clear_and_put() or ep_remove(), ep_remove_wait_queue()1344* takes whead->lock held by the caller. Once we nullify it,1345* nothing protects ep/epi or even wait.1346*/1347smp_store_release(&ep_pwq_from_wait(wait)->whead, NULL);1348}13491350return ewake;1351}13521353/*1354* This is the callback that is used to add our wait queue to the1355* target file wakeup lists.1356*/1357static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,1358poll_table *pt)1359{1360struct ep_pqueue *epq = container_of(pt, struct ep_pqueue, pt);1361struct epitem *epi = epq->epi;1362struct eppoll_entry *pwq;13631364if (unlikely(!epi)) // an earlier allocation has failed1365return;13661367pwq = kmem_cache_alloc(pwq_cache, GFP_KERNEL);1368if (unlikely(!pwq)) {1369epq->epi = NULL;1370return;1371}13721373init_waitqueue_func_entry(&pwq->wait, ep_poll_callback);1374pwq->whead = whead;1375pwq->base = epi;1376if (epi->event.events & EPOLLEXCLUSIVE)1377add_wait_queue_exclusive(whead, &pwq->wait);1378else1379add_wait_queue(whead, &pwq->wait);1380pwq->next = epi->pwqlist;1381epi->pwqlist = pwq;1382}13831384static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi)1385{1386int kcmp;1387struct rb_node **p = &ep->rbr.rb_root.rb_node, *parent = NULL;1388struct epitem *epic;1389bool leftmost = true;13901391while (*p) {1392parent = *p;1393epic = rb_entry(parent, struct epitem, rbn);1394kcmp = ep_cmp_ffd(&epi->ffd, &epic->ffd);1395if (kcmp > 0) {1396p = &parent->rb_right;1397leftmost = false;1398} else1399p = &parent->rb_left;1400}1401rb_link_node(&epi->rbn, parent, p);1402rb_insert_color_cached(&epi->rbn, &ep->rbr, leftmost);1403}1404140514061407#define PATH_ARR_SIZE 51408/*1409* These are the number paths of length 1 to 5, that we are allowing to emanate1410* from a single file of interest. For example, we allow 1000 paths of length1411* 1, to emanate from each file of interest. This essentially represents the1412* potential wakeup paths, which need to be limited in order to avoid massive1413* uncontrolled wakeup storms. The common use case should be a single ep which1414* is connected to n file sources. In this case each file source has 1 path1415* of length 1. Thus, the numbers below should be more than sufficient. These1416* path limits are enforced during an EPOLL_CTL_ADD operation, since a modify1417* and delete can't add additional paths. Protected by the epnested_mutex.1418*/1419static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 };1420static int path_count[PATH_ARR_SIZE];14211422static int path_count_inc(int nests)1423{1424/* Allow an arbitrary number of depth 1 paths */1425if (nests == 0)1426return 0;14271428if (++path_count[nests] > path_limits[nests])1429return -1;1430return 0;1431}14321433static void path_count_init(void)1434{1435int i;14361437for (i = 0; i < PATH_ARR_SIZE; i++)1438path_count[i] = 0;1439}14401441static int reverse_path_check_proc(struct hlist_head *refs, int depth)1442{1443int error = 0;1444struct epitem *epi;14451446if (depth > EP_MAX_NESTS) /* too deep nesting */1447return -1;14481449/* CTL_DEL can remove links here, but that can't increase our count */1450hlist_for_each_entry_rcu(epi, refs, fllink) {1451struct hlist_head *refs = &epi->ep->refs;1452if (hlist_empty(refs))1453error = path_count_inc(depth);1454else1455error = reverse_path_check_proc(refs, depth + 1);1456if (error != 0)1457break;1458}1459return error;1460}14611462/**1463* reverse_path_check - The tfile_check_list is list of epitem_head, which have1464* links that are proposed to be newly added. We need to1465* make sure that those added links don't add too many1466* paths such that we will spend all our time waking up1467* eventpoll objects.1468*1469* Return: %zero if the proposed links don't create too many paths,1470* %-1 otherwise.1471*/1472static int reverse_path_check(void)1473{1474struct epitems_head *p;14751476for (p = tfile_check_list; p != EP_UNACTIVE_PTR; p = p->next) {1477int error;1478path_count_init();1479rcu_read_lock();1480error = reverse_path_check_proc(&p->epitems, 0);1481rcu_read_unlock();1482if (error)1483return error;1484}1485return 0;1486}14871488static int ep_create_wakeup_source(struct epitem *epi)1489{1490struct name_snapshot n;1491struct wakeup_source *ws;14921493if (!epi->ep->ws) {1494epi->ep->ws = wakeup_source_register(NULL, "eventpoll");1495if (!epi->ep->ws)1496return -ENOMEM;1497}14981499take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry);1500ws = wakeup_source_register(NULL, n.name.name);1501release_dentry_name_snapshot(&n);15021503if (!ws)1504return -ENOMEM;1505rcu_assign_pointer(epi->ws, ws);15061507return 0;1508}15091510/* rare code path, only used when EPOLL_CTL_MOD removes a wakeup source */1511static noinline void ep_destroy_wakeup_source(struct epitem *epi)1512{1513struct wakeup_source *ws = ep_wakeup_source(epi);15141515RCU_INIT_POINTER(epi->ws, NULL);15161517/*1518* wait for ep_pm_stay_awake_rcu to finish, synchronize_rcu is1519* used internally by wakeup_source_remove, too (called by1520* wakeup_source_unregister), so we cannot use call_rcu1521*/1522synchronize_rcu();1523wakeup_source_unregister(ws);1524}15251526static int attach_epitem(struct file *file, struct epitem *epi)1527{1528struct epitems_head *to_free = NULL;1529struct hlist_head *head = NULL;1530struct eventpoll *ep = NULL;15311532if (is_file_epoll(file))1533ep = file->private_data;15341535if (ep) {1536head = &ep->refs;1537} else if (!READ_ONCE(file->f_ep)) {1538allocate:1539to_free = kmem_cache_zalloc(ephead_cache, GFP_KERNEL);1540if (!to_free)1541return -ENOMEM;1542head = &to_free->epitems;1543}1544spin_lock(&file->f_lock);1545if (!file->f_ep) {1546if (unlikely(!head)) {1547spin_unlock(&file->f_lock);1548goto allocate;1549}1550/* See eventpoll_release() for details. */1551WRITE_ONCE(file->f_ep, head);1552to_free = NULL;1553}1554hlist_add_head_rcu(&epi->fllink, file->f_ep);1555spin_unlock(&file->f_lock);1556free_ephead(to_free);1557return 0;1558}15591560/*1561* Must be called with "mtx" held.1562*/1563static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,1564struct file *tfile, int fd, int full_check)1565{1566int error, pwake = 0;1567__poll_t revents;1568struct epitem *epi;1569struct ep_pqueue epq;1570struct eventpoll *tep = NULL;15711572if (is_file_epoll(tfile))1573tep = tfile->private_data;15741575lockdep_assert_irqs_enabled();15761577if (unlikely(percpu_counter_compare(&ep->user->epoll_watches,1578max_user_watches) >= 0))1579return -ENOSPC;1580percpu_counter_inc(&ep->user->epoll_watches);15811582if (!(epi = kmem_cache_zalloc(epi_cache, GFP_KERNEL))) {1583percpu_counter_dec(&ep->user->epoll_watches);1584return -ENOMEM;1585}15861587/* Item initialization follow here ... */1588INIT_LIST_HEAD(&epi->rdllink);1589epi->ep = ep;1590ep_set_ffd(&epi->ffd, tfile, fd);1591epi->event = *event;1592epi->next = EP_UNACTIVE_PTR;15931594if (tep)1595mutex_lock_nested(&tep->mtx, 1);1596/* Add the current item to the list of active epoll hook for this file */1597if (unlikely(attach_epitem(tfile, epi) < 0)) {1598if (tep)1599mutex_unlock(&tep->mtx);1600kmem_cache_free(epi_cache, epi);1601percpu_counter_dec(&ep->user->epoll_watches);1602return -ENOMEM;1603}16041605if (full_check && !tep)1606list_file(tfile);16071608/*1609* Add the current item to the RB tree. All RB tree operations are1610* protected by "mtx", and ep_insert() is called with "mtx" held.1611*/1612ep_rbtree_insert(ep, epi);1613if (tep)1614mutex_unlock(&tep->mtx);16151616/*1617* ep_remove_safe() calls in the later error paths can't lead to1618* ep_free() as the ep file itself still holds an ep reference.1619*/1620ep_get(ep);16211622/* now check if we've created too many backpaths */1623if (unlikely(full_check && reverse_path_check())) {1624ep_remove_safe(ep, epi);1625return -EINVAL;1626}16271628if (epi->event.events & EPOLLWAKEUP) {1629error = ep_create_wakeup_source(epi);1630if (error) {1631ep_remove_safe(ep, epi);1632return error;1633}1634}16351636/* Initialize the poll table using the queue callback */1637epq.epi = epi;1638init_poll_funcptr(&epq.pt, ep_ptable_queue_proc);16391640/*1641* Attach the item to the poll hooks and get current event bits.1642* We can safely use the file* here because its usage count has1643* been increased by the caller of this function. Note that after1644* this operation completes, the poll callback can start hitting1645* the new item.1646*/1647revents = ep_item_poll(epi, &epq.pt, 1);16481649/*1650* We have to check if something went wrong during the poll wait queue1651* install process. Namely an allocation for a wait queue failed due1652* high memory pressure.1653*/1654if (unlikely(!epq.epi)) {1655ep_remove_safe(ep, epi);1656return -ENOMEM;1657}16581659/* We have to drop the new item inside our item list to keep track of it */1660spin_lock_irq(&ep->lock);16611662/* record NAPI ID of new item if present */1663ep_set_busy_poll_napi_id(epi);16641665/* If the file is already "ready" we drop it inside the ready list */1666if (revents && !ep_is_linked(epi)) {1667list_add_tail(&epi->rdllink, &ep->rdllist);1668ep_pm_stay_awake(epi);16691670/* Notify waiting tasks that events are available */1671if (waitqueue_active(&ep->wq))1672wake_up(&ep->wq);1673if (waitqueue_active(&ep->poll_wait))1674pwake++;1675}16761677spin_unlock_irq(&ep->lock);16781679/* We have to call this outside the lock */1680if (pwake)1681ep_poll_safewake(ep, NULL, 0);16821683return 0;1684}16851686/*1687* Modify the interest event mask by dropping an event if the new mask1688* has a match in the current file status. Must be called with "mtx" held.1689*/1690static int ep_modify(struct eventpoll *ep, struct epitem *epi,1691const struct epoll_event *event)1692{1693int pwake = 0;1694poll_table pt;16951696lockdep_assert_irqs_enabled();16971698init_poll_funcptr(&pt, NULL);16991700/*1701* Set the new event interest mask before calling f_op->poll();1702* otherwise we might miss an event that happens between the1703* f_op->poll() call and the new event set registering.1704*/1705epi->event.events = event->events; /* need barrier below */1706epi->event.data = event->data; /* protected by mtx */1707if (epi->event.events & EPOLLWAKEUP) {1708if (!ep_has_wakeup_source(epi))1709ep_create_wakeup_source(epi);1710} else if (ep_has_wakeup_source(epi)) {1711ep_destroy_wakeup_source(epi);1712}17131714/*1715* The following barrier has two effects:1716*1717* 1) Flush epi changes above to other CPUs. This ensures1718* we do not miss events from ep_poll_callback if an1719* event occurs immediately after we call f_op->poll().1720* We need this because we did not take ep->lock while1721* changing epi above (but ep_poll_callback does take1722* ep->lock).1723*1724* 2) We also need to ensure we do not miss _past_ events1725* when calling f_op->poll(). This barrier also1726* pairs with the barrier in wq_has_sleeper (see1727* comments for wq_has_sleeper).1728*1729* This barrier will now guarantee ep_poll_callback or f_op->poll1730* (or both) will notice the readiness of an item.1731*/1732smp_mb();17331734/*1735* Get current event bits. We can safely use the file* here because1736* its usage count has been increased by the caller of this function.1737* If the item is "hot" and it is not registered inside the ready1738* list, push it inside.1739*/1740if (ep_item_poll(epi, &pt, 1)) {1741spin_lock_irq(&ep->lock);1742if (!ep_is_linked(epi)) {1743list_add_tail(&epi->rdllink, &ep->rdllist);1744ep_pm_stay_awake(epi);17451746/* Notify waiting tasks that events are available */1747if (waitqueue_active(&ep->wq))1748wake_up(&ep->wq);1749if (waitqueue_active(&ep->poll_wait))1750pwake++;1751}1752spin_unlock_irq(&ep->lock);1753}17541755/* We have to call this outside the lock */1756if (pwake)1757ep_poll_safewake(ep, NULL, 0);17581759return 0;1760}17611762static int ep_send_events(struct eventpoll *ep,1763struct epoll_event __user *events, int maxevents)1764{1765struct epitem *epi, *tmp;1766LIST_HEAD(txlist);1767poll_table pt;1768int res = 0;17691770/*1771* Always short-circuit for fatal signals to allow threads to make a1772* timely exit without the chance of finding more events available and1773* fetching repeatedly.1774*/1775if (fatal_signal_pending(current))1776return -EINTR;17771778init_poll_funcptr(&pt, NULL);17791780mutex_lock(&ep->mtx);1781ep_start_scan(ep, &txlist);17821783/*1784* We can loop without lock because we are passed a task private list.1785* Items cannot vanish during the loop we are holding ep->mtx.1786*/1787list_for_each_entry_safe(epi, tmp, &txlist, rdllink) {1788struct wakeup_source *ws;1789__poll_t revents;17901791if (res >= maxevents)1792break;17931794/*1795* Activate ep->ws before deactivating epi->ws to prevent1796* triggering auto-suspend here (in case we reactive epi->ws1797* below).1798*1799* This could be rearranged to delay the deactivation of epi->ws1800* instead, but then epi->ws would temporarily be out of sync1801* with ep_is_linked().1802*/1803ws = ep_wakeup_source(epi);1804if (ws) {1805if (ws->active)1806__pm_stay_awake(ep->ws);1807__pm_relax(ws);1808}18091810list_del_init(&epi->rdllink);18111812/*1813* If the event mask intersect the caller-requested one,1814* deliver the event to userspace. Again, we are holding ep->mtx,1815* so no operations coming from userspace can change the item.1816*/1817revents = ep_item_poll(epi, &pt, 1);1818if (!revents)1819continue;18201821events = epoll_put_uevent(revents, epi->event.data, events);1822if (!events) {1823list_add(&epi->rdllink, &txlist);1824ep_pm_stay_awake(epi);1825if (!res)1826res = -EFAULT;1827break;1828}1829res++;1830if (epi->event.events & EPOLLONESHOT)1831epi->event.events &= EP_PRIVATE_BITS;1832else if (!(epi->event.events & EPOLLET)) {1833/*1834* If this file has been added with Level1835* Trigger mode, we need to insert back inside1836* the ready list, so that the next call to1837* epoll_wait() will check again the events1838* availability. At this point, no one can insert1839* into ep->rdllist besides us. The epoll_ctl()1840* callers are locked out by1841* ep_send_events() holding "mtx" and the1842* poll callback will queue them in ep->ovflist.1843*/1844list_add_tail(&epi->rdllink, &ep->rdllist);1845ep_pm_stay_awake(epi);1846}1847}1848ep_done_scan(ep, &txlist);1849mutex_unlock(&ep->mtx);18501851return res;1852}18531854static struct timespec64 *ep_timeout_to_timespec(struct timespec64 *to, long ms)1855{1856struct timespec64 now;18571858if (ms < 0)1859return NULL;18601861if (!ms) {1862to->tv_sec = 0;1863to->tv_nsec = 0;1864return to;1865}18661867to->tv_sec = ms / MSEC_PER_SEC;1868to->tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC);18691870ktime_get_ts64(&now);1871*to = timespec64_add_safe(now, *to);1872return to;1873}18741875/*1876* autoremove_wake_function, but remove even on failure to wake up, because we1877* know that default_wake_function/ttwu will only fail if the thread is already1878* woken, and in that case the ep_poll loop will remove the entry anyways, not1879* try to reuse it.1880*/1881static int ep_autoremove_wake_function(struct wait_queue_entry *wq_entry,1882unsigned int mode, int sync, void *key)1883{1884int ret = default_wake_function(wq_entry, mode, sync, key);18851886/*1887* Pairs with list_empty_careful in ep_poll, and ensures future loop1888* iterations see the cause of this wakeup.1889*/1890list_del_init_careful(&wq_entry->entry);1891return ret;1892}18931894static int ep_try_send_events(struct eventpoll *ep,1895struct epoll_event __user *events, int maxevents)1896{1897int res;18981899/*1900* Try to transfer events to user space. In case we get 0 events and1901* there's still timeout left over, we go trying again in search of1902* more luck.1903*/1904res = ep_send_events(ep, events, maxevents);1905if (res > 0)1906ep_suspend_napi_irqs(ep);1907return res;1908}19091910static int ep_schedule_timeout(ktime_t *to)1911{1912if (to)1913return ktime_after(*to, ktime_get());1914else1915return 1;1916}19171918/**1919* ep_poll - Retrieves ready events, and delivers them to the caller-supplied1920* event buffer.1921*1922* @ep: Pointer to the eventpoll context.1923* @events: Pointer to the userspace buffer where the ready events should be1924* stored.1925* @maxevents: Size (in terms of number of events) of the caller event buffer.1926* @timeout: Maximum timeout for the ready events fetch operation, in1927* timespec. If the timeout is zero, the function will not block,1928* while if the @timeout ptr is NULL, the function will block1929* until at least one event has been retrieved (or an error1930* occurred).1931*1932* Return: the number of ready events which have been fetched, or an1933* error code, in case of error.1934*/1935static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,1936int maxevents, struct timespec64 *timeout)1937{1938int res, eavail, timed_out = 0;1939u64 slack = 0;1940wait_queue_entry_t wait;1941ktime_t expires, *to = NULL;19421943lockdep_assert_irqs_enabled();19441945if (timeout && (timeout->tv_sec | timeout->tv_nsec)) {1946slack = select_estimate_accuracy(timeout);1947to = &expires;1948*to = timespec64_to_ktime(*timeout);1949} else if (timeout) {1950/*1951* Avoid the unnecessary trip to the wait queue loop, if the1952* caller specified a non blocking operation.1953*/1954timed_out = 1;1955}19561957/*1958* This call is racy: We may or may not see events that are being added1959* to the ready list under the lock (e.g., in IRQ callbacks). For cases1960* with a non-zero timeout, this thread will check the ready list under1961* lock and will add to the wait queue. For cases with a zero1962* timeout, the user by definition should not care and will have to1963* recheck again.1964*/1965eavail = ep_events_available(ep);19661967while (1) {1968if (eavail) {1969res = ep_try_send_events(ep, events, maxevents);1970if (res)1971return res;1972}19731974if (timed_out)1975return 0;19761977eavail = ep_busy_loop(ep);1978if (eavail)1979continue;19801981if (signal_pending(current))1982return -EINTR;19831984/*1985* Internally init_wait() uses autoremove_wake_function(),1986* thus wait entry is removed from the wait queue on each1987* wakeup. Why it is important? In case of several waiters1988* each new wakeup will hit the next waiter, giving it the1989* chance to harvest new event. Otherwise wakeup can be1990* lost. This is also good performance-wise, because on1991* normal wakeup path no need to call __remove_wait_queue()1992* explicitly, thus ep->lock is not taken, which halts the1993* event delivery.1994*1995* In fact, we now use an even more aggressive function that1996* unconditionally removes, because we don't reuse the wait1997* entry between loop iterations. This lets us also avoid the1998* performance issue if a process is killed, causing all of its1999* threads to wake up without being removed normally.2000*/2001init_wait(&wait);2002wait.func = ep_autoremove_wake_function;20032004spin_lock_irq(&ep->lock);2005/*2006* Barrierless variant, waitqueue_active() is called under2007* the same lock on wakeup ep_poll_callback() side, so it2008* is safe to avoid an explicit barrier.2009*/2010__set_current_state(TASK_INTERRUPTIBLE);20112012/*2013* Do the final check under the lock. ep_start/done_scan()2014* plays with two lists (->rdllist and ->ovflist) and there2015* is always a race when both lists are empty for short2016* period of time although events are pending, so lock is2017* important.2018*/2019eavail = ep_events_available(ep);2020if (!eavail)2021__add_wait_queue_exclusive(&ep->wq, &wait);20222023spin_unlock_irq(&ep->lock);20242025if (!eavail)2026timed_out = !ep_schedule_timeout(to) ||2027!schedule_hrtimeout_range(to, slack,2028HRTIMER_MODE_ABS);2029__set_current_state(TASK_RUNNING);20302031/*2032* We were woken up, thus go and try to harvest some events.2033* If timed out and still on the wait queue, recheck eavail2034* carefully under lock, below.2035*/2036eavail = 1;20372038if (!list_empty_careful(&wait.entry)) {2039spin_lock_irq(&ep->lock);2040/*2041* If the thread timed out and is not on the wait queue,2042* it means that the thread was woken up after its2043* timeout expired before it could reacquire the lock.2044* Thus, when wait.entry is empty, it needs to harvest2045* events.2046*/2047if (timed_out)2048eavail = list_empty(&wait.entry);2049__remove_wait_queue(&ep->wq, &wait);2050spin_unlock_irq(&ep->lock);2051}2052}2053}20542055/**2056* ep_loop_check_proc - verify that adding an epoll file @ep inside another2057* epoll file does not create closed loops, and2058* determine the depth of the subtree starting at @ep2059*2060* @ep: the &struct eventpoll to be currently checked.2061* @depth: Current depth of the path being checked.2062*2063* Return: depth of the subtree, or INT_MAX if we found a loop or went too deep.2064*/2065static int ep_loop_check_proc(struct eventpoll *ep, int depth)2066{2067int result = 0;2068struct rb_node *rbp;2069struct epitem *epi;20702071if (ep->gen == loop_check_gen)2072return ep->loop_check_depth;20732074mutex_lock_nested(&ep->mtx, depth + 1);2075ep->gen = loop_check_gen;2076for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) {2077epi = rb_entry(rbp, struct epitem, rbn);2078if (unlikely(is_file_epoll(epi->ffd.file))) {2079struct eventpoll *ep_tovisit;2080ep_tovisit = epi->ffd.file->private_data;2081if (ep_tovisit == inserting_into || depth > EP_MAX_NESTS)2082result = INT_MAX;2083else2084result = max(result, ep_loop_check_proc(ep_tovisit, depth + 1) + 1);2085if (result > EP_MAX_NESTS)2086break;2087} else {2088/*2089* If we've reached a file that is not associated with2090* an ep, then we need to check if the newly added2091* links are going to add too many wakeup paths. We do2092* this by adding it to the tfile_check_list, if it's2093* not already there, and calling reverse_path_check()2094* during ep_insert().2095*/2096list_file(epi->ffd.file);2097}2098}2099ep->loop_check_depth = result;2100mutex_unlock(&ep->mtx);21012102return result;2103}21042105/* ep_get_upwards_depth_proc - determine depth of @ep when traversed upwards */2106static int ep_get_upwards_depth_proc(struct eventpoll *ep, int depth)2107{2108int result = 0;2109struct epitem *epi;21102111if (ep->gen == loop_check_gen)2112return ep->loop_check_depth;2113hlist_for_each_entry_rcu(epi, &ep->refs, fllink)2114result = max(result, ep_get_upwards_depth_proc(epi->ep, depth + 1) + 1);2115ep->gen = loop_check_gen;2116ep->loop_check_depth = result;2117return result;2118}21192120/**2121* ep_loop_check - Performs a check to verify that adding an epoll file (@to)2122* into another epoll file (represented by @ep) does not create2123* closed loops or too deep chains.2124*2125* @ep: Pointer to the epoll we are inserting into.2126* @to: Pointer to the epoll to be inserted.2127*2128* Return: %zero if adding the epoll @to inside the epoll @from2129* does not violate the constraints, or %-1 otherwise.2130*/2131static int ep_loop_check(struct eventpoll *ep, struct eventpoll *to)2132{2133int depth, upwards_depth;21342135inserting_into = ep;2136/*2137* Check how deep down we can get from @to, and whether it is possible2138* to loop up to @ep.2139*/2140depth = ep_loop_check_proc(to, 0);2141if (depth > EP_MAX_NESTS)2142return -1;2143/* Check how far up we can go from @ep. */2144rcu_read_lock();2145upwards_depth = ep_get_upwards_depth_proc(ep, 0);2146rcu_read_unlock();21472148return (depth+1+upwards_depth > EP_MAX_NESTS) ? -1 : 0;2149}21502151static void clear_tfile_check_list(void)2152{2153rcu_read_lock();2154while (tfile_check_list != EP_UNACTIVE_PTR) {2155struct epitems_head *head = tfile_check_list;2156tfile_check_list = head->next;2157unlist_file(head);2158}2159rcu_read_unlock();2160}21612162/*2163* Open an eventpoll file descriptor.2164*/2165static int do_epoll_create(int flags)2166{2167int error, fd;2168struct eventpoll *ep = NULL;2169struct file *file;21702171/* Check the EPOLL_* constant for consistency. */2172BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC);21732174if (flags & ~EPOLL_CLOEXEC)2175return -EINVAL;2176/*2177* Create the internal data structure ("struct eventpoll").2178*/2179error = ep_alloc(&ep);2180if (error < 0)2181return error;2182/*2183* Creates all the items needed to setup an eventpoll file. That is,2184* a file structure and a free file descriptor.2185*/2186fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC));2187if (fd < 0) {2188error = fd;2189goto out_free_ep;2190}2191file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep,2192O_RDWR | (flags & O_CLOEXEC));2193if (IS_ERR(file)) {2194error = PTR_ERR(file);2195goto out_free_fd;2196}2197ep->file = file;2198fd_install(fd, file);2199return fd;22002201out_free_fd:2202put_unused_fd(fd);2203out_free_ep:2204ep_clear_and_put(ep);2205return error;2206}22072208SYSCALL_DEFINE1(epoll_create1, int, flags)2209{2210return do_epoll_create(flags);2211}22122213SYSCALL_DEFINE1(epoll_create, int, size)2214{2215if (size <= 0)2216return -EINVAL;22172218return do_epoll_create(0);2219}22202221#ifdef CONFIG_PM_SLEEP2222static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)2223{2224if ((epev->events & EPOLLWAKEUP) && !capable(CAP_BLOCK_SUSPEND))2225epev->events &= ~EPOLLWAKEUP;2226}2227#else2228static inline void ep_take_care_of_epollwakeup(struct epoll_event *epev)2229{2230epev->events &= ~EPOLLWAKEUP;2231}2232#endif22332234static inline int epoll_mutex_lock(struct mutex *mutex, int depth,2235bool nonblock)2236{2237if (!nonblock) {2238mutex_lock_nested(mutex, depth);2239return 0;2240}2241if (mutex_trylock(mutex))2242return 0;2243return -EAGAIN;2244}22452246int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,2247bool nonblock)2248{2249int error;2250int full_check = 0;2251struct eventpoll *ep;2252struct epitem *epi;2253struct eventpoll *tep = NULL;22542255CLASS(fd, f)(epfd);2256if (fd_empty(f))2257return -EBADF;22582259/* Get the "struct file *" for the target file */2260CLASS(fd, tf)(fd);2261if (fd_empty(tf))2262return -EBADF;22632264/* The target file descriptor must support poll */2265if (!file_can_poll(fd_file(tf)))2266return -EPERM;22672268/* Check if EPOLLWAKEUP is allowed */2269if (ep_op_has_event(op))2270ep_take_care_of_epollwakeup(epds);22712272/*2273* We have to check that the file structure underneath the file descriptor2274* the user passed to us _is_ an eventpoll file. And also we do not permit2275* adding an epoll file descriptor inside itself.2276*/2277error = -EINVAL;2278if (fd_file(f) == fd_file(tf) || !is_file_epoll(fd_file(f)))2279goto error_tgt_fput;22802281/*2282* epoll adds to the wakeup queue at EPOLL_CTL_ADD time only,2283* so EPOLLEXCLUSIVE is not allowed for a EPOLL_CTL_MOD operation.2284* Also, we do not currently supported nested exclusive wakeups.2285*/2286if (ep_op_has_event(op) && (epds->events & EPOLLEXCLUSIVE)) {2287if (op == EPOLL_CTL_MOD)2288goto error_tgt_fput;2289if (op == EPOLL_CTL_ADD && (is_file_epoll(fd_file(tf)) ||2290(epds->events & ~EPOLLEXCLUSIVE_OK_BITS)))2291goto error_tgt_fput;2292}22932294/*2295* At this point it is safe to assume that the "private_data" contains2296* our own data structure.2297*/2298ep = fd_file(f)->private_data;22992300/*2301* When we insert an epoll file descriptor inside another epoll file2302* descriptor, there is the chance of creating closed loops, which are2303* better be handled here, than in more critical paths. While we are2304* checking for loops we also determine the list of files reachable2305* and hang them on the tfile_check_list, so we can check that we2306* haven't created too many possible wakeup paths.2307*2308* We do not need to take the global 'epumutex' on EPOLL_CTL_ADD when2309* the epoll file descriptor is attaching directly to a wakeup source,2310* unless the epoll file descriptor is nested. The purpose of taking the2311* 'epnested_mutex' on add is to prevent complex toplogies such as loops and2312* deep wakeup paths from forming in parallel through multiple2313* EPOLL_CTL_ADD operations.2314*/2315error = epoll_mutex_lock(&ep->mtx, 0, nonblock);2316if (error)2317goto error_tgt_fput;2318if (op == EPOLL_CTL_ADD) {2319if (READ_ONCE(fd_file(f)->f_ep) || ep->gen == loop_check_gen ||2320is_file_epoll(fd_file(tf))) {2321mutex_unlock(&ep->mtx);2322error = epoll_mutex_lock(&epnested_mutex, 0, nonblock);2323if (error)2324goto error_tgt_fput;2325loop_check_gen++;2326full_check = 1;2327if (is_file_epoll(fd_file(tf))) {2328tep = fd_file(tf)->private_data;2329error = -ELOOP;2330if (ep_loop_check(ep, tep) != 0)2331goto error_tgt_fput;2332}2333error = epoll_mutex_lock(&ep->mtx, 0, nonblock);2334if (error)2335goto error_tgt_fput;2336}2337}23382339/*2340* Try to lookup the file inside our RB tree. Since we grabbed "mtx"2341* above, we can be sure to be able to use the item looked up by2342* ep_find() till we release the mutex.2343*/2344epi = ep_find(ep, fd_file(tf), fd);23452346error = -EINVAL;2347switch (op) {2348case EPOLL_CTL_ADD:2349if (!epi) {2350epds->events |= EPOLLERR | EPOLLHUP;2351error = ep_insert(ep, epds, fd_file(tf), fd, full_check);2352} else2353error = -EEXIST;2354break;2355case EPOLL_CTL_DEL:2356if (epi) {2357/*2358* The eventpoll itself is still alive: the refcount2359* can't go to zero here.2360*/2361ep_remove_safe(ep, epi);2362error = 0;2363} else {2364error = -ENOENT;2365}2366break;2367case EPOLL_CTL_MOD:2368if (epi) {2369if (!(epi->event.events & EPOLLEXCLUSIVE)) {2370epds->events |= EPOLLERR | EPOLLHUP;2371error = ep_modify(ep, epi, epds);2372}2373} else2374error = -ENOENT;2375break;2376}2377mutex_unlock(&ep->mtx);23782379error_tgt_fput:2380if (full_check) {2381clear_tfile_check_list();2382loop_check_gen++;2383mutex_unlock(&epnested_mutex);2384}2385return error;2386}23872388/*2389* The following function implements the controller interface for2390* the eventpoll file that enables the insertion/removal/change of2391* file descriptors inside the interest set.2392*/2393SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,2394struct epoll_event __user *, event)2395{2396struct epoll_event epds;23972398if (ep_op_has_event(op) &&2399copy_from_user(&epds, event, sizeof(struct epoll_event)))2400return -EFAULT;24012402return do_epoll_ctl(epfd, op, fd, &epds, false);2403}24042405static int ep_check_params(struct file *file, struct epoll_event __user *evs,2406int maxevents)2407{2408/* The maximum number of event must be greater than zero */2409if (maxevents <= 0 || maxevents > EP_MAX_EVENTS)2410return -EINVAL;24112412/* Verify that the area passed by the user is writeable */2413if (!access_ok(evs, maxevents * sizeof(struct epoll_event)))2414return -EFAULT;24152416/*2417* We have to check that the file structure underneath the fd2418* the user passed to us _is_ an eventpoll file.2419*/2420if (!is_file_epoll(file))2421return -EINVAL;24222423return 0;2424}24252426int epoll_sendevents(struct file *file, struct epoll_event __user *events,2427int maxevents)2428{2429struct eventpoll *ep;2430int ret;24312432ret = ep_check_params(file, events, maxevents);2433if (unlikely(ret))2434return ret;24352436ep = file->private_data;2437/*2438* Racy call, but that's ok - it should get retried based on2439* poll readiness anyway.2440*/2441if (ep_events_available(ep))2442return ep_try_send_events(ep, events, maxevents);2443return 0;2444}24452446/*2447* Implement the event wait interface for the eventpoll file. It is the kernel2448* part of the user space epoll_wait(2).2449*/2450static int do_epoll_wait(int epfd, struct epoll_event __user *events,2451int maxevents, struct timespec64 *to)2452{2453struct eventpoll *ep;2454int ret;24552456/* Get the "struct file *" for the eventpoll file */2457CLASS(fd, f)(epfd);2458if (fd_empty(f))2459return -EBADF;24602461ret = ep_check_params(fd_file(f), events, maxevents);2462if (unlikely(ret))2463return ret;24642465/*2466* At this point it is safe to assume that the "private_data" contains2467* our own data structure.2468*/2469ep = fd_file(f)->private_data;24702471/* Time to fish for events ... */2472return ep_poll(ep, events, maxevents, to);2473}24742475SYSCALL_DEFINE4(epoll_wait, int, epfd, struct epoll_event __user *, events,2476int, maxevents, int, timeout)2477{2478struct timespec64 to;24792480return do_epoll_wait(epfd, events, maxevents,2481ep_timeout_to_timespec(&to, timeout));2482}24832484/*2485* Implement the event wait interface for the eventpoll file. It is the kernel2486* part of the user space epoll_pwait(2).2487*/2488static int do_epoll_pwait(int epfd, struct epoll_event __user *events,2489int maxevents, struct timespec64 *to,2490const sigset_t __user *sigmask, size_t sigsetsize)2491{2492int error;24932494/*2495* If the caller wants a certain signal mask to be set during the wait,2496* we apply it here.2497*/2498error = set_user_sigmask(sigmask, sigsetsize);2499if (error)2500return error;25012502error = do_epoll_wait(epfd, events, maxevents, to);25032504restore_saved_sigmask_unless(error == -EINTR);25052506return error;2507}25082509SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,2510int, maxevents, int, timeout, const sigset_t __user *, sigmask,2511size_t, sigsetsize)2512{2513struct timespec64 to;25142515return do_epoll_pwait(epfd, events, maxevents,2516ep_timeout_to_timespec(&to, timeout),2517sigmask, sigsetsize);2518}25192520SYSCALL_DEFINE6(epoll_pwait2, int, epfd, struct epoll_event __user *, events,2521int, maxevents, const struct __kernel_timespec __user *, timeout,2522const sigset_t __user *, sigmask, size_t, sigsetsize)2523{2524struct timespec64 ts, *to = NULL;25252526if (timeout) {2527if (get_timespec64(&ts, timeout))2528return -EFAULT;2529to = &ts;2530if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))2531return -EINVAL;2532}25332534return do_epoll_pwait(epfd, events, maxevents, to,2535sigmask, sigsetsize);2536}25372538#ifdef CONFIG_COMPAT2539static int do_compat_epoll_pwait(int epfd, struct epoll_event __user *events,2540int maxevents, struct timespec64 *timeout,2541const compat_sigset_t __user *sigmask,2542compat_size_t sigsetsize)2543{2544long err;25452546/*2547* If the caller wants a certain signal mask to be set during the wait,2548* we apply it here.2549*/2550err = set_compat_user_sigmask(sigmask, sigsetsize);2551if (err)2552return err;25532554err = do_epoll_wait(epfd, events, maxevents, timeout);25552556restore_saved_sigmask_unless(err == -EINTR);25572558return err;2559}25602561COMPAT_SYSCALL_DEFINE6(epoll_pwait, int, epfd,2562struct epoll_event __user *, events,2563int, maxevents, int, timeout,2564const compat_sigset_t __user *, sigmask,2565compat_size_t, sigsetsize)2566{2567struct timespec64 to;25682569return do_compat_epoll_pwait(epfd, events, maxevents,2570ep_timeout_to_timespec(&to, timeout),2571sigmask, sigsetsize);2572}25732574COMPAT_SYSCALL_DEFINE6(epoll_pwait2, int, epfd,2575struct epoll_event __user *, events,2576int, maxevents,2577const struct __kernel_timespec __user *, timeout,2578const compat_sigset_t __user *, sigmask,2579compat_size_t, sigsetsize)2580{2581struct timespec64 ts, *to = NULL;25822583if (timeout) {2584if (get_timespec64(&ts, timeout))2585return -EFAULT;2586to = &ts;2587if (poll_select_set_timeout(to, ts.tv_sec, ts.tv_nsec))2588return -EINVAL;2589}25902591return do_compat_epoll_pwait(epfd, events, maxevents, to,2592sigmask, sigsetsize);2593}25942595#endif25962597static int __init eventpoll_init(void)2598{2599struct sysinfo si;26002601si_meminfo(&si);2602/*2603* Allows top 4% of lomem to be allocated for epoll watches (per user).2604*/2605max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /2606EP_ITEM_COST;2607BUG_ON(max_user_watches < 0);26082609/*2610* We can have many thousands of epitems, so prevent this from2611* using an extra cache line on 64-bit (and smaller) CPUs2612*/2613BUILD_BUG_ON(sizeof(void *) <= 8 && sizeof(struct epitem) > 128);26142615/* Allocates slab cache used to allocate "struct epitem" items */2616epi_cache = kmem_cache_create("eventpoll_epi", sizeof(struct epitem),26170, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL);26182619/* Allocates slab cache used to allocate "struct eppoll_entry" */2620pwq_cache = kmem_cache_create("eventpoll_pwq",2621sizeof(struct eppoll_entry), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);2622epoll_sysctls_init();26232624ephead_cache = kmem_cache_create("ep_head",2625sizeof(struct epitems_head), 0, SLAB_PANIC|SLAB_ACCOUNT, NULL);26262627return 0;2628}2629fs_initcall(eventpoll_init);263026312632