/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2021 Facebook
 */

#ifndef __MMAP_UNLOCK_WORK_H__
#define __MMAP_UNLOCK_WORK_H__
#include <linux/irq_work.h>

/* irq_work used to run mmap_read_unlock() from IRQ context */
struct mmap_unlock_irq_work {
	struct irq_work irq_work;
	struct mm_struct *mm;
};

DECLARE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);

/*
 * We cannot call mmap_read_unlock() while IRQs are disabled, because of
 * the risk of deadlocking on rq_lock. To look up a VMA while IRQs are
 * disabled, we therefore defer the mmap_read_unlock() to irq_work, using
 * a percpu work item. If that work item is already in use by another
 * lookup, we report busy and the caller must fall back. (See the usage
 * sketches at the end of this file.)
 */
static inline bool bpf_mmap_unlock_get_irq_work(struct mmap_unlock_irq_work **work_ptr)
{
	struct mmap_unlock_irq_work *work = NULL;
	bool irq_work_busy = false;

	if (irqs_disabled()) {
		if (!IS_ENABLED(CONFIG_PREEMPT_RT)) {
			work = this_cpu_ptr(&mmap_unlock_work);
			if (irq_work_is_busy(&work->irq_work)) {
				/* cannot queue another up_read(), fall back */
				irq_work_busy = true;
			}
		} else {
			/*
			 * PREEMPT_RT does not allow trylocking the mmap
			 * sem in interrupt-disabled context. Force the
			 * fallback path.
			 */
			irq_work_busy = true;
		}
	}

	*work_ptr = work;
	return irq_work_busy;
}

static inline void bpf_mmap_unlock_mm(struct mmap_unlock_irq_work *work, struct mm_struct *mm)
{
	if (!work) {
		mmap_read_unlock(mm);
	} else {
		work->mm = mm;

		/* The lock will be released once we're out of interrupt
		 * context. Tell lockdep that we've released it now so
		 * it doesn't complain that we forgot to release it.
		 */
		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
		irq_work_queue(&work->irq_work);
	}
}

#endif /* __MMAP_UNLOCK_WORK_H__ */
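
/*
 * Typical caller pattern, a sketch modeled on bpf_find_vma() in
 * kernel/bpf/task_iter.c (mm, addr, and the error code here are
 * illustrative, not part of this header):
 *
 *	struct mmap_unlock_irq_work *work = NULL;
 *	struct vm_area_struct *vma;
 *
 *	// If IRQs are disabled, grab the percpu work item; a true return
 *	// means no deferred-unlock path is available and we must bail out
 *	// without taking the lock.
 *	if (bpf_mmap_unlock_get_irq_work(&work) || !mmap_read_trylock(mm))
 *		return -EBUSY;
 *
 *	vma = find_vma(mm, addr);
 *	// ... use vma under the mmap read lock ...
 *
 *	// Unlocks directly when work == NULL (IRQs were enabled),
 *	// otherwise queues the deferred mmap_read_unlock().
 *	bpf_mmap_unlock_mm(work, mm);
 */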
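
/*
 * For reference, the irq_work handler and the per-cpu initialization
 * that pair with this header live on the .c side. A minimal sketch,
 * modeled on do_mmap_read_unlock() and the init loop in
 * kernel/bpf/task_iter.c (illustrative, not part of this header):
 *
 *	DEFINE_PER_CPU(struct mmap_unlock_irq_work, mmap_unlock_work);
 *
 *	static void do_mmap_read_unlock(struct irq_work *entry)
 *	{
 *		struct mmap_unlock_irq_work *work;
 *
 *		// Never reached on PREEMPT_RT: the getter forces fallback.
 *		if (WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_RT)))
 *			return;
 *
 *		work = container_of(entry, struct mmap_unlock_irq_work, irq_work);
 *		// _non_owner: the lock was acquired in task context but is
 *		// released from the irq_work handler.
 *		mmap_read_unlock_non_owner(work->mm);
 *	}
 *
 *	// Each per-cpu work item must be bound to the handler once,
 *	// e.g. from an __init function:
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		init_irq_work(&per_cpu_ptr(&mmap_unlock_work, cpu)->irq_work,
 *			      do_mmap_read_unlock);
 */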