#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ptrace.h>
#include <linux/kdebug.h>
#include <linux/percpu-rwsem.h>
#include <linux/task_work.h>
#include <linux/shmem_fs.h>
#include <linux/khugepaged.h>
#include <linux/rcupdate_trace.h>
#include <linux/workqueue.h>
#include <linux/srcu.h>
#include <linux/oom.h>
#include <linux/pagewalk.h>
#include <linux/uprobes.h>
#define UINSNS_PER_PAGE (PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS UINSNS_PER_PAGE
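/*
 * All uprobes live in one rb-tree, keyed by (inode, offset). Writers hold
 * uprobes_treelock; uprobes_seqcount lets lockless RCU readers detect and
 * retry racing tree modifications.
 */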
static struct rb_root uprobes_tree = RB_ROOT;
#define no_uprobe_events() RB_EMPTY_ROOT(&uprobes_tree)
static DEFINE_RWLOCK(uprobes_treelock);
static seqcount_rwlock_t uprobes_seqcount = SEQCNT_RWLOCK_ZERO(uprobes_seqcount, &uprobes_treelock);
#define UPROBES_HASH_SZ 13
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v) (&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])
DEFINE_STATIC_PERCPU_RWSEM(dup_mmap_sem);
DEFINE_STATIC_SRCU(uretprobes_srcu);
#define UPROBE_COPY_INSN 0
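/*
 * Per-probepoint state, one instance per (inode, offset) pair. ->ref pins
 * the object, ->register_rwsem serializes register/unregister against the
 * handlers, ->consumer_rwsem protects ->consumers, and ->flags carries
 * UPROBE_COPY_INSN once ->arch holds the analyzed original instruction.
 */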
struct uprobe {
struct rb_node rb_node;
refcount_t ref;
struct rw_semaphore register_rwsem;
struct rw_semaphore consumer_rwsem;
struct list_head pending_list;
struct list_head consumers;
struct inode *inode;
union {
struct rcu_head rcu;
struct work_struct work;
};
loff_t offset;
loff_t ref_ctr_offset;
unsigned long flags;
struct arch_uprobe arch;
};
struct delayed_uprobe {
struct list_head list;
struct uprobe *uprobe;
struct mm_struct *mm;
};
static DEFINE_MUTEX(delayed_uprobe_lock);
static LIST_HEAD(delayed_uprobe_list);
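/*
 * Execute-out-of-line (XOL) area: a single executable page mapped into the
 * probed task, carved into UINSNS_PER_PAGE slots of UPROBE_XOL_SLOT_BYTES
 * each. Slot 0 is reserved for the uretprobe trampoline.
 */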
struct xol_area {
wait_queue_head_t wq;
unsigned long *bitmap;
struct page *page;
unsigned long vaddr;
};
static void uprobe_warn(struct task_struct *t, const char *msg)
{
pr_warn("uprobe: %s:%d failed to %s\n", t->comm, t->pid, msg);
}
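/*
 * valid_vma: Verify if the specified vma is an executable vma.
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was removed.
 */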
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_MAYSHARE;
if (is_register)
flags |= VM_WRITE;
return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}
static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}
static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}
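/*
 * is_swbp_insn - default check whether @insn is the breakpoint instruction.
 * is_trap_insn - default check for any trap variant; architectures with
 * multiple trap instructions (e.g. powerpc) override it.
 */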
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
return *insn == UPROBE_SWBP_INSN;
}
bool __weak is_trap_insn(uprobe_opcode_t *insn)
{
return is_swbp_insn(insn);
}
void uprobe_copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)
{
void *kaddr = kmap_atomic(page);
memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
kunmap_atomic(kaddr);
}
static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)
{
void *kaddr = kmap_atomic(page);
memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
kunmap_atomic(kaddr);
}
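/*
 * We only check whether the old opcode is UPROBE_SWBP_INSN here, not any
 * other trap variant: uprobes always wins over any other (gdb) breakpoint.
 * Returns 0 if the opcode is already in the desired state, 1 if it still
 * needs to be written.
 */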
static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *insn,
int nbytes, void *data)
{
uprobe_opcode_t old_opcode;
bool is_swbp;
uprobe_copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
is_swbp = is_swbp_insn(&old_opcode);
if (is_swbp_insn(insn)) {
if (is_swbp)
return 0;
} else {
if (!is_swbp)
return 0;
}
return 1;
}
static struct delayed_uprobe *
delayed_uprobe_check(struct uprobe *uprobe, struct mm_struct *mm)
{
struct delayed_uprobe *du;
list_for_each_entry(du, &delayed_uprobe_list, list)
if (du->uprobe == uprobe && du->mm == mm)
return du;
return NULL;
}
static int delayed_uprobe_add(struct uprobe *uprobe, struct mm_struct *mm)
{
struct delayed_uprobe *du;
if (delayed_uprobe_check(uprobe, mm))
return 0;
du = kzalloc(sizeof(*du), GFP_KERNEL);
if (!du)
return -ENOMEM;
du->uprobe = uprobe;
du->mm = mm;
list_add(&du->list, &delayed_uprobe_list);
return 0;
}
static void delayed_uprobe_delete(struct delayed_uprobe *du)
{
if (WARN_ON(!du))
return;
list_del(&du->list);
kfree(du);
}
static void delayed_uprobe_remove(struct uprobe *uprobe, struct mm_struct *mm)
{
struct list_head *pos, *q;
struct delayed_uprobe *du;
if (!uprobe && !mm)
return;
list_for_each_safe(pos, q, &delayed_uprobe_list) {
du = list_entry(pos, struct delayed_uprobe, list);
if (uprobe && du->uprobe != uprobe)
continue;
if (mm && du->mm != mm)
continue;
delayed_uprobe_delete(du);
}
}
static bool valid_ref_ctr_vma(struct uprobe *uprobe,
struct vm_area_struct *vma)
{
unsigned long vaddr = offset_to_vaddr(vma, uprobe->ref_ctr_offset);
return uprobe->ref_ctr_offset &&
vma->vm_file &&
file_inode(vma->vm_file) == uprobe->inode &&
(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
vma->vm_start <= vaddr &&
vma->vm_end > vaddr;
}
static struct vm_area_struct *
find_ref_ctr_vma(struct uprobe *uprobe, struct mm_struct *mm)
{
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *tmp;
for_each_vma(vmi, tmp)
if (valid_ref_ctr_vma(uprobe, tmp))
return tmp;
return NULL;
}
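/*
 * Adjust the user-space reference counter (e.g. an SDT semaphore) that the
 * probed binary keeps at ->ref_ctr_offset; it is patched directly in the
 * target mm via get_user_pages_remote().
 */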
static int
__update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)
{
void *kaddr;
struct page *page;
int ret;
short *ptr;
if (!vaddr || !d)
return -EINVAL;
ret = get_user_pages_remote(mm, vaddr, 1,
FOLL_WRITE, &page, NULL);
if (unlikely(ret <= 0)) {
return ret == 0 ? -EBUSY : ret;
}
kaddr = kmap_atomic(page);
ptr = kaddr + (vaddr & ~PAGE_MASK);
if (unlikely(*ptr + d < 0)) {
pr_warn("ref_ctr going negative. vaddr: 0x%lx, "
"curr val: %d, delta: %d\n", vaddr, *ptr, d);
ret = -EINVAL;
goto out;
}
*ptr += d;
ret = 0;
out:
kunmap_atomic(kaddr);
put_page(page);
return ret;
}
static void update_ref_ctr_warn(struct uprobe *uprobe,
struct mm_struct *mm, short d)
{
pr_warn("ref_ctr %s failed for inode: 0x%lx offset: "
"0x%llx ref_ctr_offset: 0x%llx of mm: 0x%p\n",
d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,
(unsigned long long) uprobe->offset,
(unsigned long long) uprobe->ref_ctr_offset, mm);
}
static int update_ref_ctr(struct uprobe *uprobe, struct mm_struct *mm,
short d)
{
struct vm_area_struct *rc_vma;
unsigned long rc_vaddr;
int ret = 0;
rc_vma = find_ref_ctr_vma(uprobe, mm);
if (rc_vma) {
rc_vaddr = offset_to_vaddr(rc_vma, uprobe->ref_ctr_offset);
ret = __update_ref_ctr(mm, rc_vaddr, d);
if (ret)
update_ref_ctr_warn(uprobe, mm, d);
if (d > 0)
return ret;
}
mutex_lock(&delayed_uprobe_lock);
if (d > 0)
ret = delayed_uprobe_add(uprobe, mm);
else
delayed_uprobe_remove(uprobe, mm);
mutex_unlock(&delayed_uprobe_lock);
return ret;
}
static bool orig_page_is_identical(struct vm_area_struct *vma,
unsigned long vaddr, struct page *page, bool *pmd_mappable)
{
const pgoff_t index = vaddr_to_offset(vma, vaddr) >> PAGE_SHIFT;
struct folio *orig_folio = filemap_get_folio(vma->vm_file->f_mapping,
index);
struct page *orig_page;
bool identical;
if (IS_ERR(orig_folio))
return false;
orig_page = folio_file_page(orig_folio, index);
*pmd_mappable = folio_test_pmd_mappable(orig_folio);
identical = folio_test_uptodate(orig_folio) &&
pages_identical(page, orig_page);
folio_put(orig_folio);
return identical;
}
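/*
 * Patch the breakpoint bytes into the anonymous (COW) page backing
 * @insn_vaddr. On unregister, if the restored page becomes identical to
 * the original file page again, unmap it so the file page can be faulted
 * back in; a positive return value tells the caller the surrounding THP
 * may be collapsed again.
 */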
static int __uprobe_write(struct vm_area_struct *vma,
struct folio_walk *fw, struct folio *folio,
unsigned long insn_vaddr, uprobe_opcode_t *insn, int nbytes,
bool is_register)
{
const unsigned long vaddr = insn_vaddr & PAGE_MASK;
bool pmd_mappable;
if (fw->level != FW_LEVEL_PTE)
return -EFAULT;
if (!pte_write(fw->pte)) {
if (!PageAnonExclusive(fw->page))
return -EFAULT;
if (unlikely(userfaultfd_pte_wp(vma, fw->pte)))
return -EFAULT;
}
flush_cache_page(vma, vaddr, pte_pfn(fw->pte));
fw->pte = ptep_clear_flush(vma, vaddr, fw->ptep);
copy_to_page(fw->page, insn_vaddr, insn, nbytes);
if (is_register || userfaultfd_missing(vma) ||
(folio_ref_count(folio) != folio_expected_ref_count(folio) + 1))
goto remap;
if (!orig_page_is_identical(vma, vaddr, fw->page, &pmd_mappable))
goto remap;
dec_mm_counter(vma->vm_mm, MM_ANONPAGES);
folio_remove_rmap_pte(folio, fw->page, vma);
if (!folio_mapped(folio) && folio_test_swapcache(folio) &&
folio_trylock(folio)) {
folio_free_swap(folio);
folio_unlock(folio);
}
folio_put(folio);
return pmd_mappable;
remap:
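	/*
	 * Make sure our copy_to_page() changes become visible before the
	 * set_pte_at() below re-installs the (dirtied) PTE.
	 */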
smp_wmb();
set_pte_at(vma->vm_mm, vaddr, fw->ptep, pte_mkdirty(fw->pte));
return 0;
}
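/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instruction and the
 * breakpoint instruction is not of smallest length, we need to make special
 * accommodations.
 *
 * uprobe_write_opcode - write the opcode at a given virtual address.
 * @auprobe: arch specific probepoint information.
 * @vma: the probed virtual memory area.
 * @opcode_vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @opcode_vaddr.
 *
 * Called with mm->mmap_lock held for read or write.
 * Return 0 (success) or a negative errno.
 */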
int uprobe_write_opcode(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
const unsigned long opcode_vaddr, uprobe_opcode_t opcode,
bool is_register)
{
return uprobe_write(auprobe, vma, opcode_vaddr, &opcode, UPROBE_SWBP_INSN_SIZE,
			    verify_opcode, is_register, true /* do_update_ref_ctr */, NULL);
}
int uprobe_write(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
const unsigned long insn_vaddr, uprobe_opcode_t *insn, int nbytes,
uprobe_write_verify_t verify, bool is_register, bool do_update_ref_ctr,
void *data)
{
const unsigned long vaddr = insn_vaddr & PAGE_MASK;
struct mm_struct *mm = vma->vm_mm;
struct uprobe *uprobe;
int ret, ref_ctr_updated = 0;
unsigned int gup_flags = FOLL_FORCE;
struct mmu_notifier_range range;
struct folio_walk fw;
struct folio *folio;
struct page *page;
uprobe = container_of(auprobe, struct uprobe, arch);
if (WARN_ON_ONCE(!is_cow_mapping(vma->vm_flags)))
return -EINVAL;
if (is_register)
gup_flags |= FOLL_WRITE | FOLL_SPLIT_PMD;
retry:
ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, &page, NULL);
if (ret <= 0)
goto out;
folio = page_folio(page);
ret = verify(page, insn_vaddr, insn, nbytes, data);
if (ret <= 0) {
folio_put(folio);
goto out;
}
if (do_update_ref_ctr && !ref_ctr_updated && uprobe->ref_ctr_offset) {
ret = update_ref_ctr(uprobe, mm, is_register ? 1 : -1);
if (ret) {
folio_put(folio);
goto out;
}
ref_ctr_updated = 1;
}
ret = 0;
if (unlikely(!folio_test_anon(folio) || folio_is_zone_device(folio))) {
VM_WARN_ON_ONCE(is_register);
folio_put(folio);
goto out;
}
if (!is_register) {
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
vaddr, vaddr + PAGE_SIZE);
mmu_notifier_invalidate_range_start(&range);
}
ret = -EAGAIN;
if (folio_walk_start(&fw, vma, vaddr, 0)) {
if (fw.page == page)
ret = __uprobe_write(vma, &fw, folio, insn_vaddr, insn, nbytes, is_register);
folio_walk_end(&fw, vma);
}
if (!is_register)
mmu_notifier_invalidate_range_end(&range);
folio_put(folio);
switch (ret) {
case -EFAULT:
gup_flags |= FOLL_WRITE | FOLL_SPLIT_PMD;
fallthrough;
case -EAGAIN:
goto retry;
default:
break;
}
out:
if (do_update_ref_ctr && ret < 0 && ref_ctr_updated)
update_ref_ctr(uprobe, mm, is_register ? -1 : 1);
if (ret > 0)
collapse_pte_mapped_thp(mm, vaddr, false);
return ret < 0 ? ret : 0;
}
int __weak set_swbp(struct arch_uprobe *auprobe, struct vm_area_struct *vma,
unsigned long vaddr)
{
return uprobe_write_opcode(auprobe, vma, vaddr, UPROBE_SWBP_INSN, true);
}
int __weak set_orig_insn(struct arch_uprobe *auprobe,
struct vm_area_struct *vma, unsigned long vaddr)
{
return uprobe_write_opcode(auprobe, vma, vaddr,
*(uprobe_opcode_t *)&auprobe->insn, false);
}
static struct uprobe *get_uprobe(struct uprobe *uprobe)
{
refcount_inc(&uprobe->ref);
return uprobe;
}
static struct uprobe *try_get_uprobe(struct uprobe *uprobe)
{
if (refcount_inc_not_zero(&uprobe->ref))
return uprobe;
return NULL;
}
static inline bool uprobe_is_active(struct uprobe *uprobe)
{
return !RB_EMPTY_NODE(&uprobe->rb_node);
}
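/*
 * uprobe freeing is deferred in stages: put_uprobe() queues a work item
 * that unlinks the uprobe from the rb-tree, then an SRCU (uretprobes) and
 * an RCU-tasks-trace grace period must both elapse before the final kfree().
 */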
static void uprobe_free_rcu_tasks_trace(struct rcu_head *rcu)
{
struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);
kfree(uprobe);
}
static void uprobe_free_srcu(struct rcu_head *rcu)
{
struct uprobe *uprobe = container_of(rcu, struct uprobe, rcu);
call_rcu_tasks_trace(&uprobe->rcu, uprobe_free_rcu_tasks_trace);
}
static void uprobe_free_deferred(struct work_struct *work)
{
struct uprobe *uprobe = container_of(work, struct uprobe, work);
write_lock(&uprobes_treelock);
if (uprobe_is_active(uprobe)) {
write_seqcount_begin(&uprobes_seqcount);
rb_erase(&uprobe->rb_node, &uprobes_tree);
write_seqcount_end(&uprobes_seqcount);
}
write_unlock(&uprobes_treelock);
mutex_lock(&delayed_uprobe_lock);
delayed_uprobe_remove(uprobe, NULL);
mutex_unlock(&delayed_uprobe_lock);
call_srcu(&uretprobes_srcu, &uprobe->rcu, uprobe_free_srcu);
}
static void put_uprobe(struct uprobe *uprobe)
{
if (!refcount_dec_and_test(&uprobe->ref))
return;
INIT_WORK(&uprobe->work, uprobe_free_deferred);
schedule_work(&uprobe->work);
}
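/*
 * hprobe is a "hybrid" uprobe reference used by uretprobes: in LEASED state
 * the uprobe is kept alive by an SRCU read lock (srcu_idx), in STABLE state
 * by a real refcount; GONE and CONSUMED carry no reference at all.
 */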
static void hprobe_init_leased(struct hprobe *hprobe, struct uprobe *uprobe, int srcu_idx)
{
WARN_ON(!uprobe);
hprobe->state = HPROBE_LEASED;
hprobe->uprobe = uprobe;
hprobe->srcu_idx = srcu_idx;
}
static void hprobe_init_stable(struct hprobe *hprobe, struct uprobe *uprobe)
{
hprobe->state = uprobe ? HPROBE_STABLE : HPROBE_GONE;
hprobe->uprobe = uprobe;
hprobe->srcu_idx = -1;
}
static inline struct uprobe *hprobe_consume(struct hprobe *hprobe, enum hprobe_state *hstate)
{
*hstate = xchg(&hprobe->state, HPROBE_CONSUMED);
switch (*hstate) {
case HPROBE_LEASED:
case HPROBE_STABLE:
return hprobe->uprobe;
case HPROBE_GONE:
case HPROBE_CONSUMED:
return NULL;
default:
WARN(1, "hprobe invalid state %d", *hstate);
return NULL;
}
}
static void hprobe_finalize(struct hprobe *hprobe, enum hprobe_state hstate)
{
switch (hstate) {
case HPROBE_LEASED:
__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
break;
case HPROBE_STABLE:
put_uprobe(hprobe->uprobe);
break;
case HPROBE_GONE:
case HPROBE_CONSUMED:
break;
default:
WARN(1, "hprobe invalid state %d", hstate);
break;
}
}
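/*
 * Attempt to atomically switch an hprobe from SRCU-protected (LEASED) to
 * refcounted (STABLE) state; this races with hprobe_consume(), and whoever
 * wins must do the SRCU unlock. If @get, an extra refcount is returned to
 * the caller. Returns the underlying uprobe, or NULL if it is gone.
 */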
static struct uprobe *hprobe_expire(struct hprobe *hprobe, bool get)
{
enum hprobe_state hstate;
lockdep_assert(srcu_read_lock_held(&uretprobes_srcu));
hstate = READ_ONCE(hprobe->state);
switch (hstate) {
case HPROBE_STABLE:
return get ? get_uprobe(hprobe->uprobe) : hprobe->uprobe;
case HPROBE_GONE:
return NULL;
case HPROBE_CONSUMED:
return NULL;
case HPROBE_LEASED: {
struct uprobe *uprobe = try_get_uprobe(hprobe->uprobe);
if (try_cmpxchg(&hprobe->state, &hstate, uprobe ? HPROBE_STABLE : HPROBE_GONE)) {
__srcu_read_unlock(&uretprobes_srcu, hprobe->srcu_idx);
return get ? get_uprobe(uprobe) : uprobe;
}
if (uprobe && !get)
put_uprobe(uprobe);
return uprobe;
}
default:
WARN(1, "unknown hprobe state %d", hstate);
return NULL;
}
}
static __always_inline
int uprobe_cmp(const struct inode *l_inode, const loff_t l_offset,
const struct uprobe *r)
{
if (l_inode < r->inode)
return -1;
if (l_inode > r->inode)
return 1;
if (l_offset < r->offset)
return -1;
if (l_offset > r->offset)
return 1;
return 0;
}
#define __node_2_uprobe(node) \
rb_entry((node), struct uprobe, rb_node)
struct __uprobe_key {
struct inode *inode;
loff_t offset;
};
static inline int __uprobe_cmp_key(const void *key, const struct rb_node *b)
{
const struct __uprobe_key *a = key;
return uprobe_cmp(a->inode, a->offset, __node_2_uprobe(b));
}
static inline int __uprobe_cmp(struct rb_node *a, const struct rb_node *b)
{
struct uprobe *u = __node_2_uprobe(a);
return uprobe_cmp(u->inode, u->offset, __node_2_uprobe(b));
}
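/*
 * Lockless rb-tree lookup under rcu_read_lock_trace(). Lockless lookups
 * can result only in false negatives: a found node is trustworthy, a miss
 * is retried if the tree changed underneath us (uprobes_seqcount).
 */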
static struct uprobe *find_uprobe_rcu(struct inode *inode, loff_t offset)
{
struct __uprobe_key key = {
.inode = inode,
.offset = offset,
};
struct rb_node *node;
unsigned int seq;
lockdep_assert(rcu_read_lock_trace_held());
do {
seq = read_seqcount_begin(&uprobes_seqcount);
node = rb_find_rcu(&key, &uprobes_tree, __uprobe_cmp_key);
if (node)
return __node_2_uprobe(node);
} while (read_seqcount_retry(&uprobes_seqcount, seq));
return NULL;
}
static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
struct rb_node *node;
again:
node = rb_find_add_rcu(&uprobe->rb_node, &uprobes_tree, __uprobe_cmp);
if (node) {
struct uprobe *u = __node_2_uprobe(node);
if (!try_get_uprobe(u)) {
rb_erase(node, &uprobes_tree);
RB_CLEAR_NODE(&u->rb_node);
goto again;
}
return u;
}
return uprobe;
}
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
struct uprobe *u;
write_lock(&uprobes_treelock);
write_seqcount_begin(&uprobes_seqcount);
u = __insert_uprobe(uprobe);
write_seqcount_end(&uprobes_seqcount);
write_unlock(&uprobes_treelock);
return u;
}
static void
ref_ctr_mismatch_warn(struct uprobe *cur_uprobe, struct uprobe *uprobe)
{
pr_warn("ref_ctr_offset mismatch. inode: 0x%lx offset: 0x%llx "
"ref_ctr_offset(old): 0x%llx ref_ctr_offset(new): 0x%llx\n",
uprobe->inode->i_ino, (unsigned long long) uprobe->offset,
(unsigned long long) cur_uprobe->ref_ctr_offset,
(unsigned long long) uprobe->ref_ctr_offset);
}
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset,
loff_t ref_ctr_offset)
{
struct uprobe *uprobe, *cur_uprobe;
uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
if (!uprobe)
return ERR_PTR(-ENOMEM);
uprobe->inode = inode;
uprobe->offset = offset;
uprobe->ref_ctr_offset = ref_ctr_offset;
INIT_LIST_HEAD(&uprobe->consumers);
init_rwsem(&uprobe->register_rwsem);
init_rwsem(&uprobe->consumer_rwsem);
RB_CLEAR_NODE(&uprobe->rb_node);
refcount_set(&uprobe->ref, 1);
cur_uprobe = insert_uprobe(uprobe);
if (cur_uprobe != uprobe) {
if (cur_uprobe->ref_ctr_offset != uprobe->ref_ctr_offset) {
ref_ctr_mismatch_warn(cur_uprobe, uprobe);
put_uprobe(cur_uprobe);
kfree(uprobe);
return ERR_PTR(-EINVAL);
}
kfree(uprobe);
uprobe = cur_uprobe;
}
return uprobe;
}
static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
static atomic64_t id;
down_write(&uprobe->consumer_rwsem);
list_add_rcu(&uc->cons_node, &uprobe->consumers);
uc->id = (__u64) atomic64_inc_return(&id);
up_write(&uprobe->consumer_rwsem);
}
static void consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
down_write(&uprobe->consumer_rwsem);
list_del_rcu(&uc->cons_node);
up_write(&uprobe->consumer_rwsem);
}
static int __copy_insn(struct address_space *mapping, struct file *filp,
void *insn, int nbytes, loff_t offset)
{
struct page *page;
if (mapping->a_ops->read_folio)
page = read_mapping_page(mapping, offset >> PAGE_SHIFT, filp);
else
page = shmem_read_mapping_page(mapping, offset >> PAGE_SHIFT);
if (IS_ERR(page))
return PTR_ERR(page);
uprobe_copy_from_page(page, offset, insn, nbytes);
put_page(page);
return 0;
}
static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
struct address_space *mapping = uprobe->inode->i_mapping;
loff_t offs = uprobe->offset;
void *insn = &uprobe->arch.insn;
int size = sizeof(uprobe->arch.insn);
int len, err = -EIO;
do {
if (offs >= i_size_read(uprobe->inode))
break;
len = min_t(int, size, PAGE_SIZE - (offs & ~PAGE_MASK));
err = __copy_insn(mapping, filp, insn, len, offs);
if (err)
break;
insn += len;
offs += len;
size -= len;
} while (size);
return err;
}
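/*
 * First-hit preparation: copy the original instruction from the file and
 * let the architecture analyze it. UPROBE_COPY_INSN marks completion; the
 * smp_wmb() below pairs with the smp_rmb() in handle_swbp().
 */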
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
struct mm_struct *mm, unsigned long vaddr)
{
int ret = 0;
if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
return ret;
down_write(&uprobe->consumer_rwsem);
if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
goto out;
ret = copy_insn(uprobe, file);
if (ret)
goto out;
ret = -ENOTSUPP;
if (is_trap_insn((uprobe_opcode_t *)&uprobe->arch.insn))
goto out;
ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
if (ret)
goto out;
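	/* pairs with the smp_rmb() in handle_swbp() */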
smp_wmb();
set_bit(UPROBE_COPY_INSN, &uprobe->flags);
out:
up_write(&uprobe->consumer_rwsem);
return ret;
}
static inline bool consumer_filter(struct uprobe_consumer *uc, struct mm_struct *mm)
{
return !uc->filter || uc->filter(uc, mm);
}
static bool filter_chain(struct uprobe *uprobe, struct mm_struct *mm)
{
struct uprobe_consumer *uc;
bool ret = false;
down_read(&uprobe->consumer_rwsem);
list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
ret = consumer_filter(uc, mm);
if (ret)
break;
}
up_read(&uprobe->consumer_rwsem);
return ret;
}
static int install_breakpoint(struct uprobe *uprobe, struct vm_area_struct *vma,
unsigned long vaddr)
{
struct mm_struct *mm = vma->vm_mm;
bool first_uprobe;
int ret;
ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
if (ret)
return ret;
first_uprobe = !mm_flags_test(MMF_HAS_UPROBES, mm);
if (first_uprobe)
mm_flags_set(MMF_HAS_UPROBES, mm);
ret = set_swbp(&uprobe->arch, vma, vaddr);
if (!ret)
mm_flags_clear(MMF_RECALC_UPROBES, mm);
else if (first_uprobe)
mm_flags_clear(MMF_HAS_UPROBES, mm);
return ret;
}
static int remove_breakpoint(struct uprobe *uprobe, struct vm_area_struct *vma,
unsigned long vaddr)
{
struct mm_struct *mm = vma->vm_mm;
mm_flags_set(MMF_RECALC_UPROBES, mm);
return set_orig_insn(&uprobe->arch, vma, vaddr);
}
struct map_info {
struct map_info *next;
struct mm_struct *mm;
unsigned long vaddr;
};
static inline struct map_info *free_map_info(struct map_info *info)
{
struct map_info *next = info->next;
kfree(info);
return next;
}
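/*
 * Collect an (mm, vaddr) pair for every mm currently mapping the probed
 * range. Allocating under i_mmap_rwsem must use GFP_NOWAIT (to avoid rwsem
 * recursion through reclaim), so on allocation failure we count the missing
 * entries, drop the lock, preallocate with GFP_KERNEL, and retry.
 */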
static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
unsigned long pgoff = offset >> PAGE_SHIFT;
struct vm_area_struct *vma;
struct map_info *curr = NULL;
struct map_info *prev = NULL;
struct map_info *info;
int more = 0;
again:
i_mmap_lock_read(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
if (!valid_vma(vma, is_register))
continue;
if (!prev && !more) {
prev = kmalloc(sizeof(struct map_info),
GFP_NOWAIT | __GFP_NOMEMALLOC);
if (prev)
prev->next = NULL;
}
if (!prev) {
more++;
continue;
}
if (!mmget_not_zero(vma->vm_mm))
continue;
info = prev;
prev = prev->next;
info->next = curr;
curr = info;
info->mm = vma->vm_mm;
info->vaddr = offset_to_vaddr(vma, offset);
}
i_mmap_unlock_read(mapping);
if (!more)
goto out;
prev = curr;
while (curr) {
mmput(curr->mm);
curr = curr->next;
}
do {
info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
if (!info) {
curr = ERR_PTR(-ENOMEM);
goto out;
}
info->next = prev;
prev = info;
} while (--more);
goto again;
out:
while (prev)
prev = free_map_info(prev);
return curr;
}
static int
register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
{
bool is_register = !!new;
struct map_info *info;
int err = 0;
percpu_down_write(&dup_mmap_sem);
info = build_map_info(uprobe->inode->i_mapping,
uprobe->offset, is_register);
if (IS_ERR(info)) {
err = PTR_ERR(info);
goto out;
}
while (info) {
struct mm_struct *mm = info->mm;
struct vm_area_struct *vma;
if (err && is_register)
goto free;
mmap_write_lock(mm);
if (check_stable_address_space(mm))
goto unlock;
vma = find_vma(mm, info->vaddr);
if (!vma || !valid_vma(vma, is_register) ||
file_inode(vma->vm_file) != uprobe->inode)
goto unlock;
if (vma->vm_start > info->vaddr ||
vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
goto unlock;
if (is_register) {
if (consumer_filter(new, mm))
err = install_breakpoint(uprobe, vma, info->vaddr);
} else if (mm_flags_test(MMF_HAS_UPROBES, mm)) {
if (!filter_chain(uprobe, mm))
err |= remove_breakpoint(uprobe, vma, info->vaddr);
}
unlock:
mmap_write_unlock(mm);
free:
mmput(mm);
info = free_map_info(info);
}
out:
percpu_up_write(&dup_mmap_sem);
return err;
}
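/**
 * uprobe_unregister_nosync - unregister an already registered probe.
 * @uprobe: uprobe to remove
 * @uc: identify which probe if multiple probes are colocated.
 *
 * Callers must call uprobe_unregister_sync() before freeing @uc, so that
 * in-flight handlers are done with it.
 */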
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
int err;
down_write(&uprobe->register_rwsem);
consumer_del(uprobe, uc);
err = register_for_each_vma(uprobe, NULL);
up_write(&uprobe->register_rwsem);
if (unlikely(err)) {
uprobe_warn(current, "unregister, leaking uprobe");
return;
}
put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);
void uprobe_unregister_sync(void)
{
synchronize_rcu_tasks_trace();
synchronize_srcu(&uretprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);
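/**
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @ref_ctr_offset: offset of SDT marker / reference counter, or 0.
 * @uc: information on how to handle the probe.
 *
 * The caller must keep @inode (and the containing mapping) referenced and
 * non-removable for the lifetime of the probe.
 *
 * Return: the registered uprobe on success, or an ERR_PTR on failure.
 */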
struct uprobe *uprobe_register(struct inode *inode,
loff_t offset, loff_t ref_ctr_offset,
struct uprobe_consumer *uc)
{
struct uprobe *uprobe;
int ret;
if (!uc->handler && !uc->ret_handler)
return ERR_PTR(-EINVAL);
if (!inode->i_mapping->a_ops->read_folio &&
!shmem_mapping(inode->i_mapping))
return ERR_PTR(-EIO);
if (offset > i_size_read(inode))
return ERR_PTR(-EINVAL);
if (!IS_ALIGNED(offset, UPROBE_SWBP_INSN_SIZE))
return ERR_PTR(-EINVAL);
if (!IS_ALIGNED(ref_ctr_offset, sizeof(short)))
return ERR_PTR(-EINVAL);
uprobe = alloc_uprobe(inode, offset, ref_ctr_offset);
if (IS_ERR(uprobe))
return uprobe;
down_write(&uprobe->register_rwsem);
consumer_add(uprobe, uc);
ret = register_for_each_vma(uprobe, uc);
up_write(&uprobe->register_rwsem);
if (ret) {
uprobe_unregister_nosync(uprobe, uc);
uprobe_unregister_sync();
return ERR_PTR(ret);
}
return uprobe;
}
EXPORT_SYMBOL_GPL(uprobe_register);
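/**
 * uprobe_apply - add or remove the breakpoints according to @uc->filter
 * @uprobe: uprobe which "owns" the breakpoint
 * @uc: consumer which wants to add more or remove some breakpoints
 * @add: add or remove the breakpoints
 * Return: 0 on success or negative error code.
 */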
int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool add)
{
struct uprobe_consumer *con;
int ret = -ENOENT;
down_write(&uprobe->register_rwsem);
rcu_read_lock_trace();
list_for_each_entry_rcu(con, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
if (con == uc) {
ret = register_for_each_vma(uprobe, add ? uc : NULL);
break;
}
}
rcu_read_unlock_trace();
up_write(&uprobe->register_rwsem);
return ret;
}
static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
int err = 0;
mmap_write_lock(mm);
for_each_vma(vmi, vma) {
unsigned long vaddr;
loff_t offset;
if (!valid_vma(vma, false) ||
file_inode(vma->vm_file) != uprobe->inode)
continue;
offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
if (uprobe->offset < offset ||
uprobe->offset >= offset + vma->vm_end - vma->vm_start)
continue;
vaddr = offset_to_vaddr(vma, uprobe->offset);
err |= remove_breakpoint(uprobe, vma, vaddr);
}
mmap_write_unlock(mm);
return err;
}
static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
struct rb_node *n = uprobes_tree.rb_node;
while (n) {
struct uprobe *u = rb_entry(n, struct uprobe, rb_node);
if (inode < u->inode) {
n = n->rb_left;
} else if (inode > u->inode) {
n = n->rb_right;
} else {
if (max < u->offset)
n = n->rb_left;
else if (min > u->offset)
n = n->rb_right;
else
break;
}
}
return n;
}
static void build_probe_list(struct inode *inode,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct list_head *head)
{
loff_t min, max;
struct rb_node *n, *t;
struct uprobe *u;
INIT_LIST_HEAD(head);
min = vaddr_to_offset(vma, start);
max = min + (end - start) - 1;
read_lock(&uprobes_treelock);
n = find_node_in_range(inode, min, max);
if (n) {
for (t = n; t; t = rb_prev(t)) {
u = rb_entry(t, struct uprobe, rb_node);
if (u->inode != inode || u->offset < min)
break;
if (try_get_uprobe(u))
list_add(&u->pending_list, head);
}
for (t = n; (t = rb_next(t)); ) {
u = rb_entry(t, struct uprobe, rb_node);
if (u->inode != inode || u->offset > max)
break;
if (try_get_uprobe(u))
list_add(&u->pending_list, head);
}
}
read_unlock(&uprobes_treelock);
}
static int delayed_ref_ctr_inc(struct vm_area_struct *vma)
{
struct list_head *pos, *q;
struct delayed_uprobe *du;
unsigned long vaddr;
int ret = 0, err = 0;
mutex_lock(&delayed_uprobe_lock);
list_for_each_safe(pos, q, &delayed_uprobe_list) {
du = list_entry(pos, struct delayed_uprobe, list);
if (du->mm != vma->vm_mm ||
!valid_ref_ctr_vma(du->uprobe, vma))
continue;
vaddr = offset_to_vaddr(vma, du->uprobe->ref_ctr_offset);
ret = __update_ref_ctr(vma->vm_mm, vaddr, 1);
if (ret) {
update_ref_ctr_warn(du->uprobe, vma->vm_mm, 1);
if (!err)
err = ret;
}
delayed_uprobe_delete(du);
}
mutex_unlock(&delayed_uprobe_lock);
return err;
}
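/*
 * Called from the mmap path with mm->mmap_lock held. Errors are ignored
 * and 0 is always returned: the callers can't handle failure anyway.
 */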
int uprobe_mmap(struct vm_area_struct *vma)
{
struct list_head tmp_list;
struct uprobe *uprobe, *u;
struct inode *inode;
if (no_uprobe_events())
return 0;
if (vma->vm_file &&
(vma->vm_flags & (VM_WRITE|VM_SHARED)) == VM_WRITE &&
mm_flags_test(MMF_HAS_UPROBES, vma->vm_mm))
delayed_ref_ctr_inc(vma);
if (!valid_vma(vma, true))
return 0;
inode = file_inode(vma->vm_file);
if (!inode)
return 0;
mutex_lock(uprobes_mmap_hash(inode));
build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
if (!fatal_signal_pending(current) &&
filter_chain(uprobe, vma->vm_mm)) {
unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
install_breakpoint(uprobe, vma, vaddr);
}
put_uprobe(uprobe);
}
mutex_unlock(uprobes_mmap_hash(inode));
return 0;
}
static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
loff_t min, max;
struct inode *inode;
struct rb_node *n;
inode = file_inode(vma->vm_file);
min = vaddr_to_offset(vma, start);
max = min + (end - start) - 1;
read_lock(&uprobes_treelock);
n = find_node_in_range(inode, min, max);
read_unlock(&uprobes_treelock);
return !!n;
}
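/*
 * Called in context of a munmap of a vma.
 */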
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
if (no_uprobe_events() || !valid_vma(vma, false))
return;
if (!atomic_read(&vma->vm_mm->mm_users))
return;
if (!mm_flags_test(MMF_HAS_UPROBES, vma->vm_mm) ||
mm_flags_test(MMF_RECALC_UPROBES, vma->vm_mm))
return;
if (vma_has_uprobes(vma, start, end))
mm_flags_set(MMF_RECALC_UPROBES, vma->vm_mm);
}
static vm_fault_t xol_fault(const struct vm_special_mapping *sm,
struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct xol_area *area = vma->vm_mm->uprobes_state.xol_area;
vmf->page = area->page;
get_page(vmf->page);
return 0;
}
static int xol_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
return -EPERM;
}
static const struct vm_special_mapping xol_mapping = {
.name = "[uprobes]",
.fault = xol_fault,
.mremap = xol_mremap,
};
static int xol_add_vma(struct mm_struct *mm, struct xol_area *area)
{
struct vm_area_struct *vma;
int ret;
if (mmap_write_lock_killable(mm))
return -EINTR;
if (mm->uprobes_state.xol_area) {
ret = -EALREADY;
goto fail;
}
if (!area->vaddr) {
area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE,
PAGE_SIZE, 0, 0);
if (IS_ERR_VALUE(area->vaddr)) {
ret = area->vaddr;
goto fail;
}
}
vma = _install_special_mapping(mm, area->vaddr, PAGE_SIZE,
VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO|
VM_SEALED_SYSMAP,
&xol_mapping);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto fail;
}
ret = 0;
smp_store_release(&mm->uprobes_state.xol_area, area);
fail:
mmap_write_unlock(mm);
return ret;
}
void * __weak arch_uretprobe_trampoline(unsigned long *psize)
{
static uprobe_opcode_t insn = UPROBE_SWBP_INSN;
*psize = UPROBE_SWBP_INSN_SIZE;
return &insn;
}
static struct xol_area *__create_xol_area(unsigned long vaddr)
{
struct mm_struct *mm = current->mm;
unsigned long insns_size;
struct xol_area *area;
void *insns;
area = kzalloc(sizeof(*area), GFP_KERNEL);
if (unlikely(!area))
goto out;
area->bitmap = kcalloc(BITS_TO_LONGS(UINSNS_PER_PAGE), sizeof(long),
GFP_KERNEL);
if (!area->bitmap)
goto free_area;
area->page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);
if (!area->page)
goto free_bitmap;
area->vaddr = vaddr;
init_waitqueue_head(&area->wq);
set_bit(0, area->bitmap);
insns = arch_uretprobe_trampoline(&insns_size);
arch_uprobe_copy_ixol(area->page, 0, insns, insns_size);
if (!xol_add_vma(mm, area))
return area;
__free_page(area->page);
free_bitmap:
kfree(area->bitmap);
free_area:
kfree(area);
out:
return NULL;
}
static struct xol_area *get_xol_area(void)
{
struct mm_struct *mm = current->mm;
struct xol_area *area;
if (!mm->uprobes_state.xol_area)
__create_xol_area(0);
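	/* Pairs with the smp_store_release() in xol_add_vma() */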
area = READ_ONCE(mm->uprobes_state.xol_area);
return area;
}
void __weak arch_uprobe_clear_state(struct mm_struct *mm)
{
}
void __weak arch_uprobe_init_state(struct mm_struct *mm)
{
}
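/*
 * uprobe_clear_state - free the XOL area and drop the mm's pending
 * delayed-ref-ctr entries; called when the mm goes away.
 */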
void uprobe_clear_state(struct mm_struct *mm)
{
struct xol_area *area = mm->uprobes_state.xol_area;
mutex_lock(&delayed_uprobe_lock);
delayed_uprobe_remove(NULL, mm);
mutex_unlock(&delayed_uprobe_lock);
arch_uprobe_clear_state(mm);
if (!area)
return;
put_page(area->page);
kfree(area->bitmap);
kfree(area);
}
void uprobe_start_dup_mmap(void)
{
percpu_down_read(&dup_mmap_sem);
}
void uprobe_end_dup_mmap(void)
{
percpu_up_read(&dup_mmap_sem);
}
void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
if (mm_flags_test(MMF_HAS_UPROBES, oldmm)) {
mm_flags_set(MMF_HAS_UPROBES, newmm);
mm_flags_set(MMF_RECALC_UPROBES, newmm);
}
}
static unsigned long xol_get_slot_nr(struct xol_area *area)
{
unsigned long slot_nr;
slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
if (slot_nr < UINSNS_PER_PAGE) {
if (!test_and_set_bit(slot_nr, area->bitmap))
return slot_nr;
}
return UINSNS_PER_PAGE;
}
static bool xol_get_insn_slot(struct uprobe *uprobe, struct uprobe_task *utask)
{
struct xol_area *area = get_xol_area();
unsigned long slot_nr;
if (!area)
return false;
wait_event(area->wq, (slot_nr = xol_get_slot_nr(area)) < UINSNS_PER_PAGE);
utask->xol_vaddr = area->vaddr + slot_nr * UPROBE_XOL_SLOT_BYTES;
arch_uprobe_copy_ixol(area->page, utask->xol_vaddr,
&uprobe->arch.ixol, sizeof(uprobe->arch.ixol));
return true;
}
static void xol_free_insn_slot(struct uprobe_task *utask)
{
struct xol_area *area = current->mm->uprobes_state.xol_area;
unsigned long offset = utask->xol_vaddr - area->vaddr;
unsigned int slot_nr;
utask->xol_vaddr = 0;
if (WARN_ON_ONCE(offset >= PAGE_SIZE))
return;
slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
clear_bit(slot_nr, area->bitmap);
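	/* Order the bitmap update against the waitqueue_active() check below. */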
smp_mb__after_atomic();
if (waitqueue_active(&area->wq))
wake_up(&area->wq);
}
void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len)
{
copy_to_page(page, vaddr, src, len);
flush_dcache_page(page);
}
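/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */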
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}
unsigned long uprobe_get_trap_addr(struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
if (unlikely(utask && utask->active_uprobe))
return utask->vaddr;
return instruction_pointer(regs);
}
static void ri_pool_push(struct uprobe_task *utask, struct return_instance *ri)
{
ri->cons_cnt = 0;
ri->next = utask->ri_pool;
utask->ri_pool = ri;
}
static struct return_instance *ri_pool_pop(struct uprobe_task *utask)
{
struct return_instance *ri = utask->ri_pool;
if (likely(ri))
utask->ri_pool = ri->next;
return ri;
}
static void ri_free(struct return_instance *ri)
{
kfree(ri->extra_consumers);
kfree_rcu(ri, rcu);
}
static void free_ret_instance(struct uprobe_task *utask,
struct return_instance *ri, bool cleanup_hprobe)
{
unsigned seq;
if (cleanup_hprobe) {
enum hprobe_state hstate;
(void)hprobe_consume(&ri->hprobe, &hstate);
hprobe_finalize(&ri->hprobe, hstate);
}
if (raw_seqcount_try_begin(&utask->ri_seqcount, seq)) {
ri_pool_push(utask, ri);
} else {
ri_free(ri);
}
}
void uprobe_free_utask(struct task_struct *t)
{
struct uprobe_task *utask = t->utask;
struct return_instance *ri, *ri_next;
if (!utask)
return;
t->utask = NULL;
WARN_ON_ONCE(utask->active_uprobe || utask->xol_vaddr);
timer_delete_sync(&utask->ri_timer);
ri = utask->return_instances;
while (ri) {
ri_next = ri->next;
		free_ret_instance(utask, ri, true /* cleanup_hprobe */);
ri = ri_next;
}
ri = utask->ri_pool;
while (ri) {
ri_next = ri->next;
ri_free(ri);
ri = ri_next;
}
kfree(utask);
}
#define RI_TIMER_PERIOD (HZ / 10)
#define for_each_ret_instance_rcu(pos, head) \
for (pos = rcu_dereference_raw(head); pos; pos = rcu_dereference_raw(pos->next))
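/*
 * ri_timer() periodically "expires" leased hprobes: each LEASED return
 * instance is upgraded to a refcounted (STABLE) one, so that long-running
 * uretprobe'd functions do not pin the SRCU grace period indefinitely.
 */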
static void ri_timer(struct timer_list *timer)
{
struct uprobe_task *utask = container_of(timer, struct uprobe_task, ri_timer);
struct return_instance *ri;
guard(srcu)(&uretprobes_srcu);
guard(rcu)();
raw_write_seqcount_begin(&utask->ri_seqcount);
for_each_ret_instance_rcu(ri, utask->return_instances)
hprobe_expire(&ri->hprobe, false);
raw_write_seqcount_end(&utask->ri_seqcount);
}
static struct uprobe_task *alloc_utask(void)
{
struct uprobe_task *utask;
utask = kzalloc(sizeof(*utask), GFP_KERNEL);
if (!utask)
return NULL;
timer_setup(&utask->ri_timer, ri_timer, 0);
seqcount_init(&utask->ri_seqcount);
return utask;
}
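/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */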
static struct uprobe_task *get_utask(void)
{
if (!current->utask)
current->utask = alloc_utask();
return current->utask;
}
static struct return_instance *alloc_return_instance(struct uprobe_task *utask)
{
struct return_instance *ri;
ri = ri_pool_pop(utask);
if (ri)
return ri;
ri = kzalloc(sizeof(*ri), GFP_KERNEL);
if (!ri)
return ZERO_SIZE_PTR;
return ri;
}
static struct return_instance *dup_return_instance(struct return_instance *old)
{
struct return_instance *ri;
ri = kmemdup(old, sizeof(*ri), GFP_KERNEL);
if (!ri)
return NULL;
if (unlikely(old->cons_cnt > 1)) {
ri->extra_consumers = kmemdup(old->extra_consumers,
sizeof(ri->extra_consumers[0]) * (old->cons_cnt - 1),
GFP_KERNEL);
if (!ri->extra_consumers) {
kfree(ri);
return NULL;
}
}
return ri;
}
static int dup_utask(struct task_struct *t, struct uprobe_task *o_utask)
{
struct uprobe_task *n_utask;
struct return_instance **p, *o, *n;
struct uprobe *uprobe;
n_utask = alloc_utask();
if (!n_utask)
return -ENOMEM;
t->utask = n_utask;
guard(srcu)(&uretprobes_srcu);
p = &n_utask->return_instances;
for (o = o_utask->return_instances; o; o = o->next) {
n = dup_return_instance(o);
if (!n)
return -ENOMEM;
uprobe = hprobe_expire(&o->hprobe, true);
hprobe_init_stable(&n->hprobe, uprobe);
n->next = NULL;
rcu_assign_pointer(*p, n);
p = &n->next;
n_utask->depth++;
}
return 0;
}
static void dup_xol_work(struct callback_head *work)
{
if (current->flags & PF_EXITING)
return;
if (!__create_xol_area(current->utask->dup_xol_addr) &&
!fatal_signal_pending(current))
uprobe_warn(current, "dup xol area");
}
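/*
 * Called in context of a new clone/fork from copy_process.
 */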
void uprobe_copy_process(struct task_struct *t, u64 flags)
{
struct uprobe_task *utask = current->utask;
struct mm_struct *mm = current->mm;
struct xol_area *area;
t->utask = NULL;
if (!utask || !utask->return_instances)
return;
if (mm == t->mm && !(flags & CLONE_VFORK))
return;
if (dup_utask(t, utask))
return uprobe_warn(t, "dup ret instances");
area = mm->uprobes_state.xol_area;
if (!area)
return uprobe_warn(t, "dup xol area");
if (mm == t->mm)
return;
t->utask->dup_xol_addr = area->vaddr;
init_task_work(&t->utask->dup_xol_work, dup_xol_work);
task_work_add(t, &t->utask->dup_xol_work, TWA_RESUME);
}
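/*
 * The trampoline address is assumed to always equal area->vaddr; returns
 * UPROBE_NO_TRAMPOLINE_VADDR when the XOL area has not been set up.
 */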
unsigned long uprobe_get_trampoline_vaddr(void)
{
unsigned long trampoline_vaddr = UPROBE_NO_TRAMPOLINE_VADDR;
struct xol_area *area;
area = READ_ONCE(current->mm->uprobes_state.xol_area);
if (area)
trampoline_vaddr = area->vaddr;
return trampoline_vaddr;
}
static void cleanup_return_instances(struct uprobe_task *utask, bool chained,
struct pt_regs *regs)
{
struct return_instance *ri = utask->return_instances, *ri_next;
enum rp_check ctx = chained ? RP_CHECK_CHAIN_CALL : RP_CHECK_CALL;
while (ri && !arch_uretprobe_is_alive(ri, ctx, regs)) {
ri_next = ri->next;
rcu_assign_pointer(utask->return_instances, ri_next);
utask->depth--;
		free_ret_instance(utask, ri, true /* cleanup_hprobe */);
ri = ri_next;
}
}
static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs,
struct return_instance *ri)
{
struct uprobe_task *utask = current->utask;
unsigned long orig_ret_vaddr, trampoline_vaddr;
bool chained;
int srcu_idx;
if (!get_xol_area())
goto free;
if (utask->depth >= MAX_URETPROBE_DEPTH) {
printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
" nestedness limit pid/tgid=%d/%d\n",
current->pid, current->tgid);
goto free;
}
trampoline_vaddr = uprobe_get_trampoline_vaddr();
orig_ret_vaddr = arch_uretprobe_hijack_return_addr(trampoline_vaddr, regs);
if (orig_ret_vaddr == -1)
goto free;
chained = (orig_ret_vaddr == trampoline_vaddr);
cleanup_return_instances(utask, chained, regs);
if (chained) {
if (!utask->return_instances) {
uprobe_warn(current, "handle tail call");
goto free;
}
orig_ret_vaddr = utask->return_instances->orig_ret_vaddr;
}
srcu_idx = __srcu_read_lock(&uretprobes_srcu);
ri->func = instruction_pointer(regs);
ri->stack = user_stack_pointer(regs);
ri->orig_ret_vaddr = orig_ret_vaddr;
ri->chained = chained;
utask->depth++;
hprobe_init_leased(&ri->hprobe, uprobe, srcu_idx);
ri->next = utask->return_instances;
rcu_assign_pointer(utask->return_instances, ri);
mod_timer(&utask->ri_timer, jiffies + RI_TIMER_PERIOD);
return;
free:
ri_free(ri);
}
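/* Prepare to single-step the probed instruction out of line. */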
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
struct uprobe_task *utask = current->utask;
int err;
if (!try_get_uprobe(uprobe))
return -EINVAL;
if (!xol_get_insn_slot(uprobe, utask)) {
err = -ENOMEM;
goto err_out;
}
utask->vaddr = bp_vaddr;
err = arch_uprobe_pre_xol(&uprobe->arch, regs);
if (unlikely(err)) {
xol_free_insn_slot(utask);
goto err_out;
}
utask->active_uprobe = uprobe;
utask->state = UTASK_SSTEP;
return 0;
err_out:
put_uprobe(uprobe);
return err;
}
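/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep. When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip). This
 * is even more important if the task has a handler for SIGSEGV/etc: the
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */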
bool uprobe_deny_signal(void)
{
struct task_struct *t = current;
struct uprobe_task *utask = t->utask;
if (likely(!utask || !utask->active_uprobe))
return false;
WARN_ON_ONCE(utask->state != UTASK_SSTEP);
if (task_sigpending(t)) {
utask->signal_denied = true;
clear_tsk_thread_flag(t, TIF_SIGPENDING);
if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
utask->state = UTASK_SSTEP_TRAPPED;
set_tsk_thread_flag(t, TIF_UPROBE);
}
}
return true;
}
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
VMA_ITERATOR(vmi, mm, 0);
struct vm_area_struct *vma;
for_each_vma(vmi, vma) {
if (!valid_vma(vma, false))
continue;
if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
return;
}
mm_flags_clear(MMF_HAS_UPROBES, mm);
}
static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
struct page *page;
uprobe_opcode_t opcode;
int result;
if (WARN_ON_ONCE(!IS_ALIGNED(vaddr, UPROBE_SWBP_INSN_SIZE)))
return -EINVAL;
pagefault_disable();
result = __get_user(opcode, (uprobe_opcode_t __user *)vaddr);
pagefault_enable();
if (likely(result == 0))
goto out;
result = get_user_pages(vaddr, 1, FOLL_FORCE, &page);
if (result < 0)
return result;
uprobe_copy_from_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
put_page(page);
out:
return is_trap_insn(&opcode);
}
static struct uprobe *find_active_uprobe_speculative(unsigned long bp_vaddr)
{
struct mm_struct *mm = current->mm;
struct uprobe *uprobe = NULL;
struct vm_area_struct *vma;
struct file *vm_file;
loff_t offset;
unsigned int seq;
guard(rcu)();
if (!mmap_lock_speculate_try_begin(mm, &seq))
return NULL;
vma = vma_lookup(mm, bp_vaddr);
if (!vma)
return NULL;
vm_file = READ_ONCE(vma->vm_file);
if (!vm_file)
return NULL;
offset = (loff_t)(vma->vm_pgoff << PAGE_SHIFT) + (bp_vaddr - vma->vm_start);
uprobe = find_uprobe_rcu(vm_file->f_inode, offset);
if (!uprobe)
return NULL;
if (mmap_lock_speculate_retry(mm, seq))
return NULL;
return uprobe;
}
static struct uprobe *find_active_uprobe_rcu(unsigned long bp_vaddr, int *is_swbp)
{
struct mm_struct *mm = current->mm;
struct uprobe *uprobe = NULL;
struct vm_area_struct *vma;
uprobe = find_active_uprobe_speculative(bp_vaddr);
if (uprobe)
return uprobe;
mmap_read_lock(mm);
vma = vma_lookup(mm, bp_vaddr);
if (vma) {
if (vma->vm_file) {
struct inode *inode = file_inode(vma->vm_file);
loff_t offset = vaddr_to_offset(vma, bp_vaddr);
uprobe = find_uprobe_rcu(inode, offset);
}
if (!uprobe)
*is_swbp = is_trap_at_addr(mm, bp_vaddr);
} else {
*is_swbp = -EFAULT;
}
if (!uprobe && mm_flags_test_and_clear(MMF_RECALC_UPROBES, mm))
mmf_recalc_uprobes(mm);
mmap_read_unlock(mm);
return uprobe;
}
static struct return_instance *push_consumer(struct return_instance *ri, __u64 id, __u64 cookie)
{
struct return_consumer *ric;
if (unlikely(ri == ZERO_SIZE_PTR))
return ri;
if (unlikely(ri->cons_cnt > 0)) {
ric = krealloc(ri->extra_consumers, sizeof(*ric) * ri->cons_cnt, GFP_KERNEL);
if (!ric) {
ri_free(ri);
return ZERO_SIZE_PTR;
}
ri->extra_consumers = ric;
}
ric = likely(ri->cons_cnt == 0) ? &ri->consumer : &ri->extra_consumers[ri->cons_cnt - 1];
ric->id = id;
ric->cookie = cookie;
ri->cons_cnt++;
return ri;
}
static struct return_consumer *
return_consumer_find(struct return_instance *ri, int *iter, int id)
{
struct return_consumer *ric;
int idx;
	for (idx = *iter; idx < ri->cons_cnt; idx++) {
ric = likely(idx == 0) ? &ri->consumer : &ri->extra_consumers[idx - 1];
if (ric->id == id) {
*iter = idx + 1;
return ric;
}
}
return NULL;
}
static bool ignore_ret_handler(int rc)
{
return rc == UPROBE_HANDLER_REMOVE || rc == UPROBE_HANDLER_IGNORE;
}
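/*
 * Run every consumer's handler for this hit; if any consumer wants a
 * return probe, set up a return_instance. If all consumers that ran asked
 * for removal (and the filter chain agrees), zap the breakpoints in this mm.
 */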
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
struct uprobe_consumer *uc;
bool has_consumers = false, remove = true;
struct return_instance *ri = NULL;
struct uprobe_task *utask = current->utask;
utask->auprobe = &uprobe->arch;
list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
bool session = uc->handler && uc->ret_handler;
__u64 cookie = 0;
int rc = 0;
if (uc->handler) {
rc = uc->handler(uc, regs, &cookie);
WARN(rc < 0 || rc > 2,
"bad rc=0x%x from %ps()\n", rc, uc->handler);
}
remove &= rc == UPROBE_HANDLER_REMOVE;
has_consumers = true;
if (!uc->ret_handler || ignore_ret_handler(rc))
continue;
if (!ri)
ri = alloc_return_instance(utask);
if (session)
ri = push_consumer(ri, uc->id, cookie);
}
utask->auprobe = NULL;
if (!ZERO_OR_NULL_PTR(ri))
prepare_uretprobe(uprobe, regs, ri);
if (remove && has_consumers) {
down_read(&uprobe->register_rwsem);
if (!filter_chain(uprobe, current->mm)) {
WARN_ON(!uprobe_is_active(uprobe));
unapply_uprobe(uprobe, current->mm);
}
up_read(&uprobe->register_rwsem);
}
}
static void
handle_uretprobe_chain(struct return_instance *ri, struct uprobe *uprobe, struct pt_regs *regs)
{
struct return_consumer *ric;
struct uprobe_consumer *uc;
int ric_idx = 0;
if (unlikely(!uprobe))
return;
rcu_read_lock_trace();
list_for_each_entry_rcu(uc, &uprobe->consumers, cons_node, rcu_read_lock_trace_held()) {
bool session = uc->handler && uc->ret_handler;
if (uc->ret_handler) {
ric = return_consumer_find(ri, &ric_idx, uc->id);
if (!session || ric)
uc->ret_handler(uc, ri->func, regs, ric ? &ric->cookie : NULL);
}
}
rcu_read_unlock_trace();
}
static struct return_instance *find_next_ret_chain(struct return_instance *ri)
{
bool chained;
do {
chained = ri->chained;
ri = ri->next;
} while (chained);
return ri;
}
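/*
 * Handle the trap raised by the uretprobe trampoline: unwind the pending
 * return_instance chain, restore the original return address and invoke
 * the ret_handlers. Frames invalidated by longjmp() are discarded.
 */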
void uprobe_handle_trampoline(struct pt_regs *regs)
{
struct uprobe_task *utask;
struct return_instance *ri, *ri_next, *next_chain;
struct uprobe *uprobe;
enum hprobe_state hstate;
bool valid;
utask = current->utask;
if (!utask)
goto sigill;
ri = utask->return_instances;
if (!ri)
goto sigill;
do {
next_chain = find_next_ret_chain(ri);
valid = !next_chain || arch_uretprobe_is_alive(next_chain, RP_CHECK_RET, regs);
instruction_pointer_set(regs, ri->orig_ret_vaddr);
do {
ri_next = ri->next;
rcu_assign_pointer(utask->return_instances, ri_next);
utask->depth--;
uprobe = hprobe_consume(&ri->hprobe, &hstate);
if (valid)
handle_uretprobe_chain(ri, uprobe, regs);
hprobe_finalize(&ri->hprobe, hstate);
			free_ret_instance(utask, ri, false /* !cleanup_hprobe */);
ri = ri_next;
} while (ri != next_chain);
} while (!valid);
return;
sigill:
uprobe_warn(current, "handle uretprobe, sending SIGILL.");
force_sig(SIGILL);
}
bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs)
{
return false;
}
bool __weak arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
struct pt_regs *regs)
{
return true;
}
void __weak arch_uprobe_optimize(struct arch_uprobe *auprobe, unsigned long vaddr)
{
}
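/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */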
static void handle_swbp(struct pt_regs *regs)
{
struct uprobe *uprobe;
unsigned long bp_vaddr;
int is_swbp;
bp_vaddr = uprobe_get_swbp_addr(regs);
if (bp_vaddr == uprobe_get_trampoline_vaddr())
return uprobe_handle_trampoline(regs);
rcu_read_lock_trace();
uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
if (!uprobe) {
if (is_swbp > 0) {
force_sig(SIGTRAP);
} else {
instruction_pointer_set(regs, bp_vaddr);
}
goto out;
}
instruction_pointer_set(regs, bp_vaddr);
if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
goto out;
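	/* Pairs with the smp_wmb() in prepare_uprobe(). */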
smp_rmb();
if (!get_utask())
goto out;
if (arch_uprobe_ignore(&uprobe->arch, regs))
goto out;
handler_chain(uprobe, regs);
if (instruction_pointer(regs) != bp_vaddr)
goto out;
arch_uprobe_optimize(&uprobe->arch, bp_vaddr);
if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
goto out;
if (pre_ssout(uprobe, regs, bp_vaddr))
goto out;
out:
rcu_read_unlock_trace();
}
void handle_syscall_uprobe(struct pt_regs *regs, unsigned long bp_vaddr)
{
struct uprobe *uprobe;
int is_swbp;
guard(rcu_tasks_trace)();
uprobe = find_active_uprobe_rcu(bp_vaddr, &is_swbp);
if (!uprobe)
return;
if (!get_utask())
return;
if (arch_uprobe_ignore(&uprobe->arch, regs))
return;
handler_chain(uprobe, regs);
}
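/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */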
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
struct uprobe *uprobe;
int err = 0;
uprobe = utask->active_uprobe;
if (utask->state == UTASK_SSTEP_ACK)
err = arch_uprobe_post_xol(&uprobe->arch, regs);
else if (utask->state == UTASK_SSTEP_TRAPPED)
arch_uprobe_abort_xol(&uprobe->arch, regs);
else
WARN_ON_ONCE(1);
put_uprobe(uprobe);
utask->active_uprobe = NULL;
utask->state = UTASK_RUNNING;
xol_free_insn_slot(utask);
if (utask->signal_denied) {
set_thread_flag(TIF_SIGPENDING);
utask->signal_denied = false;
}
if (unlikely(err)) {
uprobe_warn(current, "execute the probed insn, sending SIGILL.");
force_sig(SIGILL);
}
}
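/*
 * On breakpoint hit, the breakpoint notifier sets the TIF_UPROBE flag and
 * lets the thread return from the interrupt; uprobe_notify_resume() then
 * runs handle_swbp(). On a singlestep exception the notifier does the same
 * and we finish up in handle_singlestep().
 */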
void uprobe_notify_resume(struct pt_regs *regs)
{
struct uprobe_task *utask;
clear_thread_flag(TIF_UPROBE);
utask = current->utask;
if (utask && utask->active_uprobe)
handle_singlestep(utask, regs);
else
handle_swbp(regs);
}
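/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */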
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
if (!current->mm)
return 0;
if (!mm_flags_test(MMF_HAS_UPROBES, current->mm) &&
(!current->utask || !current->utask->return_instances))
return 0;
set_thread_flag(TIF_UPROBE);
return 1;
}
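/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate completion of
 * singlestep.
 */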
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
struct uprobe_task *utask = current->utask;
if (!current->mm || !utask || !utask->active_uprobe)
return 0;
utask->state = UTASK_SSTEP_ACK;
set_thread_flag(TIF_UPROBE);
return 1;
}
static struct notifier_block uprobe_exception_nb = {
.notifier_call = arch_uprobe_exception_notify,
.priority = INT_MAX-1,
};
void __init uprobes_init(void)
{
int i;
for (i = 0; i < UPROBES_HASH_SZ; i++)
mutex_init(&uprobes_mmap_mutex[i]);
BUG_ON(register_die_notifier(&uprobe_exception_nb));
}