// SPDX-License-Identifier: GPL-2.0-only1/*2* linux/mm/filemap.c3*4* Copyright (C) 1994-1999 Linus Torvalds5*/67/*8* This file handles the generic file mmap semantics used by9* most "normal" filesystems (but you don't /have/ to use this:10* the NFS filesystem used to do this differently, for example)11*/12#include <linux/export.h>13#include <linux/compiler.h>14#include <linux/dax.h>15#include <linux/fs.h>16#include <linux/sched/signal.h>17#include <linux/uaccess.h>18#include <linux/capability.h>19#include <linux/kernel_stat.h>20#include <linux/gfp.h>21#include <linux/mm.h>22#include <linux/swap.h>23#include <linux/swapops.h>24#include <linux/syscalls.h>25#include <linux/mman.h>26#include <linux/pagemap.h>27#include <linux/file.h>28#include <linux/uio.h>29#include <linux/error-injection.h>30#include <linux/hash.h>31#include <linux/writeback.h>32#include <linux/backing-dev.h>33#include <linux/pagevec.h>34#include <linux/security.h>35#include <linux/cpuset.h>36#include <linux/hugetlb.h>37#include <linux/memcontrol.h>38#include <linux/shmem_fs.h>39#include <linux/rmap.h>40#include <linux/delayacct.h>41#include <linux/psi.h>42#include <linux/ramfs.h>43#include <linux/page_idle.h>44#include <linux/migrate.h>45#include <linux/pipe_fs_i.h>46#include <linux/splice.h>47#include <linux/rcupdate_wait.h>48#include <linux/sched/mm.h>49#include <linux/sysctl.h>50#include <asm/pgalloc.h>51#include <asm/tlbflush.h>52#include "internal.h"5354#define CREATE_TRACE_POINTS55#include <trace/events/filemap.h>5657/*58* FIXME: remove all knowledge of the buffer layer from the core VM59*/60#include <linux/buffer_head.h> /* for try_to_free_buffers */6162#include <asm/mman.h>6364#include "swap.h"6566/*67* Shared mappings implemented 30.11.1994. It's not fully working yet,68* though.69*70* Shared mappings now work. 15.8.1995 Bruno.71*72* finished 'unifying' the page and buffer cache and SMP-threaded the73* page-cache, 21.05.1999, Ingo Molnar <[email protected]>74*75* SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <[email protected]>76*/7778/*79* Lock ordering:80*81* ->i_mmap_rwsem (truncate_pagecache)82* ->private_lock (__free_pte->block_dirty_folio)83* ->swap_lock (exclusive_swap_page, others)84* ->i_pages lock85*86* ->i_rwsem87* ->invalidate_lock (acquired by fs in truncate path)88* ->i_mmap_rwsem (truncate->unmap_mapping_range)89*90* ->mmap_lock91* ->i_mmap_rwsem92* ->page_table_lock or pte_lock (various, mainly in memory.c)93* ->i_pages lock (arch-dependent flush_dcache_mmap_lock)94*95* ->mmap_lock96* ->invalidate_lock (filemap_fault)97* ->lock_page (filemap_fault, access_process_vm)98*99* ->i_rwsem (generic_perform_write)100* ->mmap_lock (fault_in_readable->do_page_fault)101*102* bdi->wb.list_lock103* sb_lock (fs/fs-writeback.c)104* ->i_pages lock (__sync_single_inode)105*106* ->i_mmap_rwsem107* ->anon_vma.lock (vma_merge)108*109* ->anon_vma.lock110* ->page_table_lock or pte_lock (anon_vma_prepare and various)111*112* ->page_table_lock or pte_lock113* ->swap_lock (try_to_unmap_one)114* ->private_lock (try_to_unmap_one)115* ->i_pages lock (try_to_unmap_one)116* ->lruvec->lru_lock (follow_page_mask->mark_page_accessed)117* ->lruvec->lru_lock (check_pte_range->folio_isolate_lru)118* ->private_lock (folio_remove_rmap_pte->set_page_dirty)119* ->i_pages lock (folio_remove_rmap_pte->set_page_dirty)120* bdi.wb->list_lock (folio_remove_rmap_pte->set_page_dirty)121* ->inode->i_lock (folio_remove_rmap_pte->set_page_dirty)122* bdi.wb->list_lock (zap_pte_range->set_page_dirty)123* ->inode->i_lock (zap_pte_range->set_page_dirty)124* ->private_lock (zap_pte_range->block_dirty_folio)125*/126127static void page_cache_delete(struct address_space *mapping,128struct folio *folio, void *shadow)129{130XA_STATE(xas, &mapping->i_pages, folio->index);131long nr = 1;132133mapping_set_update(&xas, mapping);134135xas_set_order(&xas, folio->index, folio_order(folio));136nr = folio_nr_pages(folio);137138VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);139140xas_store(&xas, shadow);141xas_init_marks(&xas);142143folio->mapping = NULL;144/* Leave folio->index set: truncation lookup relies upon it */145mapping->nrpages -= nr;146}147148static void filemap_unaccount_folio(struct address_space *mapping,149struct folio *folio)150{151long nr;152153VM_BUG_ON_FOLIO(folio_mapped(folio), folio);154if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(folio_mapped(folio))) {155pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n",156current->comm, folio_pfn(folio));157dump_page(&folio->page, "still mapped when deleted");158dump_stack();159add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);160161if (mapping_exiting(mapping) && !folio_test_large(folio)) {162int mapcount = folio_mapcount(folio);163164if (folio_ref_count(folio) >= mapcount + 2) {165/*166* All vmas have already been torn down, so it's167* a good bet that actually the page is unmapped168* and we'd rather not leak it: if we're wrong,169* another bad page check should catch it later.170*/171atomic_set(&folio->_mapcount, -1);172folio_ref_sub(folio, mapcount);173}174}175}176177/* hugetlb folios do not participate in page cache accounting. */178if (folio_test_hugetlb(folio))179return;180181nr = folio_nr_pages(folio);182183__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);184if (folio_test_swapbacked(folio)) {185__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);186if (folio_test_pmd_mappable(folio))187__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);188} else if (folio_test_pmd_mappable(folio)) {189__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);190filemap_nr_thps_dec(mapping);191}192if (test_bit(AS_KERNEL_FILE, &folio->mapping->flags))193mod_node_page_state(folio_pgdat(folio),194NR_KERNEL_FILE_PAGES, -nr);195196/*197* At this point folio must be either written or cleaned by198* truncate. Dirty folio here signals a bug and loss of199* unwritten data - on ordinary filesystems.200*201* But it's harmless on in-memory filesystems like tmpfs; and can202* occur when a driver which did get_user_pages() sets page dirty203* before putting it, while the inode is being finally evicted.204*205* Below fixes dirty accounting after removing the folio entirely206* but leaves the dirty flag set: it has no effect for truncated207* folio and anyway will be cleared before returning folio to208* buddy allocator.209*/210if (WARN_ON_ONCE(folio_test_dirty(folio) &&211mapping_can_writeback(mapping)))212folio_account_cleaned(folio, inode_to_wb(mapping->host));213}214215/*216* Delete a page from the page cache and free it. Caller has to make217* sure the page is locked and that nobody else uses it - or that usage218* is safe. The caller must hold the i_pages lock.219*/220void __filemap_remove_folio(struct folio *folio, void *shadow)221{222struct address_space *mapping = folio->mapping;223224trace_mm_filemap_delete_from_page_cache(folio);225filemap_unaccount_folio(mapping, folio);226page_cache_delete(mapping, folio, shadow);227}228229void filemap_free_folio(struct address_space *mapping, struct folio *folio)230{231void (*free_folio)(struct folio *);232233free_folio = mapping->a_ops->free_folio;234if (free_folio)235free_folio(folio);236237folio_put_refs(folio, folio_nr_pages(folio));238}239240/**241* filemap_remove_folio - Remove folio from page cache.242* @folio: The folio.243*244* This must be called only on folios that are locked and have been245* verified to be in the page cache. It will never put the folio into246* the free list because the caller has a reference on the page.247*/248void filemap_remove_folio(struct folio *folio)249{250struct address_space *mapping = folio->mapping;251252BUG_ON(!folio_test_locked(folio));253spin_lock(&mapping->host->i_lock);254xa_lock_irq(&mapping->i_pages);255__filemap_remove_folio(folio, NULL);256xa_unlock_irq(&mapping->i_pages);257if (mapping_shrinkable(mapping))258inode_add_lru(mapping->host);259spin_unlock(&mapping->host->i_lock);260261filemap_free_folio(mapping, folio);262}263264/*265* page_cache_delete_batch - delete several folios from page cache266* @mapping: the mapping to which folios belong267* @fbatch: batch of folios to delete268*269* The function walks over mapping->i_pages and removes folios passed in270* @fbatch from the mapping. The function expects @fbatch to be sorted271* by page index and is optimised for it to be dense.272* It tolerates holes in @fbatch (mapping entries at those indices are not273* modified).274*275* The function expects the i_pages lock to be held.276*/277static void page_cache_delete_batch(struct address_space *mapping,278struct folio_batch *fbatch)279{280XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);281long total_pages = 0;282int i = 0;283struct folio *folio;284285mapping_set_update(&xas, mapping);286xas_for_each(&xas, folio, ULONG_MAX) {287if (i >= folio_batch_count(fbatch))288break;289290/* A swap/dax/shadow entry got inserted? Skip it. */291if (xa_is_value(folio))292continue;293/*294* A page got inserted in our range? Skip it. We have our295* pages locked so they are protected from being removed.296* If we see a page whose index is higher than ours, it297* means our page has been removed, which shouldn't be298* possible because we're holding the PageLock.299*/300if (folio != fbatch->folios[i]) {301VM_BUG_ON_FOLIO(folio->index >302fbatch->folios[i]->index, folio);303continue;304}305306WARN_ON_ONCE(!folio_test_locked(folio));307308folio->mapping = NULL;309/* Leave folio->index set: truncation lookup relies on it */310311i++;312xas_store(&xas, NULL);313total_pages += folio_nr_pages(folio);314}315mapping->nrpages -= total_pages;316}317318void delete_from_page_cache_batch(struct address_space *mapping,319struct folio_batch *fbatch)320{321int i;322323if (!folio_batch_count(fbatch))324return;325326spin_lock(&mapping->host->i_lock);327xa_lock_irq(&mapping->i_pages);328for (i = 0; i < folio_batch_count(fbatch); i++) {329struct folio *folio = fbatch->folios[i];330331trace_mm_filemap_delete_from_page_cache(folio);332filemap_unaccount_folio(mapping, folio);333}334page_cache_delete_batch(mapping, fbatch);335xa_unlock_irq(&mapping->i_pages);336if (mapping_shrinkable(mapping))337inode_add_lru(mapping->host);338spin_unlock(&mapping->host->i_lock);339340for (i = 0; i < folio_batch_count(fbatch); i++)341filemap_free_folio(mapping, fbatch->folios[i]);342}343344int filemap_check_errors(struct address_space *mapping)345{346int ret = 0;347/* Check for outstanding write errors */348if (test_bit(AS_ENOSPC, &mapping->flags) &&349test_and_clear_bit(AS_ENOSPC, &mapping->flags))350ret = -ENOSPC;351if (test_bit(AS_EIO, &mapping->flags) &&352test_and_clear_bit(AS_EIO, &mapping->flags))353ret = -EIO;354return ret;355}356EXPORT_SYMBOL(filemap_check_errors);357358static int filemap_check_and_keep_errors(struct address_space *mapping)359{360/* Check for outstanding write errors */361if (test_bit(AS_EIO, &mapping->flags))362return -EIO;363if (test_bit(AS_ENOSPC, &mapping->flags))364return -ENOSPC;365return 0;366}367368/**369* filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range370* @mapping: address space structure to write371* @wbc: the writeback_control controlling the writeout372*373* Call writepages on the mapping using the provided wbc to control the374* writeout.375*376* Return: %0 on success, negative error code otherwise.377*/378int filemap_fdatawrite_wbc(struct address_space *mapping,379struct writeback_control *wbc)380{381int ret;382383if (!mapping_can_writeback(mapping) ||384!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))385return 0;386387wbc_attach_fdatawrite_inode(wbc, mapping->host);388ret = do_writepages(mapping, wbc);389wbc_detach_inode(wbc);390return ret;391}392EXPORT_SYMBOL(filemap_fdatawrite_wbc);393394/**395* __filemap_fdatawrite_range - start writeback on mapping dirty pages in range396* @mapping: address space structure to write397* @start: offset in bytes where the range starts398* @end: offset in bytes where the range ends (inclusive)399* @sync_mode: enable synchronous operation400*401* Start writeback against all of a mapping's dirty pages that lie402* within the byte offsets <start, end> inclusive.403*404* If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as405* opposed to a regular memory cleansing writeback. The difference between406* these two operations is that if a dirty page/buffer is encountered, it must407* be waited upon, and not just skipped over.408*409* Return: %0 on success, negative error code otherwise.410*/411int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,412loff_t end, int sync_mode)413{414struct writeback_control wbc = {415.sync_mode = sync_mode,416.nr_to_write = LONG_MAX,417.range_start = start,418.range_end = end,419};420421return filemap_fdatawrite_wbc(mapping, &wbc);422}423424static inline int __filemap_fdatawrite(struct address_space *mapping,425int sync_mode)426{427return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);428}429430int filemap_fdatawrite(struct address_space *mapping)431{432return __filemap_fdatawrite(mapping, WB_SYNC_ALL);433}434EXPORT_SYMBOL(filemap_fdatawrite);435436int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,437loff_t end)438{439return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);440}441EXPORT_SYMBOL(filemap_fdatawrite_range);442443/**444* filemap_fdatawrite_range_kick - start writeback on a range445* @mapping: target address_space446* @start: index to start writeback on447* @end: last (inclusive) index for writeback448*449* This is a non-integrity writeback helper, to start writing back folios450* for the indicated range.451*452* Return: %0 on success, negative error code otherwise.453*/454int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start,455loff_t end)456{457return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_NONE);458}459EXPORT_SYMBOL_GPL(filemap_fdatawrite_range_kick);460461/**462* filemap_flush - mostly a non-blocking flush463* @mapping: target address_space464*465* This is a mostly non-blocking flush. Not suitable for data-integrity466* purposes - I/O may not be started against all dirty pages.467*468* Return: %0 on success, negative error code otherwise.469*/470int filemap_flush(struct address_space *mapping)471{472return __filemap_fdatawrite(mapping, WB_SYNC_NONE);473}474EXPORT_SYMBOL(filemap_flush);475476/**477* filemap_range_has_page - check if a page exists in range.478* @mapping: address space within which to check479* @start_byte: offset in bytes where the range starts480* @end_byte: offset in bytes where the range ends (inclusive)481*482* Find at least one page in the range supplied, usually used to check if483* direct writing in this range will trigger a writeback.484*485* Return: %true if at least one page exists in the specified range,486* %false otherwise.487*/488bool filemap_range_has_page(struct address_space *mapping,489loff_t start_byte, loff_t end_byte)490{491struct folio *folio;492XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);493pgoff_t max = end_byte >> PAGE_SHIFT;494495if (end_byte < start_byte)496return false;497498rcu_read_lock();499for (;;) {500folio = xas_find(&xas, max);501if (xas_retry(&xas, folio))502continue;503/* Shadow entries don't count */504if (xa_is_value(folio))505continue;506/*507* We don't need to try to pin this page; we're about to508* release the RCU lock anyway. It is enough to know that509* there was a page here recently.510*/511break;512}513rcu_read_unlock();514515return folio != NULL;516}517EXPORT_SYMBOL(filemap_range_has_page);518519static void __filemap_fdatawait_range(struct address_space *mapping,520loff_t start_byte, loff_t end_byte)521{522pgoff_t index = start_byte >> PAGE_SHIFT;523pgoff_t end = end_byte >> PAGE_SHIFT;524struct folio_batch fbatch;525unsigned nr_folios;526527folio_batch_init(&fbatch);528529while (index <= end) {530unsigned i;531532nr_folios = filemap_get_folios_tag(mapping, &index, end,533PAGECACHE_TAG_WRITEBACK, &fbatch);534535if (!nr_folios)536break;537538for (i = 0; i < nr_folios; i++) {539struct folio *folio = fbatch.folios[i];540541folio_wait_writeback(folio);542}543folio_batch_release(&fbatch);544cond_resched();545}546}547548/**549* filemap_fdatawait_range - wait for writeback to complete550* @mapping: address space structure to wait for551* @start_byte: offset in bytes where the range starts552* @end_byte: offset in bytes where the range ends (inclusive)553*554* Walk the list of under-writeback pages of the given address space555* in the given range and wait for all of them. Check error status of556* the address space and return it.557*558* Since the error status of the address space is cleared by this function,559* callers are responsible for checking the return value and handling and/or560* reporting the error.561*562* Return: error status of the address space.563*/564int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,565loff_t end_byte)566{567__filemap_fdatawait_range(mapping, start_byte, end_byte);568return filemap_check_errors(mapping);569}570EXPORT_SYMBOL(filemap_fdatawait_range);571572/**573* filemap_fdatawait_range_keep_errors - wait for writeback to complete574* @mapping: address space structure to wait for575* @start_byte: offset in bytes where the range starts576* @end_byte: offset in bytes where the range ends (inclusive)577*578* Walk the list of under-writeback pages of the given address space in the579* given range and wait for all of them. Unlike filemap_fdatawait_range(),580* this function does not clear error status of the address space.581*582* Use this function if callers don't handle errors themselves. Expected583* call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),584* fsfreeze(8)585*/586int filemap_fdatawait_range_keep_errors(struct address_space *mapping,587loff_t start_byte, loff_t end_byte)588{589__filemap_fdatawait_range(mapping, start_byte, end_byte);590return filemap_check_and_keep_errors(mapping);591}592EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);593594/**595* file_fdatawait_range - wait for writeback to complete596* @file: file pointing to address space structure to wait for597* @start_byte: offset in bytes where the range starts598* @end_byte: offset in bytes where the range ends (inclusive)599*600* Walk the list of under-writeback pages of the address space that file601* refers to, in the given range and wait for all of them. Check error602* status of the address space vs. the file->f_wb_err cursor and return it.603*604* Since the error status of the file is advanced by this function,605* callers are responsible for checking the return value and handling and/or606* reporting the error.607*608* Return: error status of the address space vs. the file->f_wb_err cursor.609*/610int file_fdatawait_range(struct file *file, loff_t start_byte, loff_t end_byte)611{612struct address_space *mapping = file->f_mapping;613614__filemap_fdatawait_range(mapping, start_byte, end_byte);615return file_check_and_advance_wb_err(file);616}617EXPORT_SYMBOL(file_fdatawait_range);618619/**620* filemap_fdatawait_keep_errors - wait for writeback without clearing errors621* @mapping: address space structure to wait for622*623* Walk the list of under-writeback pages of the given address space624* and wait for all of them. Unlike filemap_fdatawait(), this function625* does not clear error status of the address space.626*627* Use this function if callers don't handle errors themselves. Expected628* call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),629* fsfreeze(8)630*631* Return: error status of the address space.632*/633int filemap_fdatawait_keep_errors(struct address_space *mapping)634{635__filemap_fdatawait_range(mapping, 0, LLONG_MAX);636return filemap_check_and_keep_errors(mapping);637}638EXPORT_SYMBOL(filemap_fdatawait_keep_errors);639640/* Returns true if writeback might be needed or already in progress. */641static bool mapping_needs_writeback(struct address_space *mapping)642{643return mapping->nrpages;644}645646bool filemap_range_has_writeback(struct address_space *mapping,647loff_t start_byte, loff_t end_byte)648{649XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);650pgoff_t max = end_byte >> PAGE_SHIFT;651struct folio *folio;652653if (end_byte < start_byte)654return false;655656rcu_read_lock();657xas_for_each(&xas, folio, max) {658if (xas_retry(&xas, folio))659continue;660if (xa_is_value(folio))661continue;662if (folio_test_dirty(folio) || folio_test_locked(folio) ||663folio_test_writeback(folio))664break;665}666rcu_read_unlock();667return folio != NULL;668}669EXPORT_SYMBOL_GPL(filemap_range_has_writeback);670671/**672* filemap_write_and_wait_range - write out & wait on a file range673* @mapping: the address_space for the pages674* @lstart: offset in bytes where the range starts675* @lend: offset in bytes where the range ends (inclusive)676*677* Write out and wait upon file offsets lstart->lend, inclusive.678*679* Note that @lend is inclusive (describes the last byte to be written) so680* that this function can be used to write to the very end-of-file (end = -1).681*682* Return: error status of the address space.683*/684int filemap_write_and_wait_range(struct address_space *mapping,685loff_t lstart, loff_t lend)686{687int err = 0, err2;688689if (lend < lstart)690return 0;691692if (mapping_needs_writeback(mapping)) {693err = __filemap_fdatawrite_range(mapping, lstart, lend,694WB_SYNC_ALL);695/*696* Even if the above returned error, the pages may be697* written partially (e.g. -ENOSPC), so we wait for it.698* But the -EIO is special case, it may indicate the worst699* thing (e.g. bug) happened, so we avoid waiting for it.700*/701if (err != -EIO)702__filemap_fdatawait_range(mapping, lstart, lend);703}704err2 = filemap_check_errors(mapping);705if (!err)706err = err2;707return err;708}709EXPORT_SYMBOL(filemap_write_and_wait_range);710711void __filemap_set_wb_err(struct address_space *mapping, int err)712{713errseq_t eseq = errseq_set(&mapping->wb_err, err);714715trace_filemap_set_wb_err(mapping, eseq);716}717EXPORT_SYMBOL(__filemap_set_wb_err);718719/**720* file_check_and_advance_wb_err - report wb error (if any) that was previously721* and advance wb_err to current one722* @file: struct file on which the error is being reported723*724* When userland calls fsync (or something like nfsd does the equivalent), we725* want to report any writeback errors that occurred since the last fsync (or726* since the file was opened if there haven't been any).727*728* Grab the wb_err from the mapping. If it matches what we have in the file,729* then just quickly return 0. The file is all caught up.730*731* If it doesn't match, then take the mapping value, set the "seen" flag in732* it and try to swap it into place. If it works, or another task beat us733* to it with the new value, then update the f_wb_err and return the error734* portion. The error at this point must be reported via proper channels735* (a'la fsync, or NFS COMMIT operation, etc.).736*737* While we handle mapping->wb_err with atomic operations, the f_wb_err738* value is protected by the f_lock since we must ensure that it reflects739* the latest value swapped in for this file descriptor.740*741* Return: %0 on success, negative error code otherwise.742*/743int file_check_and_advance_wb_err(struct file *file)744{745int err = 0;746errseq_t old = READ_ONCE(file->f_wb_err);747struct address_space *mapping = file->f_mapping;748749/* Locklessly handle the common case where nothing has changed */750if (errseq_check(&mapping->wb_err, old)) {751/* Something changed, must use slow path */752spin_lock(&file->f_lock);753old = file->f_wb_err;754err = errseq_check_and_advance(&mapping->wb_err,755&file->f_wb_err);756trace_file_check_and_advance_wb_err(file, old);757spin_unlock(&file->f_lock);758}759760/*761* We're mostly using this function as a drop in replacement for762* filemap_check_errors. Clear AS_EIO/AS_ENOSPC to emulate the effect763* that the legacy code would have had on these flags.764*/765clear_bit(AS_EIO, &mapping->flags);766clear_bit(AS_ENOSPC, &mapping->flags);767return err;768}769EXPORT_SYMBOL(file_check_and_advance_wb_err);770771/**772* file_write_and_wait_range - write out & wait on a file range773* @file: file pointing to address_space with pages774* @lstart: offset in bytes where the range starts775* @lend: offset in bytes where the range ends (inclusive)776*777* Write out and wait upon file offsets lstart->lend, inclusive.778*779* Note that @lend is inclusive (describes the last byte to be written) so780* that this function can be used to write to the very end-of-file (end = -1).781*782* After writing out and waiting on the data, we check and advance the783* f_wb_err cursor to the latest value, and return any errors detected there.784*785* Return: %0 on success, negative error code otherwise.786*/787int file_write_and_wait_range(struct file *file, loff_t lstart, loff_t lend)788{789int err = 0, err2;790struct address_space *mapping = file->f_mapping;791792if (lend < lstart)793return 0;794795if (mapping_needs_writeback(mapping)) {796err = __filemap_fdatawrite_range(mapping, lstart, lend,797WB_SYNC_ALL);798/* See comment of filemap_write_and_wait() */799if (err != -EIO)800__filemap_fdatawait_range(mapping, lstart, lend);801}802err2 = file_check_and_advance_wb_err(file);803if (!err)804err = err2;805return err;806}807EXPORT_SYMBOL(file_write_and_wait_range);808809/**810* replace_page_cache_folio - replace a pagecache folio with a new one811* @old: folio to be replaced812* @new: folio to replace with813*814* This function replaces a folio in the pagecache with a new one. On815* success it acquires the pagecache reference for the new folio and816* drops it for the old folio. Both the old and new folios must be817* locked. This function does not add the new folio to the LRU, the818* caller must do that.819*820* The remove + add is atomic. This function cannot fail.821*/822void replace_page_cache_folio(struct folio *old, struct folio *new)823{824struct address_space *mapping = old->mapping;825void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;826pgoff_t offset = old->index;827XA_STATE(xas, &mapping->i_pages, offset);828829VM_BUG_ON_FOLIO(!folio_test_locked(old), old);830VM_BUG_ON_FOLIO(!folio_test_locked(new), new);831VM_BUG_ON_FOLIO(new->mapping, new);832833folio_get(new);834new->mapping = mapping;835new->index = offset;836837mem_cgroup_replace_folio(old, new);838839xas_lock_irq(&xas);840xas_store(&xas, new);841842old->mapping = NULL;843/* hugetlb pages do not participate in page cache accounting. */844if (!folio_test_hugetlb(old))845__lruvec_stat_sub_folio(old, NR_FILE_PAGES);846if (!folio_test_hugetlb(new))847__lruvec_stat_add_folio(new, NR_FILE_PAGES);848if (folio_test_swapbacked(old))849__lruvec_stat_sub_folio(old, NR_SHMEM);850if (folio_test_swapbacked(new))851__lruvec_stat_add_folio(new, NR_SHMEM);852xas_unlock_irq(&xas);853if (free_folio)854free_folio(old);855folio_put(old);856}857EXPORT_SYMBOL_GPL(replace_page_cache_folio);858859noinline int __filemap_add_folio(struct address_space *mapping,860struct folio *folio, pgoff_t index, gfp_t gfp, void **shadowp)861{862XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));863bool huge;864long nr;865unsigned int forder = folio_order(folio);866867VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);868VM_BUG_ON_FOLIO(folio_test_swapbacked(folio), folio);869VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),870folio);871mapping_set_update(&xas, mapping);872873VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);874huge = folio_test_hugetlb(folio);875nr = folio_nr_pages(folio);876877gfp &= GFP_RECLAIM_MASK;878folio_ref_add(folio, nr);879folio->mapping = mapping;880folio->index = xas.xa_index;881882for (;;) {883int order = -1;884void *entry, *old = NULL;885886xas_lock_irq(&xas);887xas_for_each_conflict(&xas, entry) {888old = entry;889if (!xa_is_value(entry)) {890xas_set_err(&xas, -EEXIST);891goto unlock;892}893/*894* If a larger entry exists,895* it will be the first and only entry iterated.896*/897if (order == -1)898order = xas_get_order(&xas);899}900901if (old) {902if (order > 0 && order > forder) {903unsigned int split_order = max(forder,904xas_try_split_min_order(order));905906/* How to handle large swap entries? */907BUG_ON(shmem_mapping(mapping));908909while (order > forder) {910xas_set_order(&xas, index, split_order);911xas_try_split(&xas, old, order);912if (xas_error(&xas))913goto unlock;914order = split_order;915split_order =916max(xas_try_split_min_order(917split_order),918forder);919}920xas_reset(&xas);921}922if (shadowp)923*shadowp = old;924}925926xas_store(&xas, folio);927if (xas_error(&xas))928goto unlock;929930mapping->nrpages += nr;931932/* hugetlb pages do not participate in page cache accounting */933if (!huge) {934__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, nr);935if (folio_test_pmd_mappable(folio))936__lruvec_stat_mod_folio(folio,937NR_FILE_THPS, nr);938}939940unlock:941xas_unlock_irq(&xas);942943if (!xas_nomem(&xas, gfp))944break;945}946947if (xas_error(&xas))948goto error;949950trace_mm_filemap_add_to_page_cache(folio);951return 0;952error:953folio->mapping = NULL;954/* Leave folio->index set: truncation relies upon it */955folio_put_refs(folio, nr);956return xas_error(&xas);957}958ALLOW_ERROR_INJECTION(__filemap_add_folio, ERRNO);959960int filemap_add_folio(struct address_space *mapping, struct folio *folio,961pgoff_t index, gfp_t gfp)962{963void *shadow = NULL;964int ret;965struct mem_cgroup *tmp;966bool kernel_file = test_bit(AS_KERNEL_FILE, &mapping->flags);967968if (kernel_file)969tmp = set_active_memcg(root_mem_cgroup);970ret = mem_cgroup_charge(folio, NULL, gfp);971if (kernel_file)972set_active_memcg(tmp);973if (ret)974return ret;975976__folio_set_locked(folio);977ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);978if (unlikely(ret)) {979mem_cgroup_uncharge(folio);980__folio_clear_locked(folio);981} else {982/*983* The folio might have been evicted from cache only984* recently, in which case it should be activated like985* any other repeatedly accessed folio.986* The exception is folios getting rewritten; evicting other987* data from the working set, only to cache data that will988* get overwritten with something else, is a waste of memory.989*/990WARN_ON_ONCE(folio_test_active(folio));991if (!(gfp & __GFP_WRITE) && shadow)992workingset_refault(folio, shadow);993folio_add_lru(folio);994if (kernel_file)995mod_node_page_state(folio_pgdat(folio),996NR_KERNEL_FILE_PAGES,997folio_nr_pages(folio));998}999return ret;1000}1001EXPORT_SYMBOL_GPL(filemap_add_folio);10021003#ifdef CONFIG_NUMA1004struct folio *filemap_alloc_folio_noprof(gfp_t gfp, unsigned int order)1005{1006int n;1007struct folio *folio;10081009if (cpuset_do_page_mem_spread()) {1010unsigned int cpuset_mems_cookie;1011do {1012cpuset_mems_cookie = read_mems_allowed_begin();1013n = cpuset_mem_spread_node();1014folio = __folio_alloc_node_noprof(gfp, order, n);1015} while (!folio && read_mems_allowed_retry(cpuset_mems_cookie));10161017return folio;1018}1019return folio_alloc_noprof(gfp, order);1020}1021EXPORT_SYMBOL(filemap_alloc_folio_noprof);1022#endif10231024/*1025* filemap_invalidate_lock_two - lock invalidate_lock for two mappings1026*1027* Lock exclusively invalidate_lock of any passed mapping that is not NULL.1028*1029* @mapping1: the first mapping to lock1030* @mapping2: the second mapping to lock1031*/1032void filemap_invalidate_lock_two(struct address_space *mapping1,1033struct address_space *mapping2)1034{1035if (mapping1 > mapping2)1036swap(mapping1, mapping2);1037if (mapping1)1038down_write(&mapping1->invalidate_lock);1039if (mapping2 && mapping1 != mapping2)1040down_write_nested(&mapping2->invalidate_lock, 1);1041}1042EXPORT_SYMBOL(filemap_invalidate_lock_two);10431044/*1045* filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings1046*1047* Unlock exclusive invalidate_lock of any passed mapping that is not NULL.1048*1049* @mapping1: the first mapping to unlock1050* @mapping2: the second mapping to unlock1051*/1052void filemap_invalidate_unlock_two(struct address_space *mapping1,1053struct address_space *mapping2)1054{1055if (mapping1)1056up_write(&mapping1->invalidate_lock);1057if (mapping2 && mapping1 != mapping2)1058up_write(&mapping2->invalidate_lock);1059}1060EXPORT_SYMBOL(filemap_invalidate_unlock_two);10611062/*1063* In order to wait for pages to become available there must be1064* waitqueues associated with pages. By using a hash table of1065* waitqueues where the bucket discipline is to maintain all1066* waiters on the same queue and wake all when any of the pages1067* become available, and for the woken contexts to check to be1068* sure the appropriate page became available, this saves space1069* at a cost of "thundering herd" phenomena during rare hash1070* collisions.1071*/1072#define PAGE_WAIT_TABLE_BITS 81073#define PAGE_WAIT_TABLE_SIZE (1 << PAGE_WAIT_TABLE_BITS)1074static wait_queue_head_t folio_wait_table[PAGE_WAIT_TABLE_SIZE] __cacheline_aligned;10751076static wait_queue_head_t *folio_waitqueue(struct folio *folio)1077{1078return &folio_wait_table[hash_ptr(folio, PAGE_WAIT_TABLE_BITS)];1079}10801081/* How many times do we accept lock stealing from under a waiter? */1082static int sysctl_page_lock_unfairness = 5;1083static const struct ctl_table filemap_sysctl_table[] = {1084{1085.procname = "page_lock_unfairness",1086.data = &sysctl_page_lock_unfairness,1087.maxlen = sizeof(sysctl_page_lock_unfairness),1088.mode = 0644,1089.proc_handler = proc_dointvec_minmax,1090.extra1 = SYSCTL_ZERO,1091}1092};10931094void __init pagecache_init(void)1095{1096int i;10971098for (i = 0; i < PAGE_WAIT_TABLE_SIZE; i++)1099init_waitqueue_head(&folio_wait_table[i]);11001101page_writeback_init();1102register_sysctl_init("vm", filemap_sysctl_table);1103}11041105/*1106* The page wait code treats the "wait->flags" somewhat unusually, because1107* we have multiple different kinds of waits, not just the usual "exclusive"1108* one.1109*1110* We have:1111*1112* (a) no special bits set:1113*1114* We're just waiting for the bit to be released, and when a waker1115* calls the wakeup function, we set WQ_FLAG_WOKEN and wake it up,1116* and remove it from the wait queue.1117*1118* Simple and straightforward.1119*1120* (b) WQ_FLAG_EXCLUSIVE:1121*1122* The waiter is waiting to get the lock, and only one waiter should1123* be woken up to avoid any thundering herd behavior. We'll set the1124* WQ_FLAG_WOKEN bit, wake it up, and remove it from the wait queue.1125*1126* This is the traditional exclusive wait.1127*1128* (c) WQ_FLAG_EXCLUSIVE | WQ_FLAG_CUSTOM:1129*1130* The waiter is waiting to get the bit, and additionally wants the1131* lock to be transferred to it for fair lock behavior. If the lock1132* cannot be taken, we stop walking the wait queue without waking1133* the waiter.1134*1135* This is the "fair lock handoff" case, and in addition to setting1136* WQ_FLAG_WOKEN, we set WQ_FLAG_DONE to let the waiter easily see1137* that it now has the lock.1138*/1139static int wake_page_function(wait_queue_entry_t *wait, unsigned mode, int sync, void *arg)1140{1141unsigned int flags;1142struct wait_page_key *key = arg;1143struct wait_page_queue *wait_page1144= container_of(wait, struct wait_page_queue, wait);11451146if (!wake_page_match(wait_page, key))1147return 0;11481149/*1150* If it's a lock handoff wait, we get the bit for it, and1151* stop walking (and do not wake it up) if we can't.1152*/1153flags = wait->flags;1154if (flags & WQ_FLAG_EXCLUSIVE) {1155if (test_bit(key->bit_nr, &key->folio->flags.f))1156return -1;1157if (flags & WQ_FLAG_CUSTOM) {1158if (test_and_set_bit(key->bit_nr, &key->folio->flags.f))1159return -1;1160flags |= WQ_FLAG_DONE;1161}1162}11631164/*1165* We are holding the wait-queue lock, but the waiter that1166* is waiting for this will be checking the flags without1167* any locking.1168*1169* So update the flags atomically, and wake up the waiter1170* afterwards to avoid any races. This store-release pairs1171* with the load-acquire in folio_wait_bit_common().1172*/1173smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);1174wake_up_state(wait->private, mode);11751176/*1177* Ok, we have successfully done what we're waiting for,1178* and we can unconditionally remove the wait entry.1179*1180* Note that this pairs with the "finish_wait()" in the1181* waiter, and has to be the absolute last thing we do.1182* After this list_del_init(&wait->entry) the wait entry1183* might be de-allocated and the process might even have1184* exited.1185*/1186list_del_init_careful(&wait->entry);1187return (flags & WQ_FLAG_EXCLUSIVE) != 0;1188}11891190static void folio_wake_bit(struct folio *folio, int bit_nr)1191{1192wait_queue_head_t *q = folio_waitqueue(folio);1193struct wait_page_key key;1194unsigned long flags;11951196key.folio = folio;1197key.bit_nr = bit_nr;1198key.page_match = 0;11991200spin_lock_irqsave(&q->lock, flags);1201__wake_up_locked_key(q, TASK_NORMAL, &key);12021203/*1204* It's possible to miss clearing waiters here, when we woke our page1205* waiters, but the hashed waitqueue has waiters for other pages on it.1206* That's okay, it's a rare case. The next waker will clear it.1207*1208* Note that, depending on the page pool (buddy, hugetlb, ZONE_DEVICE,1209* other), the flag may be cleared in the course of freeing the page;1210* but that is not required for correctness.1211*/1212if (!waitqueue_active(q) || !key.page_match)1213folio_clear_waiters(folio);12141215spin_unlock_irqrestore(&q->lock, flags);1216}12171218/*1219* A choice of three behaviors for folio_wait_bit_common():1220*/1221enum behavior {1222EXCLUSIVE, /* Hold ref to page and take the bit when woken, like1223* __folio_lock() waiting on then setting PG_locked.1224*/1225SHARED, /* Hold ref to page and check the bit when woken, like1226* folio_wait_writeback() waiting on PG_writeback.1227*/1228DROP, /* Drop ref to page before wait, no check when woken,1229* like folio_put_wait_locked() on PG_locked.1230*/1231};12321233/*1234* Attempt to check (or get) the folio flag, and mark us done1235* if successful.1236*/1237static inline bool folio_trylock_flag(struct folio *folio, int bit_nr,1238struct wait_queue_entry *wait)1239{1240if (wait->flags & WQ_FLAG_EXCLUSIVE) {1241if (test_and_set_bit(bit_nr, &folio->flags.f))1242return false;1243} else if (test_bit(bit_nr, &folio->flags.f))1244return false;12451246wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;1247return true;1248}12491250static inline int folio_wait_bit_common(struct folio *folio, int bit_nr,1251int state, enum behavior behavior)1252{1253wait_queue_head_t *q = folio_waitqueue(folio);1254int unfairness = sysctl_page_lock_unfairness;1255struct wait_page_queue wait_page;1256wait_queue_entry_t *wait = &wait_page.wait;1257bool thrashing = false;1258unsigned long pflags;1259bool in_thrashing;12601261if (bit_nr == PG_locked &&1262!folio_test_uptodate(folio) && folio_test_workingset(folio)) {1263delayacct_thrashing_start(&in_thrashing);1264psi_memstall_enter(&pflags);1265thrashing = true;1266}12671268init_wait(wait);1269wait->func = wake_page_function;1270wait_page.folio = folio;1271wait_page.bit_nr = bit_nr;12721273repeat:1274wait->flags = 0;1275if (behavior == EXCLUSIVE) {1276wait->flags = WQ_FLAG_EXCLUSIVE;1277if (--unfairness < 0)1278wait->flags |= WQ_FLAG_CUSTOM;1279}12801281/*1282* Do one last check whether we can get the1283* page bit synchronously.1284*1285* Do the folio_set_waiters() marking before that1286* to let any waker we _just_ missed know they1287* need to wake us up (otherwise they'll never1288* even go to the slow case that looks at the1289* page queue), and add ourselves to the wait1290* queue if we need to sleep.1291*1292* This part needs to be done under the queue1293* lock to avoid races.1294*/1295spin_lock_irq(&q->lock);1296folio_set_waiters(folio);1297if (!folio_trylock_flag(folio, bit_nr, wait))1298__add_wait_queue_entry_tail(q, wait);1299spin_unlock_irq(&q->lock);13001301/*1302* From now on, all the logic will be based on1303* the WQ_FLAG_WOKEN and WQ_FLAG_DONE flag, to1304* see whether the page bit testing has already1305* been done by the wake function.1306*1307* We can drop our reference to the folio.1308*/1309if (behavior == DROP)1310folio_put(folio);13111312/*1313* Note that until the "finish_wait()", or until1314* we see the WQ_FLAG_WOKEN flag, we need to1315* be very careful with the 'wait->flags', because1316* we may race with a waker that sets them.1317*/1318for (;;) {1319unsigned int flags;13201321set_current_state(state);13221323/* Loop until we've been woken or interrupted */1324flags = smp_load_acquire(&wait->flags);1325if (!(flags & WQ_FLAG_WOKEN)) {1326if (signal_pending_state(state, current))1327break;13281329io_schedule();1330continue;1331}13321333/* If we were non-exclusive, we're done */1334if (behavior != EXCLUSIVE)1335break;13361337/* If the waker got the lock for us, we're done */1338if (flags & WQ_FLAG_DONE)1339break;13401341/*1342* Otherwise, if we're getting the lock, we need to1343* try to get it ourselves.1344*1345* And if that fails, we'll have to retry this all.1346*/1347if (unlikely(test_and_set_bit(bit_nr, folio_flags(folio, 0))))1348goto repeat;13491350wait->flags |= WQ_FLAG_DONE;1351break;1352}13531354/*1355* If a signal happened, this 'finish_wait()' may remove the last1356* waiter from the wait-queues, but the folio waiters bit will remain1357* set. That's ok. The next wakeup will take care of it, and trying1358* to do it here would be difficult and prone to races.1359*/1360finish_wait(q, wait);13611362if (thrashing) {1363delayacct_thrashing_end(&in_thrashing);1364psi_memstall_leave(&pflags);1365}13661367/*1368* NOTE! The wait->flags weren't stable until we've done the1369* 'finish_wait()', and we could have exited the loop above due1370* to a signal, and had a wakeup event happen after the signal1371* test but before the 'finish_wait()'.1372*1373* So only after the finish_wait() can we reliably determine1374* if we got woken up or not, so we can now figure out the final1375* return value based on that state without races.1376*1377* Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive1378* waiter, but an exclusive one requires WQ_FLAG_DONE.1379*/1380if (behavior == EXCLUSIVE)1381return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;13821383return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;1384}13851386#ifdef CONFIG_MIGRATION1387/**1388* migration_entry_wait_on_locked - Wait for a migration entry to be removed1389* @entry: migration swap entry.1390* @ptl: already locked ptl. This function will drop the lock.1391*1392* Wait for a migration entry referencing the given page to be removed. This is1393* equivalent to folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE) except1394* this can be called without taking a reference on the page. Instead this1395* should be called while holding the ptl for the migration entry referencing1396* the page.1397*1398* Returns after unlocking the ptl.1399*1400* This follows the same logic as folio_wait_bit_common() so see the comments1401* there.1402*/1403void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl)1404__releases(ptl)1405{1406struct wait_page_queue wait_page;1407wait_queue_entry_t *wait = &wait_page.wait;1408bool thrashing = false;1409unsigned long pflags;1410bool in_thrashing;1411wait_queue_head_t *q;1412struct folio *folio = pfn_swap_entry_folio(entry);14131414q = folio_waitqueue(folio);1415if (!folio_test_uptodate(folio) && folio_test_workingset(folio)) {1416delayacct_thrashing_start(&in_thrashing);1417psi_memstall_enter(&pflags);1418thrashing = true;1419}14201421init_wait(wait);1422wait->func = wake_page_function;1423wait_page.folio = folio;1424wait_page.bit_nr = PG_locked;1425wait->flags = 0;14261427spin_lock_irq(&q->lock);1428folio_set_waiters(folio);1429if (!folio_trylock_flag(folio, PG_locked, wait))1430__add_wait_queue_entry_tail(q, wait);1431spin_unlock_irq(&q->lock);14321433/*1434* If a migration entry exists for the page the migration path must hold1435* a valid reference to the page, and it must take the ptl to remove the1436* migration entry. So the page is valid until the ptl is dropped.1437*/1438spin_unlock(ptl);14391440for (;;) {1441unsigned int flags;14421443set_current_state(TASK_UNINTERRUPTIBLE);14441445/* Loop until we've been woken or interrupted */1446flags = smp_load_acquire(&wait->flags);1447if (!(flags & WQ_FLAG_WOKEN)) {1448if (signal_pending_state(TASK_UNINTERRUPTIBLE, current))1449break;14501451io_schedule();1452continue;1453}1454break;1455}14561457finish_wait(q, wait);14581459if (thrashing) {1460delayacct_thrashing_end(&in_thrashing);1461psi_memstall_leave(&pflags);1462}1463}1464#endif14651466void folio_wait_bit(struct folio *folio, int bit_nr)1467{1468folio_wait_bit_common(folio, bit_nr, TASK_UNINTERRUPTIBLE, SHARED);1469}1470EXPORT_SYMBOL(folio_wait_bit);14711472int folio_wait_bit_killable(struct folio *folio, int bit_nr)1473{1474return folio_wait_bit_common(folio, bit_nr, TASK_KILLABLE, SHARED);1475}1476EXPORT_SYMBOL(folio_wait_bit_killable);14771478/**1479* folio_put_wait_locked - Drop a reference and wait for it to be unlocked1480* @folio: The folio to wait for.1481* @state: The sleep state (TASK_KILLABLE, TASK_UNINTERRUPTIBLE, etc).1482*1483* The caller should hold a reference on @folio. They expect the page to1484* become unlocked relatively soon, but do not wish to hold up migration1485* (for example) by holding the reference while waiting for the folio to1486* come unlocked. After this function returns, the caller should not1487* dereference @folio.1488*1489* Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.1490*/1491static int folio_put_wait_locked(struct folio *folio, int state)1492{1493return folio_wait_bit_common(folio, PG_locked, state, DROP);1494}14951496/**1497* folio_unlock - Unlock a locked folio.1498* @folio: The folio.1499*1500* Unlocks the folio and wakes up any thread sleeping on the page lock.1501*1502* Context: May be called from interrupt or process context. May not be1503* called from NMI context.1504*/1505void folio_unlock(struct folio *folio)1506{1507/* Bit 7 allows x86 to check the byte's sign bit */1508BUILD_BUG_ON(PG_waiters != 7);1509BUILD_BUG_ON(PG_locked > 7);1510VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);1511if (folio_xor_flags_has_waiters(folio, 1 << PG_locked))1512folio_wake_bit(folio, PG_locked);1513}1514EXPORT_SYMBOL(folio_unlock);15151516/**1517* folio_end_read - End read on a folio.1518* @folio: The folio.1519* @success: True if all reads completed successfully.1520*1521* When all reads against a folio have completed, filesystems should1522* call this function to let the pagecache know that no more reads1523* are outstanding. This will unlock the folio and wake up any thread1524* sleeping on the lock. The folio will also be marked uptodate if all1525* reads succeeded.1526*1527* Context: May be called from interrupt or process context. May not be1528* called from NMI context.1529*/1530void folio_end_read(struct folio *folio, bool success)1531{1532unsigned long mask = 1 << PG_locked;15331534/* Must be in bottom byte for x86 to work */1535BUILD_BUG_ON(PG_uptodate > 7);1536VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);1537VM_BUG_ON_FOLIO(success && folio_test_uptodate(folio), folio);15381539if (likely(success))1540mask |= 1 << PG_uptodate;1541if (folio_xor_flags_has_waiters(folio, mask))1542folio_wake_bit(folio, PG_locked);1543}1544EXPORT_SYMBOL(folio_end_read);15451546/**1547* folio_end_private_2 - Clear PG_private_2 and wake any waiters.1548* @folio: The folio.1549*1550* Clear the PG_private_2 bit on a folio and wake up any sleepers waiting for1551* it. The folio reference held for PG_private_2 being set is released.1552*1553* This is, for example, used when a netfs folio is being written to a local1554* disk cache, thereby allowing writes to the cache for the same folio to be1555* serialised.1556*/1557void folio_end_private_2(struct folio *folio)1558{1559VM_BUG_ON_FOLIO(!folio_test_private_2(folio), folio);1560clear_bit_unlock(PG_private_2, folio_flags(folio, 0));1561folio_wake_bit(folio, PG_private_2);1562folio_put(folio);1563}1564EXPORT_SYMBOL(folio_end_private_2);15651566/**1567* folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.1568* @folio: The folio to wait on.1569*1570* Wait for PG_private_2 to be cleared on a folio.1571*/1572void folio_wait_private_2(struct folio *folio)1573{1574while (folio_test_private_2(folio))1575folio_wait_bit(folio, PG_private_2);1576}1577EXPORT_SYMBOL(folio_wait_private_2);15781579/**1580* folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.1581* @folio: The folio to wait on.1582*1583* Wait for PG_private_2 to be cleared on a folio or until a fatal signal is1584* received by the calling task.1585*1586* Return:1587* - 0 if successful.1588* - -EINTR if a fatal signal was encountered.1589*/1590int folio_wait_private_2_killable(struct folio *folio)1591{1592int ret = 0;15931594while (folio_test_private_2(folio)) {1595ret = folio_wait_bit_killable(folio, PG_private_2);1596if (ret < 0)1597break;1598}15991600return ret;1601}1602EXPORT_SYMBOL(folio_wait_private_2_killable);16031604static void filemap_end_dropbehind(struct folio *folio)1605{1606struct address_space *mapping = folio->mapping;16071608VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);16091610if (folio_test_writeback(folio) || folio_test_dirty(folio))1611return;1612if (!folio_test_clear_dropbehind(folio))1613return;1614if (mapping)1615folio_unmap_invalidate(mapping, folio, 0);1616}16171618/*1619* If folio was marked as dropbehind, then pages should be dropped when writeback1620* completes. Do that now. If we fail, it's likely because of a big folio -1621* just reset dropbehind for that case and latter completions should invalidate.1622*/1623static void filemap_end_dropbehind_write(struct folio *folio)1624{1625if (!folio_test_dropbehind(folio))1626return;16271628/*1629* Hitting !in_task() should not happen off RWF_DONTCACHE writeback,1630* but can happen if normal writeback just happens to find dirty folios1631* that were created as part of uncached writeback, and that writeback1632* would otherwise not need non-IRQ handling. Just skip the1633* invalidation in that case.1634*/1635if (in_task() && folio_trylock(folio)) {1636filemap_end_dropbehind(folio);1637folio_unlock(folio);1638}1639}16401641/**1642* folio_end_writeback - End writeback against a folio.1643* @folio: The folio.1644*1645* The folio must actually be under writeback.1646*1647* Context: May be called from process or interrupt context.1648*/1649void folio_end_writeback(struct folio *folio)1650{1651VM_BUG_ON_FOLIO(!folio_test_writeback(folio), folio);16521653/*1654* folio_test_clear_reclaim() could be used here but it is an1655* atomic operation and overkill in this particular case. Failing1656* to shuffle a folio marked for immediate reclaim is too mild1657* a gain to justify taking an atomic operation penalty at the1658* end of every folio writeback.1659*/1660if (folio_test_reclaim(folio)) {1661folio_clear_reclaim(folio);1662folio_rotate_reclaimable(folio);1663}16641665/*1666* Writeback does not hold a folio reference of its own, relying1667* on truncation to wait for the clearing of PG_writeback.1668* But here we must make sure that the folio is not freed and1669* reused before the folio_wake_bit().1670*/1671folio_get(folio);1672if (__folio_end_writeback(folio))1673folio_wake_bit(folio, PG_writeback);16741675filemap_end_dropbehind_write(folio);1676acct_reclaim_writeback(folio);1677folio_put(folio);1678}1679EXPORT_SYMBOL(folio_end_writeback);16801681/**1682* __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.1683* @folio: The folio to lock1684*/1685void __folio_lock(struct folio *folio)1686{1687folio_wait_bit_common(folio, PG_locked, TASK_UNINTERRUPTIBLE,1688EXCLUSIVE);1689}1690EXPORT_SYMBOL(__folio_lock);16911692int __folio_lock_killable(struct folio *folio)1693{1694return folio_wait_bit_common(folio, PG_locked, TASK_KILLABLE,1695EXCLUSIVE);1696}1697EXPORT_SYMBOL_GPL(__folio_lock_killable);16981699static int __folio_lock_async(struct folio *folio, struct wait_page_queue *wait)1700{1701struct wait_queue_head *q = folio_waitqueue(folio);1702int ret;17031704wait->folio = folio;1705wait->bit_nr = PG_locked;17061707spin_lock_irq(&q->lock);1708__add_wait_queue_entry_tail(q, &wait->wait);1709folio_set_waiters(folio);1710ret = !folio_trylock(folio);1711/*1712* If we were successful now, we know we're still on the1713* waitqueue as we're still under the lock. This means it's1714* safe to remove and return success, we know the callback1715* isn't going to trigger.1716*/1717if (!ret)1718__remove_wait_queue(q, &wait->wait);1719else1720ret = -EIOCBQUEUED;1721spin_unlock_irq(&q->lock);1722return ret;1723}17241725/*1726* Return values:1727* 0 - folio is locked.1728* non-zero - folio is not locked.1729* mmap_lock or per-VMA lock has been released (mmap_read_unlock() or1730* vma_end_read()), unless flags had both FAULT_FLAG_ALLOW_RETRY and1731* FAULT_FLAG_RETRY_NOWAIT set, in which case the lock is still held.1732*1733* If neither ALLOW_RETRY nor KILLABLE are set, will always return 01734* with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.1735*/1736vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf)1737{1738unsigned int flags = vmf->flags;17391740if (fault_flag_allow_retry_first(flags)) {1741/*1742* CAUTION! In this case, mmap_lock/per-VMA lock is not1743* released even though returning VM_FAULT_RETRY.1744*/1745if (flags & FAULT_FLAG_RETRY_NOWAIT)1746return VM_FAULT_RETRY;17471748release_fault_lock(vmf);1749if (flags & FAULT_FLAG_KILLABLE)1750folio_wait_locked_killable(folio);1751else1752folio_wait_locked(folio);1753return VM_FAULT_RETRY;1754}1755if (flags & FAULT_FLAG_KILLABLE) {1756bool ret;17571758ret = __folio_lock_killable(folio);1759if (ret) {1760release_fault_lock(vmf);1761return VM_FAULT_RETRY;1762}1763} else {1764__folio_lock(folio);1765}17661767return 0;1768}17691770/**1771* page_cache_next_miss() - Find the next gap in the page cache.1772* @mapping: Mapping.1773* @index: Index.1774* @max_scan: Maximum range to search.1775*1776* Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the1777* gap with the lowest index.1778*1779* This function may be called under the rcu_read_lock. However, this will1780* not atomically search a snapshot of the cache at a single point in time.1781* For example, if a gap is created at index 5, then subsequently a gap is1782* created at index 10, page_cache_next_miss covering both indices may1783* return 10 if called under the rcu_read_lock.1784*1785* Return: The index of the gap if found, otherwise an index outside the1786* range specified (in which case 'return - index >= max_scan' will be true).1787* In the rare case of index wrap-around, 0 will be returned.1788*/1789pgoff_t page_cache_next_miss(struct address_space *mapping,1790pgoff_t index, unsigned long max_scan)1791{1792XA_STATE(xas, &mapping->i_pages, index);1793unsigned long nr = max_scan;17941795while (nr--) {1796void *entry = xas_next(&xas);1797if (!entry || xa_is_value(entry))1798return xas.xa_index;1799if (xas.xa_index == 0)1800return 0;1801}18021803return index + max_scan;1804}1805EXPORT_SYMBOL(page_cache_next_miss);18061807/**1808* page_cache_prev_miss() - Find the previous gap in the page cache.1809* @mapping: Mapping.1810* @index: Index.1811* @max_scan: Maximum range to search.1812*1813* Search the range [max(index - max_scan + 1, 0), index] for the1814* gap with the highest index.1815*1816* This function may be called under the rcu_read_lock. However, this will1817* not atomically search a snapshot of the cache at a single point in time.1818* For example, if a gap is created at index 10, then subsequently a gap is1819* created at index 5, page_cache_prev_miss() covering both indices may1820* return 5 if called under the rcu_read_lock.1821*1822* Return: The index of the gap if found, otherwise an index outside the1823* range specified (in which case 'index - return >= max_scan' will be true).1824* In the rare case of wrap-around, ULONG_MAX will be returned.1825*/1826pgoff_t page_cache_prev_miss(struct address_space *mapping,1827pgoff_t index, unsigned long max_scan)1828{1829XA_STATE(xas, &mapping->i_pages, index);18301831while (max_scan--) {1832void *entry = xas_prev(&xas);1833if (!entry || xa_is_value(entry))1834break;1835if (xas.xa_index == ULONG_MAX)1836break;1837}18381839return xas.xa_index;1840}1841EXPORT_SYMBOL(page_cache_prev_miss);18421843/*1844* Lockless page cache protocol:1845* On the lookup side:1846* 1. Load the folio from i_pages1847* 2. Increment the refcount if it's not zero1848* 3. If the folio is not found by xas_reload(), put the refcount and retry1849*1850* On the removal side:1851* A. Freeze the page (by zeroing the refcount if nobody else has a reference)1852* B. Remove the page from i_pages1853* C. Return the page to the page allocator1854*1855* This means that any page may have its reference count temporarily1856* increased by a speculative page cache (or GUP-fast) lookup as it can1857* be allocated by another user before the RCU grace period expires.1858* Because the refcount temporarily acquired here may end up being the1859* last refcount on the page, any page allocation must be freeable by1860* folio_put().1861*/18621863/*1864* filemap_get_entry - Get a page cache entry.1865* @mapping: the address_space to search1866* @index: The page cache index.1867*1868* Looks up the page cache entry at @mapping & @index. If it is a folio,1869* it is returned with an increased refcount. If it is a shadow entry1870* of a previously evicted folio, or a swap entry from shmem/tmpfs,1871* it is returned without further action.1872*1873* Return: The folio, swap or shadow entry, %NULL if nothing is found.1874*/1875void *filemap_get_entry(struct address_space *mapping, pgoff_t index)1876{1877XA_STATE(xas, &mapping->i_pages, index);1878struct folio *folio;18791880rcu_read_lock();1881repeat:1882xas_reset(&xas);1883folio = xas_load(&xas);1884if (xas_retry(&xas, folio))1885goto repeat;1886/*1887* A shadow entry of a recently evicted page, or a swap entry from1888* shmem/tmpfs. Return it without attempting to raise page count.1889*/1890if (!folio || xa_is_value(folio))1891goto out;18921893if (!folio_try_get(folio))1894goto repeat;18951896if (unlikely(folio != xas_reload(&xas))) {1897folio_put(folio);1898goto repeat;1899}1900out:1901rcu_read_unlock();19021903return folio;1904}19051906/**1907* __filemap_get_folio - Find and get a reference to a folio.1908* @mapping: The address_space to search.1909* @index: The page index.1910* @fgp_flags: %FGP flags modify how the folio is returned.1911* @gfp: Memory allocation flags to use if %FGP_CREAT is specified.1912*1913* Looks up the page cache entry at @mapping & @index.1914*1915* If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even1916* if the %GFP flags specified for %FGP_CREAT are atomic.1917*1918* If this function returns a folio, it is returned with an increased refcount.1919*1920* Return: The found folio or an ERR_PTR() otherwise.1921*/1922struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,1923fgf_t fgp_flags, gfp_t gfp)1924{1925struct folio *folio;19261927repeat:1928folio = filemap_get_entry(mapping, index);1929if (xa_is_value(folio))1930folio = NULL;1931if (!folio)1932goto no_page;19331934if (fgp_flags & FGP_LOCK) {1935if (fgp_flags & FGP_NOWAIT) {1936if (!folio_trylock(folio)) {1937folio_put(folio);1938return ERR_PTR(-EAGAIN);1939}1940} else {1941folio_lock(folio);1942}19431944/* Has the page been truncated? */1945if (unlikely(folio->mapping != mapping)) {1946folio_unlock(folio);1947folio_put(folio);1948goto repeat;1949}1950VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);1951}19521953if (fgp_flags & FGP_ACCESSED)1954folio_mark_accessed(folio);1955else if (fgp_flags & FGP_WRITE) {1956/* Clear idle flag for buffer write */1957if (folio_test_idle(folio))1958folio_clear_idle(folio);1959}19601961if (fgp_flags & FGP_STABLE)1962folio_wait_stable(folio);1963no_page:1964if (!folio && (fgp_flags & FGP_CREAT)) {1965unsigned int min_order = mapping_min_folio_order(mapping);1966unsigned int order = max(min_order, FGF_GET_ORDER(fgp_flags));1967int err;1968index = mapping_align_index(mapping, index);19691970if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))1971gfp |= __GFP_WRITE;1972if (fgp_flags & FGP_NOFS)1973gfp &= ~__GFP_FS;1974if (fgp_flags & FGP_NOWAIT) {1975gfp &= ~GFP_KERNEL;1976gfp |= GFP_NOWAIT;1977}1978if (WARN_ON_ONCE(!(fgp_flags & (FGP_LOCK | FGP_FOR_MMAP))))1979fgp_flags |= FGP_LOCK;19801981if (order > mapping_max_folio_order(mapping))1982order = mapping_max_folio_order(mapping);1983/* If we're not aligned, allocate a smaller folio */1984if (index & ((1UL << order) - 1))1985order = __ffs(index);19861987do {1988gfp_t alloc_gfp = gfp;19891990err = -ENOMEM;1991if (order > min_order)1992alloc_gfp |= __GFP_NORETRY | __GFP_NOWARN;1993folio = filemap_alloc_folio(alloc_gfp, order);1994if (!folio)1995continue;19961997/* Init accessed so avoid atomic mark_page_accessed later */1998if (fgp_flags & FGP_ACCESSED)1999__folio_set_referenced(folio);2000if (fgp_flags & FGP_DONTCACHE)2001__folio_set_dropbehind(folio);20022003err = filemap_add_folio(mapping, folio, index, gfp);2004if (!err)2005break;2006folio_put(folio);2007folio = NULL;2008} while (order-- > min_order);20092010if (err == -EEXIST)2011goto repeat;2012if (err) {2013/*2014* When NOWAIT I/O fails to allocate folios this could2015* be due to a nonblocking memory allocation and not2016* because the system actually is out of memory.2017* Return -EAGAIN so that there caller retries in a2018* blocking fashion instead of propagating -ENOMEM2019* to the application.2020*/2021if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM)2022err = -EAGAIN;2023return ERR_PTR(err);2024}2025/*2026* filemap_add_folio locks the page, and for mmap2027* we expect an unlocked page.2028*/2029if (folio && (fgp_flags & FGP_FOR_MMAP))2030folio_unlock(folio);2031}20322033if (!folio)2034return ERR_PTR(-ENOENT);2035/* not an uncached lookup, clear uncached if set */2036if (folio_test_dropbehind(folio) && !(fgp_flags & FGP_DONTCACHE))2037folio_clear_dropbehind(folio);2038return folio;2039}2040EXPORT_SYMBOL(__filemap_get_folio);20412042static inline struct folio *find_get_entry(struct xa_state *xas, pgoff_t max,2043xa_mark_t mark)2044{2045struct folio *folio;20462047retry:2048if (mark == XA_PRESENT)2049folio = xas_find(xas, max);2050else2051folio = xas_find_marked(xas, max, mark);20522053if (xas_retry(xas, folio))2054goto retry;2055/*2056* A shadow entry of a recently evicted page, a swap2057* entry from shmem/tmpfs or a DAX entry. Return it2058* without attempting to raise page count.2059*/2060if (!folio || xa_is_value(folio))2061return folio;20622063if (!folio_try_get(folio))2064goto reset;20652066if (unlikely(folio != xas_reload(xas))) {2067folio_put(folio);2068goto reset;2069}20702071return folio;2072reset:2073xas_reset(xas);2074goto retry;2075}20762077/**2078* find_get_entries - gang pagecache lookup2079* @mapping: The address_space to search2080* @start: The starting page cache index2081* @end: The final page index (inclusive).2082* @fbatch: Where the resulting entries are placed.2083* @indices: The cache indices corresponding to the entries in @entries2084*2085* find_get_entries() will search for and return a batch of entries in2086* the mapping. The entries are placed in @fbatch. find_get_entries()2087* takes a reference on any actual folios it returns.2088*2089* The entries have ascending indexes. The indices may not be consecutive2090* due to not-present entries or large folios.2091*2092* Any shadow entries of evicted folios, or swap entries from2093* shmem/tmpfs, are included in the returned array.2094*2095* Return: The number of entries which were found.2096*/2097unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,2098pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)2099{2100XA_STATE(xas, &mapping->i_pages, *start);2101struct folio *folio;21022103rcu_read_lock();2104while ((folio = find_get_entry(&xas, end, XA_PRESENT)) != NULL) {2105indices[fbatch->nr] = xas.xa_index;2106if (!folio_batch_add(fbatch, folio))2107break;2108}21092110if (folio_batch_count(fbatch)) {2111unsigned long nr;2112int idx = folio_batch_count(fbatch) - 1;21132114folio = fbatch->folios[idx];2115if (!xa_is_value(folio))2116nr = folio_nr_pages(folio);2117else2118nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);2119*start = round_down(indices[idx] + nr, nr);2120}2121rcu_read_unlock();21222123return folio_batch_count(fbatch);2124}21252126/**2127* find_lock_entries - Find a batch of pagecache entries.2128* @mapping: The address_space to search.2129* @start: The starting page cache index.2130* @end: The final page index (inclusive).2131* @fbatch: Where the resulting entries are placed.2132* @indices: The cache indices of the entries in @fbatch.2133*2134* find_lock_entries() will return a batch of entries from @mapping.2135* Swap, shadow and DAX entries are included. Folios are returned2136* locked and with an incremented refcount. Folios which are locked2137* by somebody else or under writeback are skipped. Folios which are2138* partially outside the range are not returned.2139*2140* The entries have ascending indexes. The indices may not be consecutive2141* due to not-present entries, large folios, folios which could not be2142* locked or folios under writeback.2143*2144* Return: The number of entries which were found.2145*/2146unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,2147pgoff_t end, struct folio_batch *fbatch, pgoff_t *indices)2148{2149XA_STATE(xas, &mapping->i_pages, *start);2150struct folio *folio;21512152rcu_read_lock();2153while ((folio = find_get_entry(&xas, end, XA_PRESENT))) {2154unsigned long base;2155unsigned long nr;21562157if (!xa_is_value(folio)) {2158nr = folio_nr_pages(folio);2159base = folio->index;2160/* Omit large folio which begins before the start */2161if (base < *start)2162goto put;2163/* Omit large folio which extends beyond the end */2164if (base + nr - 1 > end)2165goto put;2166if (!folio_trylock(folio))2167goto put;2168if (folio->mapping != mapping ||2169folio_test_writeback(folio))2170goto unlock;2171VM_BUG_ON_FOLIO(!folio_contains(folio, xas.xa_index),2172folio);2173} else {2174nr = 1 << xas_get_order(&xas);2175base = xas.xa_index & ~(nr - 1);2176/* Omit order>0 value which begins before the start */2177if (base < *start)2178continue;2179/* Omit order>0 value which extends beyond the end */2180if (base + nr - 1 > end)2181break;2182}21832184/* Update start now so that last update is correct on return */2185*start = base + nr;2186indices[fbatch->nr] = xas.xa_index;2187if (!folio_batch_add(fbatch, folio))2188break;2189continue;2190unlock:2191folio_unlock(folio);2192put:2193folio_put(folio);2194}2195rcu_read_unlock();21962197return folio_batch_count(fbatch);2198}21992200/**2201* filemap_get_folios - Get a batch of folios2202* @mapping: The address_space to search2203* @start: The starting page index2204* @end: The final page index (inclusive)2205* @fbatch: The batch to fill.2206*2207* Search for and return a batch of folios in the mapping starting at2208* index @start and up to index @end (inclusive). The folios are returned2209* in @fbatch with an elevated reference count.2210*2211* Return: The number of folios which were found.2212* We also update @start to index the next folio for the traversal.2213*/2214unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,2215pgoff_t end, struct folio_batch *fbatch)2216{2217return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);2218}2219EXPORT_SYMBOL(filemap_get_folios);22202221/**2222* filemap_get_folios_contig - Get a batch of contiguous folios2223* @mapping: The address_space to search2224* @start: The starting page index2225* @end: The final page index (inclusive)2226* @fbatch: The batch to fill2227*2228* filemap_get_folios_contig() works exactly like filemap_get_folios(),2229* except the returned folios are guaranteed to be contiguous. This may2230* not return all contiguous folios if the batch gets filled up.2231*2232* Return: The number of folios found.2233* Also update @start to be positioned for traversal of the next folio.2234*/22352236unsigned filemap_get_folios_contig(struct address_space *mapping,2237pgoff_t *start, pgoff_t end, struct folio_batch *fbatch)2238{2239XA_STATE(xas, &mapping->i_pages, *start);2240unsigned long nr;2241struct folio *folio;22422243rcu_read_lock();22442245for (folio = xas_load(&xas); folio && xas.xa_index <= end;2246folio = xas_next(&xas)) {2247if (xas_retry(&xas, folio))2248continue;2249/*2250* If the entry has been swapped out, we can stop looking.2251* No current caller is looking for DAX entries.2252*/2253if (xa_is_value(folio))2254goto update_start;22552256/* If we landed in the middle of a THP, continue at its end. */2257if (xa_is_sibling(folio))2258goto update_start;22592260if (!folio_try_get(folio))2261goto retry;22622263if (unlikely(folio != xas_reload(&xas)))2264goto put_folio;22652266if (!folio_batch_add(fbatch, folio)) {2267nr = folio_nr_pages(folio);2268*start = folio->index + nr;2269goto out;2270}2271xas_advance(&xas, folio_next_index(folio) - 1);2272continue;2273put_folio:2274folio_put(folio);22752276retry:2277xas_reset(&xas);2278}22792280update_start:2281nr = folio_batch_count(fbatch);22822283if (nr) {2284folio = fbatch->folios[nr - 1];2285*start = folio_next_index(folio);2286}2287out:2288rcu_read_unlock();2289return folio_batch_count(fbatch);2290}2291EXPORT_SYMBOL(filemap_get_folios_contig);22922293/**2294* filemap_get_folios_tag - Get a batch of folios matching @tag2295* @mapping: The address_space to search2296* @start: The starting page index2297* @end: The final page index (inclusive)2298* @tag: The tag index2299* @fbatch: The batch to fill2300*2301* The first folio may start before @start; if it does, it will contain2302* @start. The final folio may extend beyond @end; if it does, it will2303* contain @end. The folios have ascending indices. There may be gaps2304* between the folios if there are indices which have no folio in the2305* page cache. If folios are added to or removed from the page cache2306* while this is running, they may or may not be found by this call.2307* Only returns folios that are tagged with @tag.2308*2309* Return: The number of folios found.2310* Also update @start to index the next folio for traversal.2311*/2312unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,2313pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch)2314{2315XA_STATE(xas, &mapping->i_pages, *start);2316struct folio *folio;23172318rcu_read_lock();2319while ((folio = find_get_entry(&xas, end, tag)) != NULL) {2320/*2321* Shadow entries should never be tagged, but this iteration2322* is lockless so there is a window for page reclaim to evict2323* a page we saw tagged. Skip over it.2324*/2325if (xa_is_value(folio))2326continue;2327if (!folio_batch_add(fbatch, folio)) {2328unsigned long nr = folio_nr_pages(folio);2329*start = folio->index + nr;2330goto out;2331}2332}2333/*2334* We come here when there is no page beyond @end. We take care to not2335* overflow the index @start as it confuses some of the callers. This2336* breaks the iteration when there is a page at index -1 but that is2337* already broke anyway.2338*/2339if (end == (pgoff_t)-1)2340*start = (pgoff_t)-1;2341else2342*start = end + 1;2343out:2344rcu_read_unlock();23452346return folio_batch_count(fbatch);2347}2348EXPORT_SYMBOL(filemap_get_folios_tag);23492350/*2351* CD/DVDs are error prone. When a medium error occurs, the driver may fail2352* a _large_ part of the i/o request. Imagine the worst scenario:2353*2354* ---R__________________________________________B__________2355* ^ reading here ^ bad block(assume 4k)2356*2357* read(R) => miss => readahead(R...B) => media error => frustrating retries2358* => failing the whole request => read(R) => read(R+1) =>2359* readahead(R+1...B+1) => bang => read(R+2) => read(R+3) =>2360* readahead(R+3...B+2) => bang => read(R+3) => read(R+4) =>2361* readahead(R+4...B+3) => bang => read(R+4) => read(R+5) => ......2362*2363* It is going insane. Fix it by quickly scaling down the readahead size.2364*/2365static void shrink_readahead_size_eio(struct file_ra_state *ra)2366{2367ra->ra_pages /= 4;2368}23692370/*2371* filemap_get_read_batch - Get a batch of folios for read2372*2373* Get a batch of folios which represent a contiguous range of bytes in2374* the file. No exceptional entries will be returned. If @index is in2375* the middle of a folio, the entire folio will be returned. The last2376* folio in the batch may have the readahead flag set or the uptodate flag2377* clear so that the caller can take the appropriate action.2378*/2379static void filemap_get_read_batch(struct address_space *mapping,2380pgoff_t index, pgoff_t max, struct folio_batch *fbatch)2381{2382XA_STATE(xas, &mapping->i_pages, index);2383struct folio *folio;23842385rcu_read_lock();2386for (folio = xas_load(&xas); folio; folio = xas_next(&xas)) {2387if (xas_retry(&xas, folio))2388continue;2389if (xas.xa_index > max || xa_is_value(folio))2390break;2391if (xa_is_sibling(folio))2392break;2393if (!folio_try_get(folio))2394goto retry;23952396if (unlikely(folio != xas_reload(&xas)))2397goto put_folio;23982399if (!folio_batch_add(fbatch, folio))2400break;2401if (!folio_test_uptodate(folio))2402break;2403if (folio_test_readahead(folio))2404break;2405xas_advance(&xas, folio_next_index(folio) - 1);2406continue;2407put_folio:2408folio_put(folio);2409retry:2410xas_reset(&xas);2411}2412rcu_read_unlock();2413}24142415static int filemap_read_folio(struct file *file, filler_t filler,2416struct folio *folio)2417{2418bool workingset = folio_test_workingset(folio);2419unsigned long pflags;2420int error;24212422/* Start the actual read. The read will unlock the page. */2423if (unlikely(workingset))2424psi_memstall_enter(&pflags);2425error = filler(file, folio);2426if (unlikely(workingset))2427psi_memstall_leave(&pflags);2428if (error)2429return error;24302431error = folio_wait_locked_killable(folio);2432if (error)2433return error;2434if (folio_test_uptodate(folio))2435return 0;2436if (file)2437shrink_readahead_size_eio(&file->f_ra);2438return -EIO;2439}24402441static bool filemap_range_uptodate(struct address_space *mapping,2442loff_t pos, size_t count, struct folio *folio,2443bool need_uptodate)2444{2445if (folio_test_uptodate(folio))2446return true;2447/* pipes can't handle partially uptodate pages */2448if (need_uptodate)2449return false;2450if (!mapping->a_ops->is_partially_uptodate)2451return false;2452if (mapping->host->i_blkbits >= folio_shift(folio))2453return false;24542455if (folio_pos(folio) > pos) {2456count -= folio_pos(folio) - pos;2457pos = 0;2458} else {2459pos -= folio_pos(folio);2460}24612462if (pos == 0 && count >= folio_size(folio))2463return false;24642465return mapping->a_ops->is_partially_uptodate(folio, pos, count);2466}24672468static int filemap_update_page(struct kiocb *iocb,2469struct address_space *mapping, size_t count,2470struct folio *folio, bool need_uptodate)2471{2472int error;24732474if (iocb->ki_flags & IOCB_NOWAIT) {2475if (!filemap_invalidate_trylock_shared(mapping))2476return -EAGAIN;2477} else {2478filemap_invalidate_lock_shared(mapping);2479}24802481if (!folio_trylock(folio)) {2482error = -EAGAIN;2483if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))2484goto unlock_mapping;2485if (!(iocb->ki_flags & IOCB_WAITQ)) {2486filemap_invalidate_unlock_shared(mapping);2487/*2488* This is where we usually end up waiting for a2489* previously submitted readahead to finish.2490*/2491folio_put_wait_locked(folio, TASK_KILLABLE);2492return AOP_TRUNCATED_PAGE;2493}2494error = __folio_lock_async(folio, iocb->ki_waitq);2495if (error)2496goto unlock_mapping;2497}24982499error = AOP_TRUNCATED_PAGE;2500if (!folio->mapping)2501goto unlock;25022503error = 0;2504if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,2505need_uptodate))2506goto unlock;25072508error = -EAGAIN;2509if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))2510goto unlock;25112512error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,2513folio);2514goto unlock_mapping;2515unlock:2516folio_unlock(folio);2517unlock_mapping:2518filemap_invalidate_unlock_shared(mapping);2519if (error == AOP_TRUNCATED_PAGE)2520folio_put(folio);2521return error;2522}25232524static int filemap_create_folio(struct kiocb *iocb, struct folio_batch *fbatch)2525{2526struct address_space *mapping = iocb->ki_filp->f_mapping;2527struct folio *folio;2528int error;2529unsigned int min_order = mapping_min_folio_order(mapping);2530pgoff_t index;25312532if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))2533return -EAGAIN;25342535folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);2536if (!folio)2537return -ENOMEM;2538if (iocb->ki_flags & IOCB_DONTCACHE)2539__folio_set_dropbehind(folio);25402541/*2542* Protect against truncate / hole punch. Grabbing invalidate_lock2543* here assures we cannot instantiate and bring uptodate new2544* pagecache folios after evicting page cache during truncate2545* and before actually freeing blocks. Note that we could2546* release invalidate_lock after inserting the folio into2547* the page cache as the locked folio would then be enough to2548* synchronize with hole punching. But there are code paths2549* such as filemap_update_page() filling in partially uptodate2550* pages or ->readahead() that need to hold invalidate_lock2551* while mapping blocks for IO so let's hold the lock here as2552* well to keep locking rules simple.2553*/2554filemap_invalidate_lock_shared(mapping);2555index = (iocb->ki_pos >> (PAGE_SHIFT + min_order)) << min_order;2556error = filemap_add_folio(mapping, folio, index,2557mapping_gfp_constraint(mapping, GFP_KERNEL));2558if (error == -EEXIST)2559error = AOP_TRUNCATED_PAGE;2560if (error)2561goto error;25622563error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,2564folio);2565if (error)2566goto error;25672568filemap_invalidate_unlock_shared(mapping);2569folio_batch_add(fbatch, folio);2570return 0;2571error:2572filemap_invalidate_unlock_shared(mapping);2573folio_put(folio);2574return error;2575}25762577static int filemap_readahead(struct kiocb *iocb, struct file *file,2578struct address_space *mapping, struct folio *folio,2579pgoff_t last_index)2580{2581DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);25822583if (iocb->ki_flags & IOCB_NOIO)2584return -EAGAIN;2585if (iocb->ki_flags & IOCB_DONTCACHE)2586ractl.dropbehind = 1;2587page_cache_async_ra(&ractl, folio, last_index - folio->index);2588return 0;2589}25902591static int filemap_get_pages(struct kiocb *iocb, size_t count,2592struct folio_batch *fbatch, bool need_uptodate)2593{2594struct file *filp = iocb->ki_filp;2595struct address_space *mapping = filp->f_mapping;2596pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;2597pgoff_t last_index;2598struct folio *folio;2599unsigned int flags;2600int err = 0;26012602/* "last_index" is the index of the folio beyond the end of the read */2603last_index = round_up(iocb->ki_pos + count,2604mapping_min_folio_nrbytes(mapping)) >> PAGE_SHIFT;2605retry:2606if (fatal_signal_pending(current))2607return -EINTR;26082609filemap_get_read_batch(mapping, index, last_index - 1, fbatch);2610if (!folio_batch_count(fbatch)) {2611DEFINE_READAHEAD(ractl, filp, &filp->f_ra, mapping, index);26122613if (iocb->ki_flags & IOCB_NOIO)2614return -EAGAIN;2615if (iocb->ki_flags & IOCB_NOWAIT)2616flags = memalloc_noio_save();2617if (iocb->ki_flags & IOCB_DONTCACHE)2618ractl.dropbehind = 1;2619page_cache_sync_ra(&ractl, last_index - index);2620if (iocb->ki_flags & IOCB_NOWAIT)2621memalloc_noio_restore(flags);2622filemap_get_read_batch(mapping, index, last_index - 1, fbatch);2623}2624if (!folio_batch_count(fbatch)) {2625err = filemap_create_folio(iocb, fbatch);2626if (err == AOP_TRUNCATED_PAGE)2627goto retry;2628return err;2629}26302631folio = fbatch->folios[folio_batch_count(fbatch) - 1];2632if (folio_test_readahead(folio)) {2633err = filemap_readahead(iocb, filp, mapping, folio, last_index);2634if (err)2635goto err;2636}2637if (!folio_test_uptodate(folio)) {2638if (folio_batch_count(fbatch) > 1) {2639err = -EAGAIN;2640goto err;2641}2642err = filemap_update_page(iocb, mapping, count, folio,2643need_uptodate);2644if (err)2645goto err;2646}26472648trace_mm_filemap_get_pages(mapping, index, last_index - 1);2649return 0;2650err:2651if (err < 0)2652folio_put(folio);2653if (likely(--fbatch->nr))2654return 0;2655if (err == AOP_TRUNCATED_PAGE)2656goto retry;2657return err;2658}26592660static inline bool pos_same_folio(loff_t pos1, loff_t pos2, struct folio *folio)2661{2662unsigned int shift = folio_shift(folio);26632664return (pos1 >> shift == pos2 >> shift);2665}26662667static void filemap_end_dropbehind_read(struct folio *folio)2668{2669if (!folio_test_dropbehind(folio))2670return;2671if (folio_test_writeback(folio) || folio_test_dirty(folio))2672return;2673if (folio_trylock(folio)) {2674filemap_end_dropbehind(folio);2675folio_unlock(folio);2676}2677}26782679/**2680* filemap_read - Read data from the page cache.2681* @iocb: The iocb to read.2682* @iter: Destination for the data.2683* @already_read: Number of bytes already read by the caller.2684*2685* Copies data from the page cache. If the data is not currently present,2686* uses the readahead and read_folio address_space operations to fetch it.2687*2688* Return: Total number of bytes copied, including those already read by2689* the caller. If an error happens before any bytes are copied, returns2690* a negative error number.2691*/2692ssize_t filemap_read(struct kiocb *iocb, struct iov_iter *iter,2693ssize_t already_read)2694{2695struct file *filp = iocb->ki_filp;2696struct file_ra_state *ra = &filp->f_ra;2697struct address_space *mapping = filp->f_mapping;2698struct inode *inode = mapping->host;2699struct folio_batch fbatch;2700int i, error = 0;2701bool writably_mapped;2702loff_t isize, end_offset;2703loff_t last_pos = ra->prev_pos;27042705if (unlikely(iocb->ki_pos < 0))2706return -EINVAL;2707if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))2708return 0;2709if (unlikely(!iov_iter_count(iter)))2710return 0;27112712iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos);2713folio_batch_init(&fbatch);27142715do {2716cond_resched();27172718/*2719* If we've already successfully copied some data, then we2720* can no longer safely return -EIOCBQUEUED. Hence mark2721* an async read NOWAIT at that point.2722*/2723if ((iocb->ki_flags & IOCB_WAITQ) && already_read)2724iocb->ki_flags |= IOCB_NOWAIT;27252726if (unlikely(iocb->ki_pos >= i_size_read(inode)))2727break;27282729error = filemap_get_pages(iocb, iter->count, &fbatch, false);2730if (error < 0)2731break;27322733/*2734* i_size must be checked after we know the pages are Uptodate.2735*2736* Checking i_size after the check allows us to calculate2737* the correct value for "nr", which means the zero-filled2738* part of the page is not copied back to userspace (unless2739* another truncate extends the file - this is desired though).2740*/2741isize = i_size_read(inode);2742if (unlikely(iocb->ki_pos >= isize))2743goto put_folios;2744end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);27452746/*2747* Once we start copying data, we don't want to be touching any2748* cachelines that might be contended:2749*/2750writably_mapped = mapping_writably_mapped(mapping);27512752/*2753* When a read accesses the same folio several times, only2754* mark it as accessed the first time.2755*/2756if (!pos_same_folio(iocb->ki_pos, last_pos - 1,2757fbatch.folios[0]))2758folio_mark_accessed(fbatch.folios[0]);27592760for (i = 0; i < folio_batch_count(&fbatch); i++) {2761struct folio *folio = fbatch.folios[i];2762size_t fsize = folio_size(folio);2763size_t offset = iocb->ki_pos & (fsize - 1);2764size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,2765fsize - offset);2766size_t copied;27672768if (end_offset < folio_pos(folio))2769break;2770if (i > 0)2771folio_mark_accessed(folio);2772/*2773* If users can be writing to this folio using arbitrary2774* virtual addresses, take care of potential aliasing2775* before reading the folio on the kernel side.2776*/2777if (writably_mapped)2778flush_dcache_folio(folio);27792780copied = copy_folio_to_iter(folio, offset, bytes, iter);27812782already_read += copied;2783iocb->ki_pos += copied;2784last_pos = iocb->ki_pos;27852786if (copied < bytes) {2787error = -EFAULT;2788break;2789}2790}2791put_folios:2792for (i = 0; i < folio_batch_count(&fbatch); i++) {2793struct folio *folio = fbatch.folios[i];27942795filemap_end_dropbehind_read(folio);2796folio_put(folio);2797}2798folio_batch_init(&fbatch);2799} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);28002801file_accessed(filp);2802ra->prev_pos = last_pos;2803return already_read ? already_read : error;2804}2805EXPORT_SYMBOL_GPL(filemap_read);28062807int kiocb_write_and_wait(struct kiocb *iocb, size_t count)2808{2809struct address_space *mapping = iocb->ki_filp->f_mapping;2810loff_t pos = iocb->ki_pos;2811loff_t end = pos + count - 1;28122813if (iocb->ki_flags & IOCB_NOWAIT) {2814if (filemap_range_needs_writeback(mapping, pos, end))2815return -EAGAIN;2816return 0;2817}28182819return filemap_write_and_wait_range(mapping, pos, end);2820}2821EXPORT_SYMBOL_GPL(kiocb_write_and_wait);28222823int filemap_invalidate_pages(struct address_space *mapping,2824loff_t pos, loff_t end, bool nowait)2825{2826int ret;28272828if (nowait) {2829/* we could block if there are any pages in the range */2830if (filemap_range_has_page(mapping, pos, end))2831return -EAGAIN;2832} else {2833ret = filemap_write_and_wait_range(mapping, pos, end);2834if (ret)2835return ret;2836}28372838/*2839* After a write we want buffered reads to be sure to go to disk to get2840* the new data. We invalidate clean cached page from the region we're2841* about to write. We do this *before* the write so that we can return2842* without clobbering -EIOCBQUEUED from ->direct_IO().2843*/2844return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,2845end >> PAGE_SHIFT);2846}28472848int kiocb_invalidate_pages(struct kiocb *iocb, size_t count)2849{2850struct address_space *mapping = iocb->ki_filp->f_mapping;28512852return filemap_invalidate_pages(mapping, iocb->ki_pos,2853iocb->ki_pos + count - 1,2854iocb->ki_flags & IOCB_NOWAIT);2855}2856EXPORT_SYMBOL_GPL(kiocb_invalidate_pages);28572858/**2859* generic_file_read_iter - generic filesystem read routine2860* @iocb: kernel I/O control block2861* @iter: destination for the data read2862*2863* This is the "read_iter()" routine for all filesystems2864* that can use the page cache directly.2865*2866* The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall2867* be returned when no data can be read without waiting for I/O requests2868* to complete; it doesn't prevent readahead.2869*2870* The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O2871* requests shall be made for the read or for readahead. When no data2872* can be read, -EAGAIN shall be returned. When readahead would be2873* triggered, a partial, possibly empty read shall be returned.2874*2875* Return:2876* * number of bytes copied, even for partial reads2877* * negative error code (or 0 if IOCB_NOIO) if nothing was read2878*/2879ssize_t2880generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter)2881{2882size_t count = iov_iter_count(iter);2883ssize_t retval = 0;28842885if (!count)2886return 0; /* skip atime */28872888if (iocb->ki_flags & IOCB_DIRECT) {2889struct file *file = iocb->ki_filp;2890struct address_space *mapping = file->f_mapping;2891struct inode *inode = mapping->host;28922893retval = kiocb_write_and_wait(iocb, count);2894if (retval < 0)2895return retval;2896file_accessed(file);28972898retval = mapping->a_ops->direct_IO(iocb, iter);2899if (retval >= 0) {2900iocb->ki_pos += retval;2901count -= retval;2902}2903if (retval != -EIOCBQUEUED)2904iov_iter_revert(iter, count - iov_iter_count(iter));29052906/*2907* Btrfs can have a short DIO read if we encounter2908* compressed extents, so if there was an error, or if2909* we've already read everything we wanted to, or if2910* there was a short read because we hit EOF, go ahead2911* and return. Otherwise fallthrough to buffered io for2912* the rest of the read. Buffered reads will not work for2913* DAX files, so don't bother trying.2914*/2915if (retval < 0 || !count || IS_DAX(inode))2916return retval;2917if (iocb->ki_pos >= i_size_read(inode))2918return retval;2919}29202921return filemap_read(iocb, iter, retval);2922}2923EXPORT_SYMBOL(generic_file_read_iter);29242925/*2926* Splice subpages from a folio into a pipe.2927*/2928size_t splice_folio_into_pipe(struct pipe_inode_info *pipe,2929struct folio *folio, loff_t fpos, size_t size)2930{2931struct page *page;2932size_t spliced = 0, offset = offset_in_folio(folio, fpos);29332934page = folio_page(folio, offset / PAGE_SIZE);2935size = min(size, folio_size(folio) - offset);2936offset %= PAGE_SIZE;29372938while (spliced < size && !pipe_is_full(pipe)) {2939struct pipe_buffer *buf = pipe_head_buf(pipe);2940size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);29412942*buf = (struct pipe_buffer) {2943.ops = &page_cache_pipe_buf_ops,2944.page = page,2945.offset = offset,2946.len = part,2947};2948folio_get(folio);2949pipe->head++;2950page++;2951spliced += part;2952offset = 0;2953}29542955return spliced;2956}29572958/**2959* filemap_splice_read - Splice data from a file's pagecache into a pipe2960* @in: The file to read from2961* @ppos: Pointer to the file position to read from2962* @pipe: The pipe to splice into2963* @len: The amount to splice2964* @flags: The SPLICE_F_* flags2965*2966* This function gets folios from a file's pagecache and splices them into the2967* pipe. Readahead will be called as necessary to fill more folios. This may2968* be used for blockdevs also.2969*2970* Return: On success, the number of bytes read will be returned and *@ppos2971* will be updated if appropriate; 0 will be returned if there is no more data2972* to be read; -EAGAIN will be returned if the pipe had no space, and some2973* other negative error code will be returned on error. A short read may occur2974* if the pipe has insufficient space, we reach the end of the data or we hit a2975* hole.2976*/2977ssize_t filemap_splice_read(struct file *in, loff_t *ppos,2978struct pipe_inode_info *pipe,2979size_t len, unsigned int flags)2980{2981struct folio_batch fbatch;2982struct kiocb iocb;2983size_t total_spliced = 0, used, npages;2984loff_t isize, end_offset;2985bool writably_mapped;2986int i, error = 0;29872988if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))2989return 0;29902991init_sync_kiocb(&iocb, in);2992iocb.ki_pos = *ppos;29932994/* Work out how much data we can actually add into the pipe */2995used = pipe_buf_usage(pipe);2996npages = max_t(ssize_t, pipe->max_usage - used, 0);2997len = min_t(size_t, len, npages * PAGE_SIZE);29982999folio_batch_init(&fbatch);30003001do {3002cond_resched();30033004if (*ppos >= i_size_read(in->f_mapping->host))3005break;30063007iocb.ki_pos = *ppos;3008error = filemap_get_pages(&iocb, len, &fbatch, true);3009if (error < 0)3010break;30113012/*3013* i_size must be checked after we know the pages are Uptodate.3014*3015* Checking i_size after the check allows us to calculate3016* the correct value for "nr", which means the zero-filled3017* part of the page is not copied back to userspace (unless3018* another truncate extends the file - this is desired though).3019*/3020isize = i_size_read(in->f_mapping->host);3021if (unlikely(*ppos >= isize))3022break;3023end_offset = min_t(loff_t, isize, *ppos + len);30243025/*3026* Once we start copying data, we don't want to be touching any3027* cachelines that might be contended:3028*/3029writably_mapped = mapping_writably_mapped(in->f_mapping);30303031for (i = 0; i < folio_batch_count(&fbatch); i++) {3032struct folio *folio = fbatch.folios[i];3033size_t n;30343035if (folio_pos(folio) >= end_offset)3036goto out;3037folio_mark_accessed(folio);30383039/*3040* If users can be writing to this folio using arbitrary3041* virtual addresses, take care of potential aliasing3042* before reading the folio on the kernel side.3043*/3044if (writably_mapped)3045flush_dcache_folio(folio);30463047n = min_t(loff_t, len, isize - *ppos);3048n = splice_folio_into_pipe(pipe, folio, *ppos, n);3049if (!n)3050goto out;3051len -= n;3052total_spliced += n;3053*ppos += n;3054in->f_ra.prev_pos = *ppos;3055if (pipe_is_full(pipe))3056goto out;3057}30583059folio_batch_release(&fbatch);3060} while (len);30613062out:3063folio_batch_release(&fbatch);3064file_accessed(in);30653066return total_spliced ? total_spliced : error;3067}3068EXPORT_SYMBOL(filemap_splice_read);30693070static inline loff_t folio_seek_hole_data(struct xa_state *xas,3071struct address_space *mapping, struct folio *folio,3072loff_t start, loff_t end, bool seek_data)3073{3074const struct address_space_operations *ops = mapping->a_ops;3075size_t offset, bsz = i_blocksize(mapping->host);30763077if (xa_is_value(folio) || folio_test_uptodate(folio))3078return seek_data ? start : end;3079if (!ops->is_partially_uptodate)3080return seek_data ? end : start;30813082xas_pause(xas);3083rcu_read_unlock();3084folio_lock(folio);3085if (unlikely(folio->mapping != mapping))3086goto unlock;30873088offset = offset_in_folio(folio, start) & ~(bsz - 1);30893090do {3091if (ops->is_partially_uptodate(folio, offset, bsz) ==3092seek_data)3093break;3094start = (start + bsz) & ~((u64)bsz - 1);3095offset += bsz;3096} while (offset < folio_size(folio));3097unlock:3098folio_unlock(folio);3099rcu_read_lock();3100return start;3101}31023103static inline size_t seek_folio_size(struct xa_state *xas, struct folio *folio)3104{3105if (xa_is_value(folio))3106return PAGE_SIZE << xas_get_order(xas);3107return folio_size(folio);3108}31093110/**3111* mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.3112* @mapping: Address space to search.3113* @start: First byte to consider.3114* @end: Limit of search (exclusive).3115* @whence: Either SEEK_HOLE or SEEK_DATA.3116*3117* If the page cache knows which blocks contain holes and which blocks3118* contain data, your filesystem can use this function to implement3119* SEEK_HOLE and SEEK_DATA. This is useful for filesystems which are3120* entirely memory-based such as tmpfs, and filesystems which support3121* unwritten extents.3122*3123* Return: The requested offset on success, or -ENXIO if @whence specifies3124* SEEK_DATA and there is no data after @start. There is an implicit hole3125* after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start3126* and @end contain data.3127*/3128loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,3129loff_t end, int whence)3130{3131XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);3132pgoff_t max = (end - 1) >> PAGE_SHIFT;3133bool seek_data = (whence == SEEK_DATA);3134struct folio *folio;31353136if (end <= start)3137return -ENXIO;31383139rcu_read_lock();3140while ((folio = find_get_entry(&xas, max, XA_PRESENT))) {3141loff_t pos = (u64)xas.xa_index << PAGE_SHIFT;3142size_t seek_size;31433144if (start < pos) {3145if (!seek_data)3146goto unlock;3147start = pos;3148}31493150seek_size = seek_folio_size(&xas, folio);3151pos = round_up((u64)pos + 1, seek_size);3152start = folio_seek_hole_data(&xas, mapping, folio, start, pos,3153seek_data);3154if (start < pos)3155goto unlock;3156if (start >= end)3157break;3158if (seek_size > PAGE_SIZE)3159xas_set(&xas, pos >> PAGE_SHIFT);3160if (!xa_is_value(folio))3161folio_put(folio);3162}3163if (seek_data)3164start = -ENXIO;3165unlock:3166rcu_read_unlock();3167if (folio && !xa_is_value(folio))3168folio_put(folio);3169if (start > end)3170return end;3171return start;3172}31733174#ifdef CONFIG_MMU3175#define MMAP_LOTSAMISS (100)3176/*3177* lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock3178* @vmf - the vm_fault for this fault.3179* @folio - the folio to lock.3180* @fpin - the pointer to the file we may pin (or is already pinned).3181*3182* This works similar to lock_folio_or_retry in that it can drop the3183* mmap_lock. It differs in that it actually returns the folio locked3184* if it returns 1 and 0 if it couldn't lock the folio. If we did have3185* to drop the mmap_lock then fpin will point to the pinned file and3186* needs to be fput()'ed at a later point.3187*/3188static int lock_folio_maybe_drop_mmap(struct vm_fault *vmf, struct folio *folio,3189struct file **fpin)3190{3191if (folio_trylock(folio))3192return 1;31933194/*3195* NOTE! This will make us return with VM_FAULT_RETRY, but with3196* the fault lock still held. That's how FAULT_FLAG_RETRY_NOWAIT3197* is supposed to work. We have way too many special cases..3198*/3199if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)3200return 0;32013202*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);3203if (vmf->flags & FAULT_FLAG_KILLABLE) {3204if (__folio_lock_killable(folio)) {3205/*3206* We didn't have the right flags to drop the3207* fault lock, but all fault_handlers only check3208* for fatal signals if we return VM_FAULT_RETRY,3209* so we need to drop the fault lock here and3210* return 0 if we don't have a fpin.3211*/3212if (*fpin == NULL)3213release_fault_lock(vmf);3214return 0;3215}3216} else3217__folio_lock(folio);32183219return 1;3220}32213222/*3223* Synchronous readahead happens when we don't even find a page in the page3224* cache at all. We don't want to perform IO under the mmap sem, so if we have3225* to drop the mmap sem we return the file that was pinned in order for us to do3226* that. If we didn't pin a file then we return NULL. The file that is3227* returned needs to be fput()'ed when we're done with it.3228*/3229static struct file *do_sync_mmap_readahead(struct vm_fault *vmf)3230{3231struct file *file = vmf->vma->vm_file;3232struct file_ra_state *ra = &file->f_ra;3233struct address_space *mapping = file->f_mapping;3234DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);3235struct file *fpin = NULL;3236vm_flags_t vm_flags = vmf->vma->vm_flags;3237unsigned short mmap_miss;32383239#ifdef CONFIG_TRANSPARENT_HUGEPAGE3240/* Use the readahead code, even if readahead is disabled */3241if ((vm_flags & VM_HUGEPAGE) && HPAGE_PMD_ORDER <= MAX_PAGECACHE_ORDER) {3242fpin = maybe_unlock_mmap_for_io(vmf, fpin);3243ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);3244ra->size = HPAGE_PMD_NR;3245/*3246* Fetch two PMD folios, so we get the chance to actually3247* readahead, unless we've been told not to.3248*/3249if (!(vm_flags & VM_RAND_READ))3250ra->size *= 2;3251ra->async_size = HPAGE_PMD_NR;3252ra->order = HPAGE_PMD_ORDER;3253page_cache_ra_order(&ractl, ra);3254return fpin;3255}3256#endif32573258/*3259* If we don't want any read-ahead, don't bother. VM_EXEC case below is3260* already intended for random access.3261*/3262if ((vm_flags & (VM_RAND_READ | VM_EXEC)) == VM_RAND_READ)3263return fpin;3264if (!ra->ra_pages)3265return fpin;32663267if (vm_flags & VM_SEQ_READ) {3268fpin = maybe_unlock_mmap_for_io(vmf, fpin);3269page_cache_sync_ra(&ractl, ra->ra_pages);3270return fpin;3271}32723273/* Avoid banging the cache line if not needed */3274mmap_miss = READ_ONCE(ra->mmap_miss);3275if (mmap_miss < MMAP_LOTSAMISS * 10)3276WRITE_ONCE(ra->mmap_miss, ++mmap_miss);32773278/*3279* Do we miss much more than hit in this file? If so,3280* stop bothering with read-ahead. It will only hurt.3281*/3282if (mmap_miss > MMAP_LOTSAMISS)3283return fpin;32843285if (vm_flags & VM_EXEC) {3286/*3287* Allow arch to request a preferred minimum folio order for3288* executable memory. This can often be beneficial to3289* performance if (e.g.) arm64 can contpte-map the folio.3290* Executable memory rarely benefits from readahead, due to its3291* random access nature, so set async_size to 0.3292*3293* Limit to the boundaries of the VMA to avoid reading in any3294* pad that might exist between sections, which would be a waste3295* of memory.3296*/3297struct vm_area_struct *vma = vmf->vma;3298unsigned long start = vma->vm_pgoff;3299unsigned long end = start + vma_pages(vma);3300unsigned long ra_end;33013302ra->order = exec_folio_order();3303ra->start = round_down(vmf->pgoff, 1UL << ra->order);3304ra->start = max(ra->start, start);3305ra_end = round_up(ra->start + ra->ra_pages, 1UL << ra->order);3306ra_end = min(ra_end, end);3307ra->size = ra_end - ra->start;3308ra->async_size = 0;3309} else {3310/*3311* mmap read-around3312*/3313ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);3314ra->size = ra->ra_pages;3315ra->async_size = ra->ra_pages / 4;3316ra->order = 0;3317}33183319fpin = maybe_unlock_mmap_for_io(vmf, fpin);3320ractl._index = ra->start;3321page_cache_ra_order(&ractl, ra);3322return fpin;3323}33243325/*3326* Asynchronous readahead happens when we find the page and PG_readahead,3327* so we want to possibly extend the readahead further. We return the file that3328* was pinned if we have to drop the mmap_lock in order to do IO.3329*/3330static struct file *do_async_mmap_readahead(struct vm_fault *vmf,3331struct folio *folio)3332{3333struct file *file = vmf->vma->vm_file;3334struct file_ra_state *ra = &file->f_ra;3335DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);3336struct file *fpin = NULL;3337unsigned short mmap_miss;33383339/* If we don't want any read-ahead, don't bother */3340if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)3341return fpin;33423343/*3344* If the folio is locked, we're likely racing against another fault.3345* Don't touch the mmap_miss counter to avoid decreasing it multiple3346* times for a single folio and break the balance with mmap_miss3347* increase in do_sync_mmap_readahead().3348*/3349if (likely(!folio_test_locked(folio))) {3350mmap_miss = READ_ONCE(ra->mmap_miss);3351if (mmap_miss)3352WRITE_ONCE(ra->mmap_miss, --mmap_miss);3353}33543355if (folio_test_readahead(folio)) {3356fpin = maybe_unlock_mmap_for_io(vmf, fpin);3357page_cache_async_ra(&ractl, folio, ra->ra_pages);3358}3359return fpin;3360}33613362static vm_fault_t filemap_fault_recheck_pte_none(struct vm_fault *vmf)3363{3364struct vm_area_struct *vma = vmf->vma;3365vm_fault_t ret = 0;3366pte_t *ptep;33673368/*3369* We might have COW'ed a pagecache folio and might now have an mlocked3370* anon folio mapped. The original pagecache folio is not mlocked and3371* might have been evicted. During a read+clear/modify/write update of3372* the PTE, such as done in do_numa_page()/change_pte_range(), we3373* temporarily clear the PTE under PT lock and might detect it here as3374* "none" when not holding the PT lock.3375*3376* Not rechecking the PTE under PT lock could result in an unexpected3377* major fault in an mlock'ed region. Recheck only for this special3378* scenario while holding the PT lock, to not degrade non-mlocked3379* scenarios. Recheck the PTE without PT lock firstly, thereby reducing3380* the number of times we hold PT lock.3381*/3382if (!(vma->vm_flags & VM_LOCKED))3383return 0;33843385if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))3386return 0;33873388ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address,3389&vmf->ptl);3390if (unlikely(!ptep))3391return VM_FAULT_NOPAGE;33923393if (unlikely(!pte_none(ptep_get_lockless(ptep)))) {3394ret = VM_FAULT_NOPAGE;3395} else {3396spin_lock(vmf->ptl);3397if (unlikely(!pte_none(ptep_get(ptep))))3398ret = VM_FAULT_NOPAGE;3399spin_unlock(vmf->ptl);3400}3401pte_unmap(ptep);3402return ret;3403}34043405/**3406* filemap_fault - read in file data for page fault handling3407* @vmf: struct vm_fault containing details of the fault3408*3409* filemap_fault() is invoked via the vma operations vector for a3410* mapped memory region to read in file data during a page fault.3411*3412* The goto's are kind of ugly, but this streamlines the normal case of having3413* it in the page cache, and handles the special cases reasonably without3414* having a lot of duplicated code.3415*3416* vma->vm_mm->mmap_lock must be held on entry.3417*3418* If our return value has VM_FAULT_RETRY set, it's because the mmap_lock3419* may be dropped before doing I/O or by lock_folio_maybe_drop_mmap().3420*3421* If our return value does not have VM_FAULT_RETRY set, the mmap_lock3422* has not been released.3423*3424* We never return with VM_FAULT_RETRY and a bit from VM_FAULT_ERROR set.3425*3426* Return: bitwise-OR of %VM_FAULT_ codes.3427*/3428vm_fault_t filemap_fault(struct vm_fault *vmf)3429{3430int error;3431struct file *file = vmf->vma->vm_file;3432struct file *fpin = NULL;3433struct address_space *mapping = file->f_mapping;3434struct inode *inode = mapping->host;3435pgoff_t max_idx, index = vmf->pgoff;3436struct folio *folio;3437vm_fault_t ret = 0;3438bool mapping_locked = false;34393440max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);3441if (unlikely(index >= max_idx))3442return VM_FAULT_SIGBUS;34433444trace_mm_filemap_fault(mapping, index);34453446/*3447* Do we have something in the page cache already?3448*/3449folio = filemap_get_folio(mapping, index);3450if (likely(!IS_ERR(folio))) {3451/*3452* We found the page, so try async readahead before waiting for3453* the lock.3454*/3455if (!(vmf->flags & FAULT_FLAG_TRIED))3456fpin = do_async_mmap_readahead(vmf, folio);3457if (unlikely(!folio_test_uptodate(folio))) {3458filemap_invalidate_lock_shared(mapping);3459mapping_locked = true;3460}3461} else {3462ret = filemap_fault_recheck_pte_none(vmf);3463if (unlikely(ret))3464return ret;34653466/* No page in the page cache at all */3467count_vm_event(PGMAJFAULT);3468count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);3469ret = VM_FAULT_MAJOR;3470fpin = do_sync_mmap_readahead(vmf);3471retry_find:3472/*3473* See comment in filemap_create_folio() why we need3474* invalidate_lock3475*/3476if (!mapping_locked) {3477filemap_invalidate_lock_shared(mapping);3478mapping_locked = true;3479}3480folio = __filemap_get_folio(mapping, index,3481FGP_CREAT|FGP_FOR_MMAP,3482vmf->gfp_mask);3483if (IS_ERR(folio)) {3484if (fpin)3485goto out_retry;3486filemap_invalidate_unlock_shared(mapping);3487return VM_FAULT_OOM;3488}3489}34903491if (!lock_folio_maybe_drop_mmap(vmf, folio, &fpin))3492goto out_retry;34933494/* Did it get truncated? */3495if (unlikely(folio->mapping != mapping)) {3496folio_unlock(folio);3497folio_put(folio);3498goto retry_find;3499}3500VM_BUG_ON_FOLIO(!folio_contains(folio, index), folio);35013502/*3503* We have a locked folio in the page cache, now we need to check3504* that it's up-to-date. If not, it is going to be due to an error,3505* or because readahead was otherwise unable to retrieve it.3506*/3507if (unlikely(!folio_test_uptodate(folio))) {3508/*3509* If the invalidate lock is not held, the folio was in cache3510* and uptodate and now it is not. Strange but possible since we3511* didn't hold the page lock all the time. Let's drop3512* everything, get the invalidate lock and try again.3513*/3514if (!mapping_locked) {3515folio_unlock(folio);3516folio_put(folio);3517goto retry_find;3518}35193520/*3521* OK, the folio is really not uptodate. This can be because the3522* VMA has the VM_RAND_READ flag set, or because an error3523* arose. Let's read it in directly.3524*/3525goto page_not_uptodate;3526}35273528/*3529* We've made it this far and we had to drop our mmap_lock, now is the3530* time to return to the upper layer and have it re-find the vma and3531* redo the fault.3532*/3533if (fpin) {3534folio_unlock(folio);3535goto out_retry;3536}3537if (mapping_locked)3538filemap_invalidate_unlock_shared(mapping);35393540/*3541* Found the page and have a reference on it.3542* We must recheck i_size under page lock.3543*/3544max_idx = DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE);3545if (unlikely(index >= max_idx)) {3546folio_unlock(folio);3547folio_put(folio);3548return VM_FAULT_SIGBUS;3549}35503551vmf->page = folio_file_page(folio, index);3552return ret | VM_FAULT_LOCKED;35533554page_not_uptodate:3555/*3556* Umm, take care of errors if the page isn't up-to-date.3557* Try to re-read it _once_. We do this synchronously,3558* because there really aren't any performance issues here3559* and we need to check for errors.3560*/3561fpin = maybe_unlock_mmap_for_io(vmf, fpin);3562error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);3563if (fpin)3564goto out_retry;3565folio_put(folio);35663567if (!error || error == AOP_TRUNCATED_PAGE)3568goto retry_find;3569filemap_invalidate_unlock_shared(mapping);35703571return VM_FAULT_SIGBUS;35723573out_retry:3574/*3575* We dropped the mmap_lock, we need to return to the fault handler to3576* re-find the vma and come back and find our hopefully still populated3577* page.3578*/3579if (!IS_ERR(folio))3580folio_put(folio);3581if (mapping_locked)3582filemap_invalidate_unlock_shared(mapping);3583if (fpin)3584fput(fpin);3585return ret | VM_FAULT_RETRY;3586}3587EXPORT_SYMBOL(filemap_fault);35883589static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio,3590pgoff_t start)3591{3592struct mm_struct *mm = vmf->vma->vm_mm;35933594/* Huge page is mapped? No need to proceed. */3595if (pmd_trans_huge(*vmf->pmd)) {3596folio_unlock(folio);3597folio_put(folio);3598return true;3599}36003601if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {3602struct page *page = folio_file_page(folio, start);3603vm_fault_t ret = do_set_pmd(vmf, folio, page);3604if (!ret) {3605/* The page is mapped successfully, reference consumed. */3606folio_unlock(folio);3607return true;3608}3609}36103611if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)3612pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);36133614return false;3615}36163617static struct folio *next_uptodate_folio(struct xa_state *xas,3618struct address_space *mapping, pgoff_t end_pgoff)3619{3620struct folio *folio = xas_next_entry(xas, end_pgoff);3621unsigned long max_idx;36223623do {3624if (!folio)3625return NULL;3626if (xas_retry(xas, folio))3627continue;3628if (xa_is_value(folio))3629continue;3630if (!folio_try_get(folio))3631continue;3632if (folio_test_locked(folio))3633goto skip;3634/* Has the page moved or been split? */3635if (unlikely(folio != xas_reload(xas)))3636goto skip;3637if (!folio_test_uptodate(folio) || folio_test_readahead(folio))3638goto skip;3639if (!folio_trylock(folio))3640goto skip;3641if (folio->mapping != mapping)3642goto unlock;3643if (!folio_test_uptodate(folio))3644goto unlock;3645max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);3646if (xas->xa_index >= max_idx)3647goto unlock;3648return folio;3649unlock:3650folio_unlock(folio);3651skip:3652folio_put(folio);3653} while ((folio = xas_next_entry(xas, end_pgoff)) != NULL);36543655return NULL;3656}36573658/*3659* Map page range [start_page, start_page + nr_pages) of folio.3660* start_page is gotten from start by folio_page(folio, start)3661*/3662static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,3663struct folio *folio, unsigned long start,3664unsigned long addr, unsigned int nr_pages,3665unsigned long *rss, unsigned short *mmap_miss)3666{3667unsigned int ref_from_caller = 1;3668vm_fault_t ret = 0;3669struct page *page = folio_page(folio, start);3670unsigned int count = 0;3671pte_t *old_ptep = vmf->pte;3672unsigned long addr0;36733674/*3675* Map the large folio fully where possible.3676*3677* The folio must not cross VMA or page table boundary.3678*/3679addr0 = addr - start * PAGE_SIZE;3680if (folio_within_vma(folio, vmf->vma) &&3681(addr0 & PMD_MASK) == ((addr0 + folio_size(folio) - 1) & PMD_MASK)) {3682vmf->pte -= start;3683page -= start;3684addr = addr0;3685nr_pages = folio_nr_pages(folio);3686}36873688do {3689if (PageHWPoison(page + count))3690goto skip;36913692/*3693* If there are too many folios that are recently evicted3694* in a file, they will probably continue to be evicted.3695* In such situation, read-ahead is only a waste of IO.3696* Don't decrease mmap_miss in this scenario to make sure3697* we can stop read-ahead.3698*/3699if (!folio_test_workingset(folio))3700(*mmap_miss)++;37013702/*3703* NOTE: If there're PTE markers, we'll leave them to be3704* handled in the specific fault path, and it'll prohibit the3705* fault-around logic.3706*/3707if (!pte_none(ptep_get(&vmf->pte[count])))3708goto skip;37093710count++;3711continue;3712skip:3713if (count) {3714set_pte_range(vmf, folio, page, count, addr);3715*rss += count;3716folio_ref_add(folio, count - ref_from_caller);3717ref_from_caller = 0;3718if (in_range(vmf->address, addr, count * PAGE_SIZE))3719ret = VM_FAULT_NOPAGE;3720}37213722count++;3723page += count;3724vmf->pte += count;3725addr += count * PAGE_SIZE;3726count = 0;3727} while (--nr_pages > 0);37283729if (count) {3730set_pte_range(vmf, folio, page, count, addr);3731*rss += count;3732folio_ref_add(folio, count - ref_from_caller);3733ref_from_caller = 0;3734if (in_range(vmf->address, addr, count * PAGE_SIZE))3735ret = VM_FAULT_NOPAGE;3736}37373738vmf->pte = old_ptep;3739if (ref_from_caller)3740/* Locked folios cannot get truncated. */3741folio_ref_dec(folio);37423743return ret;3744}37453746static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,3747struct folio *folio, unsigned long addr,3748unsigned long *rss, unsigned short *mmap_miss)3749{3750vm_fault_t ret = 0;3751struct page *page = &folio->page;37523753if (PageHWPoison(page))3754goto out;37553756/* See comment of filemap_map_folio_range() */3757if (!folio_test_workingset(folio))3758(*mmap_miss)++;37593760/*3761* NOTE: If there're PTE markers, we'll leave them to be3762* handled in the specific fault path, and it'll prohibit3763* the fault-around logic.3764*/3765if (!pte_none(ptep_get(vmf->pte)))3766goto out;37673768if (vmf->address == addr)3769ret = VM_FAULT_NOPAGE;37703771set_pte_range(vmf, folio, page, 1, addr);3772(*rss)++;3773return ret;37743775out:3776/* Locked folios cannot get truncated. */3777folio_ref_dec(folio);3778return ret;3779}37803781vm_fault_t filemap_map_pages(struct vm_fault *vmf,3782pgoff_t start_pgoff, pgoff_t end_pgoff)3783{3784struct vm_area_struct *vma = vmf->vma;3785struct file *file = vma->vm_file;3786struct address_space *mapping = file->f_mapping;3787pgoff_t file_end, last_pgoff = start_pgoff;3788unsigned long addr;3789XA_STATE(xas, &mapping->i_pages, start_pgoff);3790struct folio *folio;3791vm_fault_t ret = 0;3792unsigned long rss = 0;3793unsigned int nr_pages = 0, folio_type;3794unsigned short mmap_miss = 0, mmap_miss_saved;37953796rcu_read_lock();3797folio = next_uptodate_folio(&xas, mapping, end_pgoff);3798if (!folio)3799goto out;38003801if (filemap_map_pmd(vmf, folio, start_pgoff)) {3802ret = VM_FAULT_NOPAGE;3803goto out;3804}38053806addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);3807vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);3808if (!vmf->pte) {3809folio_unlock(folio);3810folio_put(folio);3811goto out;3812}38133814file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;3815if (end_pgoff > file_end)3816end_pgoff = file_end;38173818folio_type = mm_counter_file(folio);3819do {3820unsigned long end;38213822addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;3823vmf->pte += xas.xa_index - last_pgoff;3824last_pgoff = xas.xa_index;3825end = folio_next_index(folio) - 1;3826nr_pages = min(end, end_pgoff) - xas.xa_index + 1;38273828if (!folio_test_large(folio))3829ret |= filemap_map_order0_folio(vmf,3830folio, addr, &rss, &mmap_miss);3831else3832ret |= filemap_map_folio_range(vmf, folio,3833xas.xa_index - folio->index, addr,3834nr_pages, &rss, &mmap_miss);38353836folio_unlock(folio);3837} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);3838add_mm_counter(vma->vm_mm, folio_type, rss);3839pte_unmap_unlock(vmf->pte, vmf->ptl);3840trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);3841out:3842rcu_read_unlock();38433844mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);3845if (mmap_miss >= mmap_miss_saved)3846WRITE_ONCE(file->f_ra.mmap_miss, 0);3847else3848WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);38493850return ret;3851}3852EXPORT_SYMBOL(filemap_map_pages);38533854vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)3855{3856struct address_space *mapping = vmf->vma->vm_file->f_mapping;3857struct folio *folio = page_folio(vmf->page);3858vm_fault_t ret = VM_FAULT_LOCKED;38593860sb_start_pagefault(mapping->host->i_sb);3861file_update_time(vmf->vma->vm_file);3862folio_lock(folio);3863if (folio->mapping != mapping) {3864folio_unlock(folio);3865ret = VM_FAULT_NOPAGE;3866goto out;3867}3868/*3869* We mark the folio dirty already here so that when freeze is in3870* progress, we are guaranteed that writeback during freezing will3871* see the dirty folio and writeprotect it again.3872*/3873folio_mark_dirty(folio);3874folio_wait_stable(folio);3875out:3876sb_end_pagefault(mapping->host->i_sb);3877return ret;3878}38793880const struct vm_operations_struct generic_file_vm_ops = {3881.fault = filemap_fault,3882.map_pages = filemap_map_pages,3883.page_mkwrite = filemap_page_mkwrite,3884};38853886/* This is used for a general mmap of a disk file */38873888int generic_file_mmap(struct file *file, struct vm_area_struct *vma)3889{3890struct address_space *mapping = file->f_mapping;38913892if (!mapping->a_ops->read_folio)3893return -ENOEXEC;3894file_accessed(file);3895vma->vm_ops = &generic_file_vm_ops;3896return 0;3897}38983899int generic_file_mmap_prepare(struct vm_area_desc *desc)3900{3901struct file *file = desc->file;3902struct address_space *mapping = file->f_mapping;39033904if (!mapping->a_ops->read_folio)3905return -ENOEXEC;3906file_accessed(file);3907desc->vm_ops = &generic_file_vm_ops;3908return 0;3909}39103911/*3912* This is for filesystems which do not implement ->writepage.3913*/3914int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)3915{3916if (vma_is_shared_maywrite(vma))3917return -EINVAL;3918return generic_file_mmap(file, vma);3919}39203921int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc)3922{3923if (is_shared_maywrite(desc->vm_flags))3924return -EINVAL;3925return generic_file_mmap_prepare(desc);3926}3927#else3928vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf)3929{3930return VM_FAULT_SIGBUS;3931}3932int generic_file_mmap(struct file *file, struct vm_area_struct *vma)3933{3934return -ENOSYS;3935}3936int generic_file_mmap_prepare(struct vm_area_desc *desc)3937{3938return -ENOSYS;3939}3940int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)3941{3942return -ENOSYS;3943}3944int generic_file_readonly_mmap_prepare(struct vm_area_desc *desc)3945{3946return -ENOSYS;3947}3948#endif /* CONFIG_MMU */39493950EXPORT_SYMBOL(filemap_page_mkwrite);3951EXPORT_SYMBOL(generic_file_mmap);3952EXPORT_SYMBOL(generic_file_mmap_prepare);3953EXPORT_SYMBOL(generic_file_readonly_mmap);3954EXPORT_SYMBOL(generic_file_readonly_mmap_prepare);39553956static struct folio *do_read_cache_folio(struct address_space *mapping,3957pgoff_t index, filler_t filler, struct file *file, gfp_t gfp)3958{3959struct folio *folio;3960int err;39613962if (!filler)3963filler = mapping->a_ops->read_folio;3964repeat:3965folio = filemap_get_folio(mapping, index);3966if (IS_ERR(folio)) {3967folio = filemap_alloc_folio(gfp,3968mapping_min_folio_order(mapping));3969if (!folio)3970return ERR_PTR(-ENOMEM);3971index = mapping_align_index(mapping, index);3972err = filemap_add_folio(mapping, folio, index, gfp);3973if (unlikely(err)) {3974folio_put(folio);3975if (err == -EEXIST)3976goto repeat;3977/* Presumably ENOMEM for xarray node */3978return ERR_PTR(err);3979}39803981goto filler;3982}3983if (folio_test_uptodate(folio))3984goto out;39853986if (!folio_trylock(folio)) {3987folio_put_wait_locked(folio, TASK_UNINTERRUPTIBLE);3988goto repeat;3989}39903991/* Folio was truncated from mapping */3992if (!folio->mapping) {3993folio_unlock(folio);3994folio_put(folio);3995goto repeat;3996}39973998/* Someone else locked and filled the page in a very small window */3999if (folio_test_uptodate(folio)) {4000folio_unlock(folio);4001goto out;4002}40034004filler:4005err = filemap_read_folio(file, filler, folio);4006if (err) {4007folio_put(folio);4008if (err == AOP_TRUNCATED_PAGE)4009goto repeat;4010return ERR_PTR(err);4011}40124013out:4014folio_mark_accessed(folio);4015return folio;4016}40174018/**4019* read_cache_folio - Read into page cache, fill it if needed.4020* @mapping: The address_space to read from.4021* @index: The index to read.4022* @filler: Function to perform the read, or NULL to use aops->read_folio().4023* @file: Passed to filler function, may be NULL if not required.4024*4025* Read one page into the page cache. If it succeeds, the folio returned4026* will contain @index, but it may not be the first page of the folio.4027*4028* If the filler function returns an error, it will be returned to the4029* caller.4030*4031* Context: May sleep. Expects mapping->invalidate_lock to be held.4032* Return: An uptodate folio on success, ERR_PTR() on failure.4033*/4034struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,4035filler_t filler, struct file *file)4036{4037return do_read_cache_folio(mapping, index, filler, file,4038mapping_gfp_mask(mapping));4039}4040EXPORT_SYMBOL(read_cache_folio);40414042/**4043* mapping_read_folio_gfp - Read into page cache, using specified allocation flags.4044* @mapping: The address_space for the folio.4045* @index: The index that the allocated folio will contain.4046* @gfp: The page allocator flags to use if allocating.4047*4048* This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with4049* any new memory allocations done using the specified allocation flags.4050*4051* The most likely error from this function is EIO, but ENOMEM is4052* possible and so is EINTR. If ->read_folio returns another error,4053* that will be returned to the caller.4054*4055* The function expects mapping->invalidate_lock to be already held.4056*4057* Return: Uptodate folio on success, ERR_PTR() on failure.4058*/4059struct folio *mapping_read_folio_gfp(struct address_space *mapping,4060pgoff_t index, gfp_t gfp)4061{4062return do_read_cache_folio(mapping, index, NULL, NULL, gfp);4063}4064EXPORT_SYMBOL(mapping_read_folio_gfp);40654066static struct page *do_read_cache_page(struct address_space *mapping,4067pgoff_t index, filler_t *filler, struct file *file, gfp_t gfp)4068{4069struct folio *folio;40704071folio = do_read_cache_folio(mapping, index, filler, file, gfp);4072if (IS_ERR(folio))4073return &folio->page;4074return folio_file_page(folio, index);4075}40764077struct page *read_cache_page(struct address_space *mapping,4078pgoff_t index, filler_t *filler, struct file *file)4079{4080return do_read_cache_page(mapping, index, filler, file,4081mapping_gfp_mask(mapping));4082}4083EXPORT_SYMBOL(read_cache_page);40844085/**4086* read_cache_page_gfp - read into page cache, using specified page allocation flags.4087* @mapping: the page's address_space4088* @index: the page index4089* @gfp: the page allocator flags to use if allocating4090*4091* This is the same as "read_mapping_page(mapping, index, NULL)", but with4092* any new page allocations done using the specified allocation flags.4093*4094* If the page does not get brought uptodate, return -EIO.4095*4096* The function expects mapping->invalidate_lock to be already held.4097*4098* Return: up to date page on success, ERR_PTR() on failure.4099*/4100struct page *read_cache_page_gfp(struct address_space *mapping,4101pgoff_t index,4102gfp_t gfp)4103{4104return do_read_cache_page(mapping, index, NULL, NULL, gfp);4105}4106EXPORT_SYMBOL(read_cache_page_gfp);41074108/*4109* Warn about a page cache invalidation failure during a direct I/O write.4110*/4111static void dio_warn_stale_pagecache(struct file *filp)4112{4113static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST);4114char pathname[128];4115char *path;41164117errseq_set(&filp->f_mapping->wb_err, -EIO);4118if (__ratelimit(&_rs)) {4119path = file_path(filp, pathname, sizeof(pathname));4120if (IS_ERR(path))4121path = "(unknown)";4122pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision with buffered I/O!\n");4123pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,4124current->comm);4125}4126}41274128void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count)4129{4130struct address_space *mapping = iocb->ki_filp->f_mapping;41314132if (mapping->nrpages &&4133invalidate_inode_pages2_range(mapping,4134iocb->ki_pos >> PAGE_SHIFT,4135(iocb->ki_pos + count - 1) >> PAGE_SHIFT))4136dio_warn_stale_pagecache(iocb->ki_filp);4137}41384139ssize_t4140generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from)4141{4142struct address_space *mapping = iocb->ki_filp->f_mapping;4143size_t write_len = iov_iter_count(from);4144ssize_t written;41454146/*4147* If a page can not be invalidated, return 0 to fall back4148* to buffered write.4149*/4150written = kiocb_invalidate_pages(iocb, write_len);4151if (written) {4152if (written == -EBUSY)4153return 0;4154return written;4155}41564157written = mapping->a_ops->direct_IO(iocb, from);41584159/*4160* Finally, try again to invalidate clean pages which might have been4161* cached by non-direct readahead, or faulted in by get_user_pages()4162* if the source of the write was an mmap'ed region of the file4163* we're writing. Either one is a pretty crazy thing to do,4164* so we don't support it 100%. If this invalidation4165* fails, tough, the write still worked...4166*4167* Most of the time we do not need this since dio_complete() will do4168* the invalidation for us. However there are some file systems that4169* do not end up with dio_complete() being called, so let's not break4170* them by removing it completely.4171*4172* Noticeable example is a blkdev_direct_IO().4173*4174* Skip invalidation for async writes or if mapping has no pages.4175*/4176if (written > 0) {4177struct inode *inode = mapping->host;4178loff_t pos = iocb->ki_pos;41794180kiocb_invalidate_post_direct_write(iocb, written);4181pos += written;4182write_len -= written;4183if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {4184i_size_write(inode, pos);4185mark_inode_dirty(inode);4186}4187iocb->ki_pos = pos;4188}4189if (written != -EIOCBQUEUED)4190iov_iter_revert(from, write_len - iov_iter_count(from));4191return written;4192}4193EXPORT_SYMBOL(generic_file_direct_write);41944195ssize_t generic_perform_write(struct kiocb *iocb, struct iov_iter *i)4196{4197struct file *file = iocb->ki_filp;4198loff_t pos = iocb->ki_pos;4199struct address_space *mapping = file->f_mapping;4200const struct address_space_operations *a_ops = mapping->a_ops;4201size_t chunk = mapping_max_folio_size(mapping);4202long status = 0;4203ssize_t written = 0;42044205do {4206struct folio *folio;4207size_t offset; /* Offset into folio */4208size_t bytes; /* Bytes to write to folio */4209size_t copied; /* Bytes copied from user */4210void *fsdata = NULL;42114212bytes = iov_iter_count(i);4213retry:4214offset = pos & (chunk - 1);4215bytes = min(chunk - offset, bytes);4216balance_dirty_pages_ratelimited(mapping);42174218if (fatal_signal_pending(current)) {4219status = -EINTR;4220break;4221}42224223status = a_ops->write_begin(iocb, mapping, pos, bytes,4224&folio, &fsdata);4225if (unlikely(status < 0))4226break;42274228offset = offset_in_folio(folio, pos);4229if (bytes > folio_size(folio) - offset)4230bytes = folio_size(folio) - offset;42314232if (mapping_writably_mapped(mapping))4233flush_dcache_folio(folio);42344235/*4236* Faults here on mmap()s can recurse into arbitrary4237* filesystem code. Lots of locks are held that can4238* deadlock. Use an atomic copy to avoid deadlocking4239* in page fault handling.4240*/4241copied = copy_folio_from_iter_atomic(folio, offset, bytes, i);4242flush_dcache_folio(folio);42434244status = a_ops->write_end(iocb, mapping, pos, bytes, copied,4245folio, fsdata);4246if (unlikely(status != copied)) {4247iov_iter_revert(i, copied - max(status, 0L));4248if (unlikely(status < 0))4249break;4250}4251cond_resched();42524253if (unlikely(status == 0)) {4254/*4255* A short copy made ->write_end() reject the4256* thing entirely. Might be memory poisoning4257* halfway through, might be a race with munmap,4258* might be severe memory pressure.4259*/4260if (chunk > PAGE_SIZE)4261chunk /= 2;4262if (copied) {4263bytes = copied;4264goto retry;4265}42664267/*4268* 'folio' is now unlocked and faults on it can be4269* handled. Ensure forward progress by trying to4270* fault it in now.4271*/4272if (fault_in_iov_iter_readable(i, bytes) == bytes) {4273status = -EFAULT;4274break;4275}4276} else {4277pos += status;4278written += status;4279}4280} while (iov_iter_count(i));42814282if (!written)4283return status;4284iocb->ki_pos += written;4285return written;4286}4287EXPORT_SYMBOL(generic_perform_write);42884289/**4290* __generic_file_write_iter - write data to a file4291* @iocb: IO state structure (file, offset, etc.)4292* @from: iov_iter with data to write4293*4294* This function does all the work needed for actually writing data to a4295* file. It does all basic checks, removes SUID from the file, updates4296* modification times and calls proper subroutines depending on whether we4297* do direct IO or a standard buffered write.4298*4299* It expects i_rwsem to be grabbed unless we work on a block device or similar4300* object which does not need locking at all.4301*4302* This function does *not* take care of syncing data in case of O_SYNC write.4303* A caller has to handle it. This is mainly due to the fact that we want to4304* avoid syncing under i_rwsem.4305*4306* Return:4307* * number of bytes written, even for truncated writes4308* * negative error code if no data has been written at all4309*/4310ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)4311{4312struct file *file = iocb->ki_filp;4313struct address_space *mapping = file->f_mapping;4314struct inode *inode = mapping->host;4315ssize_t ret;43164317ret = file_remove_privs(file);4318if (ret)4319return ret;43204321ret = file_update_time(file);4322if (ret)4323return ret;43244325if (iocb->ki_flags & IOCB_DIRECT) {4326ret = generic_file_direct_write(iocb, from);4327/*4328* If the write stopped short of completing, fall back to4329* buffered writes. Some filesystems do this for writes to4330* holes, for example. For DAX files, a buffered write will4331* not succeed (even if it did, DAX does not handle dirty4332* page-cache pages correctly).4333*/4334if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))4335return ret;4336return direct_write_fallback(iocb, from, ret,4337generic_perform_write(iocb, from));4338}43394340return generic_perform_write(iocb, from);4341}4342EXPORT_SYMBOL(__generic_file_write_iter);43434344/**4345* generic_file_write_iter - write data to a file4346* @iocb: IO state structure4347* @from: iov_iter with data to write4348*4349* This is a wrapper around __generic_file_write_iter() to be used by most4350* filesystems. It takes care of syncing the file in case of O_SYNC file4351* and acquires i_rwsem as needed.4352* Return:4353* * negative error code if no data has been written at all of4354* vfs_fsync_range() failed for a synchronous write4355* * number of bytes written, even for truncated writes4356*/4357ssize_t generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from)4358{4359struct file *file = iocb->ki_filp;4360struct inode *inode = file->f_mapping->host;4361ssize_t ret;43624363inode_lock(inode);4364ret = generic_write_checks(iocb, from);4365if (ret > 0)4366ret = __generic_file_write_iter(iocb, from);4367inode_unlock(inode);43684369if (ret > 0)4370ret = generic_write_sync(iocb, ret);4371return ret;4372}4373EXPORT_SYMBOL(generic_file_write_iter);43744375/**4376* filemap_release_folio() - Release fs-specific metadata on a folio.4377* @folio: The folio which the kernel is trying to free.4378* @gfp: Memory allocation flags (and I/O mode).4379*4380* The address_space is trying to release any data attached to a folio4381* (presumably at folio->private).4382*4383* This will also be called if the private_2 flag is set on a page,4384* indicating that the folio has other metadata associated with it.4385*4386* The @gfp argument specifies whether I/O may be performed to release4387* this page (__GFP_IO), and whether the call may block4388* (__GFP_RECLAIM & __GFP_FS).4389*4390* Return: %true if the release was successful, otherwise %false.4391*/4392bool filemap_release_folio(struct folio *folio, gfp_t gfp)4393{4394struct address_space * const mapping = folio->mapping;43954396BUG_ON(!folio_test_locked(folio));4397if (!folio_needs_release(folio))4398return true;4399if (folio_test_writeback(folio))4400return false;44014402if (mapping && mapping->a_ops->release_folio)4403return mapping->a_ops->release_folio(folio, gfp);4404return try_to_free_buffers(folio);4405}4406EXPORT_SYMBOL(filemap_release_folio);44074408/**4409* filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache4410* @inode: The inode to flush4411* @flush: Set to write back rather than simply invalidate.4412* @start: First byte to in range.4413* @end: Last byte in range (inclusive), or LLONG_MAX for everything from start4414* onwards.4415*4416* Invalidate all the folios on an inode that contribute to the specified4417* range, possibly writing them back first. Whilst the operation is4418* undertaken, the invalidate lock is held to prevent new folios from being4419* installed.4420*/4421int filemap_invalidate_inode(struct inode *inode, bool flush,4422loff_t start, loff_t end)4423{4424struct address_space *mapping = inode->i_mapping;4425pgoff_t first = start >> PAGE_SHIFT;4426pgoff_t last = end >> PAGE_SHIFT;4427pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;44284429if (!mapping || !mapping->nrpages || end < start)4430goto out;44314432/* Prevent new folios from being added to the inode. */4433filemap_invalidate_lock(mapping);44344435if (!mapping->nrpages)4436goto unlock;44374438unmap_mapping_pages(mapping, first, nr, false);44394440/* Write back the data if we're asked to. */4441if (flush) {4442struct writeback_control wbc = {4443.sync_mode = WB_SYNC_ALL,4444.nr_to_write = LONG_MAX,4445.range_start = start,4446.range_end = end,4447};44484449filemap_fdatawrite_wbc(mapping, &wbc);4450}44514452/* Wait for writeback to complete on all folios and discard. */4453invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);44544455unlock:4456filemap_invalidate_unlock(mapping);4457out:4458return filemap_check_errors(mapping);4459}4460EXPORT_SYMBOL_GPL(filemap_invalidate_inode);44614462#ifdef CONFIG_CACHESTAT_SYSCALL4463/**4464* filemap_cachestat() - compute the page cache statistics of a mapping4465* @mapping: The mapping to compute the statistics for.4466* @first_index: The starting page cache index.4467* @last_index: The final page index (inclusive).4468* @cs: the cachestat struct to write the result to.4469*4470* This will query the page cache statistics of a mapping in the4471* page range of [first_index, last_index] (inclusive). The statistics4472* queried include: number of dirty pages, number of pages marked for4473* writeback, and the number of (recently) evicted pages.4474*/4475static void filemap_cachestat(struct address_space *mapping,4476pgoff_t first_index, pgoff_t last_index, struct cachestat *cs)4477{4478XA_STATE(xas, &mapping->i_pages, first_index);4479struct folio *folio;44804481/* Flush stats (and potentially sleep) outside the RCU read section. */4482mem_cgroup_flush_stats_ratelimited(NULL);44834484rcu_read_lock();4485xas_for_each(&xas, folio, last_index) {4486int order;4487unsigned long nr_pages;4488pgoff_t folio_first_index, folio_last_index;44894490/*4491* Don't deref the folio. It is not pinned, and might4492* get freed (and reused) underneath us.4493*4494* We *could* pin it, but that would be expensive for4495* what should be a fast and lightweight syscall.4496*4497* Instead, derive all information of interest from4498* the rcu-protected xarray.4499*/45004501if (xas_retry(&xas, folio))4502continue;45034504order = xas_get_order(&xas);4505nr_pages = 1 << order;4506folio_first_index = round_down(xas.xa_index, 1 << order);4507folio_last_index = folio_first_index + nr_pages - 1;45084509/* Folios might straddle the range boundaries, only count covered pages */4510if (folio_first_index < first_index)4511nr_pages -= first_index - folio_first_index;45124513if (folio_last_index > last_index)4514nr_pages -= folio_last_index - last_index;45154516if (xa_is_value(folio)) {4517/* page is evicted */4518void *shadow = (void *)folio;4519bool workingset; /* not used */45204521cs->nr_evicted += nr_pages;45224523#ifdef CONFIG_SWAP /* implies CONFIG_MMU */4524if (shmem_mapping(mapping)) {4525/* shmem file - in swap cache */4526swp_entry_t swp = radix_to_swp_entry(folio);45274528/* swapin error results in poisoned entry */4529if (non_swap_entry(swp))4530goto resched;45314532/*4533* Getting a swap entry from the shmem4534* inode means we beat4535* shmem_unuse(). rcu_read_lock()4536* ensures swapoff waits for us before4537* freeing the swapper space. However,4538* we can race with swapping and4539* invalidation, so there might not be4540* a shadow in the swapcache (yet).4541*/4542shadow = swap_cache_get_shadow(swp);4543if (!shadow)4544goto resched;4545}4546#endif4547if (workingset_test_recent(shadow, true, &workingset, false))4548cs->nr_recently_evicted += nr_pages;45494550goto resched;4551}45524553/* page is in cache */4554cs->nr_cache += nr_pages;45554556if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))4557cs->nr_dirty += nr_pages;45584559if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))4560cs->nr_writeback += nr_pages;45614562resched:4563if (need_resched()) {4564xas_pause(&xas);4565cond_resched_rcu();4566}4567}4568rcu_read_unlock();4569}45704571/*4572* See mincore: reveal pagecache information only for files4573* that the calling process has write access to, or could (if4574* tried) open for writing.4575*/4576static inline bool can_do_cachestat(struct file *f)4577{4578if (f->f_mode & FMODE_WRITE)4579return true;4580if (inode_owner_or_capable(file_mnt_idmap(f), file_inode(f)))4581return true;4582return file_permission(f, MAY_WRITE) == 0;4583}45844585/*4586* The cachestat(2) system call.4587*4588* cachestat() returns the page cache statistics of a file in the4589* bytes range specified by `off` and `len`: number of cached pages,4590* number of dirty pages, number of pages marked for writeback,4591* number of evicted pages, and number of recently evicted pages.4592*4593* An evicted page is a page that is previously in the page cache4594* but has been evicted since. A page is recently evicted if its last4595* eviction was recent enough that its reentry to the cache would4596* indicate that it is actively being used by the system, and that4597* there is memory pressure on the system.4598*4599* `off` and `len` must be non-negative integers. If `len` > 0,4600* the queried range is [`off`, `off` + `len`]. If `len` == 0,4601* we will query in the range from `off` to the end of the file.4602*4603* The `flags` argument is unused for now, but is included for future4604* extensibility. User should pass 0 (i.e no flag specified).4605*4606* Currently, hugetlbfs is not supported.4607*4608* Because the status of a page can change after cachestat() checks it4609* but before it returns to the application, the returned values may4610* contain stale information.4611*4612* return values:4613* zero - success4614* -EFAULT - cstat or cstat_range points to an illegal address4615* -EINVAL - invalid flags4616* -EBADF - invalid file descriptor4617* -EOPNOTSUPP - file descriptor is of a hugetlbfs file4618*/4619SYSCALL_DEFINE4(cachestat, unsigned int, fd,4620struct cachestat_range __user *, cstat_range,4621struct cachestat __user *, cstat, unsigned int, flags)4622{4623CLASS(fd, f)(fd);4624struct address_space *mapping;4625struct cachestat_range csr;4626struct cachestat cs;4627pgoff_t first_index, last_index;46284629if (fd_empty(f))4630return -EBADF;46314632if (copy_from_user(&csr, cstat_range,4633sizeof(struct cachestat_range)))4634return -EFAULT;46354636/* hugetlbfs is not supported */4637if (is_file_hugepages(fd_file(f)))4638return -EOPNOTSUPP;46394640if (!can_do_cachestat(fd_file(f)))4641return -EPERM;46424643if (flags != 0)4644return -EINVAL;46454646first_index = csr.off >> PAGE_SHIFT;4647last_index =4648csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;4649memset(&cs, 0, sizeof(struct cachestat));4650mapping = fd_file(f)->f_mapping;4651filemap_cachestat(mapping, first_index, last_index, &cs);46524653if (copy_to_user(cstat, &cs, sizeof(struct cachestat)))4654return -EFAULT;46554656return 0;4657}4658#endif /* CONFIG_CACHESTAT_SYSCALL */465946604661