Line data Source code
1 : /* SPDX-License-Identifier: GPL-2.0 */
2 : #ifndef _LINUX_PAGEMAP_H
3 : #define _LINUX_PAGEMAP_H
4 :
5 : /*
6 : * Copyright 1995 Linus Torvalds
7 : */
8 : #include <linux/mm.h>
9 : #include <linux/fs.h>
10 : #include <linux/list.h>
11 : #include <linux/highmem.h>
12 : #include <linux/compiler.h>
13 : #include <linux/uaccess.h>
14 : #include <linux/gfp.h>
15 : #include <linux/bitops.h>
16 : #include <linux/hardirq.h> /* for in_interrupt() */
17 : #include <linux/hugetlb_inline.h>
18 :
19 : struct folio_batch;
20 :
21 : unsigned long invalidate_mapping_pages(struct address_space *mapping,
22 : pgoff_t start, pgoff_t end);
23 :
24 : static inline void invalidate_remote_inode(struct inode *inode)
25 : {
26 : if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
27 : S_ISLNK(inode->i_mode))
28 : invalidate_mapping_pages(inode->i_mapping, 0, -1);
29 : }
30 : int invalidate_inode_pages2(struct address_space *mapping);
31 : int invalidate_inode_pages2_range(struct address_space *mapping,
32 : pgoff_t start, pgoff_t end);
33 : int write_inode_now(struct inode *, int sync);
34 : int filemap_fdatawrite(struct address_space *);
35 : int filemap_flush(struct address_space *);
36 : int filemap_fdatawait_keep_errors(struct address_space *mapping);
37 : int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
38 : int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
39 : loff_t start_byte, loff_t end_byte);
40 :
41 : static inline int filemap_fdatawait(struct address_space *mapping)
42 : {
43 0 : return filemap_fdatawait_range(mapping, 0, LLONG_MAX);
44 : }
45 :
46 : bool filemap_range_has_page(struct address_space *, loff_t lstart, loff_t lend);
47 : int filemap_write_and_wait_range(struct address_space *mapping,
48 : loff_t lstart, loff_t lend);
49 : int __filemap_fdatawrite_range(struct address_space *mapping,
50 : loff_t start, loff_t end, int sync_mode);
51 : int filemap_fdatawrite_range(struct address_space *mapping,
52 : loff_t start, loff_t end);
53 : int filemap_check_errors(struct address_space *mapping);
54 : void __filemap_set_wb_err(struct address_space *mapping, int err);
55 : int filemap_fdatawrite_wbc(struct address_space *mapping,
56 : struct writeback_control *wbc);
57 :
58 : static inline int filemap_write_and_wait(struct address_space *mapping)
59 : {
60 0 : return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
61 : }
62 :
63 : /**
64 : * filemap_set_wb_err - set a writeback error on an address_space
65 : * @mapping: mapping in which to set writeback error
66 : * @err: error to be set in mapping
67 : *
68 : * When writeback fails in some way, we must record that error so that
69 : * userspace can be informed when fsync and the like are called. We endeavor
70 : * to report errors on any file that was open at the time of the error. Some
71 : * internal callers also need to know when writeback errors have occurred.
72 : *
73 : * When a writeback error occurs, most filesystems will want to call
74 : * filemap_set_wb_err to record the error in the mapping so that it will be
75 : * automatically reported whenever fsync is called on the file.
76 : */
77 : static inline void filemap_set_wb_err(struct address_space *mapping, int err)
78 : {
79 : /* Fastpath for common case of no error */
80 : if (unlikely(err))
81 : __filemap_set_wb_err(mapping, err);
82 : }
83 :
84 : /**
85 : * filemap_check_wb_err - has an error occurred since the mark was sampled?
86 : * @mapping: mapping to check for writeback errors
87 : * @since: previously-sampled errseq_t
88 : *
89 : * Grab the errseq_t value from the mapping, and see if it has changed "since"
90 : * the given value was sampled.
91 : *
92 : * If it has then report the latest error set, otherwise return 0.
93 : */
94 : static inline int filemap_check_wb_err(struct address_space *mapping,
95 : errseq_t since)
96 : {
97 : return errseq_check(&mapping->wb_err, since);
98 : }
99 :
100 : /**
101 : * filemap_sample_wb_err - sample the current errseq_t to test for later errors
102 : * @mapping: mapping to be sampled
103 : *
104 : * Writeback errors are always reported relative to a particular sample point
105 : * in the past. This function provides those sample points.
106 : */
107 : static inline errseq_t filemap_sample_wb_err(struct address_space *mapping)
108 : {
109 0 : return errseq_sample(&mapping->wb_err);
110 : }
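/*
 * A minimal usage sketch (fs_fsync_sketch() is a hypothetical helper,
 * not a kernel API): sample the errseq_t before writing, then check it
 * afterwards, as an fsync-style path might do.
 */
static inline int fs_fsync_sketch(struct address_space *mapping)
{
	errseq_t since = filemap_sample_wb_err(mapping);
	int ret = filemap_write_and_wait(mapping);

	/* Report any writeback error recorded since the sample point. */
	return ret ? ret : filemap_check_wb_err(mapping, since);
}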
111 :
112 : /**
113 : * file_sample_sb_err - sample the current errseq_t to test for later errors
114 : * @file: file pointer to be sampled
115 : *
116 : * Grab the most current superblock-level errseq_t value for the given
117 : * struct file.
118 : */
119 : static inline errseq_t file_sample_sb_err(struct file *file)
120 : {
121 0 : return errseq_sample(&file->f_path.dentry->d_sb->s_wb_err);
122 : }
123 :
124 : /*
125 : * Flush file data before changing attributes. Caller must hold any locks
126 : * required to prevent further writes to this file until we're done setting
127 : * flags.
128 : */
129 0 : static inline int inode_drain_writes(struct inode *inode)
130 : {
131 0 : inode_dio_wait(inode);
132 0 : return filemap_write_and_wait(inode->i_mapping);
133 : }
134 :
135 : static inline bool mapping_empty(struct address_space *mapping)
136 : {
137 0 : return xa_empty(&mapping->i_pages);
138 : }
139 :
140 : /*
141 : * mapping_shrinkable - test if page cache state allows inode reclaim
142 : * @mapping: the page cache mapping
143 : *
144 : * This checks the mapping's cache state for the purpose of inode
145 : * reclaim and LRU management.
146 : *
147 : * The caller is expected to hold the i_lock, but is not required to
148 : * hold the i_pages lock, which usually protects cache state. That's
149 : * because the i_lock and the list_lru lock that protect the inode and
150 : * its LRU state don't nest inside the irq-safe i_pages lock.
151 : *
152 : * Cache deletions are performed under the i_lock, which ensures that
153 : * when an inode goes empty, it will reliably get queued on the LRU.
154 : *
155 : * Cache additions do not acquire the i_lock and may race with this
156 : * check, in which case we'll report the inode as shrinkable when it
157 : * has cache pages. This is okay: the shrinker also checks the
158 : * refcount and the referenced bit, which will be elevated or set in
159 : * the process of adding new cache pages to an inode.
160 : */
161 : static inline bool mapping_shrinkable(struct address_space *mapping)
162 : {
163 : void *head;
164 :
165 : /*
166 : * On highmem systems, there could be lowmem pressure from the
167 : * inodes before there is highmem pressure from the page
168 : * cache. Make inodes shrinkable regardless of cache state.
169 : */
170 : if (IS_ENABLED(CONFIG_HIGHMEM))
171 : return true;
172 :
173 : /* Cache completely empty? Shrink away. */
174 0 : head = rcu_access_pointer(mapping->i_pages.xa_head);
175 0 : if (!head)
176 : return true;
177 :
178 : /*
179 : * The xarray stores single offset-0 entries directly in the
180 : * head pointer, which allows non-resident page cache entries
181 : * to escape the shadow shrinker's list of xarray nodes. The
182 : * inode shrinker needs to pick them up under memory pressure.
183 : */
184 0 : if (!xa_is_node(head) && xa_is_value(head))
185 : return true;
186 :
187 : return false;
188 : }
189 :
190 : /*
191 : * Bits in mapping->flags.
192 : */
193 : enum mapping_flags {
194 : AS_EIO = 0, /* IO error on async write */
195 : AS_ENOSPC = 1, /* ENOSPC on async write */
196 : AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */
197 : AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */
198 : AS_EXITING = 4, /* final truncate in progress */
199 : /* writeback related tags are not used */
200 : AS_NO_WRITEBACK_TAGS = 5,
201 : AS_LARGE_FOLIO_SUPPORT = 6,
202 : };
203 :
204 : /**
205 : * mapping_set_error - record a writeback error in the address_space
206 : * @mapping: the mapping in which an error should be set
207 : * @error: the error to set in the mapping
208 : *
209 : * When writeback fails in some way, we must record that error so that
210 : * userspace can be informed when fsync and the like are called. We endeavor
211 : * to report errors on any file that was open at the time of the error. Some
212 : * internal callers also need to know when writeback errors have occurred.
213 : *
214 : * When a writeback error occurs, most filesystems will want to call
215 : * mapping_set_error to record the error in the mapping so that it can be
216 : * reported when the application calls fsync(2).
217 : */
218 0 : static inline void mapping_set_error(struct address_space *mapping, int error)
219 : {
220 0 : if (likely(!error))
221 : return;
222 :
223 : /* Record in wb_err for checkers using errseq_t based tracking */
224 0 : __filemap_set_wb_err(mapping, error);
225 :
226 : /* Record it in superblock */
227 0 : if (mapping->host)
228 0 : errseq_set(&mapping->host->i_sb->s_wb_err, error);
229 :
230 : /* Record it in flags for now, for legacy callers */
231 0 : if (error == -ENOSPC)
232 0 : set_bit(AS_ENOSPC, &mapping->flags);
233 : else
234 0 : set_bit(AS_EIO, &mapping->flags);
235 : }
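/*
 * A minimal usage sketch (fs_end_folio_writeback() is hypothetical): a
 * writeback completion path records failures so that a later fsync will
 * see them. folio_end_writeback() is forward-declared here; its real
 * declaration appears further down in this header.
 */
void folio_end_writeback(struct folio *folio);

static inline void fs_end_folio_writeback(struct folio *folio, int err)
{
	if (unlikely(err))
		mapping_set_error(folio->mapping, err);
	folio_end_writeback(folio);
}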
236 :
237 : static inline void mapping_set_unevictable(struct address_space *mapping)
238 : {
239 8 : set_bit(AS_UNEVICTABLE, &mapping->flags);
240 : }
241 :
242 : static inline void mapping_clear_unevictable(struct address_space *mapping)
243 : {
244 0 : clear_bit(AS_UNEVICTABLE, &mapping->flags);
245 : }
246 :
247 : static inline bool mapping_unevictable(struct address_space *mapping)
248 : {
249 0 : return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
250 : }
251 :
252 : static inline void mapping_set_exiting(struct address_space *mapping)
253 : {
254 0 : set_bit(AS_EXITING, &mapping->flags);
255 : }
256 :
257 : static inline int mapping_exiting(struct address_space *mapping)
258 : {
259 0 : return test_bit(AS_EXITING, &mapping->flags);
260 : }
261 :
262 : static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
263 : {
264 0 : set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
265 : }
266 :
267 : static inline int mapping_use_writeback_tags(struct address_space *mapping)
268 : {
269 0 : return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
270 : }
271 :
272 : static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
273 : {
274 : return mapping->gfp_mask;
275 : }
276 :
277 : /* Restricts the given gfp_mask to what the mapping allows. */
278 : static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
279 : gfp_t gfp_mask)
280 : {
281 0 : return mapping_gfp_mask(mapping) & gfp_mask;
282 : }
283 :
284 : /*
285 : * This is non-atomic. Only to be used before the mapping is activated.
286 : * Probably needs a barrier...
287 : */
288 : static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
289 : {
290 21 : m->gfp_mask = mask;
291 : }
292 :
293 : /**
294 : * mapping_set_large_folios() - Indicate the file supports large folios.
295 : * @mapping: The file.
296 : *
297 : * The filesystem should call this function in its inode constructor to
298 : * indicate that the VFS can use large folios to cache the contents of
299 : * the file.
300 : *
301 : * Context: This should not be called while the inode is active as it
302 : * is non-atomic.
303 : */
304 : static inline void mapping_set_large_folios(struct address_space *mapping)
305 : {
306 2 : __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
307 : }
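/*
 * A minimal usage sketch (fs_init_mapping() is hypothetical): an inode
 * constructor can combine the non-atomic mapping setters before the
 * inode becomes visible.
 */
static inline void fs_init_mapping(struct inode *inode)
{
	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE);
	mapping_set_large_folios(inode->i_mapping);
}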
308 :
309 : /*
310 : * Large folio support currently depends on THP. These dependencies are
311 : * being worked on but are not yet fixed.
312 : */
313 : static inline bool mapping_large_folio_support(struct address_space *mapping)
314 : {
315 : return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
316 : test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
317 : }
318 :
319 : static inline int filemap_nr_thps(struct address_space *mapping)
320 : {
321 : #ifdef CONFIG_READ_ONLY_THP_FOR_FS
322 : return atomic_read(&mapping->nr_thps);
323 : #else
324 : return 0;
325 : #endif
326 : }
327 :
328 : static inline void filemap_nr_thps_inc(struct address_space *mapping)
329 : {
330 : #ifdef CONFIG_READ_ONLY_THP_FOR_FS
331 : if (!mapping_large_folio_support(mapping))
332 : atomic_inc(&mapping->nr_thps);
333 : #else
334 : WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
335 : #endif
336 : }
337 :
338 : static inline void filemap_nr_thps_dec(struct address_space *mapping)
339 : {
340 : #ifdef CONFIG_READ_ONLY_THP_FOR_FS
341 : if (!mapping_large_folio_support(mapping))
342 : atomic_dec(&mapping->nr_thps);
343 : #else
344 : WARN_ON_ONCE(mapping_large_folio_support(mapping) == 0);
345 : #endif
346 : }
347 :
348 : void release_pages(struct page **pages, int nr);
349 :
350 : struct address_space *page_mapping(struct page *);
351 : struct address_space *folio_mapping(struct folio *);
352 : struct address_space *swapcache_mapping(struct folio *);
353 :
354 : /**
355 : * folio_file_mapping - Find the mapping this folio belongs to.
356 : * @folio: The folio.
357 : *
358 : * For folios which are in the page cache, return the mapping that this
359 : * folio belongs to. Folios in the swap cache return the mapping of the
360 : * swap file or swap device where the data is stored. This is different
361 : * from the mapping returned by folio_mapping(). The only reason to
362 : * use it is if, like NFS, you return 0 from ->activate_swapfile.
363 : *
364 : * Do not call this for folios which aren't in the page cache or swap cache.
365 : */
366 : static inline struct address_space *folio_file_mapping(struct folio *folio)
367 : {
368 : if (unlikely(folio_test_swapcache(folio)))
369 : return swapcache_mapping(folio);
370 :
371 : return folio->mapping;
372 : }
373 :
374 : static inline struct address_space *page_file_mapping(struct page *page)
375 : {
376 : return folio_file_mapping(page_folio(page));
377 : }
378 :
379 : /*
380 : * For file cache pages, return the address_space, otherwise return NULL
381 : */
382 : static inline struct address_space *page_mapping_file(struct page *page)
383 : {
384 : struct folio *folio = page_folio(page);
385 :
386 : if (unlikely(folio_test_swapcache(folio)))
387 : return NULL;
388 : return folio_mapping(folio);
389 : }
390 :
391 : /**
392 : * folio_inode - Get the host inode for this folio.
393 : * @folio: The folio.
394 : *
395 : * For folios which are in the page cache, return the inode that this folio
396 : * belongs to.
397 : *
398 : * Do not call this for folios which aren't in the page cache.
399 : */
400 : static inline struct inode *folio_inode(struct folio *folio)
401 : {
402 0 : return folio->mapping->host;
403 : }
404 :
405 : /**
406 : * folio_attach_private - Attach private data to a folio.
407 : * @folio: Folio to attach data to.
408 : * @data: Data to attach to folio.
409 : *
410 : * Attaching private data to a folio increments the folio's reference count.
411 : * The data must be detached before the folio will be freed.
412 : */
413 : static inline void folio_attach_private(struct folio *folio, void *data)
414 : {
415 0 : folio_get(folio);
416 0 : folio->private = data;
417 0 : folio_set_private(folio);
418 : }
419 :
420 : /**
421 : * folio_change_private - Change private data on a folio.
422 : * @folio: Folio to change the data on.
423 : * @data: Data to set on the folio.
424 : *
425 : * Change the private data attached to a folio and return the old
426 : * data. The folio must previously have had data attached and the data
427 : * must be detached before the folio will be freed.
428 : *
429 : * Return: Data that was previously attached to the folio.
430 : */
431 : static inline void *folio_change_private(struct folio *folio, void *data)
432 : {
433 : void *old = folio_get_private(folio);
434 :
435 : folio->private = data;
436 : return old;
437 : }
438 :
439 : /**
440 : * folio_detach_private - Detach private data from a folio.
441 : * @folio: Folio to detach data from.
442 : *
443 : * Removes the data that was previously attached to the folio and decrements
444 : * the refcount on the folio.
445 : *
446 : * Return: Data that was attached to the folio.
447 : */
448 0 : static inline void *folio_detach_private(struct folio *folio)
449 : {
450 0 : void *data = folio_get_private(folio);
451 :
452 0 : if (!folio_test_private(folio))
453 : return NULL;
454 0 : folio_clear_private(folio);
455 0 : folio->private = NULL;
456 : folio_put(folio);
457 :
458 : return data;
459 : }
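/*
 * A minimal usage sketch (struct fs_state and both helpers are
 * hypothetical): attach/detach bracket the lifetime of per-folio
 * filesystem state, and the reference they take and drop keeps the
 * folio alive in between.
 */
struct fs_state;

static inline void fs_init_folio(struct folio *folio, struct fs_state *state)
{
	/* Takes a folio reference; balanced by fs_teardown_folio(). */
	folio_attach_private(folio, state);
}

static inline struct fs_state *fs_teardown_folio(struct folio *folio)
{
	/* Returns the attached state and drops the folio reference. */
	return folio_detach_private(folio);
}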
460 :
461 : static inline void attach_page_private(struct page *page, void *data)
462 : {
463 0 : folio_attach_private(page_folio(page), data);
464 : }
465 :
466 : static inline void *detach_page_private(struct page *page)
467 : {
468 0 : return folio_detach_private(page_folio(page));
469 : }
470 :
471 : #ifdef CONFIG_NUMA
472 : struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order);
473 : #else
474 : static inline struct folio *filemap_alloc_folio(gfp_t gfp, unsigned int order)
475 : {
476 0 : return folio_alloc(gfp, order);
477 : }
478 : #endif
479 :
480 : static inline struct page *__page_cache_alloc(gfp_t gfp)
481 : {
482 0 : return &filemap_alloc_folio(gfp, 0)->page;
483 : }
484 :
485 : static inline struct page *page_cache_alloc(struct address_space *x)
486 : {
487 : return __page_cache_alloc(mapping_gfp_mask(x));
488 : }
489 :
490 : static inline gfp_t readahead_gfp_mask(struct address_space *x)
491 : {
492 0 : return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
493 : }
494 :
495 : typedef int filler_t(void *, struct page *);
496 :
497 : pgoff_t page_cache_next_miss(struct address_space *mapping,
498 : pgoff_t index, unsigned long max_scan);
499 : pgoff_t page_cache_prev_miss(struct address_space *mapping,
500 : pgoff_t index, unsigned long max_scan);
501 :
502 : #define FGP_ACCESSED 0x00000001
503 : #define FGP_LOCK 0x00000002
504 : #define FGP_CREAT 0x00000004
505 : #define FGP_WRITE 0x00000008
506 : #define FGP_NOFS 0x00000010
507 : #define FGP_NOWAIT 0x00000020
508 : #define FGP_FOR_MMAP 0x00000040
509 : #define FGP_HEAD 0x00000080
510 : #define FGP_ENTRY 0x00000100
511 : #define FGP_STABLE 0x00000200
512 :
513 : struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
514 : int fgp_flags, gfp_t gfp);
515 : struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
516 : int fgp_flags, gfp_t gfp);
517 :
518 : /**
519 : * filemap_get_folio - Find and get a folio.
520 : * @mapping: The address_space to search.
521 : * @index: The page index.
522 : *
523 : * Looks up the page cache entry at @mapping & @index. If a folio is
524 : * present, it is returned with an increased refcount.
525 : *
526 : * Otherwise, %NULL is returned.
527 : */
528 : static inline struct folio *filemap_get_folio(struct address_space *mapping,
529 : pgoff_t index)
530 : {
531 0 : return __filemap_get_folio(mapping, index, 0, 0);
532 : }
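/*
 * A minimal usage sketch (fs_folio_cached() is hypothetical): a lookup
 * returns the folio with an elevated refcount, which the caller must
 * drop with folio_put() when done.
 */
static inline bool fs_folio_cached(struct address_space *mapping,
				   pgoff_t index)
{
	struct folio *folio = filemap_get_folio(mapping, index);

	if (!folio)
		return false;
	folio_put(folio);	/* drop the lookup reference */
	return true;
}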
533 :
534 : /**
535 : * filemap_lock_folio - Find and lock a folio.
536 : * @mapping: The address_space to search.
537 : * @index: The page index.
538 : *
539 : * Looks up the page cache entry at @mapping & @index. If a folio is
540 : * present, it is returned locked with an increased refcount.
541 : *
542 : * Context: May sleep.
543 : * Return: A folio or %NULL if there is no folio in the cache for this
544 : * index. Will not return a shadow, swap or DAX entry.
545 : */
546 : static inline struct folio *filemap_lock_folio(struct address_space *mapping,
547 : pgoff_t index)
548 : {
549 : return __filemap_get_folio(mapping, index, FGP_LOCK, 0);
550 : }
551 :
552 : /**
553 : * find_get_page - find and get a page reference
554 : * @mapping: the address_space to search
555 : * @offset: the page index
556 : *
557 : * Looks up the page cache slot at @mapping & @offset. If there is a
558 : * page cache page, it is returned with an increased refcount.
559 : *
560 : * Otherwise, %NULL is returned.
561 : */
562 : static inline struct page *find_get_page(struct address_space *mapping,
563 : pgoff_t offset)
564 : {
565 0 : return pagecache_get_page(mapping, offset, 0, 0);
566 : }
567 :
568 : static inline struct page *find_get_page_flags(struct address_space *mapping,
569 : pgoff_t offset, int fgp_flags)
570 : {
571 0 : return pagecache_get_page(mapping, offset, fgp_flags, 0);
572 : }
573 :
574 : /**
575 : * find_lock_page - locate, pin and lock a pagecache page
576 : * @mapping: the address_space to search
577 : * @index: the page index
578 : *
579 : * Looks up the page cache entry at @mapping & @index. If there is a
580 : * page cache page, it is returned locked and with an increased
581 : * refcount.
582 : *
583 : * Context: May sleep.
584 : * Return: A struct page or %NULL if there is no page in the cache for this
585 : * index.
586 : */
587 : static inline struct page *find_lock_page(struct address_space *mapping,
588 : pgoff_t index)
589 : {
590 0 : return pagecache_get_page(mapping, index, FGP_LOCK, 0);
591 : }
592 :
593 : /**
594 : * find_or_create_page - locate or add a pagecache page
595 : * @mapping: the page's address_space
596 : * @index: the page's index into the mapping
597 : * @gfp_mask: page allocation mode
598 : *
599 : * Looks up the page cache slot at @mapping & @index. If there is a
600 : * page cache page, it is returned locked and with an increased
601 : * refcount.
602 : *
603 : * If the page is not present, a new page is allocated using @gfp_mask
604 : * and added to the page cache and the VM's LRU list. The page is
605 : * returned locked and with an increased refcount.
606 : *
607 : * On memory exhaustion, %NULL is returned.
608 : *
609 : * find_or_create_page() may sleep, even if @gfp_mask specifies an
610 : * atomic allocation!
611 : */
612 : static inline struct page *find_or_create_page(struct address_space *mapping,
613 : pgoff_t index, gfp_t gfp_mask)
614 : {
615 0 : return pagecache_get_page(mapping, index,
616 : FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
617 : gfp_mask);
618 : }
619 :
620 : /**
621 : * grab_cache_page_nowait - returns locked page at given index in given cache
622 : * @mapping: target address_space
623 : * @index: the page index
624 : *
625 : * Same as grab_cache_page(), but do not wait if the page is unavailable.
626 : * This is intended for speculative data generators, where the data can
627 : * be regenerated if the page couldn't be grabbed. This routine should
628 : * be safe to call while holding the lock for another page.
629 : *
630 : * Clear __GFP_FS when allocating the page to avoid recursion into the fs
631 : * and deadlock against the caller's locked page.
632 : */
633 : static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
634 : pgoff_t index)
635 : {
636 : return pagecache_get_page(mapping, index,
637 : FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
638 : mapping_gfp_mask(mapping));
639 : }
640 :
641 : #define swapcache_index(folio) __page_file_index(&(folio)->page)
642 :
643 : /**
644 : * folio_index - File index of a folio.
645 : * @folio: The folio.
646 : *
647 : * For a folio which is either in the page cache or the swap cache,
648 : * return its index within the address_space it belongs to. If you know
649 : * the folio is definitely in the page cache, you can look at the folio's
650 : * index directly.
651 : *
652 : * Return: The index (offset in units of pages) of a folio in its file.
653 : */
654 0 : static inline pgoff_t folio_index(struct folio *folio)
655 : {
656 0 : if (unlikely(folio_test_swapcache(folio)))
657 0 : return swapcache_index(folio);
658 0 : return folio->index;
659 : }
660 :
661 : /**
662 : * folio_next_index - Get the index of the next folio.
663 : * @folio: The current folio.
664 : *
665 : * Return: The index of the folio which follows this folio in the file.
666 : */
667 : static inline pgoff_t folio_next_index(struct folio *folio)
668 : {
669 : return folio->index + folio_nr_pages(folio);
670 : }
671 :
672 : /**
673 : * folio_file_page - The page for a particular index.
674 : * @folio: The folio which contains this index.
675 : * @index: The index we want to look up.
676 : *
677 : * Sometimes after looking up a folio in the page cache, we need to
678 : * obtain the specific page for an index (eg a page fault).
679 : *
680 : * Return: The page containing the file data for this index.
681 : */
682 : static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
683 : {
684 : /* HugeTLBfs indexes the page cache in units of hpage_size */
685 0 : if (folio_test_hugetlb(folio))
686 : return &folio->page;
687 0 : return folio_page(folio, index & (folio_nr_pages(folio) - 1));
688 : }
689 :
690 : /**
691 : * folio_contains - Does this folio contain this index?
692 : * @folio: The folio.
693 : * @index: The page index within the file.
694 : *
695 : * Context: The caller should have the page locked in order to prevent
696 : * (eg) shmem from moving the page between the page cache and swap cache
697 : * and changing its index in the middle of the operation.
698 : * Return: true or false.
699 : */
700 : static inline bool folio_contains(struct folio *folio, pgoff_t index)
701 : {
702 : /* HugeTLBfs indexes the page cache in units of hpage_size */
703 : if (folio_test_hugetlb(folio))
704 : return folio->index == index;
705 : return index - folio_index(folio) < folio_nr_pages(folio);
706 : }
707 :
708 : /*
709 : * Given the page we found in the page cache, return the page corresponding
710 : * to this index in the file
711 : */
712 : static inline struct page *find_subpage(struct page *head, pgoff_t index)
713 : {
714 : /* HugeTLBfs wants the head page regardless */
715 0 : if (PageHuge(head))
716 : return head;
717 :
718 0 : return head + (index & (thp_nr_pages(head) - 1));
719 : }
720 :
721 : unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
722 : pgoff_t end, unsigned int nr_pages,
723 : struct page **pages);
724 : unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
725 : unsigned int nr_pages, struct page **pages);
726 : unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
727 : pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
728 : struct page **pages);
729 : static inline unsigned find_get_pages_tag(struct address_space *mapping,
730 : pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
731 : struct page **pages)
732 : {
733 : return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
734 : nr_pages, pages);
735 : }
736 :
737 : struct page *grab_cache_page_write_begin(struct address_space *mapping,
738 : pgoff_t index, unsigned flags);
739 :
740 : /*
741 : * Returns locked page at given index in given cache, creating it if needed.
742 : */
743 : static inline struct page *grab_cache_page(struct address_space *mapping,
744 : pgoff_t index)
745 : {
746 0 : return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
747 : }
748 :
749 : struct folio *read_cache_folio(struct address_space *, pgoff_t index,
750 : filler_t *filler, void *data);
751 : struct page *read_cache_page(struct address_space *, pgoff_t index,
752 : filler_t *filler, void *data);
753 : extern struct page * read_cache_page_gfp(struct address_space *mapping,
754 : pgoff_t index, gfp_t gfp_mask);
755 :
756 : static inline struct page *read_mapping_page(struct address_space *mapping,
757 : pgoff_t index, struct file *file)
758 : {
759 0 : return read_cache_page(mapping, index, NULL, file);
760 : }
761 :
762 : static inline struct folio *read_mapping_folio(struct address_space *mapping,
763 : pgoff_t index, struct file *file)
764 : {
765 0 : return read_cache_folio(mapping, index, NULL, file);
766 : }
767 :
768 : /*
769 : * Get index of the page within radix-tree (but not for hugetlb pages).
770 : * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
771 : */
772 : static inline pgoff_t page_to_index(struct page *page)
773 : {
774 : struct page *head;
775 :
776 0 : if (likely(!PageTransTail(page)))
777 : return page->index;
778 :
779 : head = compound_head(page);
780 : /*
781 : * We don't initialize ->index for tail pages: calculate based on
782 : * head page
783 : */
784 : return head->index + page - head;
785 : }
786 :
787 : extern pgoff_t hugetlb_basepage_index(struct page *page);
788 :
789 : /*
790 : * Get the offset in PAGE_SIZE (even for hugetlb pages).
791 : * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
792 : */
793 : static inline pgoff_t page_to_pgoff(struct page *page)
794 : {
795 0 : if (unlikely(PageHuge(page)))
796 : return hugetlb_basepage_index(page);
797 0 : return page_to_index(page);
798 : }
799 :
800 : /*
801 : * Return byte-offset into filesystem object for page.
802 : */
803 : static inline loff_t page_offset(struct page *page)
804 : {
805 0 : return ((loff_t)page->index) << PAGE_SHIFT;
806 : }
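/*
 * Worked example: with 4KiB pages (PAGE_SHIFT == 12), page->index == 3
 * corresponds to byte offset 3 << 12 == 12288 (0x3000) in the file.
 */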
807 :
808 : static inline loff_t page_file_offset(struct page *page)
809 : {
810 0 : return ((loff_t)page_index(page)) << PAGE_SHIFT;
811 : }
812 :
813 : /**
814 : * folio_pos - Returns the byte position of this folio in its file.
815 : * @folio: The folio.
816 : */
817 : static inline loff_t folio_pos(struct folio *folio)
818 : {
819 0 : return page_offset(&folio->page);
820 : }
821 :
822 : /**
823 : * folio_file_pos - Returns the byte position of this folio in its file.
824 : * @folio: The folio.
825 : *
826 : * This differs from folio_pos() for folios which belong to a swap file.
827 : * NFS is the only filesystem today which needs to use folio_file_pos().
828 : */
829 : static inline loff_t folio_file_pos(struct folio *folio)
830 : {
831 : return page_file_offset(&folio->page);
832 : }
833 :
834 : /*
835 : * Get the offset in PAGE_SIZE (even for hugetlb folios).
836 : * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
837 : */
838 : static inline pgoff_t folio_pgoff(struct folio *folio)
839 : {
840 0 : if (unlikely(folio_test_hugetlb(folio)))
841 : return hugetlb_basepage_index(&folio->page);
842 : return folio->index;
843 : }
844 :
845 : extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
846 : unsigned long address);
847 :
848 : static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
849 : unsigned long address)
850 : {
851 : pgoff_t pgoff;
852 0 : if (unlikely(is_vm_hugetlb_page(vma)))
853 : return linear_hugepage_index(vma, address);
854 0 : pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
855 0 : pgoff += vma->vm_pgoff;
856 : return pgoff;
857 : }
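/*
 * Worked example: with 4KiB pages, a VMA with vm_start == 0x7f0000000000
 * and vm_pgoff == 16 maps address 0x7f0000003000 to
 * pgoff = 16 + (0x3000 >> 12) = 19.
 */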
858 :
859 : struct wait_page_key {
860 : struct folio *folio;
861 : int bit_nr;
862 : int page_match;
863 : };
864 :
865 : struct wait_page_queue {
866 : struct folio *folio;
867 : int bit_nr;
868 : wait_queue_entry_t wait;
869 : };
870 :
871 : static inline bool wake_page_match(struct wait_page_queue *wait_page,
872 : struct wait_page_key *key)
873 : {
874 0 : if (wait_page->folio != key->folio)
875 : return false;
876 0 : key->page_match = 1;
877 :
878 0 : if (wait_page->bit_nr != key->bit_nr)
879 : return false;
880 :
881 : return true;
882 : }
883 :
884 : void __folio_lock(struct folio *folio);
885 : int __folio_lock_killable(struct folio *folio);
886 : bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
887 : unsigned int flags);
888 : void unlock_page(struct page *page);
889 : void folio_unlock(struct folio *folio);
890 :
891 : static inline bool folio_trylock(struct folio *folio)
892 : {
893 0 : return likely(!test_and_set_bit_lock(PG_locked, folio_flags(folio, 0)));
894 : }
895 :
896 : /*
897 : * Return true if the page was successfully locked
898 : */
899 : static inline int trylock_page(struct page *page)
900 : {
901 0 : return folio_trylock(page_folio(page));
902 : }
903 :
904 0 : static inline void folio_lock(struct folio *folio)
905 : {
906 : might_sleep();
907 0 : if (!folio_trylock(folio))
908 0 : __folio_lock(folio);
909 0 : }
910 :
911 : /*
912 : * lock_page may only be called if we have the page's inode pinned.
913 : */
914 0 : static inline void lock_page(struct page *page)
915 : {
916 : struct folio *folio;
917 : might_sleep();
918 :
919 0 : folio = page_folio(page);
920 0 : if (!folio_trylock(folio))
921 0 : __folio_lock(folio);
922 0 : }
923 :
924 : static inline int folio_lock_killable(struct folio *folio)
925 : {
926 : might_sleep();
927 : if (!folio_trylock(folio))
928 : return __folio_lock_killable(folio);
929 : return 0;
930 : }
931 :
932 : /*
933 : * lock_page_killable is like lock_page but can be interrupted by fatal
934 : * signals. It returns 0 if it locked the page and -EINTR if it was
935 : * killed while waiting.
936 : */
937 : static inline int lock_page_killable(struct page *page)
938 : {
939 : return folio_lock_killable(page_folio(page));
940 : }
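/*
 * A minimal usage sketch (fs_lock_for_read() is hypothetical): the
 * killable variant in a read path that must also cope with truncation
 * racing against it.
 */
static inline int fs_lock_for_read(struct folio *folio)
{
	int err = folio_lock_killable(folio);

	if (err)
		return err;		/* -EINTR: killed while waiting */
	if (!folio->mapping) {		/* raced with truncation */
		folio_unlock(folio);
		return -ESTALE;
	}
	return 0;
}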
941 :
942 : /*
943 : * lock_page_or_retry - Lock the page, unless this would block and the
944 : * caller indicated that it can handle a retry.
945 : *
946 : * Return value and mmap_lock implications depend on flags; see
947 : * __folio_lock_or_retry().
948 : */
949 0 : static inline bool lock_page_or_retry(struct page *page, struct mm_struct *mm,
950 : unsigned int flags)
951 : {
952 : struct folio *folio;
953 : might_sleep();
954 :
955 0 : folio = page_folio(page);
956 0 : return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
957 : }
958 :
959 : /*
960 : * This is exported only for folio_wait_locked/folio_wait_writeback, etc.,
961 : * and should not be used directly.
962 : */
963 : void folio_wait_bit(struct folio *folio, int bit_nr);
964 : int folio_wait_bit_killable(struct folio *folio, int bit_nr);
965 :
966 : /*
967 : * Wait for a folio to be unlocked.
968 : *
969 : * This must be called with the caller "holding" the folio,
970 : * ie with an increased refcount so that the folio won't
971 : * go away during the wait.
972 : */
973 : static inline void folio_wait_locked(struct folio *folio)
974 : {
975 0 : if (folio_test_locked(folio))
976 : folio_wait_bit(folio, PG_locked);
977 : }
978 :
979 0 : static inline int folio_wait_locked_killable(struct folio *folio)
980 : {
981 0 : if (!folio_test_locked(folio))
982 : return 0;
983 0 : return folio_wait_bit_killable(folio, PG_locked);
984 : }
985 :
986 : static inline void wait_on_page_locked(struct page *page)
987 : {
988 : folio_wait_locked(page_folio(page));
989 : }
990 :
991 : static inline int wait_on_page_locked_killable(struct page *page)
992 : {
993 : return folio_wait_locked_killable(page_folio(page));
994 : }
995 :
996 : int folio_put_wait_locked(struct folio *folio, int state);
997 : void wait_on_page_writeback(struct page *page);
998 : void folio_wait_writeback(struct folio *folio);
999 : int folio_wait_writeback_killable(struct folio *folio);
1000 : void end_page_writeback(struct page *page);
1001 : void folio_end_writeback(struct folio *folio);
1002 : void wait_for_stable_page(struct page *page);
1003 : void folio_wait_stable(struct folio *folio);
1004 : void __folio_mark_dirty(struct folio *folio, struct address_space *, int warn);
1005 : static inline void __set_page_dirty(struct page *page,
1006 : struct address_space *mapping, int warn)
1007 : {
1008 0 : __folio_mark_dirty(page_folio(page), mapping, warn);
1009 : }
1010 : void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb);
1011 : void __folio_cancel_dirty(struct folio *folio);
1012 : static inline void folio_cancel_dirty(struct folio *folio)
1013 : {
1014 : /* Avoid atomic ops, locking, etc. when not actually needed. */
1015 0 : if (folio_test_dirty(folio))
1016 0 : __folio_cancel_dirty(folio);
1017 : }
1018 0 : static inline void cancel_dirty_page(struct page *page)
1019 : {
1020 0 : folio_cancel_dirty(page_folio(page));
1021 0 : }
1022 : bool folio_clear_dirty_for_io(struct folio *folio);
1023 : bool clear_page_dirty_for_io(struct page *page);
1024 : void folio_invalidate(struct folio *folio, size_t offset, size_t length);
1025 : int __must_check folio_write_one(struct folio *folio);
1026 : static inline int __must_check write_one_page(struct page *page)
1027 : {
1028 : return folio_write_one(page_folio(page));
1029 : }
1030 :
1031 : int __set_page_dirty_nobuffers(struct page *page);
1032 : bool noop_dirty_folio(struct address_space *mapping, struct folio *folio);
1033 :
1034 : void page_endio(struct page *page, bool is_write, int err);
1035 :
1036 : void folio_end_private_2(struct folio *folio);
1037 : void folio_wait_private_2(struct folio *folio);
1038 : int folio_wait_private_2_killable(struct folio *folio);
1039 :
1040 : /*
1041 : * Add an arbitrary waiter to a page's wait queue
1042 : */
1043 : void folio_add_wait_queue(struct folio *folio, wait_queue_entry_t *waiter);
1044 :
1045 : /*
1046 : * Fault in userspace address range.
1047 : */
1048 : size_t fault_in_writeable(char __user *uaddr, size_t size);
1049 : size_t fault_in_safe_writeable(const char __user *uaddr, size_t size);
1050 : size_t fault_in_readable(const char __user *uaddr, size_t size);
1051 :
1052 : int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
1053 : pgoff_t index, gfp_t gfp);
1054 : int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
1055 : pgoff_t index, gfp_t gfp);
1056 : int filemap_add_folio(struct address_space *mapping, struct folio *folio,
1057 : pgoff_t index, gfp_t gfp);
1058 : void filemap_remove_folio(struct folio *folio);
1059 : void delete_from_page_cache(struct page *page);
1060 : void __filemap_remove_folio(struct folio *folio, void *shadow);
1061 : static inline void __delete_from_page_cache(struct page *page, void *shadow)
1062 : {
1063 : __filemap_remove_folio(page_folio(page), shadow);
1064 : }
1065 : void replace_page_cache_page(struct page *old, struct page *new);
1066 : void delete_from_page_cache_batch(struct address_space *mapping,
1067 : struct folio_batch *fbatch);
1068 : int try_to_release_page(struct page *page, gfp_t gfp);
1069 : bool filemap_release_folio(struct folio *folio, gfp_t gfp);
1070 : loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
1071 : int whence);
1072 :
1073 : /*
1074 : * Like add_to_page_cache_locked, but used to add newly allocated pages:
1075 : * the page is new, so we can just run __SetPageLocked() against it.
1076 : */
1077 : static inline int add_to_page_cache(struct page *page,
1078 : struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
1079 : {
1080 : int error;
1081 :
1082 : __SetPageLocked(page);
1083 : error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
1084 : if (unlikely(error))
1085 : __ClearPageLocked(page);
1086 : return error;
1087 : }
1088 :
1089 : /* Must be non-static for BPF error injection */
1090 : int __filemap_add_folio(struct address_space *mapping, struct folio *folio,
1091 : pgoff_t index, gfp_t gfp, void **shadowp);
1092 :
1093 : bool filemap_range_has_writeback(struct address_space *mapping,
1094 : loff_t start_byte, loff_t end_byte);
1095 :
1096 : /**
1097 : * filemap_range_needs_writeback - check if range potentially needs writeback
1098 : * @mapping: address space within which to check
1099 : * @start_byte: offset in bytes where the range starts
1100 : * @end_byte: offset in bytes where the range ends (inclusive)
1101 : *
1102 : * Find at least one page in the range supplied, usually used to check if
1103 : * direct writing in this range will trigger a writeback. Used by O_DIRECT
1104 : * read/write with IOCB_NOWAIT, to see if the caller needs to do
1105 : * filemap_write_and_wait_range() before proceeding.
1106 : *
1107 : * Return: %true if the caller should do filemap_write_and_wait_range() before
1108 : * doing O_DIRECT to a page in this range, %false otherwise.
1109 : */
1110 0 : static inline bool filemap_range_needs_writeback(struct address_space *mapping,
1111 : loff_t start_byte,
1112 : loff_t end_byte)
1113 : {
1114 0 : if (!mapping->nrpages)
1115 : return false;
1116 0 : if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
1117 0 : !mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
1118 : return false;
1119 0 : return filemap_range_has_writeback(mapping, start_byte, end_byte);
1120 : }
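/*
 * A minimal usage sketch (fs_dio_precheck() is hypothetical): an
 * IOCB_NOWAIT direct-write path bails out with -EAGAIN rather than
 * blocking on writeback, while a blocking path flushes the range.
 */
static inline int fs_dio_precheck(struct kiocb *iocb,
				  struct address_space *mapping,
				  loff_t pos, size_t count)
{
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_needs_writeback(mapping, pos,
						  pos + count - 1))
			return -EAGAIN;
		return 0;
	}
	return filemap_write_and_wait_range(mapping, pos, pos + count - 1);
}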
1121 :
1122 : /**
1123 : * struct readahead_control - Describes a readahead request.
1124 : *
1125 : * A readahead request is for consecutive pages. Filesystems which
1126 : * implement the ->readahead method should call readahead_page() or
1127 : * readahead_page_batch() in a loop and attempt to start I/O against
1128 : * each page in the request.
1129 : *
1130 : * Most of the fields in this struct are private and should be accessed
1131 : * by the functions below.
1132 : *
1133 : * @file: The file, used primarily by network filesystems for authentication.
1134 : * May be NULL if invoked internally by the filesystem.
1135 : * @mapping: Readahead this filesystem object.
1136 : * @ra: File readahead state. May be NULL.
1137 : */
1138 : struct readahead_control {
1139 : struct file *file;
1140 : struct address_space *mapping;
1141 : struct file_ra_state *ra;
1142 : /* private: use the readahead_* accessors instead */
1143 : pgoff_t _index;
1144 : unsigned int _nr_pages;
1145 : unsigned int _batch_count;
1146 : };
1147 :
1148 : #define DEFINE_READAHEAD(ractl, f, r, m, i) \
1149 : struct readahead_control ractl = { \
1150 : .file = f, \
1151 : .mapping = m, \
1152 : .ra = r, \
1153 : ._index = i, \
1154 : }
1155 :
1156 : #define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE)
1157 :
1158 : void page_cache_ra_unbounded(struct readahead_control *,
1159 : unsigned long nr_to_read, unsigned long lookahead_count);
1160 : void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
1161 : void page_cache_async_ra(struct readahead_control *, struct folio *,
1162 : unsigned long req_count);
1163 : void readahead_expand(struct readahead_control *ractl,
1164 : loff_t new_start, size_t new_len);
1165 :
1166 : /**
1167 : * page_cache_sync_readahead - generic file readahead
1168 : * @mapping: address_space which holds the pagecache and I/O vectors
1169 : * @ra: file_ra_state which holds the readahead state
1170 : * @file: Used by the filesystem for authentication.
1171 : * @index: Index of first page to be read.
1172 : * @req_count: Total number of pages being read by the caller.
1173 : *
1174 : * page_cache_sync_readahead() should be called when a cache miss happened:
1175 : * it will submit the read. The readahead logic may decide to piggyback more
1176 : * pages onto the read request if access patterns suggest it will improve
1177 : * performance.
1178 : */
1179 : static inline
1180 : void page_cache_sync_readahead(struct address_space *mapping,
1181 : struct file_ra_state *ra, struct file *file, pgoff_t index,
1182 : unsigned long req_count)
1183 : {
1184 0 : DEFINE_READAHEAD(ractl, file, ra, mapping, index);
1185 0 : page_cache_sync_ra(&ractl, req_count);
1186 : }
1187 :
1188 : /**
1189 : * page_cache_async_readahead - file readahead for marked pages
1190 : * @mapping: address_space which holds the pagecache and I/O vectors
1191 : * @ra: file_ra_state which holds the readahead state
1192 : * @file: Used by the filesystem for authentication.
1193 : * @page: The page at @index which triggered the readahead call.
1194 : * @index: Index of first page to be read.
1195 : * @req_count: Total number of pages being read by the caller.
1196 : *
1197 : * page_cache_async_readahead() should be called when a page is used which
1198 : * is marked as PageReadahead; this is a marker to suggest that the application
1199 : * has used up enough of the readahead window that we should start pulling in
1200 : * more pages.
1201 : */
1202 : static inline
1203 : void page_cache_async_readahead(struct address_space *mapping,
1204 : struct file_ra_state *ra, struct file *file,
1205 : struct page *page, pgoff_t index, unsigned long req_count)
1206 : {
1207 : DEFINE_READAHEAD(ractl, file, ra, mapping, index);
1208 : page_cache_async_ra(&ractl, page_folio(page), req_count);
1209 : }
1210 :
1211 0 : static inline struct folio *__readahead_folio(struct readahead_control *ractl)
1212 : {
1213 : struct folio *folio;
1214 :
1215 0 : BUG_ON(ractl->_batch_count > ractl->_nr_pages);
1216 0 : ractl->_nr_pages -= ractl->_batch_count;
1217 0 : ractl->_index += ractl->_batch_count;
1218 :
1219 0 : if (!ractl->_nr_pages) {
1220 0 : ractl->_batch_count = 0;
1221 0 : return NULL;
1222 : }
1223 :
1224 0 : folio = xa_load(&ractl->mapping->i_pages, ractl->_index);
1225 : VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1226 0 : ractl->_batch_count = folio_nr_pages(folio);
1227 :
1228 0 : return folio;
1229 : }
1230 :
1231 : /**
1232 : * readahead_page - Get the next page to read.
1233 : * @ractl: The current readahead request.
1234 : *
1235 : * Context: The page is locked and has an elevated refcount. The caller
1236 : * should decrease the refcount once the page has been submitted for I/O
1237 : * and unlock the page once all I/O to that page has completed.
1238 : * Return: A pointer to the next page, or %NULL if we are done.
1239 : */
1240 : static inline struct page *readahead_page(struct readahead_control *ractl)
1241 : {
1242 0 : struct folio *folio = __readahead_folio(ractl);
1243 :
1244 0 : return &folio->page;
1245 : }
1246 :
1247 : /**
1248 : * readahead_folio - Get the next folio to read.
1249 : * @ractl: The current readahead request.
1250 : *
1251 : * Context: The folio is locked. The caller should unlock the folio once
1252 : * all I/O to that folio has completed.
1253 : * Return: A pointer to the next folio, or %NULL if we are done.
1254 : */
1255 : static inline struct folio *readahead_folio(struct readahead_control *ractl)
1256 : {
1257 : struct folio *folio = __readahead_folio(ractl);
1258 :
1259 : if (folio)
1260 : folio_put(folio);
1261 : return folio;
1262 : }
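/*
 * A minimal usage sketch (fs_readahead() and fs_read_folio_async() are
 * hypothetical): the usual shape of a ->readahead implementation built
 * on readahead_folio().
 */
void fs_read_folio_async(struct folio *folio);	/* hypothetical I/O submit */

static inline void fs_readahead(struct readahead_control *ractl)
{
	struct folio *folio;

	while ((folio = readahead_folio(ractl)) != NULL) {
		/*
		 * The lookup reference has already been dropped; the
		 * folio is unlocked when its I/O completes.
		 */
		fs_read_folio_async(folio);
	}
}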
1263 :
1264 : static inline unsigned int __readahead_batch(struct readahead_control *rac,
1265 : struct page **array, unsigned int array_sz)
1266 : {
1267 : unsigned int i = 0;
1268 : XA_STATE(xas, &rac->mapping->i_pages, 0);
1269 : struct page *page;
1270 :
1271 : BUG_ON(rac->_batch_count > rac->_nr_pages);
1272 : rac->_nr_pages -= rac->_batch_count;
1273 : rac->_index += rac->_batch_count;
1274 : rac->_batch_count = 0;
1275 :
1276 : xas_set(&xas, rac->_index);
1277 : rcu_read_lock();
1278 : xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
1279 : if (xas_retry(&xas, page))
1280 : continue;
1281 : VM_BUG_ON_PAGE(!PageLocked(page), page);
1282 : VM_BUG_ON_PAGE(PageTail(page), page);
1283 : array[i++] = page;
1284 : rac->_batch_count += thp_nr_pages(page);
1285 : if (i == array_sz)
1286 : break;
1287 : }
1288 : rcu_read_unlock();
1289 :
1290 : return i;
1291 : }
1292 :
1293 : /**
1294 : * readahead_page_batch - Get a batch of pages to read.
1295 : * @rac: The current readahead request.
1296 : * @array: An array of pointers to struct page.
1297 : *
1298 : * Context: The pages are locked and have an elevated refcount. The caller
1299 : * should decrease the refcount on each page once it has been submitted
1300 : * for I/O and unlock each page once all I/O to it has completed.
1301 : * Return: The number of pages placed in the array. 0 indicates the request
1302 : * is complete.
1303 : */
1304 : #define readahead_page_batch(rac, array) \
1305 : __readahead_batch(rac, array, ARRAY_SIZE(array))
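/*
 * A minimal usage sketch (the array size and fs_submit_page() are
 * hypothetical): draining the request in fixed-size batches.
 */
void fs_submit_page(struct page *page);	/* hypothetical I/O submit */

static inline void fs_readahead_batched(struct readahead_control *rac)
{
	struct page *pages[16];
	unsigned int i, nr;

	while ((nr = readahead_page_batch(rac, pages)) != 0) {
		for (i = 0; i < nr; i++)
			fs_submit_page(pages[i]);
	}
}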
1306 :
1307 : /**
1308 : * readahead_pos - The byte offset into the file of this readahead request.
1309 : * @rac: The readahead request.
1310 : */
1311 : static inline loff_t readahead_pos(struct readahead_control *rac)
1312 : {
1313 0 : return (loff_t)rac->_index * PAGE_SIZE;
1314 : }
1315 :
1316 : /**
1317 : * readahead_length - The number of bytes in this readahead request.
1318 : * @rac: The readahead request.
1319 : */
1320 : static inline size_t readahead_length(struct readahead_control *rac)
1321 : {
1322 : return rac->_nr_pages * PAGE_SIZE;
1323 : }
1324 :
1325 : /**
1326 : * readahead_index - The index of the first page in this readahead request.
1327 : * @rac: The readahead request.
1328 : */
1329 : static inline pgoff_t readahead_index(struct readahead_control *rac)
1330 : {
1331 : return rac->_index;
1332 : }
1333 :
1334 : /**
1335 : * readahead_count - The number of pages in this readahead request.
1336 : * @rac: The readahead request.
1337 : */
1338 : static inline unsigned int readahead_count(struct readahead_control *rac)
1339 : {
1340 : return rac->_nr_pages;
1341 : }
1342 :
1343 : /**
1344 : * readahead_batch_length - The number of bytes in the current batch.
1345 : * @rac: The readahead request.
1346 : */
1347 : static inline size_t readahead_batch_length(struct readahead_control *rac)
1348 : {
1349 : return rac->_batch_count * PAGE_SIZE;
1350 : }
1351 :
1352 : static inline unsigned long dir_pages(struct inode *inode)
1353 : {
1354 : return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
1355 : PAGE_SHIFT;
1356 : }
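/*
 * Worked example: with 4KiB pages, i_size == 8193 rounds up to
 * (8193 + 4095) >> 12 == 3 directory pages.
 */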
1357 :
1358 : /**
1359 : * folio_mkwrite_check_truncate - check if folio was truncated
1360 : * @folio: the folio to check
1361 : * @inode: the inode to check the folio against
1362 : *
1363 : * Return: the number of bytes in the folio up to EOF,
1364 : * or -EFAULT if the folio was truncated.
1365 : */
1366 : static inline ssize_t folio_mkwrite_check_truncate(struct folio *folio,
1367 : struct inode *inode)
1368 : {
1369 : loff_t size = i_size_read(inode);
1370 : pgoff_t index = size >> PAGE_SHIFT;
1371 : size_t offset = offset_in_folio(folio, size);
1372 :
1373 : if (!folio->mapping)
1374 : return -EFAULT;
1375 :
1376 : /* folio is wholly inside EOF */
1377 : if (folio_next_index(folio) - 1 < index)
1378 : return folio_size(folio);
1379 : /* folio is wholly past EOF */
1380 : if (folio->index > index || !offset)
1381 : return -EFAULT;
1382 : /* folio is partially inside EOF */
1383 : return offset;
1384 : }
1385 :
1386 : /**
1387 : * page_mkwrite_check_truncate - check if page was truncated
1388 : * @page: the page to check
1389 : * @inode: the inode to check the page against
1390 : *
1391 : * Return: the number of bytes in the page up to EOF,
1392 : * or -EFAULT if the page was truncated.
1393 : */
1394 : static inline int page_mkwrite_check_truncate(struct page *page,
1395 : struct inode *inode)
1396 : {
1397 : loff_t size = i_size_read(inode);
1398 : pgoff_t index = size >> PAGE_SHIFT;
1399 : int offset = offset_in_page(size);
1400 :
1401 : if (page->mapping != inode->i_mapping)
1402 : return -EFAULT;
1403 :
1404 : /* page is wholly inside EOF */
1405 : if (page->index < index)
1406 : return PAGE_SIZE;
1407 : /* page is wholly past EOF */
1408 : if (page->index > index || !offset)
1409 : return -EFAULT;
1410 : /* page is partially inside EOF */
1411 : return offset;
1412 : }
1413 :
1414 : /**
1415 : * i_blocks_per_folio - How many blocks fit in this folio.
1416 : * @inode: The inode which contains the blocks.
1417 : * @folio: The folio.
1418 : *
1419 : * If the block size is larger than the size of this folio, return zero.
1420 : *
1421 : * Context: The caller should hold a refcount on the folio to prevent it
1422 : * from being split.
1423 : * Return: The number of filesystem blocks covered by this folio.
1424 : */
1425 : static inline
1426 : unsigned int i_blocks_per_folio(struct inode *inode, struct folio *folio)
1427 : {
1428 : return folio_size(folio) >> inode->i_blkbits;
1429 : }
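/*
 * Worked example: a 16KiB folio on a filesystem with 1KiB blocks
 * (i_blkbits == 10) covers 16384 >> 10 == 16 blocks; a 4KiB folio with
 * 64KiB blocks (i_blkbits == 16) yields zero.
 */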
1430 :
1431 : static inline
1432 : unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
1433 : {
1434 : return i_blocks_per_folio(inode, page_folio(page));
1435 : }
1436 : #endif /* _LINUX_PAGEMAP_H */
|