Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * fs/fs-writeback.c
4 : *
5 : * Copyright (C) 2002, Linus Torvalds.
6 : *
7 : * Contains all the functions related to writing back and waiting
8 : * upon dirty inodes against superblocks, and writing back dirty
9 : * pages against inodes. ie: data writeback. Writeout of the
10 : * inode itself is not handled here.
11 : *
12 : * 10Apr2002 Andrew Morton
13 : * Split out of fs/inode.c
14 : * Additions for address_space-based writeback
15 : */
16 :
17 : #include <linux/kernel.h>
18 : #include <linux/export.h>
19 : #include <linux/spinlock.h>
20 : #include <linux/slab.h>
21 : #include <linux/sched.h>
22 : #include <linux/fs.h>
23 : #include <linux/mm.h>
24 : #include <linux/pagemap.h>
25 : #include <linux/kthread.h>
26 : #include <linux/writeback.h>
27 : #include <linux/blkdev.h>
28 : #include <linux/backing-dev.h>
29 : #include <linux/tracepoint.h>
30 : #include <linux/device.h>
31 : #include <linux/memcontrol.h>
32 : #include "internal.h"
33 :
34 : /*
35 : * 4MB minimal write chunk size
36 : */
37 : #define MIN_WRITEBACK_PAGES (4096UL >> (PAGE_SHIFT - 10))
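
/*
 * For example, the constant is 4096 KiB and (PAGE_SHIFT - 10) converts
 * KiB to pages: with 4 KiB pages (PAGE_SHIFT == 12) this is 4096 >> 2 ==
 * 1024 pages, with 64 KiB pages (PAGE_SHIFT == 16) it is 4096 >> 6 == 64
 * pages, i.e. 4MB worth of data either way.
 */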
38 :
39 : /*
40 : * Passed into wb_writeback(), essentially a subset of writeback_control
41 : */
42 : struct wb_writeback_work {
43 : long nr_pages;
44 : struct super_block *sb;
45 : enum writeback_sync_modes sync_mode;
46 : unsigned int tagged_writepages:1;
47 : unsigned int for_kupdate:1;
48 : unsigned int range_cyclic:1;
49 : unsigned int for_background:1;
50 : unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
51 : unsigned int auto_free:1; /* free on completion */
52 : enum wb_reason reason; /* why was writeback initiated? */
53 :
54 : struct list_head list; /* pending work list */
55 : struct wb_completion *done; /* set if the caller waits */
56 : };
57 :
58 : /*
59 : * If an inode is constantly having its pages dirtied, but then the
60 : * updates stop dirtytime_expire_interval seconds in the past, it's
61 : * possible for the worst case time between when an inode has its
62 : * timestamps updated and when they finally get written out to be two
63 : * dirtytime_expire_intervals. We set the default to 12 hours (in
64 : * seconds), which means most of the time inodes will have their
65 : * timestamps written to disk after 12 hours, but in the worst case a
66 : * few inodes might not have their timestamps updated for 24 hours.
67 : */
68 : unsigned int dirtytime_expire_interval = 12 * 60 * 60;
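
/*
 * Worked example of the worst case above, under the 12 hour default: an
 * inode whose timestamps are dirtied just after one expiration cutoff only
 * becomes expired roughly one interval later, and the pass that finally
 * writes it may itself run up to another interval after that, for a total
 * of about 2 * 12 = 24 hours.
 */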
69 :
70 : static inline struct inode *wb_inode(struct list_head *head)
71 : {
72 0 : return list_entry(head, struct inode, i_io_list);
73 : }
74 :
75 : /*
76 : * Include the creation of the trace points after defining the
77 : * wb_writeback_work structure and inline functions so that the definition
78 : * remains local to this file.
79 : */
80 : #define CREATE_TRACE_POINTS
81 : #include <trace/events/writeback.h>
82 :
83 : EXPORT_TRACEPOINT_SYMBOL_GPL(wbc_writepage);
84 :
85 0 : static bool wb_io_lists_populated(struct bdi_writeback *wb)
86 : {
87 0 : if (wb_has_dirty_io(wb)) {
88 : return false;
89 : } else {
90 0 : set_bit(WB_has_dirty_io, &wb->state);
91 0 : WARN_ON_ONCE(!wb->avg_write_bandwidth);
92 0 : atomic_long_add(wb->avg_write_bandwidth,
93 0 : &wb->bdi->tot_write_bandwidth);
94 0 : return true;
95 : }
96 : }
97 :
98 0 : static void wb_io_lists_depopulated(struct bdi_writeback *wb)
99 : {
100 0 : if (wb_has_dirty_io(wb) && list_empty(&wb->b_dirty) &&
101 0 : list_empty(&wb->b_io) && list_empty(&wb->b_more_io)) {
102 0 : clear_bit(WB_has_dirty_io, &wb->state);
103 0 : WARN_ON_ONCE(atomic_long_sub_return(wb->avg_write_bandwidth,
104 : &wb->bdi->tot_write_bandwidth) < 0);
105 : }
106 0 : }
107 :
108 : /**
109 : * inode_io_list_move_locked - move an inode onto a bdi_writeback IO list
110 : * @inode: inode to be moved
111 : * @wb: target bdi_writeback
112 : * @head: one of @wb->b_{dirty|io|more_io|dirty_time}
113 : *
114 : * Move @inode->i_io_list to @head of @wb and set %WB_has_dirty_io.
115 : * Returns %true if @inode is the first occupant of the !dirty_time IO
116 : * lists; otherwise, %false.
117 : */
118 0 : static bool inode_io_list_move_locked(struct inode *inode,
119 : struct bdi_writeback *wb,
120 : struct list_head *head)
121 : {
122 : assert_spin_locked(&wb->list_lock);
123 :
124 0 : list_move(&inode->i_io_list, head);
125 :
126 : /* dirty_time doesn't count as dirty_io until expiration */
127 0 : if (head != &wb->b_dirty_time)
128 0 : return wb_io_lists_populated(wb);
129 :
130 0 : wb_io_lists_depopulated(wb);
131 0 : return false;
132 : }
133 :
134 0 : static void wb_wakeup(struct bdi_writeback *wb)
135 : {
136 0 : spin_lock_bh(&wb->work_lock);
137 0 : if (test_bit(WB_registered, &wb->state))
138 0 : mod_delayed_work(bdi_wq, &wb->dwork, 0);
139 0 : spin_unlock_bh(&wb->work_lock);
140 0 : }
141 :
142 0 : static void finish_writeback_work(struct bdi_writeback *wb,
143 : struct wb_writeback_work *work)
144 : {
145 0 : struct wb_completion *done = work->done;
146 :
147 0 : if (work->auto_free)
148 0 : kfree(work);
149 0 : if (done) {
150 0 : wait_queue_head_t *waitq = done->waitq;
151 :
152 : /* @done can't be accessed after the following dec */
153 0 : if (atomic_dec_and_test(&done->cnt))
154 0 : wake_up_all(waitq);
155 : }
156 0 : }
157 :
158 0 : static void wb_queue_work(struct bdi_writeback *wb,
159 : struct wb_writeback_work *work)
160 : {
161 0 : trace_writeback_queue(wb, work);
162 :
163 0 : if (work->done)
164 0 : atomic_inc(&work->done->cnt);
165 :
166 0 : spin_lock_bh(&wb->work_lock);
167 :
168 0 : if (test_bit(WB_registered, &wb->state)) {
169 0 : list_add_tail(&work->list, &wb->work_list);
170 0 : mod_delayed_work(bdi_wq, &wb->dwork, 0);
171 : } else
172 0 : finish_writeback_work(wb, work);
173 :
174 0 : spin_unlock_bh(&wb->work_lock);
175 0 : }
176 :
177 : /**
178 : * wb_wait_for_completion - wait for completion of bdi_writeback_works
179 : * @done: target wb_completion
180 : *
181 : * Wait for one or more work items issued to @bdi with their ->done field
182 : * set to @done, which should have been initialized with
183 : * DEFINE_WB_COMPLETION(). This function returns after all such work items
184 : * are completed. Work items which are waited upon aren't freed
185 : * automatically on completion.
186 : */
187 0 : void wb_wait_for_completion(struct wb_completion *done)
188 : {
189 0 : atomic_dec(&done->cnt); /* put down the initial count */
190 0 : wait_event(*done->waitq, !atomic_read(&done->cnt));
191 0 : }
192 :
193 : #ifdef CONFIG_CGROUP_WRITEBACK
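/*
 * A typical usage sketch of the completion interface above, mirroring the
 * on-stack fallback path in bdi_split_work_to_wbs() below; the work setup
 * shown is abridged and illustrative:
 *
 *	DEFINE_WB_COMPLETION(done, bdi);
 *	struct wb_writeback_work work = { .done = &done, ... };
 *
 *	wb_queue_work(wb, &work);	(wb_queue_work() bumps done->cnt)
 *	wb_wait_for_completion(&done);	(drops the initial count and waits)
 */
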
194 :
195 : /*
196 : * Parameters for foreign inode detection, see wbc_detach_inode() to see
197 : * how they're used.
198 : *
199 : * These parameters are inherently heuristic as the detection target
200 : * itself is fuzzy. All we want to do is detach an inode from its
201 : * current owner if it's being written to by some other cgroups too much.
202 : *
203 : * The current cgroup writeback is built on the assumption that multiple
204 : * cgroups writing to the same inode concurrently is very rare and a mode
205 : * of operation which isn't well supported. As such, the goal is to avoid
206 : * taking too long when a different cgroup takes over an inode, while also
207 : * avoiding overly aggressive flip-flops from occasional foreign writes.
208 : *
209 : * We record, very roughly, 2s worth of IO time history and if more than
210 : * half of that is foreign, trigger the switch. The recording is quantized
211 : * to 16 slots. To avoid tiny writes from swinging the decision too much,
212 : * writes smaller than 1/8 of avg size are ignored.
213 : */
214 : #define WB_FRN_TIME_SHIFT 13 /* 1s = 2^13, up to 8 secs w/ 16bit */
215 : #define WB_FRN_TIME_AVG_SHIFT 3 /* avg = avg * 7/8 + new * 1/8 */
216 : #define WB_FRN_TIME_CUT_DIV 8 /* ignore rounds < avg / 8 */
217 : #define WB_FRN_TIME_PERIOD (2 * (1 << WB_FRN_TIME_SHIFT)) /* 2s */
218 :
219 : #define WB_FRN_HIST_SLOTS 16 /* inode->i_wb_frn_history is 16bit */
220 : #define WB_FRN_HIST_UNIT (WB_FRN_TIME_PERIOD / WB_FRN_HIST_SLOTS)
221 : /* each slot's duration is 2s / 16 */
222 : #define WB_FRN_HIST_THR_SLOTS (WB_FRN_HIST_SLOTS / 2)
223 : /* if foreign slots >= 8, switch */
224 : #define WB_FRN_HIST_MAX_SLOTS (WB_FRN_HIST_THR_SLOTS / 2 + 1)
225 : /* one round can affect up to 5 slots */
226 : #define WB_FRN_MAX_IN_FLIGHT 1024 /* don't queue too many concurrently */
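
/*
 * Worked example of the above: WB_FRN_HIST_UNIT is 2 * (1 << 13) / 16 ==
 * 1024 time units, i.e. one eighth of a second per history slot. A round
 * whose winner consumed about 0.4s of IO time (~3277 units at 8192 units
 * per second) shifts DIV_ROUND_UP(3277, 1024) == 4 slots into
 * i_wb_frn_history (capped at WB_FRN_HIST_MAX_SLOTS), as ones if the
 * winner was foreign and zeros otherwise; once more than half of the 16
 * slots are foreign, wbc_detach_inode() queues a switch.
 */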
227 :
228 : /*
229 : * Maximum inodes per isw. A specific value has been chosen to make
230 : * struct inode_switch_wbs_context fit into a 1024 byte kmalloc.
231 : */
232 : #define WB_MAX_INODES_PER_ISW ((1024UL - sizeof(struct inode_switch_wbs_context)) \
233 : / sizeof(struct inode *))
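
/*
 * With 8-byte inode pointers this leaves room for on the order of a
 * hundred inodes per switch context, so a single kmalloc-1024 allocation
 * can batch many membership switches (the exact count depends on the size
 * of struct inode_switch_wbs_context).
 */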
234 :
235 : static atomic_t isw_nr_in_flight = ATOMIC_INIT(0);
236 : static struct workqueue_struct *isw_wq;
237 :
238 : void __inode_attach_wb(struct inode *inode, struct page *page)
239 : {
240 : struct backing_dev_info *bdi = inode_to_bdi(inode);
241 : struct bdi_writeback *wb = NULL;
242 :
243 : if (inode_cgwb_enabled(inode)) {
244 : struct cgroup_subsys_state *memcg_css;
245 :
246 : if (page) {
247 : memcg_css = mem_cgroup_css_from_page(page);
248 : wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
249 : } else {
250 : /* must pin memcg_css, see wb_get_create() */
251 : memcg_css = task_get_css(current, memory_cgrp_id);
252 : wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
253 : css_put(memcg_css);
254 : }
255 : }
256 :
257 : if (!wb)
258 : wb = &bdi->wb;
259 :
260 : /*
261 : * There may be multiple instances of this function racing to
262 : * update the same inode. Use cmpxchg() to tell the winner.
263 : */
264 : if (unlikely(cmpxchg(&inode->i_wb, NULL, wb)))
265 : wb_put(wb);
266 : }
267 : EXPORT_SYMBOL_GPL(__inode_attach_wb);
268 :
269 : /**
270 : * inode_cgwb_move_to_attached - put the inode onto wb->b_attached list
271 : * @inode: inode of interest with i_lock held
272 : * @wb: target bdi_writeback
273 : *
274 : * Remove the inode from wb's io lists and, if necessary, put it onto the
275 : * b_attached list. Only inodes attached to cgwbs are kept on this list.
276 : */
277 : static void inode_cgwb_move_to_attached(struct inode *inode,
278 : struct bdi_writeback *wb)
279 : {
280 : assert_spin_locked(&wb->list_lock);
281 : assert_spin_locked(&inode->i_lock);
282 :
283 : inode->i_state &= ~I_SYNC_QUEUED;
284 : if (wb != &wb->bdi->wb)
285 : list_move(&inode->i_io_list, &wb->b_attached);
286 : else
287 : list_del_init(&inode->i_io_list);
288 : wb_io_lists_depopulated(wb);
289 : }
290 :
291 : /**
292 : * locked_inode_to_wb_and_lock_list - determine a locked inode's wb and lock it
293 : * @inode: inode of interest with i_lock held
294 : *
295 : * Returns @inode's wb with its list_lock held. @inode->i_lock must be
296 : * held on entry and is released on return. The returned wb is guaranteed
297 : * to stay @inode's associated wb until its list_lock is released.
298 : */
299 : static struct bdi_writeback *
300 : locked_inode_to_wb_and_lock_list(struct inode *inode)
301 : __releases(&inode->i_lock)
302 : __acquires(&wb->list_lock)
303 : {
304 : while (true) {
305 : struct bdi_writeback *wb = inode_to_wb(inode);
306 :
307 : /*
308 : * inode_to_wb() association is protected by both
309 : * @inode->i_lock and @wb->list_lock but list_lock nests
310 : * outside i_lock. Drop i_lock and verify that the
311 : * association hasn't changed after acquiring list_lock.
312 : */
313 : wb_get(wb);
314 : spin_unlock(&inode->i_lock);
315 : spin_lock(&wb->list_lock);
316 :
317 : /* i_wb may have changed in between, can't use inode_to_wb() */
318 : if (likely(wb == inode->i_wb)) {
319 : wb_put(wb); /* @inode already has ref */
320 : return wb;
321 : }
322 :
323 : spin_unlock(&wb->list_lock);
324 : wb_put(wb);
325 : cpu_relax();
326 : spin_lock(&inode->i_lock);
327 : }
328 : }
329 :
330 : /**
331 : * inode_to_wb_and_lock_list - determine an inode's wb and lock it
332 : * @inode: inode of interest
333 : *
334 : * Same as locked_inode_to_wb_and_lock_list() but @inode->i_lock isn't held
335 : * on entry.
336 : */
337 : static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
338 : __acquires(&wb->list_lock)
339 : {
340 : spin_lock(&inode->i_lock);
341 : return locked_inode_to_wb_and_lock_list(inode);
342 : }
343 :
344 : struct inode_switch_wbs_context {
345 : struct rcu_work work;
346 :
347 : /*
348 : * Multiple inodes can be switched at once. The switching procedure
349 : * consists of two parts, separated by an RCU grace period. To make
350 : * sure that the second part is executed for each inode that went through
351 : * the first part, all inode pointers are placed into a NULL-terminated
352 : * array embedded into struct inode_switch_wbs_context. Otherwise
353 : * an inode could be left in an inconsistent state.
354 : */
355 : struct bdi_writeback *new_wb;
356 : struct inode *inodes[];
357 : };
358 :
359 : static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi)
360 : {
361 : down_write(&bdi->wb_switch_rwsem);
362 : }
363 :
364 : static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi)
365 : {
366 : up_write(&bdi->wb_switch_rwsem);
367 : }
368 :
369 : static bool inode_do_switch_wbs(struct inode *inode,
370 : struct bdi_writeback *old_wb,
371 : struct bdi_writeback *new_wb)
372 : {
373 : struct address_space *mapping = inode->i_mapping;
374 : XA_STATE(xas, &mapping->i_pages, 0);
375 : struct folio *folio;
376 : bool switched = false;
377 :
378 : spin_lock(&inode->i_lock);
379 : xa_lock_irq(&mapping->i_pages);
380 :
381 : /*
382 : * Once I_FREEING or I_WILL_FREE are visible under i_lock, the eviction
383 : * path owns the inode and we shouldn't modify ->i_io_list.
384 : */
385 : if (unlikely(inode->i_state & (I_FREEING | I_WILL_FREE)))
386 : goto skip_switch;
387 :
388 : trace_inode_switch_wbs(inode, old_wb, new_wb);
389 :
390 : /*
391 : * Count and transfer stats. Note that PAGECACHE_TAG_DIRTY points
392 : * to possibly dirty folios while PAGECACHE_TAG_WRITEBACK points to
393 : * folios actually under writeback.
394 : */
395 : xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_DIRTY) {
396 : if (folio_test_dirty(folio)) {
397 : long nr = folio_nr_pages(folio);
398 : wb_stat_mod(old_wb, WB_RECLAIMABLE, -nr);
399 : wb_stat_mod(new_wb, WB_RECLAIMABLE, nr);
400 : }
401 : }
402 :
403 : xas_set(&xas, 0);
404 : xas_for_each_marked(&xas, folio, ULONG_MAX, PAGECACHE_TAG_WRITEBACK) {
405 : long nr = folio_nr_pages(folio);
406 : WARN_ON_ONCE(!folio_test_writeback(folio));
407 : wb_stat_mod(old_wb, WB_WRITEBACK, -nr);
408 : wb_stat_mod(new_wb, WB_WRITEBACK, nr);
409 : }
410 :
411 : if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
412 : atomic_dec(&old_wb->writeback_inodes);
413 : atomic_inc(&new_wb->writeback_inodes);
414 : }
415 :
416 : wb_get(new_wb);
417 :
418 : /*
419 : * Transfer to @new_wb's IO list if necessary. If the @inode is dirty,
420 : * the specific list @inode was on is ignored and the @inode is put on
421 : * ->b_dirty which is always correct including from ->b_dirty_time.
422 : * The transfer preserves @inode->dirtied_when ordering. If the @inode
423 : * was clean, it means it was on the b_attached list, so move it onto
424 : * the b_attached list of @new_wb.
425 : */
426 : if (!list_empty(&inode->i_io_list)) {
427 : inode->i_wb = new_wb;
428 :
429 : if (inode->i_state & I_DIRTY_ALL) {
430 : struct inode *pos;
431 :
432 : list_for_each_entry(pos, &new_wb->b_dirty, i_io_list)
433 : if (time_after_eq(inode->dirtied_when,
434 : pos->dirtied_when))
435 : break;
436 : inode_io_list_move_locked(inode, new_wb,
437 : pos->i_io_list.prev);
438 : } else {
439 : inode_cgwb_move_to_attached(inode, new_wb);
440 : }
441 : } else {
442 : inode->i_wb = new_wb;
443 : }
444 :
445 : /* ->i_wb_frn updates may race wbc_detach_inode() but it doesn't matter */
446 : inode->i_wb_frn_winner = 0;
447 : inode->i_wb_frn_avg_time = 0;
448 : inode->i_wb_frn_history = 0;
449 : switched = true;
450 : skip_switch:
451 : /*
452 : * Paired with load_acquire in unlocked_inode_to_wb_begin() and
453 : * ensures that the new wb is visible if they see !I_WB_SWITCH.
454 : */
455 : smp_store_release(&inode->i_state, inode->i_state & ~I_WB_SWITCH);
456 :
457 : xa_unlock_irq(&mapping->i_pages);
458 : spin_unlock(&inode->i_lock);
459 :
460 : return switched;
461 : }
462 :
463 : static void inode_switch_wbs_work_fn(struct work_struct *work)
464 : {
465 : struct inode_switch_wbs_context *isw =
466 : container_of(to_rcu_work(work), struct inode_switch_wbs_context, work);
467 : struct backing_dev_info *bdi = inode_to_bdi(isw->inodes[0]);
468 : struct bdi_writeback *old_wb = isw->inodes[0]->i_wb;
469 : struct bdi_writeback *new_wb = isw->new_wb;
470 : unsigned long nr_switched = 0;
471 : struct inode **inodep;
472 :
473 : /*
474 : * If @inode switches cgwb membership while sync_inodes_sb() is
475 : * being issued, sync_inodes_sb() might miss it. Synchronize.
476 : */
477 : down_read(&bdi->wb_switch_rwsem);
478 :
479 : /*
480 : * By the time control reaches here, RCU grace period has passed
481 : * since I_WB_SWITCH assertion and all wb stat update transactions
482 : * between unlocked_inode_to_wb_begin/end() are guaranteed to be
483 : * synchronizing against the i_pages lock.
484 : *
485 : * Grabbing old_wb->list_lock, inode->i_lock and the i_pages lock
486 : * gives us exclusion against all wb related operations on @inode
487 : * including IO list manipulations and stat updates.
488 : */
489 : if (old_wb < new_wb) {
490 : spin_lock(&old_wb->list_lock);
491 : spin_lock_nested(&new_wb->list_lock, SINGLE_DEPTH_NESTING);
492 : } else {
493 : spin_lock(&new_wb->list_lock);
494 : spin_lock_nested(&old_wb->list_lock, SINGLE_DEPTH_NESTING);
495 : }
496 :
497 : for (inodep = isw->inodes; *inodep; inodep++) {
498 : WARN_ON_ONCE((*inodep)->i_wb != old_wb);
499 : if (inode_do_switch_wbs(*inodep, old_wb, new_wb))
500 : nr_switched++;
501 : }
502 :
503 : spin_unlock(&new_wb->list_lock);
504 : spin_unlock(&old_wb->list_lock);
505 :
506 : up_read(&bdi->wb_switch_rwsem);
507 :
508 : if (nr_switched) {
509 : wb_wakeup(new_wb);
510 : wb_put_many(old_wb, nr_switched);
511 : }
512 :
513 : for (inodep = isw->inodes; *inodep; inodep++)
514 : iput(*inodep);
515 : wb_put(new_wb);
516 : kfree(isw);
517 : atomic_dec(&isw_nr_in_flight);
518 : }
519 :
520 : static bool inode_prepare_wbs_switch(struct inode *inode,
521 : struct bdi_writeback *new_wb)
522 : {
523 : /*
524 : * Paired with smp_mb() in cgroup_writeback_umount().
525 : * isw_nr_in_flight must be increased before checking SB_ACTIVE and
526 : * grabbing an inode, otherwise isw_nr_in_flight can be observed as 0
527 : * in cgroup_writeback_umount() and the isw_wq will not be flushed.
528 : */
529 : smp_mb();
530 :
531 : if (IS_DAX(inode))
532 : return false;
533 :
534 : /* while holding I_WB_SWITCH, no one else can update the association */
535 : spin_lock(&inode->i_lock);
536 : if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
537 : inode->i_state & (I_WB_SWITCH | I_FREEING | I_WILL_FREE) ||
538 : inode_to_wb(inode) == new_wb) {
539 : spin_unlock(&inode->i_lock);
540 : return false;
541 : }
542 : inode->i_state |= I_WB_SWITCH;
543 : __iget(inode);
544 : spin_unlock(&inode->i_lock);
545 :
546 : return true;
547 : }
548 :
549 : /**
550 : * inode_switch_wbs - change the wb association of an inode
551 : * @inode: target inode
552 : * @new_wb_id: ID of the new wb
553 : *
554 : * Switch @inode's wb association to the wb identified by @new_wb_id. The
555 : * switching is performed asynchronously and may fail silently.
556 : */
557 : static void inode_switch_wbs(struct inode *inode, int new_wb_id)
558 : {
559 : struct backing_dev_info *bdi = inode_to_bdi(inode);
560 : struct cgroup_subsys_state *memcg_css;
561 : struct inode_switch_wbs_context *isw;
562 :
563 : /* noop if seems to be already in progress */
564 : if (inode->i_state & I_WB_SWITCH)
565 : return;
566 :
567 : /* avoid queueing a new switch if too many are already in flight */
568 : if (atomic_read(&isw_nr_in_flight) > WB_FRN_MAX_IN_FLIGHT)
569 : return;
570 :
571 : isw = kzalloc(struct_size(isw, inodes, 2), GFP_ATOMIC);
572 : if (!isw)
573 : return;
574 :
575 : atomic_inc(&isw_nr_in_flight);
576 :
577 : /* find and pin the new wb */
578 : rcu_read_lock();
579 : memcg_css = css_from_id(new_wb_id, &memory_cgrp_subsys);
580 : if (memcg_css && !css_tryget(memcg_css))
581 : memcg_css = NULL;
582 : rcu_read_unlock();
583 : if (!memcg_css)
584 : goto out_free;
585 :
586 : isw->new_wb = wb_get_create(bdi, memcg_css, GFP_ATOMIC);
587 : css_put(memcg_css);
588 : if (!isw->new_wb)
589 : goto out_free;
590 :
591 : if (!inode_prepare_wbs_switch(inode, isw->new_wb))
592 : goto out_free;
593 :
594 : isw->inodes[0] = inode;
595 :
596 : /*
597 : * In addition to synchronizing among switchers, I_WB_SWITCH tells
598 : * the RCU protected stat update paths to grab the i_pages
599 : * lock so that stat transfer can synchronize against them.
600 : * Let's continue after I_WB_SWITCH is guaranteed to be visible.
601 : */
602 : INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
603 : queue_rcu_work(isw_wq, &isw->work);
604 : return;
605 :
606 : out_free:
607 : atomic_dec(&isw_nr_in_flight);
608 : if (isw->new_wb)
609 : wb_put(isw->new_wb);
610 : kfree(isw);
611 : }
612 :
613 : /**
614 : * cleanup_offline_cgwb - detach associated inodes
615 : * @wb: target wb
616 : *
617 : * Switch all inodes attached to @wb to a nearest living ancestor's wb in order
618 : * to eventually release the dying @wb. Returns %true if not all inodes were
619 : * switched and the function has to be restarted.
620 : */
621 : bool cleanup_offline_cgwb(struct bdi_writeback *wb)
622 : {
623 : struct cgroup_subsys_state *memcg_css;
624 : struct inode_switch_wbs_context *isw;
625 : struct inode *inode;
626 : int nr;
627 : bool restart = false;
628 :
629 : isw = kzalloc(struct_size(isw, inodes, WB_MAX_INODES_PER_ISW),
630 : GFP_KERNEL);
631 : if (!isw)
632 : return restart;
633 :
634 : atomic_inc(&isw_nr_in_flight);
635 :
636 : for (memcg_css = wb->memcg_css->parent; memcg_css;
637 : memcg_css = memcg_css->parent) {
638 : isw->new_wb = wb_get_create(wb->bdi, memcg_css, GFP_KERNEL);
639 : if (isw->new_wb)
640 : break;
641 : }
642 : if (unlikely(!isw->new_wb))
643 : isw->new_wb = &wb->bdi->wb; /* wb_get() is noop for bdi's wb */
644 :
645 : nr = 0;
646 : spin_lock(&wb->list_lock);
647 : list_for_each_entry(inode, &wb->b_attached, i_io_list) {
648 : if (!inode_prepare_wbs_switch(inode, isw->new_wb))
649 : continue;
650 :
651 : isw->inodes[nr++] = inode;
652 :
653 : if (nr >= WB_MAX_INODES_PER_ISW - 1) {
654 : restart = true;
655 : break;
656 : }
657 : }
658 : spin_unlock(&wb->list_lock);
659 :
660 : /* no attached inodes? bail out */
661 : if (nr == 0) {
662 : atomic_dec(&isw_nr_in_flight);
663 : wb_put(isw->new_wb);
664 : kfree(isw);
665 : return restart;
666 : }
667 :
668 : /*
669 : * In addition to synchronizing among switchers, I_WB_SWITCH tells
670 : * the RCU protected stat update paths to grab the i_pages
671 : * lock so that stat transfer can synchronize against them.
672 : * Let's continue after I_WB_SWITCH is guaranteed to be visible.
673 : */
674 : INIT_RCU_WORK(&isw->work, inode_switch_wbs_work_fn);
675 : queue_rcu_work(isw_wq, &isw->work);
676 :
677 : return restart;
678 : }
679 :
680 : /**
681 : * wbc_attach_and_unlock_inode - associate wbc with target inode and unlock it
682 : * @wbc: writeback_control of interest
683 : * @inode: target inode
684 : *
685 : * @inode is locked and about to be written back under the control of @wbc.
686 : * Record @inode's writeback context into @wbc and unlock the i_lock. On
687 : * writeback completion, wbc_detach_inode() should be called. This is used
688 : * to track the cgroup writeback context.
689 : */
690 : void wbc_attach_and_unlock_inode(struct writeback_control *wbc,
691 : struct inode *inode)
692 : {
693 : if (!inode_cgwb_enabled(inode)) {
694 : spin_unlock(&inode->i_lock);
695 : return;
696 : }
697 :
698 : wbc->wb = inode_to_wb(inode);
699 : wbc->inode = inode;
700 :
701 : wbc->wb_id = wbc->wb->memcg_css->id;
702 : wbc->wb_lcand_id = inode->i_wb_frn_winner;
703 : wbc->wb_tcand_id = 0;
704 : wbc->wb_bytes = 0;
705 : wbc->wb_lcand_bytes = 0;
706 : wbc->wb_tcand_bytes = 0;
707 :
708 : wb_get(wbc->wb);
709 : spin_unlock(&inode->i_lock);
710 :
711 : /*
712 : * A dying wb indicates that either the blkcg associated with the
713 : * memcg changed or the associated memcg is dying. In the first
714 : * case, a replacement wb should already be available and we should
715 : * refresh the wb immediately. In the second case, trying to
716 : * refresh will keep failing.
717 : */
718 : if (unlikely(wb_dying(wbc->wb) && !css_is_dying(wbc->wb->memcg_css)))
719 : inode_switch_wbs(inode, wbc->wb_id);
720 : }
721 : EXPORT_SYMBOL_GPL(wbc_attach_and_unlock_inode);
722 :
723 : /**
724 : * wbc_detach_inode - disassociate wbc from inode and perform foreign detection
725 : * @wbc: writeback_control of the just finished writeback
726 : *
727 : * To be called after a writeback attempt of an inode finishes and undoes
728 : * wbc_attach_and_unlock_inode(). Can be called under any context.
729 : *
730 : * As concurrent write sharing of an inode is expected to be very rare and
731 : * memcg only tracks page ownership on a first-use basis, severely confining
732 : * the usefulness of such sharing, cgroup writeback tracks ownership
733 : * per-inode. While the support for concurrent write sharing of an inode
734 : * is deemed unnecessary, an inode being written to by different cgroups at
735 : * different points in time is a lot more common, and, more importantly,
736 : * charging only by first-use can too readily lead to grossly incorrect
737 : * behaviors (single foreign page can lead to gigabytes of writeback to be
738 : * incorrectly attributed).
739 : *
740 : * To resolve this issue, cgroup writeback detects the majority dirtier of
741 : * an inode and transfers the ownership to it. To avoid unnecessary
742 : * oscillation, the detection mechanism keeps track of history and gives
743 : * out the switch verdict only if the foreign usage pattern is stable over
744 : * a certain amount of time and/or writeback attempts.
745 : *
746 : * On each writeback attempt, @wbc tries to detect the majority writer
747 : * using Boyer-Moore majority vote algorithm. In addition to the byte
748 : * count from the majority voting, it also counts the bytes written for the
749 : * current wb and the last round's winner wb (max of last round's current
750 : * wb, the winner from two rounds ago, and the last round's majority
751 : * candidate). Keeping track of the historical winner helps the algorithm
752 : * to semi-reliably detect the most active writer even when it's not the
753 : * absolute majority.
754 : *
755 : * Once the winner of the round is determined, whether the winner is
756 : * foreign or not and how much IO time the round consumed is recorded in
757 : * inode->i_wb_frn_history. If the amount of recorded foreign IO time is
758 : * over a certain threshold, the switch verdict is given.
759 : */
760 : void wbc_detach_inode(struct writeback_control *wbc)
761 : {
762 : struct bdi_writeback *wb = wbc->wb;
763 : struct inode *inode = wbc->inode;
764 : unsigned long avg_time, max_bytes, max_time;
765 : u16 history;
766 : int max_id;
767 :
768 : if (!wb)
769 : return;
770 :
771 : history = inode->i_wb_frn_history;
772 : avg_time = inode->i_wb_frn_avg_time;
773 :
774 : /* pick the winner of this round */
775 : if (wbc->wb_bytes >= wbc->wb_lcand_bytes &&
776 : wbc->wb_bytes >= wbc->wb_tcand_bytes) {
777 : max_id = wbc->wb_id;
778 : max_bytes = wbc->wb_bytes;
779 : } else if (wbc->wb_lcand_bytes >= wbc->wb_tcand_bytes) {
780 : max_id = wbc->wb_lcand_id;
781 : max_bytes = wbc->wb_lcand_bytes;
782 : } else {
783 : max_id = wbc->wb_tcand_id;
784 : max_bytes = wbc->wb_tcand_bytes;
785 : }
786 :
787 : /*
788 : * Calculate the amount of IO time the winner consumed and fold it
789 : * into the running average kept per inode. If the consumed IO
790 : * time is lower than avg / WB_FRN_TIME_CUT_DIV, ignore it for
791 : * deciding whether to switch or not. This is to prevent one-off
792 : * small dirtiers from skewing the verdict.
793 : */
794 : max_time = DIV_ROUND_UP((max_bytes >> PAGE_SHIFT) << WB_FRN_TIME_SHIFT,
795 : wb->avg_write_bandwidth);
796 : if (avg_time)
797 : avg_time += (max_time >> WB_FRN_TIME_AVG_SHIFT) -
798 : (avg_time >> WB_FRN_TIME_AVG_SHIFT);
799 : else
800 : avg_time = max_time; /* immediate catch up on first run */
801 :
802 : if (max_time >= avg_time / WB_FRN_TIME_CUT_DIV) {
803 : int slots;
804 :
805 : /*
806 : * The switch verdict is reached if foreign wb's consume
807 : * more than a certain proportion of IO time in a
808 : * WB_FRN_TIME_PERIOD. This is loosely tracked by 16 slot
809 : * history mask where each bit represents one sixteenth of
810 : * the period. Determine the number of slots to shift into
811 : * history from @max_time.
812 : */
813 : slots = min(DIV_ROUND_UP(max_time, WB_FRN_HIST_UNIT),
814 : (unsigned long)WB_FRN_HIST_MAX_SLOTS);
815 : history <<= slots;
816 : if (wbc->wb_id != max_id)
817 : history |= (1U << slots) - 1;
818 :
819 : if (history)
820 : trace_inode_foreign_history(inode, wbc, history);
821 :
822 : /*
823 : * Switch if the current wb isn't the consistent winner.
824 : * If there are multiple closely competing dirtiers, the
825 : * inode may switch across them repeatedly over time, which
826 : * is okay. The main goal is avoiding keeping an inode on
827 : * the wrong wb for an extended period of time.
828 : */
829 : if (hweight32(history) > WB_FRN_HIST_THR_SLOTS)
830 : inode_switch_wbs(inode, max_id);
831 : }
832 :
833 : /*
834 : * Multiple instances of this function may race to update the
835 : * following fields but we don't mind occasional inaccuracies.
836 : */
837 : inode->i_wb_frn_winner = max_id;
838 : inode->i_wb_frn_avg_time = min(avg_time, (unsigned long)U16_MAX);
839 : inode->i_wb_frn_history = history;
840 :
841 : wb_put(wbc->wb);
842 : wbc->wb = NULL;
843 : }
844 : EXPORT_SYMBOL_GPL(wbc_detach_inode);
845 :
846 : /**
847 : * wbc_account_cgroup_owner - account writeback to update inode cgroup ownership
848 : * @wbc: writeback_control of the writeback in progress
849 : * @page: page being written out
850 : * @bytes: number of bytes being written out
851 : *
852 : * @bytes from @page are about to be written out during the writeback
853 : * controlled by @wbc. Keep the book for foreign inode detection. See
854 : * wbc_detach_inode().
855 : */
856 : void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page,
857 : size_t bytes)
858 : {
859 : struct cgroup_subsys_state *css;
860 : int id;
861 :
862 : /*
863 : * pageout() path doesn't attach @wbc to the inode being written
864 : * out. This is intentional as we don't want the function to block
865 : * behind a slow cgroup. Ultimately, we want pageout() to kick off
866 : * regular writeback instead of writing things out itself.
867 : */
868 : if (!wbc->wb || wbc->no_cgroup_owner)
869 : return;
870 :
871 : css = mem_cgroup_css_from_page(page);
872 : /* dead cgroups shouldn't contribute to inode ownership arbitration */
873 : if (!(css->flags & CSS_ONLINE))
874 : return;
875 :
876 : id = css->id;
877 :
878 : if (id == wbc->wb_id) {
879 : wbc->wb_bytes += bytes;
880 : return;
881 : }
882 :
883 : if (id == wbc->wb_lcand_id)
884 : wbc->wb_lcand_bytes += bytes;
885 :
886 : /* Boyer-Moore majority vote algorithm */
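/*
 * Hypothetical example, assuming cgroups A and B are neither the current
 * wb nor the last round's winner: accounting 4k from A, then 4k from B,
 * then 8k from A makes A the candidate with 4k, cancels it back down to
 * 0, and then reinstates A with 8k. The candidate surviving this pairwise
 * cancellation is the likely majority writer of the round.
 */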
887 : if (!wbc->wb_tcand_bytes)
888 : wbc->wb_tcand_id = id;
889 : if (id == wbc->wb_tcand_id)
890 : wbc->wb_tcand_bytes += bytes;
891 : else
892 : wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
893 : }
894 : EXPORT_SYMBOL_GPL(wbc_account_cgroup_owner);
895 :
896 : /**
897 : * wb_split_bdi_pages - split nr_pages to write according to bandwidth
898 : * @wb: target bdi_writeback to split @nr_pages to
899 : * @nr_pages: number of pages to write for the whole bdi
900 : *
901 : * Split @wb's portion of @nr_pages according to @wb's write bandwidth in
902 : * relation to the total write bandwidth of all wb's w/ dirty inodes on
903 : * @wb->bdi.
904 : */
905 : static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
906 : {
907 : unsigned long this_bw = wb->avg_write_bandwidth;
908 : unsigned long tot_bw = atomic_long_read(&wb->bdi->tot_write_bandwidth);
909 :
910 : if (nr_pages == LONG_MAX)
911 : return LONG_MAX;
912 :
913 : /*
914 : * This may be called on clean wb's and proportional distribution
915 : * may not make sense, just use the original @nr_pages in those
916 : * cases. In general, we want to err on the side of writing more.
917 : */
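/*
 * e.g. if this wb has averaged 30MB/s out of a 120MB/s total for the bdi,
 * a 1000 page request is scaled to DIV_ROUND_UP(1000 * 30, 120) == 250
 * pages for this wb (the bandwidth figures are purely illustrative).
 */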
918 : if (!tot_bw || this_bw >= tot_bw)
919 : return nr_pages;
920 : else
921 : return DIV_ROUND_UP_ULL((u64)nr_pages * this_bw, tot_bw);
922 : }
923 :
924 : /**
925 : * bdi_split_work_to_wbs - split a wb_writeback_work to all wb's of a bdi
926 : * @bdi: target backing_dev_info
927 : * @base_work: wb_writeback_work to issue
928 : * @skip_if_busy: skip wb's which already have writeback in progress
929 : *
930 : * Split and issue @base_work to all wb's (bdi_writeback's) of @bdi which
931 : * have dirty inodes. If @base_work->nr_pages isn't %LONG_MAX, it's
932 : * distributed to the busy wbs according to each wb's proportion in the
933 : * total active write bandwidth of @bdi.
934 : */
935 : static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
936 : struct wb_writeback_work *base_work,
937 : bool skip_if_busy)
938 : {
939 : struct bdi_writeback *last_wb = NULL;
940 : struct bdi_writeback *wb = list_entry(&bdi->wb_list,
941 : struct bdi_writeback, bdi_node);
942 :
943 : might_sleep();
944 : restart:
945 : rcu_read_lock();
946 : list_for_each_entry_continue_rcu(wb, &bdi->wb_list, bdi_node) {
947 : DEFINE_WB_COMPLETION(fallback_work_done, bdi);
948 : struct wb_writeback_work fallback_work;
949 : struct wb_writeback_work *work;
950 : long nr_pages;
951 :
952 : if (last_wb) {
953 : wb_put(last_wb);
954 : last_wb = NULL;
955 : }
956 :
957 : /* SYNC_ALL writes out I_DIRTY_TIME too */
958 : if (!wb_has_dirty_io(wb) &&
959 : (base_work->sync_mode == WB_SYNC_NONE ||
960 : list_empty(&wb->b_dirty_time)))
961 : continue;
962 : if (skip_if_busy && writeback_in_progress(wb))
963 : continue;
964 :
965 : nr_pages = wb_split_bdi_pages(wb, base_work->nr_pages);
966 :
967 : work = kmalloc(sizeof(*work), GFP_ATOMIC);
968 : if (work) {
969 : *work = *base_work;
970 : work->nr_pages = nr_pages;
971 : work->auto_free = 1;
972 : wb_queue_work(wb, work);
973 : continue;
974 : }
975 :
976 : /* alloc failed, execute synchronously using on-stack fallback */
977 : work = &fallback_work;
978 : *work = *base_work;
979 : work->nr_pages = nr_pages;
980 : work->auto_free = 0;
981 : work->done = &fallback_work_done;
982 :
983 : wb_queue_work(wb, work);
984 :
985 : /*
986 : * Pin @wb so that it stays on @bdi->wb_list. This allows
987 : * continuing iteration from @wb after dropping and
988 : * regrabbing rcu read lock.
989 : */
990 : wb_get(wb);
991 : last_wb = wb;
992 :
993 : rcu_read_unlock();
994 : wb_wait_for_completion(&fallback_work_done);
995 : goto restart;
996 : }
997 : rcu_read_unlock();
998 :
999 : if (last_wb)
1000 : wb_put(last_wb);
1001 : }
1002 :
1003 : /**
1004 : * cgroup_writeback_by_id - initiate cgroup writeback from bdi and memcg IDs
1005 : * @bdi_id: target bdi id
1006 : * @memcg_id: target memcg css id
1007 : * @reason: reason why some writeback work was initiated
1008 : * @done: target wb_completion
1009 : *
1010 : * Initiate flush of the bdi_writeback identified by @bdi_id and @memcg_id
1011 : * with the specified parameters.
1012 : */
1013 : int cgroup_writeback_by_id(u64 bdi_id, int memcg_id,
1014 : enum wb_reason reason, struct wb_completion *done)
1015 : {
1016 : struct backing_dev_info *bdi;
1017 : struct cgroup_subsys_state *memcg_css;
1018 : struct bdi_writeback *wb;
1019 : struct wb_writeback_work *work;
1020 : unsigned long dirty;
1021 : int ret;
1022 :
1023 : /* lookup bdi and memcg */
1024 : bdi = bdi_get_by_id(bdi_id);
1025 : if (!bdi)
1026 : return -ENOENT;
1027 :
1028 : rcu_read_lock();
1029 : memcg_css = css_from_id(memcg_id, &memory_cgrp_subsys);
1030 : if (memcg_css && !css_tryget(memcg_css))
1031 : memcg_css = NULL;
1032 : rcu_read_unlock();
1033 : if (!memcg_css) {
1034 : ret = -ENOENT;
1035 : goto out_bdi_put;
1036 : }
1037 :
1038 : /*
1039 : * And find the associated wb. If the wb isn't there already,
1040 : * there's nothing to flush; don't create one.
1041 : */
1042 : wb = wb_get_lookup(bdi, memcg_css);
1043 : if (!wb) {
1044 : ret = -ENOENT;
1045 : goto out_css_put;
1046 : }
1047 :
1048 : /*
1049 : * The caller is attempting to write out most of
1050 : * the currently dirty pages. Let's take the current dirty page
1051 : * count and inflate it by 25% which should be large enough to
1052 : * flush out most dirty pages while avoiding getting livelocked by
1053 : * concurrent dirtiers.
1054 : *
1055 : * BTW the memcg stats are flushed periodically and this is best-effort
1056 : * estimation, so some potential error is ok.
1057 : */
1058 : dirty = memcg_page_state(mem_cgroup_from_css(memcg_css), NR_FILE_DIRTY);
1059 : dirty = dirty * 10 / 8;
1060 :
1061 : /* issue the writeback work */
1062 : work = kzalloc(sizeof(*work), GFP_NOWAIT | __GFP_NOWARN);
1063 : if (work) {
1064 : work->nr_pages = dirty;
1065 : work->sync_mode = WB_SYNC_NONE;
1066 : work->range_cyclic = 1;
1067 : work->reason = reason;
1068 : work->done = done;
1069 : work->auto_free = 1;
1070 : wb_queue_work(wb, work);
1071 : ret = 0;
1072 : } else {
1073 : ret = -ENOMEM;
1074 : }
1075 :
1076 : wb_put(wb);
1077 : out_css_put:
1078 : css_put(memcg_css);
1079 : out_bdi_put:
1080 : bdi_put(bdi);
1081 : return ret;
1082 : }
1083 :
1084 : /**
1085 : * cgroup_writeback_umount - flush inode wb switches for umount
1086 : *
1087 : * This function is called when a super_block is about to be destroyed and
1088 : * flushes in-flight inode wb switches. An inode wb switch goes through
1089 : * RCU and then workqueue, so the two need to be flushed in order to ensure
1090 : * that all previously scheduled switches are finished. As wb switches are
1091 : * rare occurrences and synchronize_rcu() can take a while, perform
1092 : * flushing iff wb switches are in flight.
1093 : */
1094 : void cgroup_writeback_umount(void)
1095 : {
1096 : /*
1097 : * SB_ACTIVE should be reliably cleared before checking
1098 : * isw_nr_in_flight, see generic_shutdown_super().
1099 : */
1100 : smp_mb();
1101 :
1102 : if (atomic_read(&isw_nr_in_flight)) {
1103 : /*
1104 : * Use rcu_barrier() to wait for all pending callbacks to
1105 : * ensure that all in-flight wb switches are in the workqueue.
1106 : */
1107 : rcu_barrier();
1108 : flush_workqueue(isw_wq);
1109 : }
1110 : }
1111 :
1112 : static int __init cgroup_writeback_init(void)
1113 : {
1114 : isw_wq = alloc_workqueue("inode_switch_wbs", 0, 0);
1115 : if (!isw_wq)
1116 : return -ENOMEM;
1117 : return 0;
1118 : }
1119 : fs_initcall(cgroup_writeback_init);
1120 :
1121 : #else /* CONFIG_CGROUP_WRITEBACK */
1122 :
1123 : static void bdi_down_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1124 : static void bdi_up_write_wb_switch_rwsem(struct backing_dev_info *bdi) { }
1125 :
1126 : static void inode_cgwb_move_to_attached(struct inode *inode,
1127 : struct bdi_writeback *wb)
1128 : {
1129 : assert_spin_locked(&wb->list_lock);
1130 : assert_spin_locked(&inode->i_lock);
1131 :
1132 0 : inode->i_state &= ~I_SYNC_QUEUED;
1133 0 : list_del_init(&inode->i_io_list);
1134 0 : wb_io_lists_depopulated(wb);
1135 : }
1136 :
1137 : static struct bdi_writeback *
1138 : locked_inode_to_wb_and_lock_list(struct inode *inode)
1139 : __releases(&inode->i_lock)
1140 : __acquires(&wb->list_lock)
1141 : {
1142 0 : struct bdi_writeback *wb = inode_to_wb(inode);
1143 :
1144 0 : spin_unlock(&inode->i_lock);
1145 0 : spin_lock(&wb->list_lock);
1146 : return wb;
1147 : }
1148 :
1149 : static struct bdi_writeback *inode_to_wb_and_lock_list(struct inode *inode)
1150 : __acquires(&wb->list_lock)
1151 : {
1152 0 : struct bdi_writeback *wb = inode_to_wb(inode);
1153 :
1154 0 : spin_lock(&wb->list_lock);
1155 : return wb;
1156 : }
1157 :
1158 : static long wb_split_bdi_pages(struct bdi_writeback *wb, long nr_pages)
1159 : {
1160 : return nr_pages;
1161 : }
1162 :
1163 : static void bdi_split_work_to_wbs(struct backing_dev_info *bdi,
1164 : struct wb_writeback_work *base_work,
1165 : bool skip_if_busy)
1166 : {
1167 : might_sleep();
1168 :
1169 0 : if (!skip_if_busy || !writeback_in_progress(&bdi->wb)) {
1170 0 : base_work->auto_free = 0;
1171 0 : wb_queue_work(&bdi->wb, base_work);
1172 : }
1173 : }
1174 :
1175 : #endif /* CONFIG_CGROUP_WRITEBACK */
1176 :
1177 : /*
1178 : * Add in the number of potentially dirty inodes, because each inode
1179 : * write can dirty pagecache in the underlying blockdev.
1180 : */
1181 : static unsigned long get_nr_dirty_pages(void)
1182 : {
1183 0 : return global_node_page_state(NR_FILE_DIRTY) +
1184 0 : get_nr_dirty_inodes();
1185 : }
1186 :
1187 0 : static void wb_start_writeback(struct bdi_writeback *wb, enum wb_reason reason)
1188 : {
1189 0 : if (!wb_has_dirty_io(wb))
1190 : return;
1191 :
1192 : /*
1193 : * All callers of this function want to start writeback of all
1194 : * dirty pages. Places like vmscan can call this at a very
1195 : * high frequency, causing pointless allocations of tons of
1196 : * work items and keeping the flusher threads busy retrieving
1197 : * that work. Ensure that we only allow one of them pending and
1198 : * in flight at a time.
1199 : */
1200 0 : if (test_bit(WB_start_all, &wb->state) ||
1201 0 : test_and_set_bit(WB_start_all, &wb->state))
1202 : return;
1203 :
1204 0 : wb->start_all_reason = reason;
1205 0 : wb_wakeup(wb);
1206 : }
1207 :
1208 : /**
1209 : * wb_start_background_writeback - start background writeback
1210 : * @wb: bdi_writeback to write from
1211 : *
1212 : * Description:
1213 : * This makes sure WB_SYNC_NONE background writeback happens. When
1214 : * this function returns, it is only guaranteed that for given wb
1215 : * some IO is happening if we are over background dirty threshold.
1216 : * Caller need not hold sb s_umount semaphore.
1217 : */
1218 0 : void wb_start_background_writeback(struct bdi_writeback *wb)
1219 : {
1220 : /*
1221 : * We just wake up the flusher thread. It will perform background
1222 : * writeback as soon as there is no other work to do.
1223 : */
1224 0 : trace_writeback_wake_background(wb);
1225 0 : wb_wakeup(wb);
1226 0 : }
1227 :
1228 : /*
1229 : * Remove the inode from the writeback list it is on.
1230 : */
1231 0 : void inode_io_list_del(struct inode *inode)
1232 : {
1233 : struct bdi_writeback *wb;
1234 :
1235 0 : wb = inode_to_wb_and_lock_list(inode);
1236 0 : spin_lock(&inode->i_lock);
1237 :
1238 0 : inode->i_state &= ~I_SYNC_QUEUED;
1239 0 : list_del_init(&inode->i_io_list);
1240 0 : wb_io_lists_depopulated(wb);
1241 :
1242 0 : spin_unlock(&inode->i_lock);
1243 0 : spin_unlock(&wb->list_lock);
1244 0 : }
1245 : EXPORT_SYMBOL(inode_io_list_del);
1246 :
1247 : /*
1248 : * mark an inode as under writeback on the sb
1249 : */
1250 0 : void sb_mark_inode_writeback(struct inode *inode)
1251 : {
1252 0 : struct super_block *sb = inode->i_sb;
1253 : unsigned long flags;
1254 :
1255 0 : if (list_empty(&inode->i_wb_list)) {
1256 0 : spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1257 0 : if (list_empty(&inode->i_wb_list)) {
1258 0 : list_add_tail(&inode->i_wb_list, &sb->s_inodes_wb);
1259 : trace_sb_mark_inode_writeback(inode);
1260 : }
1261 0 : spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1262 : }
1263 0 : }
1264 :
1265 : /*
1266 : * clear an inode as under writeback on the sb
1267 : */
1268 0 : void sb_clear_inode_writeback(struct inode *inode)
1269 : {
1270 0 : struct super_block *sb = inode->i_sb;
1271 : unsigned long flags;
1272 :
1273 0 : if (!list_empty(&inode->i_wb_list)) {
1274 0 : spin_lock_irqsave(&sb->s_inode_wblist_lock, flags);
1275 0 : if (!list_empty(&inode->i_wb_list)) {
1276 0 : list_del_init(&inode->i_wb_list);
1277 : trace_sb_clear_inode_writeback(inode);
1278 : }
1279 0 : spin_unlock_irqrestore(&sb->s_inode_wblist_lock, flags);
1280 : }
1281 0 : }
1282 :
1283 : /*
1284 : * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
1285 : * furthest end of its superblock's dirty-inode list.
1286 : *
1287 : * Before stamping the inode's ->dirtied_when, we check to see whether it is
1288 : * already the most-recently-dirtied inode on the b_dirty list. If that is
1289 : * the case then the inode must have been redirtied while it was being written
1290 : * out and we don't reset its dirtied_when.
1291 : */
1292 0 : static void redirty_tail_locked(struct inode *inode, struct bdi_writeback *wb)
1293 : {
1294 : assert_spin_locked(&inode->i_lock);
1295 :
1296 0 : if (!list_empty(&wb->b_dirty)) {
1297 : struct inode *tail;
1298 :
1299 0 : tail = wb_inode(wb->b_dirty.next);
1300 0 : if (time_before(inode->dirtied_when, tail->dirtied_when))
1301 0 : inode->dirtied_when = jiffies;
1302 : }
1303 0 : inode_io_list_move_locked(inode, wb, &wb->b_dirty);
1304 0 : inode->i_state &= ~I_SYNC_QUEUED;
1305 0 : }
1306 :
1307 : static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
1308 : {
1309 0 : spin_lock(&inode->i_lock);
1310 0 : redirty_tail_locked(inode, wb);
1311 0 : spin_unlock(&inode->i_lock);
1312 : }
1313 :
1314 : /*
1315 : * requeue inode for re-scanning after bdi->b_io list is exhausted.
1316 : */
1317 : static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
1318 : {
1319 0 : inode_io_list_move_locked(inode, wb, &wb->b_more_io);
1320 : }
1321 :
1322 0 : static void inode_sync_complete(struct inode *inode)
1323 : {
1324 0 : inode->i_state &= ~I_SYNC;
1325 : /* If inode is clean and unused, put it into LRU now... */
1326 0 : inode_add_lru(inode);
1327 : /* Waiters must see I_SYNC cleared before being woken up */
1328 0 : smp_mb();
1329 0 : wake_up_bit(&inode->i_state, __I_SYNC);
1330 0 : }
1331 :
1332 : static bool inode_dirtied_after(struct inode *inode, unsigned long t)
1333 : {
1334 0 : bool ret = time_after(inode->dirtied_when, t);
1335 : #ifndef CONFIG_64BIT
1336 : /*
1337 : * For inodes being constantly redirtied, dirtied_when can get stuck.
1338 : * It _appears_ to be in the future, but is actually in the distant past.
1339 : * This test is necessary to prevent such wrapped-around relative times
1340 : * from permanently stopping the whole bdi writeback.
1341 : */
1342 : ret = ret && time_before_eq(inode->dirtied_when, jiffies);
1343 : #endif
1344 : return ret;
1345 : }
1346 :
1347 : #define EXPIRE_DIRTY_ATIME 0x0001
1348 :
1349 : /*
1350 : * Move expired (dirtied before dirtied_before) dirty inodes from
1351 : * @delaying_queue to @dispatch_queue.
1352 : */
1353 0 : static int move_expired_inodes(struct list_head *delaying_queue,
1354 : struct list_head *dispatch_queue,
1355 : unsigned long dirtied_before)
1356 : {
1357 0 : LIST_HEAD(tmp);
1358 : struct list_head *pos, *node;
1359 0 : struct super_block *sb = NULL;
1360 : struct inode *inode;
1361 0 : int do_sb_sort = 0;
1362 0 : int moved = 0;
1363 :
1364 0 : while (!list_empty(delaying_queue)) {
1365 0 : inode = wb_inode(delaying_queue->prev);
1366 0 : if (inode_dirtied_after(inode, dirtied_before))
1367 : break;
1368 0 : list_move(&inode->i_io_list, &tmp);
1369 0 : moved++;
1370 0 : spin_lock(&inode->i_lock);
1371 0 : inode->i_state |= I_SYNC_QUEUED;
1372 0 : spin_unlock(&inode->i_lock);
1373 0 : if (sb_is_blkdev_sb(inode->i_sb))
1374 0 : continue;
1375 0 : if (sb && sb != inode->i_sb)
1376 0 : do_sb_sort = 1;
1377 : sb = inode->i_sb;
1378 : }
1379 :
1380 : /* just one sb in list, splice to dispatch_queue and we're done */
1381 0 : if (!do_sb_sort) {
1382 : list_splice(&tmp, dispatch_queue);
1383 : goto out;
1384 : }
1385 :
1386 : /* Move inodes from one superblock together */
1387 0 : while (!list_empty(&tmp)) {
1388 0 : sb = wb_inode(tmp.prev)->i_sb;
1389 0 : list_for_each_prev_safe(pos, node, &tmp) {
1390 0 : inode = wb_inode(pos);
1391 0 : if (inode->i_sb == sb)
1392 0 : list_move(&inode->i_io_list, dispatch_queue);
1393 : }
1394 : }
1395 : out:
1396 0 : return moved;
1397 : }
1398 :
1399 : /*
1400 : * Queue all expired dirty inodes for io, eldest first.
1401 : * Before
1402 : * newly dirtied b_dirty b_io b_more_io
1403 : * =============> gf edc BA
1404 : * After
1405 : * newly dirtied b_dirty b_io b_more_io
1406 : * =============> g fBAedc
1407 : * |
1408 : * +--> dequeue for IO
1409 : */
1410 0 : static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work,
1411 : unsigned long dirtied_before)
1412 : {
1413 : int moved;
1414 0 : unsigned long time_expire_jif = dirtied_before;
1415 :
1416 : assert_spin_locked(&wb->list_lock);
1417 0 : list_splice_init(&wb->b_more_io, &wb->b_io);
1418 0 : moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, dirtied_before);
1419 0 : if (!work->for_sync)
1420 0 : time_expire_jif = jiffies - dirtytime_expire_interval * HZ;
1421 0 : moved += move_expired_inodes(&wb->b_dirty_time, &wb->b_io,
1422 : time_expire_jif);
1423 0 : if (moved)
1424 0 : wb_io_lists_populated(wb);
1425 0 : trace_writeback_queue_io(wb, work, dirtied_before, moved);
1426 0 : }
1427 :
1428 0 : static int write_inode(struct inode *inode, struct writeback_control *wbc)
1429 : {
1430 : int ret;
1431 :
1432 0 : if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
1433 0 : trace_writeback_write_inode_start(inode, wbc);
1434 0 : ret = inode->i_sb->s_op->write_inode(inode, wbc);
1435 0 : trace_writeback_write_inode(inode, wbc);
1436 0 : return ret;
1437 : }
1438 : return 0;
1439 : }
1440 :
1441 : /*
1442 : * Wait for writeback on an inode to complete. Called with i_lock held.
1443 : * Caller must make sure inode cannot go away when we drop i_lock.
1444 : */
1445 0 : static void __inode_wait_for_writeback(struct inode *inode)
1446 : __releases(inode->i_lock)
1447 : __acquires(inode->i_lock)
1448 : {
1449 0 : DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
1450 : wait_queue_head_t *wqh;
1451 :
1452 0 : wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1453 0 : while (inode->i_state & I_SYNC) {
1454 0 : spin_unlock(&inode->i_lock);
1455 0 : __wait_on_bit(wqh, &wq, bit_wait,
1456 : TASK_UNINTERRUPTIBLE);
1457 0 : spin_lock(&inode->i_lock);
1458 : }
1459 0 : }
1460 :
1461 : /*
1462 : * Wait for writeback on an inode to complete. Caller must have inode pinned.
1463 : */
1464 0 : void inode_wait_for_writeback(struct inode *inode)
1465 : {
1466 0 : spin_lock(&inode->i_lock);
1467 0 : __inode_wait_for_writeback(inode);
1468 0 : spin_unlock(&inode->i_lock);
1469 0 : }
1470 :
1471 : /*
1472 : * Sleep until I_SYNC is cleared. This function must be called with i_lock
1473 : * held and drops it. It is aimed at callers not holding any inode reference
1474 : * so once i_lock is dropped, inode can go away.
1475 : */
1476 0 : static void inode_sleep_on_writeback(struct inode *inode)
1477 : __releases(inode->i_lock)
1478 : {
1479 0 : DEFINE_WAIT(wait);
1480 0 : wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
1481 : int sleep;
1482 :
1483 0 : prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
1484 0 : sleep = inode->i_state & I_SYNC;
1485 0 : spin_unlock(&inode->i_lock);
1486 0 : if (sleep)
1487 0 : schedule();
1488 0 : finish_wait(wqh, &wait);
1489 0 : }
1490 :
1491 : /*
1492 : * Find proper writeback list for the inode depending on its current state and
1493 : * possibly also change of its state while we were doing writeback. Here we
1494 : * handle things such as livelock prevention or fairness of writeback among
1495 : * inodes. This function can be called only by flusher thread - noone else
1496 : * processes all inodes in writeback lists and requeueing inodes behind flusher
1497 : * thread's back can have unexpected consequences.
1498 : */
1499 0 : static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
1500 : struct writeback_control *wbc)
1501 : {
1502 0 : if (inode->i_state & I_FREEING)
1503 : return;
1504 :
1505 : /*
1506 : * Sync livelock prevention. Each inode is tagged and synced in one
1507 : * shot. If still dirty, it will be redirty_tail()'ed below. Update
1508 : * the dirty time to prevent enqueue and sync it again.
1509 : */
1510 0 : if ((inode->i_state & I_DIRTY) &&
1511 0 : (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
1512 0 : inode->dirtied_when = jiffies;
1513 :
1514 0 : if (wbc->pages_skipped) {
1515 : /*
1516 : * writeback is not making progress due to locked
1517 : * buffers. Skip this inode for now.
1518 : */
1519 0 : redirty_tail_locked(inode, wb);
1520 0 : return;
1521 : }
1522 :
1523 0 : if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
1524 : /*
1525 : * We didn't write back all the pages. nfs_writepages()
1526 : * sometimes bails out without doing anything.
1527 : */
1528 0 : if (wbc->nr_to_write <= 0) {
1529 : /* Slice used up. Queue for next turn. */
1530 : requeue_io(inode, wb);
1531 : } else {
1532 : /*
1533 : * Writeback blocked by something other than
1534 : * congestion. Delay the inode for some time to
1535 : * avoid spinning on the CPU (100% iowait)
1536 : * retrying writeback of the dirty page/inode
1537 : * that cannot be performed immediately.
1538 : */
1539 0 : redirty_tail_locked(inode, wb);
1540 : }
1541 0 : } else if (inode->i_state & I_DIRTY) {
1542 : /*
1543 : * Filesystems can dirty the inode during writeback operations,
1544 : * such as delayed allocation during submission or metadata
1545 : * updates after data IO completion.
1546 : */
1547 0 : redirty_tail_locked(inode, wb);
1548 0 : } else if (inode->i_state & I_DIRTY_TIME) {
1549 0 : inode->dirtied_when = jiffies;
1550 0 : inode_io_list_move_locked(inode, wb, &wb->b_dirty_time);
1551 0 : inode->i_state &= ~I_SYNC_QUEUED;
1552 : } else {
1553 : /* The inode is clean. Remove from writeback lists. */
1554 : inode_cgwb_move_to_attached(inode, wb);
1555 : }
1556 : }
1557 :
1558 : /*
1559 : * Write out an inode and its dirty pages (or some of its dirty pages, depending
1560 : * on @wbc->nr_to_write), and clear the relevant dirty flags from i_state.
1561 : *
1562 : * This doesn't remove the inode from the writeback list it is on, except
1563 : * potentially to move it from b_dirty_time to b_dirty due to timestamp
1564 : * expiration. The caller is otherwise responsible for writeback list handling.
1565 : *
1566 : * The caller is also responsible for setting the I_SYNC flag beforehand and
1567 : * calling inode_sync_complete() to clear it afterwards.
1568 : */
1569 : static int
1570 0 : __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
1571 : {
1572 0 : struct address_space *mapping = inode->i_mapping;
1573 0 : long nr_to_write = wbc->nr_to_write;
1574 : unsigned dirty;
1575 : int ret;
1576 :
1577 0 : WARN_ON(!(inode->i_state & I_SYNC));
1578 :
1579 0 : trace_writeback_single_inode_start(inode, wbc, nr_to_write);
1580 :
1581 0 : ret = do_writepages(mapping, wbc);
1582 :
1583 : /*
1584 : * Make sure to wait on the data before writing out the metadata.
1585 : * This is important for filesystems that modify metadata on data
1586 : * I/O completion. We don't do it for sync(2) writeback because it has a
1587 : * separate, external IO completion path and ->sync_fs for guaranteeing
1588 : * inode metadata is written back correctly.
1589 : */
1590 0 : if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
1591 0 : int err = filemap_fdatawait(mapping);
1592 0 : if (ret == 0)
1593 0 : ret = err;
1594 : }
1595 :
1596 : /*
1597 : * If the inode has dirty timestamps and we need to write them, call
1598 : * mark_inode_dirty_sync() to notify the filesystem about it and to
1599 : * change I_DIRTY_TIME into I_DIRTY_SYNC.
1600 : */
1601 0 : if ((inode->i_state & I_DIRTY_TIME) &&
1602 0 : (wbc->sync_mode == WB_SYNC_ALL ||
1603 0 : time_after(jiffies, inode->dirtied_time_when +
1604 : dirtytime_expire_interval * HZ))) {
1605 0 : trace_writeback_lazytime(inode);
1606 : mark_inode_dirty_sync(inode);
1607 : }
1608 :
1609 : /*
1610 : * Get and clear the dirty flags from i_state. This needs to be done
1611 : * after calling writepages because some filesystems may redirty the
1612 : * inode during writepages due to delalloc. It also needs to be done
1613 : * after handling timestamp expiration, as that may dirty the inode too.
1614 : */
1615 0 : spin_lock(&inode->i_lock);
1616 0 : dirty = inode->i_state & I_DIRTY;
1617 0 : inode->i_state &= ~dirty;
1618 :
1619 : /*
1620 : * Paired with smp_mb() in __mark_inode_dirty(). This allows
1621 : * __mark_inode_dirty() to test i_state without grabbing i_lock -
1622 : * either they see the I_DIRTY bits cleared or we see the dirtied
1623 : * inode.
1624 : *
1625 : * I_DIRTY_PAGES is always cleared together above even if @mapping
1626 : * still has dirty pages. The flag is reinstated after smp_mb() if
1627 : * necessary. This guarantees that either __mark_inode_dirty()
1628 : * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
1629 : */
1630 0 : smp_mb();
1631 :
1632 0 : if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
1633 0 : inode->i_state |= I_DIRTY_PAGES;
1634 0 : else if (unlikely(inode->i_state & I_PINNING_FSCACHE_WB)) {
1635 0 : if (!(inode->i_state & I_DIRTY_PAGES)) {
1636 0 : inode->i_state &= ~I_PINNING_FSCACHE_WB;
1637 0 : wbc->unpinned_fscache_wb = true;
1638 0 : dirty |= I_PINNING_FSCACHE_WB; /* Cause write_inode */
1639 : }
1640 : }
1641 :
1642 0 : spin_unlock(&inode->i_lock);
1643 :
1644 : /* Don't write the inode if only I_DIRTY_PAGES was set */
1645 0 : if (dirty & ~I_DIRTY_PAGES) {
1646 0 : int err = write_inode(inode, wbc);
1647 0 : if (ret == 0)
1648 0 : ret = err;
1649 : }
1650 0 : wbc->unpinned_fscache_wb = false;
1651 0 : trace_writeback_single_inode(inode, wbc, nr_to_write);
1652 0 : return ret;
1653 : }
1654 :
1655 : /*
1656 : * Write out an inode's dirty data and metadata on-demand, i.e. separately from
1657 : * the regular batched writeback done by the flusher threads in
1658 : * writeback_sb_inodes(). @wbc controls various aspects of the write, such as
1659 : * whether it is a data-integrity sync (%WB_SYNC_ALL) or not (%WB_SYNC_NONE).
1660 : *
1661 : * To prevent the inode from going away, either the caller must have a reference
1662 : * to the inode, or the inode must have I_WILL_FREE or I_FREEING set.
1663 : */
1664 0 : static int writeback_single_inode(struct inode *inode,
1665 : struct writeback_control *wbc)
1666 : {
1667 : struct bdi_writeback *wb;
1668 0 : int ret = 0;
1669 :
1670 0 : spin_lock(&inode->i_lock);
1671 0 : if (!atomic_read(&inode->i_count))
1672 0 : WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
1673 : else
1674 0 : WARN_ON(inode->i_state & I_WILL_FREE);
1675 :
1676 0 : if (inode->i_state & I_SYNC) {
1677 : /*
1678 : * Writeback is already running on the inode. For WB_SYNC_NONE,
1679 : * that's enough and we can just return. For WB_SYNC_ALL, we
1680 : * must wait for the existing writeback to complete, then do
1681 : * writeback again if there's anything left.
1682 : */
1683 0 : if (wbc->sync_mode != WB_SYNC_ALL)
1684 : goto out;
1685 0 : __inode_wait_for_writeback(inode);
1686 : }
1687 0 : WARN_ON(inode->i_state & I_SYNC);
1688 : /*
1689 : * If the inode is already fully clean, then there's nothing to do.
1690 : *
1691 : * For data-integrity syncs we also need to check whether any pages are
1692 : * still under writeback, e.g. due to prior WB_SYNC_NONE writeback. If
1693 : * there are any such pages, we'll need to wait for them.
1694 : */
1695 0 : if (!(inode->i_state & I_DIRTY_ALL) &&
1696 0 : (wbc->sync_mode != WB_SYNC_ALL ||
1697 0 : !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
1698 : goto out;
1699 0 : inode->i_state |= I_SYNC;
1700 0 : wbc_attach_and_unlock_inode(wbc, inode);
1701 :
1702 0 : ret = __writeback_single_inode(inode, wbc);
1703 :
1704 0 : wbc_detach_inode(wbc);
1705 :
1706 0 : wb = inode_to_wb_and_lock_list(inode);
1707 0 : spin_lock(&inode->i_lock);
1708 : /*
1709 : * If the inode is now fully clean, then it can be safely removed from
1710 : * its writeback list (if any). Otherwise the flusher threads are
1711 : * responsible for the writeback lists.
1712 : */
1713 0 : if (!(inode->i_state & I_DIRTY_ALL))
1714 : inode_cgwb_move_to_attached(inode, wb);
1715 0 : else if (!(inode->i_state & I_SYNC_QUEUED) &&
1716 0 : (inode->i_state & I_DIRTY))
1717 0 : redirty_tail_locked(inode, wb);
1718 :
1719 0 : spin_unlock(&wb->list_lock);
1720 0 : inode_sync_complete(inode);
1721 : out:
1722 0 : spin_unlock(&inode->i_lock);
1723 0 : return ret;
1724 : }
1725 :
1726 : static long writeback_chunk_size(struct bdi_writeback *wb,
1727 : struct wb_writeback_work *work)
1728 : {
1729 : long pages;
1730 :
1731 : /*
1732 : * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
1733 : * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
1734 : * here avoids calling into writeback_inodes_wb() more than once.
1735 : *
1736 : * The intended call sequence for WB_SYNC_ALL writeback is:
1737 : *
1738 : * wb_writeback()
1739 : * writeback_sb_inodes() <== called only once
1740 : * write_cache_pages() <== called once for each inode
1741 : * (quickly) tag currently dirty pages
1742 : * (maybe slowly) sync all tagged pages
1743 : */
1744 0 : if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
1745 : pages = LONG_MAX;
1746 : else {
1747 0 : pages = min(wb->avg_write_bandwidth / 2,
1748 : global_wb_domain.dirty_limit / DIRTY_SCOPE);
1749 0 : pages = min(pages, work->nr_pages);
1750 0 : pages = round_down(pages + MIN_WRITEBACK_PAGES,
1751 : MIN_WRITEBACK_PAGES);
1752 : }
1753 :
1754 : return pages;
1755 : }
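/*
 * Editorial sketch, not part of fs-writeback.c: the chunk-size formula from
 * writeback_chunk_size() above, reproduced as a standalone userspace program
 * so the rounding behaviour is easy to see.  EX_PAGE_SHIFT,
 * EX_MIN_WRITEBACK_PAGES, EX_DIRTY_SCOPE and all input numbers are assumed
 * values for illustration only, not the kernel's definitions.
 */
#include <stdio.h>

#define EX_PAGE_SHIFT		12	/* assume 4KB pages */
#define EX_MIN_WRITEBACK_PAGES	1024UL	/* assume 4MB expressed in pages */
#define EX_DIRTY_SCOPE		8	/* assumed divisor */

static unsigned long ex_round_down(unsigned long x, unsigned long m)
{
	return (x / m) * m;
}

int main(void)
{
	unsigned long avg_write_bandwidth = 25600;	/* assumed ~100MB/s in pages/s */
	unsigned long dirty_limit = 262144;		/* assumed 1GB of dirtyable pages */
	unsigned long nr_pages = 70000;			/* pages left in the work item */
	unsigned long pages;

	pages = avg_write_bandwidth / 2;
	if (dirty_limit / EX_DIRTY_SCOPE < pages)
		pages = dirty_limit / EX_DIRTY_SCOPE;
	if (nr_pages < pages)
		pages = nr_pages;
	/* round to a MIN_WRITEBACK_PAGES multiple, but never below one chunk */
	pages = ex_round_down(pages + EX_MIN_WRITEBACK_PAGES, EX_MIN_WRITEBACK_PAGES);

	printf("write chunk: %lu pages (%lu MB)\n",
	       pages, pages >> (20 - EX_PAGE_SHIFT));
	return 0;
}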
1756 :
1757 : /*
1758 : * Write a portion of b_io inodes which belong to @sb.
1759 : *
1760 : * Return the number of pages and/or inodes written.
1761 : *
1762 : * NOTE! This is called with wb->list_lock held, and will
1763 : * unlock and relock that for each inode it ends up doing
1764 : * IO for.
1765 : */
1766 0 : static long writeback_sb_inodes(struct super_block *sb,
1767 : struct bdi_writeback *wb,
1768 : struct wb_writeback_work *work)
1769 : {
1770 0 : struct writeback_control wbc = {
1771 0 : .sync_mode = work->sync_mode,
1772 0 : .tagged_writepages = work->tagged_writepages,
1773 0 : .for_kupdate = work->for_kupdate,
1774 0 : .for_background = work->for_background,
1775 0 : .for_sync = work->for_sync,
1776 0 : .range_cyclic = work->range_cyclic,
1777 : .range_start = 0,
1778 : .range_end = LLONG_MAX,
1779 : };
1780 0 : unsigned long start_time = jiffies;
1781 : long write_chunk;
1782 0 : long wrote = 0; /* count both pages and inodes */
1783 :
1784 0 : while (!list_empty(&wb->b_io)) {
1785 0 : struct inode *inode = wb_inode(wb->b_io.prev);
1786 : struct bdi_writeback *tmp_wb;
1787 :
1788 0 : if (inode->i_sb != sb) {
1789 0 : if (work->sb) {
1790 : /*
1791 : * We only want to write back data for this
1792 : * superblock, move all inodes not belonging
1793 : * to it back onto the dirty list.
1794 : */
1795 0 : redirty_tail(inode, wb);
1796 0 : continue;
1797 : }
1798 :
1799 : /*
1800 : * The inode belongs to a different superblock.
1801 : * Bounce back to the caller to unpin this and
1802 : * pin the next superblock.
1803 : */
1804 : break;
1805 : }
1806 :
1807 : /*
1808 : * Don't bother with new inodes or inodes being freed: the first
1809 : * kind does not need periodic writeout yet, and for the latter
1810 : * kind writeout is handled by the freer.
1811 : */
1812 0 : spin_lock(&inode->i_lock);
1813 0 : if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
1814 0 : redirty_tail_locked(inode, wb);
1815 0 : spin_unlock(&inode->i_lock);
1816 0 : continue;
1817 : }
1818 0 : if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
1819 : /*
1820 : * If this inode is locked for writeback and we are not
1821 : * doing writeback-for-data-integrity, move it to
1822 : * b_more_io so that writeback can proceed with the
1823 : * other inodes on s_io.
1824 : *
1825 : * We'll have another go at writing back this inode
1826 : * when we have completed a full scan of b_io.
1827 : */
1828 0 : spin_unlock(&inode->i_lock);
1829 0 : requeue_io(inode, wb);
1830 : trace_writeback_sb_inodes_requeue(inode);
1831 0 : continue;
1832 : }
1833 0 : spin_unlock(&wb->list_lock);
1834 :
1835 : /*
1836 : * We already requeued the inode if it had I_SYNC set and we
1837 : * are doing WB_SYNC_NONE writeback. So this catches only the
1838 : * WB_SYNC_ALL case.
1839 : */
1840 0 : if (inode->i_state & I_SYNC) {
1841 : /* Wait for I_SYNC. This function drops i_lock... */
1842 0 : inode_sleep_on_writeback(inode);
1843 : /* Inode may be gone, start again */
1844 0 : spin_lock(&wb->list_lock);
1845 0 : continue;
1846 : }
1847 0 : inode->i_state |= I_SYNC;
1848 0 : wbc_attach_and_unlock_inode(&wbc, inode);
1849 :
1850 0 : write_chunk = writeback_chunk_size(wb, work);
1851 0 : wbc.nr_to_write = write_chunk;
1852 0 : wbc.pages_skipped = 0;
1853 :
1854 : /*
1855 : * We use I_SYNC to pin the inode in memory. While it is set
1856 : * evict_inode() will wait so the inode cannot be freed.
1857 : */
1858 0 : __writeback_single_inode(inode, &wbc);
1859 :
1860 0 : wbc_detach_inode(&wbc);
1861 0 : work->nr_pages -= write_chunk - wbc.nr_to_write;
1862 0 : wrote += write_chunk - wbc.nr_to_write;
1863 :
1864 0 : if (need_resched()) {
1865 : /*
1866 : * We're trying to balance between building up a nice
1867 : * long list of IOs to improve our merge rate, and
1868 : * getting those IOs out quickly for anyone throttling
1869 : * in balance_dirty_pages(). cond_resched() doesn't
1870 : * unplug, so get our IOs out the door before we
1871 : * give up the CPU.
1872 : */
1873 0 : blk_flush_plug(current->plug, false);
1874 0 : cond_resched();
1875 : }
1876 :
1877 : /*
1878 : * Requeue @inode if still dirty. Be careful as @inode may
1879 : * have been switched to another wb in the meantime.
1880 : */
1881 0 : tmp_wb = inode_to_wb_and_lock_list(inode);
1882 0 : spin_lock(&inode->i_lock);
1883 0 : if (!(inode->i_state & I_DIRTY_ALL))
1884 0 : wrote++;
1885 0 : requeue_inode(inode, tmp_wb, &wbc);
1886 0 : inode_sync_complete(inode);
1887 0 : spin_unlock(&inode->i_lock);
1888 :
1889 0 : if (unlikely(tmp_wb != wb)) {
1890 0 : spin_unlock(&tmp_wb->list_lock);
1891 0 : spin_lock(&wb->list_lock);
1892 : }
1893 :
1894 : /*
1895 : * bail out to wb_writeback() often enough to check
1896 : * background threshold and other termination conditions.
1897 : */
1898 0 : if (wrote) {
1899 0 : if (time_is_before_jiffies(start_time + HZ / 10UL))
1900 : break;
1901 0 : if (work->nr_pages <= 0)
1902 : break;
1903 : }
1904 : }
1905 0 : return wrote;
1906 : }
1907 :
1908 0 : static long __writeback_inodes_wb(struct bdi_writeback *wb,
1909 : struct wb_writeback_work *work)
1910 : {
1911 0 : unsigned long start_time = jiffies;
1912 0 : long wrote = 0;
1913 :
1914 0 : while (!list_empty(&wb->b_io)) {
1915 0 : struct inode *inode = wb_inode(wb->b_io.prev);
1916 0 : struct super_block *sb = inode->i_sb;
1917 :
1918 0 : if (!trylock_super(sb)) {
1919 : /*
1920 : * trylock_super() may fail consistently due to
1921 : * s_umount being grabbed by someone else. Don't use
1922 : * requeue_io() to avoid busy retrying the inode/sb.
1923 : */
1924 0 : redirty_tail(inode, wb);
1925 0 : continue;
1926 : }
1927 0 : wrote += writeback_sb_inodes(sb, wb, work);
1928 0 : up_read(&sb->s_umount);
1929 :
1930 : /* refer to the same tests at the end of writeback_sb_inodes */
1931 0 : if (wrote) {
1932 0 : if (time_is_before_jiffies(start_time + HZ / 10UL))
1933 : break;
1934 0 : if (work->nr_pages <= 0)
1935 : break;
1936 : }
1937 : }
1938 : /* Leave any unwritten inodes on b_io */
1939 0 : return wrote;
1940 : }
1941 :
1942 0 : static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
1943 : enum wb_reason reason)
1944 : {
1945 0 : struct wb_writeback_work work = {
1946 : .nr_pages = nr_pages,
1947 : .sync_mode = WB_SYNC_NONE,
1948 : .range_cyclic = 1,
1949 : .reason = reason,
1950 : };
1951 : struct blk_plug plug;
1952 :
1953 0 : blk_start_plug(&plug);
1954 0 : spin_lock(&wb->list_lock);
1955 0 : if (list_empty(&wb->b_io))
1956 0 : queue_io(wb, &work, jiffies);
1957 0 : __writeback_inodes_wb(wb, &work);
1958 0 : spin_unlock(&wb->list_lock);
1959 0 : blk_finish_plug(&plug);
1960 :
1961 0 : return nr_pages - work.nr_pages;
1962 : }
1963 :
1964 : /*
1965 : * Explicit flushing or periodic writeback of "old" data.
1966 : *
1967 : * Define "old": the first time one of an inode's pages is dirtied, we mark the
1968 : * dirtying-time in the inode's address_space. So this periodic writeback code
1969 : * just walks the superblock inode list, writing back any inodes which are
1970 : * older than a specific point in time.
1971 : *
1972 : * Try to run once per dirty_writeback_interval. But if a writeback event
1973 : * takes longer than one dirty_writeback_interval, then leave a
1974 : * one-second gap.
1975 : *
1976 : * dirtied_before takes precedence over nr_to_write. So we'll only write back
1977 : * all dirty pages if they are all attached to "old" mappings.
1978 : */
1979 0 : static long wb_writeback(struct bdi_writeback *wb,
1980 : struct wb_writeback_work *work)
1981 : {
1982 0 : long nr_pages = work->nr_pages;
1983 0 : unsigned long dirtied_before = jiffies;
1984 : struct inode *inode;
1985 : long progress;
1986 : struct blk_plug plug;
1987 :
1988 0 : blk_start_plug(&plug);
1989 0 : spin_lock(&wb->list_lock);
1990 : for (;;) {
1991 : /*
1992 : * Stop writeback when nr_pages has been consumed
1993 : */
1994 0 : if (work->nr_pages <= 0)
1995 : break;
1996 :
1997 : /*
1998 : * Background writeout and kupdate-style writeback may
1999 : * run forever. Stop them if there is other work to do
2000 : * so that e.g. sync can proceed. They'll be restarted
2001 : * after the other works are all done.
2002 : */
2003 0 : if ((work->for_background || work->for_kupdate) &&
2004 0 : !list_empty(&wb->work_list))
2005 : break;
2006 :
2007 : /*
2008 : * For background writeout, stop when we are below the
2009 : * background dirty threshold
2010 : */
2011 0 : if (work->for_background && !wb_over_bg_thresh(wb))
2012 : break;
2013 :
2014 : /*
2015 : * Kupdate and background works are special and we want to
2016 : * include all inodes that need writing. Livelock avoidance is
2017 : * handled by these works yielding to any other work so we are
2018 : * safe.
2019 : */
2020 0 : if (work->for_kupdate) {
2021 0 : dirtied_before = jiffies -
2022 0 : msecs_to_jiffies(dirty_expire_interval * 10);
2023 0 : } else if (work->for_background)
2024 0 : dirtied_before = jiffies;
2025 :
2026 0 : trace_writeback_start(wb, work);
2027 0 : if (list_empty(&wb->b_io))
2028 0 : queue_io(wb, work, dirtied_before);
2029 0 : if (work->sb)
2030 0 : progress = writeback_sb_inodes(work->sb, wb, work);
2031 : else
2032 0 : progress = __writeback_inodes_wb(wb, work);
2033 0 : trace_writeback_written(wb, work);
2034 :
2035 : /*
2036 : * Did we write something? Try for more
2037 : *
2038 : * Dirty inodes are moved to b_io for writeback in batches.
2039 : * The completion of the current batch does not necessarily
2040 : * mean the overall work is done. So we keep looping as long
2041 : * as we have made some progress on cleaning pages or inodes.
2042 : */
2043 0 : if (progress)
2044 0 : continue;
2045 : /*
2046 : * No more inodes for IO, bail
2047 : */
2048 0 : if (list_empty(&wb->b_more_io))
2049 : break;
2050 : /*
2051 : * Nothing written. Wait for some inode to
2052 : * become available for writeback. Otherwise
2053 : * we'll just busyloop.
2054 : */
2055 0 : trace_writeback_wait(wb, work);
2056 0 : inode = wb_inode(wb->b_more_io.prev);
2057 0 : spin_lock(&inode->i_lock);
2058 0 : spin_unlock(&wb->list_lock);
2059 : /* This function drops i_lock... */
2060 0 : inode_sleep_on_writeback(inode);
2061 0 : spin_lock(&wb->list_lock);
2062 : }
2063 0 : spin_unlock(&wb->list_lock);
2064 0 : blk_finish_plug(&plug);
2065 :
2066 0 : return nr_pages - work->nr_pages;
2067 : }
2068 :
2069 : /*
2070 : * Return the next wb_writeback_work struct that hasn't been processed yet.
2071 : */
2072 0 : static struct wb_writeback_work *get_next_work_item(struct bdi_writeback *wb)
2073 : {
2074 0 : struct wb_writeback_work *work = NULL;
2075 :
2076 0 : spin_lock_bh(&wb->work_lock);
2077 0 : if (!list_empty(&wb->work_list)) {
2078 0 : work = list_entry(wb->work_list.next,
2079 : struct wb_writeback_work, list);
2080 0 : list_del_init(&work->list);
2081 : }
2082 0 : spin_unlock_bh(&wb->work_lock);
2083 0 : return work;
2084 : }
2085 :
2086 0 : static long wb_check_background_flush(struct bdi_writeback *wb)
2087 : {
2088 0 : if (wb_over_bg_thresh(wb)) {
2089 :
2090 0 : struct wb_writeback_work work = {
2091 : .nr_pages = LONG_MAX,
2092 : .sync_mode = WB_SYNC_NONE,
2093 : .for_background = 1,
2094 : .range_cyclic = 1,
2095 : .reason = WB_REASON_BACKGROUND,
2096 : };
2097 :
2098 0 : return wb_writeback(wb, &work);
2099 : }
2100 :
2101 : return 0;
2102 : }
2103 :
2104 0 : static long wb_check_old_data_flush(struct bdi_writeback *wb)
2105 : {
2106 : unsigned long expired;
2107 : long nr_pages;
2108 :
2109 : /*
2110 : * When set to zero, disable periodic writeback
2111 : */
2112 0 : if (!dirty_writeback_interval)
2113 : return 0;
2114 :
2115 0 : expired = wb->last_old_flush +
2116 0 : msecs_to_jiffies(dirty_writeback_interval * 10);
2117 0 : if (time_before(jiffies, expired))
2118 : return 0;
2119 :
2120 0 : wb->last_old_flush = jiffies;
2121 0 : nr_pages = get_nr_dirty_pages();
2122 :
2123 0 : if (nr_pages) {
2124 0 : struct wb_writeback_work work = {
2125 : .nr_pages = nr_pages,
2126 : .sync_mode = WB_SYNC_NONE,
2127 : .for_kupdate = 1,
2128 : .range_cyclic = 1,
2129 : .reason = WB_REASON_PERIODIC,
2130 : };
2131 :
2132 0 : return wb_writeback(wb, &work);
2133 : }
2134 :
2135 : return 0;
2136 : }
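/*
 * Editorial note, not part of fs-writeback.c: dirty_writeback_interval is the
 * vm.dirty_writeback_centisecs sysctl, which is why the code above multiplies
 * it by 10 before handing it to msecs_to_jiffies().  A minimal userspace
 * sketch of the same "is the periodic flush due yet?" decision, with assumed
 * numbers:
 */
#include <stdio.h>

int main(void)
{
	unsigned long dirty_writeback_interval = 500;	/* assumed: 500 centisecs = 5s */
	unsigned long last_old_flush_ms = 12000;	/* assumed time of last flush */
	unsigned long now_ms = 16500;			/* assumed current time */
	unsigned long expired_ms = last_old_flush_ms + dirty_writeback_interval * 10;

	if (!dirty_writeback_interval)
		printf("periodic writeback disabled\n");
	else if (now_ms < expired_ms)
		printf("not due yet, next kupdate-style flush at %lu ms\n", expired_ms);
	else
		printf("kupdate-style flush due now\n");
	return 0;
}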
2137 :
2138 0 : static long wb_check_start_all(struct bdi_writeback *wb)
2139 : {
2140 : long nr_pages;
2141 :
2142 0 : if (!test_bit(WB_start_all, &wb->state))
2143 : return 0;
2144 :
2145 0 : nr_pages = get_nr_dirty_pages();
2146 0 : if (nr_pages) {
2147 0 : struct wb_writeback_work work = {
2148 0 : .nr_pages = wb_split_bdi_pages(wb, nr_pages),
2149 : .sync_mode = WB_SYNC_NONE,
2150 : .range_cyclic = 1,
2151 0 : .reason = wb->start_all_reason,
2152 : };
2153 :
2154 0 : nr_pages = wb_writeback(wb, &work);
2155 : }
2156 :
2157 0 : clear_bit(WB_start_all, &wb->state);
2158 0 : return nr_pages;
2159 : }
2160 :
2161 :
2162 : /*
2163 : * Retrieve work items and do the writeback they describe
2164 : */
2165 0 : static long wb_do_writeback(struct bdi_writeback *wb)
2166 : {
2167 : struct wb_writeback_work *work;
2168 0 : long wrote = 0;
2169 :
2170 0 : set_bit(WB_writeback_running, &wb->state);
2171 0 : while ((work = get_next_work_item(wb)) != NULL) {
2172 0 : trace_writeback_exec(wb, work);
2173 0 : wrote += wb_writeback(wb, work);
2174 0 : finish_writeback_work(wb, work);
2175 : }
2176 :
2177 : /*
2178 : * Check for a flush-everything request
2179 : */
2180 0 : wrote += wb_check_start_all(wb);
2181 :
2182 : /*
2183 : * Check for periodic writeback, kupdated() style
2184 : */
2185 0 : wrote += wb_check_old_data_flush(wb);
2186 0 : wrote += wb_check_background_flush(wb);
2187 0 : clear_bit(WB_writeback_running, &wb->state);
2188 :
2189 0 : return wrote;
2190 : }
2191 :
2192 : /*
2193 : * Handle writeback of dirty data for the device backed by this bdi. Also
2194 : * reschedules periodically and does kupdated style flushing.
2195 : */
2196 0 : void wb_workfn(struct work_struct *work)
2197 : {
2198 0 : struct bdi_writeback *wb = container_of(to_delayed_work(work),
2199 : struct bdi_writeback, dwork);
2200 : long pages_written;
2201 :
2202 0 : set_worker_desc("flush-%s", bdi_dev_name(wb->bdi));
2203 :
2204 0 : if (likely(!current_is_workqueue_rescuer() ||
2205 : !test_bit(WB_registered, &wb->state))) {
2206 : /*
2207 : * The normal path. Keep writing back @wb until its
2208 : * work_list is empty. Note that this path is also taken
2209 : * if @wb is shutting down even when we're running off the
2210 : * rescuer as work_list needs to be drained.
2211 : */
2212 : do {
2213 0 : pages_written = wb_do_writeback(wb);
2214 0 : trace_writeback_pages_written(pages_written);
2215 0 : } while (!list_empty(&wb->work_list));
2216 : } else {
2217 : /*
2218 : * bdi_wq can't get enough workers and we're running off
2219 : * the emergency worker. Don't hog it. Hopefully, 1024 is
2220 : * enough for efficient IO.
2221 : */
2222 0 : pages_written = writeback_inodes_wb(wb, 1024,
2223 : WB_REASON_FORKER_THREAD);
2224 0 : trace_writeback_pages_written(pages_written);
2225 : }
2226 :
2227 0 : if (!list_empty(&wb->work_list))
2228 0 : wb_wakeup(wb);
2229 0 : else if (wb_has_dirty_io(wb) && dirty_writeback_interval)
2230 0 : wb_wakeup_delayed(wb);
2231 0 : }
2232 :
2233 : /*
2234 : * Start writeback of `nr_pages' pages on this bdi. If `nr_pages' is zero,
2235 : * write back the whole world.
2236 : */
2237 0 : static void __wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2238 : enum wb_reason reason)
2239 : {
2240 : struct bdi_writeback *wb;
2241 :
2242 0 : if (!bdi_has_dirty_io(bdi))
2243 : return;
2244 :
2245 0 : list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2246 0 : wb_start_writeback(wb, reason);
2247 : }
2248 :
2249 0 : void wakeup_flusher_threads_bdi(struct backing_dev_info *bdi,
2250 : enum wb_reason reason)
2251 : {
2252 : rcu_read_lock();
2253 0 : __wakeup_flusher_threads_bdi(bdi, reason);
2254 : rcu_read_unlock();
2255 0 : }
2256 :
2257 : /*
2258 : * Wakeup the flusher threads to start writeback of all currently dirty pages
2259 : */
2260 0 : void wakeup_flusher_threads(enum wb_reason reason)
2261 : {
2262 : struct backing_dev_info *bdi;
2263 :
2264 : /*
2265 : * If we are expecting writeback progress we must submit plugged IO.
2266 : */
2267 0 : blk_flush_plug(current->plug, true);
2268 :
2269 : rcu_read_lock();
2270 0 : list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
2271 0 : __wakeup_flusher_threads_bdi(bdi, reason);
2272 : rcu_read_unlock();
2273 0 : }
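/*
 * Editorial sketch, not part of fs-writeback.c: wakeup_flusher_threads() is
 * the "start writing back everything, don't wait" entry point used by other
 * subsystems, memory reclaim being the classic caller.  A minimal assumed
 * illustration (kernel context, <linux/writeback.h> already included):
 */
static void example_kick_flushers_for_reclaim(void)
{
	/* fire-and-forget: queues work on every bdi and returns immediately */
	wakeup_flusher_threads(WB_REASON_VMSCAN);
}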
2274 :
2275 : /*
2276 : * Wake up bdis periodically to make sure dirtytime inodes get
2277 : * written back. We deliberately do *not* check the
2278 : * b_dirtytime list in wb_has_dirty_io(), since this would cause the
2279 : * kernel to be constantly waking up once there are any dirtytime
2280 : * inodes on the system. So instead we define a separate delayed work
2281 : * function which gets called much more rarely. (By default, only
2282 : * once every 12 hours.)
2283 : *
2284 : * If there is any other write activity going on in the file system,
2285 : * this function won't be necessary. But if the only thing that has
2286 : * happened on the file system is a dirtytime inode caused by an atime
2287 : * update, we need this infrastructure below to make sure that inode
2288 : * eventually gets pushed out to disk.
2289 : */
2290 : static void wakeup_dirtytime_writeback(struct work_struct *w);
2291 : static DECLARE_DELAYED_WORK(dirtytime_work, wakeup_dirtytime_writeback);
2292 :
2293 0 : static void wakeup_dirtytime_writeback(struct work_struct *w)
2294 : {
2295 : struct backing_dev_info *bdi;
2296 :
2297 : rcu_read_lock();
2298 0 : list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
2299 : struct bdi_writeback *wb;
2300 :
2301 0 : list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node)
2302 0 : if (!list_empty(&wb->b_dirty_time))
2303 0 : wb_wakeup(wb);
2304 : }
2305 : rcu_read_unlock();
2306 0 : schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2307 0 : }
2308 :
2309 1 : static int __init start_dirtytime_writeback(void)
2310 : {
2311 2 : schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ);
2312 1 : return 0;
2313 : }
2314 : __initcall(start_dirtytime_writeback);
2315 :
2316 0 : int dirtytime_interval_handler(struct ctl_table *table, int write,
2317 : void *buffer, size_t *lenp, loff_t *ppos)
2318 : {
2319 : int ret;
2320 :
2321 0 : ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
2322 0 : if (ret == 0 && write)
2323 0 : mod_delayed_work(system_wq, &dirtytime_work, 0);
2324 0 : return ret;
2325 : }
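/*
 * Editorial sketch, not part of fs-writeback.c: dirtytime_expire_interval is
 * exposed as the vm.dirtytime_expire_seconds sysctl, with the handler above
 * rescheduling dirtytime_work whenever the value is written.  The table entry
 * below is an illustrative guess at how such a knob is registered; the real
 * registration lives outside this file.
 */
static struct ctl_table dirtytime_sysctl_sketch[] = {
	{
		.procname	= "dirtytime_expire_seconds",
		.data		= &dirtytime_expire_interval,
		.maxlen		= sizeof(dirtytime_expire_interval),
		.mode		= 0644,
		.proc_handler	= dirtytime_interval_handler,
	},
	{ }
};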
2326 :
2327 : /**
2328 : * __mark_inode_dirty - internal function to mark an inode dirty
2329 : *
2330 : * @inode: inode to mark
2331 : * @flags: what kind of dirty, e.g. I_DIRTY_SYNC. This can be a combination of
2332 : * multiple I_DIRTY_* flags, except that I_DIRTY_TIME can't be combined
2333 : * with I_DIRTY_PAGES.
2334 : *
2335 : * Mark an inode as dirty. We notify the filesystem, then update the inode's
2336 : * dirty flags. Then, if needed we add the inode to the appropriate dirty list.
2337 : *
2338 : * Most callers should use mark_inode_dirty() or mark_inode_dirty_sync()
2339 : * instead of calling this directly.
2340 : *
2341 : * CAREFUL! We only add the inode to the dirty list if it is hashed or if it
2342 : * refers to a blockdev. Unhashed inodes will never be added to the dirty list
2343 : * even if they are later hashed, as they will have been marked dirty already.
2344 : *
2345 : * In short, ensure you hash any inodes _before_ you start marking them dirty.
2346 : *
2347 : * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
2348 : * the block-special inode (/dev/hda1) itself. And the ->dirtied_when field of
2349 : * the kernel-internal blockdev inode represents the dirtying time of the
2350 : * blockdev's pages. This is why for I_DIRTY_PAGES we always use
2351 : * page->mapping->host, so the page-dirtying time is recorded in the internal
2352 : * blockdev inode.
2353 : */
2354 0 : void __mark_inode_dirty(struct inode *inode, int flags)
2355 : {
2356 0 : struct super_block *sb = inode->i_sb;
2357 0 : int dirtytime = 0;
2358 :
2359 0 : trace_writeback_mark_inode_dirty(inode, flags);
2360 :
2361 0 : if (flags & I_DIRTY_INODE) {
2362 : /*
2363 : * Notify the filesystem about the inode being dirtied, so that
2364 : * (if needed) it can update on-disk fields and journal the
2365 : * inode. This is only needed when the inode itself is being
2366 : * dirtied now. I.e. it's only needed for I_DIRTY_INODE, not
2367 : * for just I_DIRTY_PAGES or I_DIRTY_TIME.
2368 : */
2369 0 : trace_writeback_dirty_inode_start(inode, flags);
2370 0 : if (sb->s_op->dirty_inode)
2371 0 : sb->s_op->dirty_inode(inode, flags & I_DIRTY_INODE);
2372 0 : trace_writeback_dirty_inode(inode, flags);
2373 :
2374 : /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2375 0 : flags &= ~I_DIRTY_TIME;
2376 : } else {
2377 : /*
2378 : * Else it's either I_DIRTY_PAGES, I_DIRTY_TIME, or nothing.
2379 : * (We don't support setting both I_DIRTY_PAGES and I_DIRTY_TIME
2380 : * in one call to __mark_inode_dirty().)
2381 : */
2382 0 : dirtytime = flags & I_DIRTY_TIME;
2383 0 : WARN_ON_ONCE(dirtytime && flags != I_DIRTY_TIME);
2384 : }
2385 :
2386 : /*
2387 : * Paired with smp_mb() in __writeback_single_inode() for the
2388 : * following lockless i_state test. See there for details.
2389 : */
2390 0 : smp_mb();
2391 :
2392 0 : if (((inode->i_state & flags) == flags) ||
2393 0 : (dirtytime && (inode->i_state & I_DIRTY_INODE)))
2394 : return;
2395 :
2396 0 : spin_lock(&inode->i_lock);
2397 0 : if (dirtytime && (inode->i_state & I_DIRTY_INODE))
2398 : goto out_unlock_inode;
2399 0 : if ((inode->i_state & flags) != flags) {
2400 0 : const int was_dirty = inode->i_state & I_DIRTY;
2401 :
2402 0 : inode_attach_wb(inode, NULL);
2403 :
2404 : /* I_DIRTY_INODE supersedes I_DIRTY_TIME. */
2405 0 : if (flags & I_DIRTY_INODE)
2406 0 : inode->i_state &= ~I_DIRTY_TIME;
2407 0 : inode->i_state |= flags;
2408 :
2409 : /*
2410 : * If the inode is queued for writeback by flush worker, just
2411 : * update its dirty state. Once the flush worker is done with
2412 : * the inode it will place it on the appropriate superblock
2413 : * list, based upon its state.
2414 : */
2415 0 : if (inode->i_state & I_SYNC_QUEUED)
2416 : goto out_unlock_inode;
2417 :
2418 : /*
2419 : * Only add valid (hashed) inodes to the superblock's
2420 : * dirty list. Add blockdev inodes as well.
2421 : */
2422 0 : if (!S_ISBLK(inode->i_mode)) {
2423 0 : if (inode_unhashed(inode))
2424 : goto out_unlock_inode;
2425 : }
2426 0 : if (inode->i_state & I_FREEING)
2427 : goto out_unlock_inode;
2428 :
2429 : /*
2430 : * If the inode was already on b_dirty/b_io/b_more_io, don't
2431 : * reposition it (that would break b_dirty time-ordering).
2432 : */
2433 0 : if (!was_dirty) {
2434 : struct bdi_writeback *wb;
2435 : struct list_head *dirty_list;
2436 0 : bool wakeup_bdi = false;
2437 :
2438 0 : wb = locked_inode_to_wb_and_lock_list(inode);
2439 :
2440 0 : inode->dirtied_when = jiffies;
2441 0 : if (dirtytime)
2442 0 : inode->dirtied_time_when = jiffies;
2443 :
2444 0 : if (inode->i_state & I_DIRTY)
2445 0 : dirty_list = &wb->b_dirty;
2446 : else
2447 0 : dirty_list = &wb->b_dirty_time;
2448 :
2449 0 : wakeup_bdi = inode_io_list_move_locked(inode, wb,
2450 : dirty_list);
2451 :
2452 0 : spin_unlock(&wb->list_lock);
2453 0 : trace_writeback_dirty_inode_enqueue(inode);
2454 :
2455 : /*
2456 : * If this is the first dirty inode for this bdi,
2457 : * we have to wake up the corresponding bdi thread
2458 : * to make sure background writeback happens
2459 : * later.
2460 : */
2461 0 : if (wakeup_bdi &&
2462 0 : (wb->bdi->capabilities & BDI_CAP_WRITEBACK))
2463 0 : wb_wakeup_delayed(wb);
2464 : return;
2465 : }
2466 : }
2467 : out_unlock_inode:
2468 0 : spin_unlock(&inode->i_lock);
2469 : }
2470 : EXPORT_SYMBOL(__mark_inode_dirty);
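/*
 * Editorial sketch, not part of fs-writeback.c: filesystems normally reach
 * __mark_inode_dirty() through the mark_inode_dirty() and
 * mark_inode_dirty_sync() helpers from <linux/fs.h>.  The toy function below
 * (name made up for illustration) just shows which flags the two helpers
 * map to:
 */
static void example_dirty_inode(struct inode *inode, bool data_too)
{
	if (data_too)
		mark_inode_dirty(inode);	/* __mark_inode_dirty(inode, I_DIRTY) */
	else
		mark_inode_dirty_sync(inode);	/* __mark_inode_dirty(inode, I_DIRTY_SYNC) */
}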
2471 :
2472 : /*
2473 : * The @s_sync_lock is used to serialise concurrent sync operations
2474 : * to avoid lock contention problems with concurrent wait_sb_inodes() calls.
2475 : * Concurrent callers will block on the s_sync_lock rather than doing contending
2476 : * walks. The queueing maintains sync(2) required behaviour as all the IO that
2477 : * has been issued up to the time this function is enter is guaranteed to be
2478 : * completed by the time we have gained the lock and waited for all IO that is
2479 : * in progress regardless of the order callers are granted the lock.
2480 : */
2481 0 : static void wait_sb_inodes(struct super_block *sb)
2482 : {
2483 0 : LIST_HEAD(sync_list);
2484 :
2485 : /*
2486 : * We need to be protected against the filesystem going from
2487 : * r/o to r/w or vice versa.
2488 : */
2489 0 : WARN_ON(!rwsem_is_locked(&sb->s_umount));
2490 :
2491 0 : mutex_lock(&sb->s_sync_lock);
2492 :
2493 : /*
2494 : * Splice the writeback list onto a temporary list to avoid waiting on
2495 : * inodes that have started writeback after this point.
2496 : *
2497 : * Use rcu_read_lock() to keep the inodes around until we have a
2498 : * reference. s_inode_wblist_lock protects sb->s_inodes_wb as well as
2499 : * the local list because inodes can be dropped from either by writeback
2500 : * completion.
2501 : */
2502 : rcu_read_lock();
2503 0 : spin_lock_irq(&sb->s_inode_wblist_lock);
2504 0 : list_splice_init(&sb->s_inodes_wb, &sync_list);
2505 :
2506 : /*
2507 : * Data integrity sync. We must wait for all pages under writeback, because
2508 : * pages may have been dirtied before our sync call but had their writeout
2509 : * started before we write them out here. In that case the inode may no
2510 : * longer be on the dirty list, but we still have to wait for that
2511 : * writeout.
2512 : */
2513 0 : while (!list_empty(&sync_list)) {
2514 0 : struct inode *inode = list_first_entry(&sync_list, struct inode,
2515 : i_wb_list);
2516 0 : struct address_space *mapping = inode->i_mapping;
2517 :
2518 : /*
2519 : * Move each inode back to the wb list before we drop the lock
2520 : * to preserve consistency between i_wb_list and the mapping
2521 : * writeback tag. Writeback completion is responsible for removing
2522 : * the inode from either list once the writeback tag is cleared.
2523 : */
2524 0 : list_move_tail(&inode->i_wb_list, &sb->s_inodes_wb);
2525 :
2526 : /*
2527 : * The mapping can appear untagged while still on-list since we
2528 : * do not have the mapping lock. Skip it here, wb completion
2529 : * will remove it.
2530 : */
2531 0 : if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
2532 0 : continue;
2533 :
2534 0 : spin_unlock_irq(&sb->s_inode_wblist_lock);
2535 :
2536 0 : spin_lock(&inode->i_lock);
2537 0 : if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) {
2538 0 : spin_unlock(&inode->i_lock);
2539 :
2540 0 : spin_lock_irq(&sb->s_inode_wblist_lock);
2541 0 : continue;
2542 : }
2543 0 : __iget(inode);
2544 0 : spin_unlock(&inode->i_lock);
2545 : rcu_read_unlock();
2546 :
2547 : /*
2548 : * We keep the error status of individual mapping so that
2549 : * applications can catch the writeback error using fsync(2).
2550 : * See filemap_fdatawait_keep_errors() for details.
2551 : */
2552 0 : filemap_fdatawait_keep_errors(mapping);
2553 :
2554 0 : cond_resched();
2555 :
2556 0 : iput(inode);
2557 :
2558 : rcu_read_lock();
2559 0 : spin_lock_irq(&sb->s_inode_wblist_lock);
2560 : }
2561 0 : spin_unlock_irq(&sb->s_inode_wblist_lock);
2562 : rcu_read_unlock();
2563 0 : mutex_unlock(&sb->s_sync_lock);
2564 0 : }
2565 :
2566 0 : static void __writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr,
2567 : enum wb_reason reason, bool skip_if_busy)
2568 : {
2569 0 : struct backing_dev_info *bdi = sb->s_bdi;
2570 0 : DEFINE_WB_COMPLETION(done, bdi);
2571 0 : struct wb_writeback_work work = {
2572 : .sb = sb,
2573 : .sync_mode = WB_SYNC_NONE,
2574 : .tagged_writepages = 1,
2575 : .done = &done,
2576 : .nr_pages = nr,
2577 : .reason = reason,
2578 : };
2579 :
2580 0 : if (!bdi_has_dirty_io(bdi) || bdi == &noop_backing_dev_info)
2581 0 : return;
2582 0 : WARN_ON(!rwsem_is_locked(&sb->s_umount));
2583 :
2584 0 : bdi_split_work_to_wbs(sb->s_bdi, &work, skip_if_busy);
2585 0 : wb_wait_for_completion(&done);
2586 : }
2587 :
2588 : /**
2589 : * writeback_inodes_sb_nr - writeback dirty inodes from given super_block
2590 : * @sb: the superblock
2591 : * @nr: the number of pages to write
2592 : * @reason: reason why some writeback work was initiated
2593 : *
2594 : * Start writeback on some inodes on this super_block. No guarantees are made
2595 : * on how many (if any) will be written, and this function does not wait
2596 : * for IO completion of submitted IO.
2597 : */
2598 0 : void writeback_inodes_sb_nr(struct super_block *sb,
2599 : unsigned long nr,
2600 : enum wb_reason reason)
2601 : {
2602 0 : __writeback_inodes_sb_nr(sb, nr, reason, false);
2603 0 : }
2604 : EXPORT_SYMBOL(writeback_inodes_sb_nr);
2605 :
2606 : /**
2607 : * writeback_inodes_sb - writeback dirty inodes from given super_block
2608 : * @sb: the superblock
2609 : * @reason: reason why some writeback work was initiated
2610 : *
2611 : * Start writeback on some inodes on this super_block. No guarantees are made
2612 : * on how many (if any) will be written, and this function does not wait
2613 : * for the submitted IO to complete.
2614 : */
2615 0 : void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2616 : {
2617 0 : return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
2618 : }
2619 : EXPORT_SYMBOL(writeback_inodes_sb);
2620 :
2621 : /**
2622 : * try_to_writeback_inodes_sb - try to start writeback if none underway
2623 : * @sb: the superblock
2624 : * @reason: reason why some writeback work was initiated
2625 : *
2626 : * Invoke __writeback_inodes_sb_nr if no writeback is currently underway.
2627 : */
2628 0 : void try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
2629 : {
2630 0 : if (!down_read_trylock(&sb->s_umount))
2631 : return;
2632 :
2633 0 : __writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason, true);
2634 0 : up_read(&sb->s_umount);
2635 : }
2636 : EXPORT_SYMBOL(try_to_writeback_inodes_sb);
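/*
 * Editorial sketch, not part of fs-writeback.c: a typical use of
 * try_to_writeback_inodes_sb() is a filesystem kicking writeback when it runs
 * short of free space, in the hope that flushing delayed-allocation data will
 * release reserved blocks.  The function below is an assumed illustration,
 * not taken from any particular filesystem:
 */
static void example_flush_on_low_space(struct super_block *sb)
{
	/*
	 * Best effort only: returns without doing anything if s_umount cannot
	 * be taken, and busy wbs are skipped rather than waited for.
	 */
	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
}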
2637 :
2638 : /**
2639 : * sync_inodes_sb - sync sb inode pages
2640 : * @sb: the superblock
2641 : *
2642 : * This function writes and waits on any dirty inode belonging to this
2643 : * super_block.
2644 : */
2645 0 : void sync_inodes_sb(struct super_block *sb)
2646 : {
2647 0 : struct backing_dev_info *bdi = sb->s_bdi;
2648 0 : DEFINE_WB_COMPLETION(done, bdi);
2649 0 : struct wb_writeback_work work = {
2650 : .sb = sb,
2651 : .sync_mode = WB_SYNC_ALL,
2652 : .nr_pages = LONG_MAX,
2653 : .range_cyclic = 0,
2654 : .done = &done,
2655 : .reason = WB_REASON_SYNC,
2656 : .for_sync = 1,
2657 : };
2658 :
2659 : /*
2660 : * Can't skip on !bdi_has_dirty_io(): we must still wait for clean (!dirty)
2661 : * inodes that are under writeback, and I_DIRTY_TIME inodes ignored by
2662 : * bdi_has_dirty_io() need to be written out too.
2663 : */
2664 0 : if (bdi == &noop_backing_dev_info)
2665 0 : return;
2666 0 : WARN_ON(!rwsem_is_locked(&sb->s_umount));
2667 :
2668 : /* protect against inode wb switch, see inode_switch_wbs_work_fn() */
2669 0 : bdi_down_write_wb_switch_rwsem(bdi);
2670 0 : bdi_split_work_to_wbs(bdi, &work, false);
2671 0 : wb_wait_for_completion(&done);
2672 0 : bdi_up_write_wb_switch_rwsem(bdi);
2673 :
2674 0 : wait_sb_inodes(sb);
2675 : }
2676 : EXPORT_SYMBOL(sync_inodes_sb);
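/*
 * Editorial sketch, not part of fs-writeback.c: sync_inodes_sb() is the
 * per-superblock data half of sync(2)/syncfs(2).  An assumed, simplified
 * outline of a sync path driving it; the real code lives in fs/sync.c and
 * also flushes the block device:
 */
static void example_sync_filesystem(struct super_block *sb)
{
	/* the caller must hold sb->s_umount, which sync_inodes_sb() asserts */
	sync_inodes_sb(sb);

	/* then let the filesystem write its own metadata, waiting for it */
	if (sb->s_op->sync_fs)
		sb->s_op->sync_fs(sb, 1);
}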
2677 :
2678 : /**
2679 : * write_inode_now - write an inode to disk
2680 : * @inode: inode to write to disk
2681 : * @sync: whether the write should be synchronous or not
2682 : *
2683 : * This function commits an inode to disk immediately if it is dirty. This is
2684 : * primarily needed by knfsd.
2685 : *
2686 : * The caller must either have a ref on the inode or must have set I_WILL_FREE.
2687 : */
2688 0 : int write_inode_now(struct inode *inode, int sync)
2689 : {
2690 0 : struct writeback_control wbc = {
2691 : .nr_to_write = LONG_MAX,
2692 0 : .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
2693 : .range_start = 0,
2694 : .range_end = LLONG_MAX,
2695 : };
2696 :
2697 0 : if (!mapping_can_writeback(inode->i_mapping))
2698 0 : wbc.nr_to_write = 0;
2699 :
2700 : might_sleep();
2701 0 : return writeback_single_inode(inode, &wbc);
2702 : }
2703 : EXPORT_SYMBOL(write_inode_now);
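/*
 * Editorial sketch, not part of fs-writeback.c: write_inode_now() is for
 * callers that cannot wait for the flusher threads, e.g. knfsd committing an
 * inode or a filesystem flushing an inode on final close.  A minimal assumed
 * caller:
 */
static int example_commit_inode(struct inode *inode)
{
	/* sync == 1 selects WB_SYNC_ALL: wait for data and inode writeback */
	return write_inode_now(inode, 1);
}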
2704 :
2705 : /**
2706 : * sync_inode_metadata - write an inode to disk
2707 : * @inode: the inode to sync
2708 : * @wait: wait for I/O to complete.
2709 : *
2710 : * Write an inode to disk and adjust its dirty state after completion.
2711 : *
2712 : * Note: only writes the actual inode, no associated data or other metadata.
2713 : */
2714 0 : int sync_inode_metadata(struct inode *inode, int wait)
2715 : {
2716 0 : struct writeback_control wbc = {
2717 0 : .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
2718 : .nr_to_write = 0, /* metadata-only */
2719 : };
2720 :
2721 0 : return writeback_single_inode(inode, &wbc);
2722 : }
2723 : EXPORT_SYMBOL(sync_inode_metadata);
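/*
 * Editorial sketch, not part of fs-writeback.c: because sync_inode_metadata()
 * sets nr_to_write to 0, it writes only the inode itself, so a natural caller
 * is an fsync implementation that has already flushed the data pages.  The
 * outline below is an assumed illustration, not the generic implementation:
 */
static int example_fsync(struct file *file, loff_t start, loff_t end,
			 int datasync)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	/* write and wait on the data range first */
	ret = file_write_and_wait_range(file, start, end);
	if (ret)
		return ret;

	/* for fdatasync(), skip inode writeout unless integrity-critical */
	if (datasync && !(inode->i_state & I_DIRTY_DATASYNC))
		return 0;

	return sync_inode_metadata(inode, 1);
}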
|