Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0-only
2 : /*
3 : * Generic entry points for the idle threads and
4 : * implementation of the idle task scheduling class.
5 : *
6 : * (NOTE: these are not related to SCHED_IDLE batch-scheduled
7 : * tasks, which are handled in sched/fair.c)
8 : */
9 :
10 : /* Linker adds these: start and end of __cpuidle functions */
11 : extern char __cpuidle_text_start[], __cpuidle_text_end[];
12 :
13 : /**
14 : * sched_idle_set_state - Record idle state for the current CPU.
15 : * @idle_state: State to record.
16 : */
17 0 : void sched_idle_set_state(struct cpuidle_state *idle_state)
18 : {
19 0 : idle_set_state(this_rq(), idle_state);
20 0 : }
21 :
22 : static int __read_mostly cpu_idle_force_poll;
23 :
24 0 : void cpu_idle_poll_ctrl(bool enable)
25 : {
26 0 : if (enable) {
27 0 : cpu_idle_force_poll++;
28 : } else {
29 0 : cpu_idle_force_poll--;
30 0 : WARN_ON_ONCE(cpu_idle_force_poll < 0);
31 : }
32 0 : }
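
cpu_idle_poll_ctrl() keeps a nesting count, so callers outside this file pair each enable with a disable. A minimal sketch of such a caller (with_polling_idle() and its callback are hypothetical, not part of the kernel):

static void with_polling_idle(void (*fn)(void))
{
	cpu_idle_poll_ctrl(true);	/* cpu_idle_force_poll++ */
	fn();				/* latency-critical work while idle CPUs poll instead of halting */
	cpu_idle_poll_ctrl(false);	/* cpu_idle_force_poll--, warns if it goes negative */
}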
33 :
34 : #ifdef CONFIG_GENERIC_IDLE_POLL_SETUP
35 : static int __init cpu_idle_poll_setup(char *__unused)
36 : {
37 : cpu_idle_force_poll = 1;
38 :
39 : return 1;
40 : }
41 : __setup("nohlt", cpu_idle_poll_setup);
42 :
43 : static int __init cpu_idle_nopoll_setup(char *__unused)
44 : {
45 : cpu_idle_force_poll = 0;
46 :
47 : return 1;
48 : }
49 : __setup("hlt", cpu_idle_nopoll_setup);
50 : #endif
51 :
52 0 : static noinline int __cpuidle cpu_idle_poll(void)
53 : {
54 0 : trace_cpu_idle(0, smp_processor_id());
55 : stop_critical_timings();
56 : rcu_idle_enter();
57 : local_irq_enable();
58 :
59 0 : while (!tif_need_resched() &&
60 0 : (cpu_idle_force_poll || tick_check_broadcast_expired()))
61 : cpu_relax();
62 :
63 : rcu_idle_exit();
64 : start_critical_timings();
65 0 : trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
66 :
67 0 : return 1;
68 : }
69 :
70 : /* Weak implementations for optional arch specific functions */
71 0 : void __weak arch_cpu_idle_prepare(void) { }
72 0 : void __weak arch_cpu_idle_enter(void) { }
73 0 : void __weak arch_cpu_idle_exit(void) { }
74 0 : void __weak arch_cpu_idle_dead(void) { }
75 0 : void __weak arch_cpu_idle(void)
76 : {
77 0 : cpu_idle_force_poll = 1;
78 : raw_local_irq_enable();
79 0 : }
80 :
81 : /**
82 : * default_idle_call - Default CPU idle routine.
83 : *
84 : * To use when the cpuidle framework cannot be used.
85 : */
86 0 : void __cpuidle default_idle_call(void)
87 : {
88 0 : if (current_clr_polling_and_test()) {
89 : local_irq_enable();
90 : } else {
91 :
92 0 : trace_cpu_idle(1, smp_processor_id());
93 : stop_critical_timings();
94 :
95 : /*
96 : * arch_cpu_idle() is supposed to enable IRQs, however
97 : * we can't do that because of RCU and tracing.
98 : *
99 : * Trace IRQs enable here, then switch off RCU, and have
100 : * arch_cpu_idle() use raw_local_irq_enable(). Note that
101 : * rcu_idle_enter() relies on lockdep IRQ state, so switch that
102 : * last -- this is very similar to the entry code.
103 : */
104 : trace_hardirqs_on_prepare();
105 0 : lockdep_hardirqs_on_prepare(_THIS_IP_);
106 : rcu_idle_enter();
107 0 : lockdep_hardirqs_on(_THIS_IP_);
108 :
109 0 : arch_cpu_idle();
110 :
111 : /*
112 : * OK, so IRQs are enabled here, but RCU needs them disabled to
113 : * turn itself back on. The funny thing is that disabling IRQs
114 : * will cause tracing, which needs RCU. Jump through hoops to
115 : * make it 'work'.
116 : */
117 : raw_local_irq_disable();
118 0 : lockdep_hardirqs_off(_THIS_IP_);
119 : rcu_idle_exit();
120 0 : lockdep_hardirqs_on(_THIS_IP_);
121 : raw_local_irq_enable();
122 :
123 : start_critical_timings();
124 : trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
125 : }
126 0 : }
127 :
128 : static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
129 : struct cpuidle_device *dev)
130 : {
131 : if (current_clr_polling_and_test())
132 : return -EBUSY;
133 :
134 : return cpuidle_enter_s2idle(drv, dev);
135 : }
136 :
137 : static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
138 : int next_state)
139 : {
140 : /*
141 : * The idle task must be scheduled, it is pointless to go to idle, just
142 : * update no idle residency and return.
143 : */
144 : if (current_clr_polling_and_test()) {
145 : dev->last_residency_ns = 0;
146 : local_irq_enable();
147 : return -EBUSY;
148 : }
149 :
150 : /*
151 : * Enter the idle state previously selected by the governor.
152 : * This function will block until an interrupt occurs and will take
153 : * care of re-enabling the local interrupts.
154 : */
155 : return cpuidle_enter(drv, dev, next_state);
156 : }
157 :
158 : /**
159 : * cpuidle_idle_call - the main idle function
160 : *
161 : * NOTE: no locks or semaphores should be used here
162 : *
163 : * On architectures that support TIF_POLLING_NRFLAG, this function is called
164 : * with polling set, and it returns with polling set. If it ever stops
165 : * polling, it must clear the polling bit.
166 : */
167 0 : static void cpuidle_idle_call(void)
168 : {
169 0 : struct cpuidle_device *dev = cpuidle_get_device();
170 0 : struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
171 : int next_state, entered_state;
172 :
173 : /*
174 : * Check if the idle task must be rescheduled. If it is the
175 : * case, exit the function after re-enabling the local irq.
176 : */
177 0 : if (need_resched()) {
178 : local_irq_enable();
179 : return;
180 : }
181 :
182 : /*
183 : * The RCU framework needs to be told that we are entering an idle
184 : * section, so no more RCU read-side critical sections and one more
185 : * step toward the grace period.
186 : */
187 :
188 : if (cpuidle_not_available(drv, dev)) {
189 : tick_nohz_idle_stop_tick();
190 :
191 0 : default_idle_call();
192 : goto exit_idle;
193 : }
194 :
195 : /*
196 : * Suspend-to-idle ("s2idle") is a system state in which all user space
197 : * has been frozen, all I/O devices have been suspended and the only
198 : * activity happens here and in interrupts (if any). In that case bypass
199 : * the cpuidle governor and go straight for the deepest idle state
200 : * available. Possibly also suspend the local tick and the entire
201 : * timekeeping to prevent timer interrupts from kicking us out of idle
202 : * until a proper wakeup interrupt happens.
203 : */
204 :
205 : if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
206 : u64 max_latency_ns;
207 :
208 : if (idle_should_enter_s2idle()) {
209 :
210 : entered_state = call_cpuidle_s2idle(drv, dev);
211 : if (entered_state > 0)
212 : goto exit_idle;
213 :
214 : max_latency_ns = U64_MAX;
215 : } else {
216 : max_latency_ns = dev->forced_idle_latency_limit_ns;
217 : }
218 :
219 : tick_nohz_idle_stop_tick();
220 :
221 : next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
222 : call_cpuidle(drv, dev, next_state);
223 : } else {
224 : bool stop_tick = true;
225 :
226 : /*
227 : * Ask the cpuidle framework to choose a convenient idle state.
228 : */
229 : next_state = cpuidle_select(drv, dev, &stop_tick);
230 :
231 : if (stop_tick || tick_nohz_tick_stopped())
232 : tick_nohz_idle_stop_tick();
233 : else
234 : tick_nohz_idle_retain_tick();
235 :
236 : entered_state = call_cpuidle(drv, dev, next_state);
237 : /*
238 : * Give the governor an opportunity to reflect on the outcome
239 : */
240 : cpuidle_reflect(dev, entered_state);
241 : }
242 :
243 : exit_idle:
244 : __current_set_polling();
245 :
246 : /*
247 : * It is up to the idle functions to re-enable local interrupts.
248 : */
249 0 : if (WARN_ON_ONCE(irqs_disabled()))
250 : local_irq_enable();
251 : }
252 :
253 : /*
254 : * Generic idle loop implementation
255 : *
256 : * Called with polling cleared.
257 : */
258 0 : static void do_idle(void)
259 : {
260 0 : int cpu = smp_processor_id();
261 :
262 : /*
263 : * Check if we need to update blocked load
264 : */
265 0 : nohz_run_idle_balance(cpu);
266 :
267 : /*
268 : * If the arch has a polling bit, we maintain an invariant:
269 : *
270 : * Our polling bit is clear if we're not scheduled (i.e. if rq->curr !=
271 : * rq->idle). This means that, if rq->idle has the polling bit set,
272 : * then setting need_resched is guaranteed to cause the CPU to
273 : * reschedule.
274 : */
275 :
276 : __current_set_polling();
277 : tick_nohz_idle_enter();
278 :
279 0 : while (!need_resched()) {
280 0 : rmb();
281 :
282 : local_irq_disable();
283 :
284 0 : if (cpu_is_offline(cpu)) {
285 : tick_nohz_idle_stop_tick();
286 : cpuhp_report_idle_dead();
287 : arch_cpu_idle_dead();
288 : }
289 :
290 0 : arch_cpu_idle_enter();
291 : rcu_nocb_flush_deferred_wakeup();
292 :
293 : /*
294 : * In poll mode we re-enable interrupts and spin. Also, if we
295 : * detected in the wakeup-from-idle path that the tick
296 : * broadcast device expired for us, we don't want to go into
297 : * deep idle as we know that the IPI is going to arrive right away.
298 : */
299 0 : if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
300 : tick_nohz_idle_restart_tick();
301 0 : cpu_idle_poll();
302 : } else {
303 0 : cpuidle_idle_call();
304 : }
305 0 : arch_cpu_idle_exit();
306 : }
307 :
308 : /*
309 : * Since we fell out of the loop above, we know TIF_NEED_RESCHED must
310 : * be set; propagate it into PREEMPT_NEED_RESCHED.
311 : *
312 : * This is required because for polling idle loops we will not have had
313 : * an IPI to fold the state for us.
314 : */
315 : preempt_set_need_resched();
316 : tick_nohz_idle_exit();
317 : __current_clr_polling();
318 :
319 : /*
320 : * We promise to call sched_ttwu_pending() and reschedule if
321 : * need_resched() is set while polling is set. That means that clearing
322 : * polling needs to be visible before doing these things.
323 : */
324 0 : smp_mb__after_atomic();
325 :
326 : /*
327 : * RCU relies on this call to be done outside of an RCU read-side
328 : * critical section.
329 : */
330 : flush_smp_call_function_from_idle();
331 0 : schedule_idle();
332 :
333 0 : if (unlikely(klp_patch_pending(current)))
334 : klp_update_patch_state(current);
335 0 : }
336 :
337 0 : bool cpu_in_idle(unsigned long pc)
338 : {
339 0 : return pc >= (unsigned long)__cpuidle_text_start &&
340 0 : pc < (unsigned long)__cpuidle_text_end;
341 : }
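
cpu_in_idle() reports whether a saved program counter lies inside the __cpuidle text section. A hedged sketch of how a diagnostic path might use it to skip CPUs that are merely idling, similar in spirit to lib/nmi_backtrace.c (should_dump_backtrace() is a hypothetical helper):

static bool should_dump_backtrace(struct pt_regs *regs)
{
	/* Don't bother dumping CPUs whose saved PC is in the idle loop. */
	if (regs && cpu_in_idle(instruction_pointer(regs)))
		return false;

	return true;
}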
342 :
343 : struct idle_timer {
344 : struct hrtimer timer;
345 : int done;
346 : };
347 :
348 0 : static enum hrtimer_restart idle_inject_timer_fn(struct hrtimer *timer)
349 : {
350 0 : struct idle_timer *it = container_of(timer, struct idle_timer, timer);
351 :
352 0 : WRITE_ONCE(it->done, 1);
353 0 : set_tsk_need_resched(current);
354 :
355 0 : return HRTIMER_NORESTART;
356 : }
357 :
358 0 : void play_idle_precise(u64 duration_ns, u64 latency_ns)
359 : {
360 : struct idle_timer it;
361 :
362 : /*
363 : * Only FIFO tasks can disable the tick since they don't need the forced
364 : * preemption.
365 : */
366 0 : WARN_ON_ONCE(current->policy != SCHED_FIFO);
367 0 : WARN_ON_ONCE(current->nr_cpus_allowed != 1);
368 0 : WARN_ON_ONCE(!(current->flags & PF_KTHREAD));
369 0 : WARN_ON_ONCE(!(current->flags & PF_NO_SETAFFINITY));
370 0 : WARN_ON_ONCE(!duration_ns);
371 0 : WARN_ON_ONCE(current->mm);
372 :
373 : rcu_sleep_check();
374 0 : preempt_disable();
375 0 : current->flags |= PF_IDLE;
376 0 : cpuidle_use_deepest_state(latency_ns);
377 :
378 0 : it.done = 0;
379 0 : hrtimer_init_on_stack(&it.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
380 0 : it.timer.function = idle_inject_timer_fn;
381 0 : hrtimer_start(&it.timer, ns_to_ktime(duration_ns),
382 : HRTIMER_MODE_REL_PINNED_HARD);
383 :
384 0 : while (!READ_ONCE(it.done))
385 0 : do_idle();
386 :
387 0 : cpuidle_use_deepest_state(0);
388 0 : current->flags &= ~PF_IDLE;
389 :
390 0 : preempt_fold_need_resched();
391 0 : preempt_enable();
392 0 : }
393 : EXPORT_SYMBOL_GPL(play_idle_precise);
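
The WARN_ON_ONCE() checks above spell out the contract: play_idle_precise() must be called from a per-CPU SCHED_FIFO kthread, bound to one CPU, with a non-zero duration. A rough sketch of such a caller, loosely modelled on the idle-injection users under drivers/powercap/ (idle_inject_fn() and the 1 ms / 10 ms numbers are illustrative assumptions):

static int idle_inject_fn(void *unused)
{
	/*
	 * Assumes the thread was created with kthread_create_on_cpu(), which
	 * binds it to one CPU and sets PF_NO_SETAFFINITY.
	 */
	sched_set_fifo(current);

	while (!kthread_should_stop()) {
		/* Inject 1 ms of forced idle with no latency constraint. */
		play_idle_precise(1 * NSEC_PER_MSEC, U64_MAX);
		/* Run normally for ~10 ms before the next injection. */
		schedule_timeout_interruptible(msecs_to_jiffies(10));
	}
	return 0;
}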
394 :
395 0 : void cpu_startup_entry(enum cpuhp_state state)
396 : {
397 0 : arch_cpu_idle_prepare();
398 0 : cpuhp_online_idle(state);
399 : while (1)
400 0 : do_idle();
401 : }
402 :
403 : /*
404 : * idle-task scheduling class.
405 : */
406 :
407 : #ifdef CONFIG_SMP
408 : static int
409 : select_task_rq_idle(struct task_struct *p, int cpu, int flags)
410 : {
411 : return task_cpu(p); /* IDLE tasks are never migrated */
412 : }
413 :
414 : static int
415 : balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
416 : {
417 : return WARN_ON_ONCE(1);
418 : }
419 : #endif
420 :
421 : /*
422 : * Idle tasks are unconditionally rescheduled:
423 : */
424 0 : static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
425 : {
426 0 : resched_curr(rq);
427 0 : }
428 :
429 2 : static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
430 : {
431 2 : }
432 :
433 0 : static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
434 : {
435 1 : update_idle_core(rq);
436 : schedstat_inc(rq->sched_goidle);
437 0 : }
438 :
439 : #ifdef CONFIG_SMP
440 : static struct task_struct *pick_task_idle(struct rq *rq)
441 : {
442 : return rq->idle;
443 : }
444 : #endif
445 :
446 1 : struct task_struct *pick_next_task_idle(struct rq *rq)
447 : {
448 1 : struct task_struct *next = rq->idle;
449 :
450 1 : set_next_task_idle(rq, next, true);
451 :
452 1 : return next;
453 : }
454 :
455 : /*
456 : * It is not legal to sleep in the idle task - print a warning
457 : * message if some code attempts to do it:
458 : */
459 : static void
460 0 : dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
461 : {
462 0 : raw_spin_rq_unlock_irq(rq);
463 0 : printk(KERN_ERR "bad: scheduling from the idle thread!\n");
464 0 : dump_stack();
465 0 : raw_spin_rq_lock_irq(rq);
466 0 : }
467 :
468 : /*
469 : * scheduler tick hitting a task of our scheduling class.
470 : *
471 : * NOTE: This function can be called remotely by the tick offload that
472 : * goes along full dynticks. Therefore no local assumption can be made
473 : * and everything must be accessed through the @rq and @curr passed in
474 : * parameters.
475 : */
476 10 : static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
477 : {
478 10 : }
479 :
480 0 : static void switched_to_idle(struct rq *rq, struct task_struct *p)
481 : {
482 0 : BUG();
483 : }
484 :
485 : static void
486 0 : prio_changed_idle(struct rq *rq, struct task_struct *p, int oldprio)
487 : {
488 0 : BUG();
489 : }
490 :
491 0 : static void update_curr_idle(struct rq *rq)
492 : {
493 0 : }
494 :
495 : /*
496 : * Simple, special scheduling class for the per-CPU idle tasks:
497 : */
498 : DEFINE_SCHED_CLASS(idle) = {
499 :
500 : /* no enqueue/yield_task for idle tasks */
501 :
502 : /* dequeue is not valid, we print a debug message there: */
503 : .dequeue_task = dequeue_task_idle,
504 :
505 : .check_preempt_curr = check_preempt_curr_idle,
506 :
507 : .pick_next_task = pick_next_task_idle,
508 : .put_prev_task = put_prev_task_idle,
509 : .set_next_task = set_next_task_idle,
510 :
511 : #ifdef CONFIG_SMP
512 : .balance = balance_idle,
513 : .pick_task = pick_task_idle,
514 : .select_task_rq = select_task_rq_idle,
515 : .set_cpus_allowed = set_cpus_allowed_common,
516 : #endif
517 :
518 : .task_tick = task_tick_idle,
519 :
520 : .prio_changed = prio_changed_idle,
521 : .switched_to = switched_to_idle,
522 : .update_curr = update_curr_idle,
523 : };