/*
 * Copyright 2010 IBM Corp, Benjamin Herrenschmidt <benh@kernel.crashing.org>
 *
 * Generic idle routine for Book3E processors
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
11
12#include <linux/threads.h>
13#include <asm/reg.h>
14#include <asm/ppc_asm.h>
15#include <asm/asm-offsets.h>
16#include <asm/ppc-opcode.h>
17#include <asm/processor.h>
18#include <asm/thread_info.h>
19#include <asm/epapr_hcalls.h>
20
21/* 64-bit version only for now */
22#ifdef CONFIG_PPC64
23
24.macro BOOK3E_IDLE name loop
25_GLOBAL(\name)
26 /* Save LR for later */
27 mflr r0
28 std r0,16(r1)
29
30 /* Hard disable interrupts */
31 wrteei 0
32
33 /* Now check if an interrupt came in while we were soft disabled
34 * since we may otherwise lose it (doorbells etc...).
35 */
36 lbz r3,PACAIRQHAPPENED(r13)
37 cmpwi cr0,r3,0
38 bnelr
39
40 /* Now we are going to mark ourselves as soft and hard enabled in
41 * order to be able to take interrupts while asleep. We inform lockdep
42 * of that. We don't actually turn interrupts on just yet tho.
43 */
44#ifdef CONFIG_TRACE_IRQFLAGS
45 stdu r1,-128(r1)
46 bl trace_hardirqs_on
47 addi r1,r1,128
48#endif
49 li r0,1
50 stb r0,PACASOFTIRQEN(r13)
51
52 /* Interrupts will make use return to LR, so get something we want
53 * in there
54 */
55 bl 1f
56
57 /* And return (interrupts are on) */
58 ld r0,16(r1)
59 mtlr r0
60 blr
61
621: /* Let's set the _TLF_NAPPING flag so interrupts make us return
63 * to the right spot
64 */
65 CURRENT_THREAD_INFO(r11, r1)
66 ld r10,TI_LOCAL_FLAGS(r11)
67 ori r10,r10,_TLF_NAPPING
68 std r10,TI_LOCAL_FLAGS(r11)
69
70 /* We can now re-enable hard interrupts and go to sleep */
71 wrteei 1
72 \loop
73
74.endm
75
76.macro BOOK3E_IDLE_LOOP
771:
78 PPC_WAIT(0)
79 b 1b
80.endm
81
82/* epapr_ev_idle_start below is patched with the proper hcall
83 opcodes during kernel initialization */
84.macro EPAPR_EV_IDLE_LOOP
85idle_loop:
86 LOAD_REG_IMMEDIATE(r11, EV_HCALL_TOKEN(EV_IDLE))
87
88.global epapr_ev_idle_start
89epapr_ev_idle_start:
90 li r3, -1
91 nop
92 nop
93 nop
94 b idle_loop
95.endm
96
97BOOK3E_IDLE epapr_ev_idle EPAPR_EV_IDLE_LOOP
98
99BOOK3E_IDLE book3e_idle BOOK3E_IDLE_LOOP
100
101#endif /* CONFIG_PPC64 */