Kyle Swenson | 8d8f654 | 2021-03-15 11:02:55 -0600 | [diff] [blame^] | 1 | /* |
| 2 | * Hibernation support for x86-64 |
| 3 | * |
| 4 | * Distribute under GPLv2. |
| 5 | * |
| 6 | * Copyright 2007 Rafael J. Wysocki <rjw@sisk.pl> |
| 7 | * Copyright 2005 Andi Kleen <ak@suse.de> |
| 8 | * Copyright 2004 Pavel Machek <pavel@suse.cz> |
| 9 | * |
| 10 | * swsusp_arch_resume must not use any stack or any nonlocal variables while |
| 11 | * copying pages: |
| 12 | * |
| 13 | * It's rewriting one kernel image with another. What is the stack in the "old" |
| 14 | * image could very well be a data page in the "new" image, and overwriting |
| 15 | * your own stack under you is a bad idea. |
| 16 | */ |
| 17 | |
| 18 | .text |
| 19 | #include <linux/linkage.h> |
| 20 | #include <asm/segment.h> |
| 21 | #include <asm/page_types.h> |
| 22 | #include <asm/asm-offsets.h> |
| 23 | #include <asm/processor-flags.h> |
| 24 | |
| 25 | ENTRY(swsusp_arch_suspend) |
	/*
	 * int swsusp_arch_suspend(void)
	 *
	 * Hibernation entry: save the current CPU context (all GPRs, rsp,
	 * rbp and rflags) into the global 'saved_context' buffer at the
	 * pt_regs_* offsets from asm-offsets, record where resume should
	 * jump to and the current %cr3, then call swsusp_save() to build
	 * the hibernation image.  Returns swsusp_save()'s value in %rax.
	 *
	 * %rax is used as the scratch base pointer and is deliberately not
	 * saved — restore_registers zeroes it on the way back.
	 */
	/* %rax = &saved_context; spill every register we must preserve */
 | 26 | movq $saved_context, %rax |
 | 27 | movq %rsp, pt_regs_sp(%rax) |
 | 28 | movq %rbp, pt_regs_bp(%rax) |
 | 29 | movq %rsi, pt_regs_si(%rax) |
 | 30 | movq %rdi, pt_regs_di(%rax) |
 | 31 | movq %rbx, pt_regs_bx(%rax) |
 | 32 | movq %rcx, pt_regs_cx(%rax) |
 | 33 | movq %rdx, pt_regs_dx(%rax) |
 | 34 | movq %r8, pt_regs_r8(%rax) |
 | 35 | movq %r9, pt_regs_r9(%rax) |
 | 36 | movq %r10, pt_regs_r10(%rax) |
 | 37 | movq %r11, pt_regs_r11(%rax) |
 | 38 | movq %r12, pt_regs_r12(%rax) |
 | 39 | movq %r13, pt_regs_r13(%rax) |
 | 40 | movq %r14, pt_regs_r14(%rax) |
 | 41 | movq %r15, pt_regs_r15(%rax) |
	/* rflags has no direct mov form — bounce it through the stack */
 | 42 | pushfq |
 | 43 | popq pt_regs_flags(%rax) |
 | 44 |  |
 | 45 | /* save the address of restore_registers */ |
	/*
	 * NOTE(review): absolute $restore_registers (not RIP-relative) —
	 * fine for a fixed-mapping kernel; would need leaq for PIE.
	 */
 | 46 | movq $restore_registers, %rax |
 | 47 | movq %rax, restore_jump_address(%rip) |
 | 48 | /* save cr3 */ |
 | 49 | movq %cr3, %rax |
 | 50 | movq %rax, restore_cr3(%rip) |
 | 51 |  |
	/* snapshot memory; its return value becomes ours */
 | 52 | call swsusp_save |
 | 53 | ret |
| 54 | |
 | 55 | ENTRY(restore_image) |
	/*
	 * Resume entry, executed by the boot kernel after the hibernation
	 * image pages have been loaded (possibly into "wrong" frames).
	 * Switch to safe temporary page tables, flush the TLB, then load
	 * the register protocol expected by core_restore_code and jump to
	 * its relocated (safe-page) copy:
	 *   %rax = restore_registers address recorded by the image kernel
	 *   %rbx = image kernel's %cr3
	 *   %rdx = head of the restore_pblist pbe chain
	 * Never returns here.
	 */
 | 56 | /* switch to temporary page tables */ |
	/* temp_level4_pgt holds a kernel virtual address; %cr3 needs the
	 * physical one, so subtract __PAGE_OFFSET */
 | 57 | movq $__PAGE_OFFSET, %rdx |
 | 58 | movq temp_level4_pgt(%rip), %rax |
 | 59 | subq %rdx, %rax |
 | 60 | movq %rax, %cr3 |
 | 61 | /* Flush TLB */ |
	/* toggling CR4.PGE off/on around a CR3 reload flushes global TLB
	 * entries too, not just the non-global ones */
 | 62 | movq mmu_cr4_features(%rip), %rax |
 | 63 | movq %rax, %rdx |
 | 64 | andq $~(X86_CR4_PGE), %rdx |
 | 65 | movq %rdx, %cr4;  # turn off PGE |
 | 66 | movq %cr3, %rcx;  # flush TLB |
 | 67 | movq %rcx, %cr3; |
 | 68 | movq %rax, %cr4;  # turn PGE back on |
 | 69 |  |
 | 70 | /* prepare to jump to the image kernel */ |
 | 71 | movq restore_jump_address(%rip), %rax |
 | 72 | movq restore_cr3(%rip), %rbx |
 | 73 |  |
 | 74 | /* prepare to copy image data to their original locations */ |
 | 75 | movq restore_pblist(%rip), %rdx |
	/* run the copy loop from its relocated safe page — the in-.text
	 * copy below may itself be overwritten during restore */
 | 76 | movq relocated_restore_code(%rip), %rcx |
 | 77 | jmpq *%rcx |
| 78 | |
| 79 | /* code below has been relocated to a safe page */ |
 | 80 | ENTRY(core_restore_code) |
	/*
	 * Copy every image page back to its original frame, then jump to
	 * restore_registers.  Executes from a relocated safe page, uses
	 * no stack and no non-register state (see file header).
	 * In:  %rdx = current pbe (NULL-terminated list),
	 *      %rax = restore_registers address (passed through untouched),
	 *      %rbx = image kernel's %cr3 (passed through untouched).
	 * Clobbers %rsi, %rdi, %rcx, flags.
	 */
 | 81 | .Lloop: |
	/* NULL pbe terminates the list */
 | 82 | testq %rdx, %rdx |
 | 83 | jz .Ldone |
 | 84 |  |
 | 85 | /* get addresses from the pbe and copy the page */ |
 | 86 | movq pbe_address(%rdx), %rsi |
 | 87 | movq pbe_orig_address(%rdx), %rdi |
	/* one page, copied 8 bytes at a time (PAGE_SIZE / 8 quadwords) */
 | 88 | movq $(PAGE_SIZE >> 3), %rcx |
 | 89 | rep |
 | 90 | movsq |
 | 91 |  |
 | 92 | /* progress to the next pbe */ |
 | 93 | movq pbe_next(%rdx), %rdx |
 | 94 | jmp .Lloop |
 | 95 | .Ldone: |
 | 96 | /* jump to the restore_registers address from the image header */ |
 | 97 | jmpq *%rax |
 | 98 | /* |
 | 99 |  * NOTE: This assumes that the boot kernel's text mapping covers the |
 | 100 |  * image kernel's page containing restore_registers and the address of |
 | 101 |  * this page is the same as in the image kernel's text mapping (it |
 | 102 |  * should always be true, because the text mapping is linear, starting |
 | 103 |  * from 0, and is supposed to cover the entire kernel text for every |
 | 104 |  * kernel). |
 | 105 |  * |
 | 106 |  * code below belongs to the image kernel |
 | 107 |  */ |
| 108 | |
 | 109 | ENTRY(restore_registers) |
	/*
	 * First code executed as the image kernel after all pages have
	 * been restored.  In: %rbx = image kernel's %cr3 (from
	 * core_restore_code).  Reinstalls the image kernel's page tables,
	 * flushes the TLB, restores the context saved by
	 * swsusp_arch_suspend, clears in_suspend, and returns 0 — the
	 * ret unwinds to swsusp_arch_suspend's caller in the image
	 * kernel, via the just-restored %rsp.
	 */
 | 110 | /* go back to the original page tables */ |
 | 111 | movq %rbx, %cr3 |
 | 112 |  |
 | 113 | /* Flush TLB, including "global" things (vmalloc) */ |
	/* same PGE off/on dance as restore_image: drops global entries */
 | 114 | movq mmu_cr4_features(%rip), %rax |
 | 115 | movq %rax, %rdx |
 | 116 | andq $~(X86_CR4_PGE), %rdx |
 | 117 | movq %rdx, %cr4;  # turn off PGE |
 | 118 | movq %cr3, %rcx;  # flush TLB |
 | 119 | movq %rcx, %cr3 |
 | 120 | movq %rax, %cr4;  # turn PGE back on |
 | 121 |  |
 | 122 | /* We don't restore %rax, it must be 0 anyway */ |
	/* %rax is scratch here (base pointer), zeroed below for the return */
 | 123 | movq $saved_context, %rax |
 | 124 | movq pt_regs_sp(%rax), %rsp |
 | 125 | movq pt_regs_bp(%rax), %rbp |
 | 126 | movq pt_regs_si(%rax), %rsi |
 | 127 | movq pt_regs_di(%rax), %rdi |
 | 128 | movq pt_regs_bx(%rax), %rbx |
 | 129 | movq pt_regs_cx(%rax), %rcx |
 | 130 | movq pt_regs_dx(%rax), %rdx |
 | 131 | movq pt_regs_r8(%rax), %r8 |
 | 132 | movq pt_regs_r9(%rax), %r9 |
 | 133 | movq pt_regs_r10(%rax), %r10 |
 | 134 | movq pt_regs_r11(%rax), %r11 |
 | 135 | movq pt_regs_r12(%rax), %r12 |
 | 136 | movq pt_regs_r13(%rax), %r13 |
 | 137 | movq pt_regs_r14(%rax), %r14 |
 | 138 | movq pt_regs_r15(%rax), %r15 |
	/* rflags restored via the (already-restored) stack */
 | 139 | pushq pt_regs_flags(%rax) |
 | 140 | popfq |
 | 141 |  |
 | 142 | /* Saved in save_processor_state. */ |
 | 143 | lgdt saved_context_gdt_desc(%rax) |
 | 144 |  |
	/* return value 0, and the promised zero %rax */
 | 145 | xorq %rax, %rax |
 | 146 |  |
 | 147 | /* tell the hibernation core that we've just restored the memory */ |
 | 148 | movq %rax, in_suspend(%rip) |
 | 149 |  |
 | 150 | ret |