#include <asm/paravirt.h>

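/*
 * Native instruction sequences for the 32-bit paravirt patch sites.
 * DEF_NATIVE() (see asm/paravirt_types.h) roughly expands to an asm()
 * block that brackets the raw instructions with start_<ops>_<name> and
 * end_<ops>_<name> labels, so native_patch() below can copy the bytes
 * over the pv-op indirect call sites at patch time.
 */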
DEF_NATIVE(pv_irq_ops, irq_disable, "cli");
DEF_NATIVE(pv_irq_ops, irq_enable, "sti");
DEF_NATIVE(pv_irq_ops, restore_fl, "push %eax; popf");
DEF_NATIVE(pv_irq_ops, save_fl, "pushf; pop %eax");
DEF_NATIVE(pv_cpu_ops, iret, "iret");
DEF_NATIVE(pv_cpu_ops, irq_enable_sysexit, "sti; sysexit");
DEF_NATIVE(pv_mmu_ops, read_cr2, "mov %cr2, %eax");
DEF_NATIVE(pv_mmu_ops, write_cr3, "mov %eax, %cr3");
DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
DEF_NATIVE(pv_cpu_ops, clts, "clts");

#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
#endif

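/*
 * Identity ops need no replacement instructions: the argument already
 * sits in the return register(s), so the call site can simply be nopped
 * out.  Returning 0 tells the patcher that zero bytes were emitted.
 */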
unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
{
	/* arg in %eax, return in %eax */
	return 0;
}

unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
{
	/* arg in %edx:%eax, return in %edx:%eax */
	return 0;
}

extern bool pv_is_native_spin_unlock(void);

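/*
 * Replace the pv-op call site of 'type' at 'addr' with the matching
 * native snippet when one exists; otherwise fall back to
 * paravirt_patch_default().  Returns the number of bytes patched in.
 */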
unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
		      unsigned long addr, unsigned len)
{
	const unsigned char *start, *end;
	unsigned ret;

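	/*
	 * Emit a case label that picks up the start/end markers of the
	 * corresponding DEF_NATIVE() snippet and jumps to the shared
	 * patch_site code below.
	 */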
#define PATCH_SITE(ops, x)						\
	case PARAVIRT_PATCH(ops.x):					\
		start = start_##ops##_##x;				\
		end = end_##ops##_##x;					\
		goto patch_site
	switch (type) {
	PATCH_SITE(pv_irq_ops, irq_disable);
	PATCH_SITE(pv_irq_ops, irq_enable);
	PATCH_SITE(pv_irq_ops, restore_fl);
	PATCH_SITE(pv_irq_ops, save_fl);
	PATCH_SITE(pv_cpu_ops, iret);
	PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
	PATCH_SITE(pv_mmu_ops, read_cr2);
	PATCH_SITE(pv_mmu_ops, read_cr3);
	PATCH_SITE(pv_mmu_ops, write_cr3);
	PATCH_SITE(pv_cpu_ops, clts);
#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
	case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
		if (pv_is_native_spin_unlock()) {
			start = start_pv_lock_ops_queued_spin_unlock;
			end = end_pv_lock_ops_queued_spin_unlock;
			goto patch_site;
		}
		/* not the native unlock: fall through to the default */
#endif

	default:
		ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
		break;

patch_site:
		ret = paravirt_patch_insns(ibuf, len, start, end);
		break;
	}
#undef PATCH_SITE
	return ret;
}