Kyle Swenson | 8d8f654 | 2021-03-15 11:02:55 -0600 | [diff] [blame^] | 1 | /* |
| 2 | * Machine vector for IA-64. |
| 3 | * |
| 4 | * Copyright (C) 1999 Silicon Graphics, Inc. |
| 5 | * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com> |
| 6 | * Copyright (C) Vijay Chander <vijay@engr.sgi.com> |
| 7 | * Copyright (C) 1999-2001, 2003-2004 Hewlett-Packard Co. |
| 8 | * David Mosberger-Tang <davidm@hpl.hp.com> |
| 9 | */ |
| 10 | #ifndef _ASM_IA64_MACHVEC_H |
| 11 | #define _ASM_IA64_MACHVEC_H |
| 12 | |
| 13 | #include <linux/types.h> |
| 14 | |
| 15 | /* forward declarations: */ |
| 16 | struct device; |
| 17 | struct pt_regs; |
| 18 | struct scatterlist; |
| 19 | struct page; |
| 20 | struct mm_struct; |
| 21 | struct pci_bus; |
| 22 | struct task_struct; |
| 23 | struct pci_dev; |
| 24 | struct msi_desc; |
| 25 | struct dma_attrs; |
| 26 | |
/*
 * Function-pointer signature typedefs for the per-platform machine-vector
 * operations.  Each platform provides implementations with these
 * signatures; struct ia64_machine_vector (below) aggregates one pointer
 * per operation.
 */
typedef void ia64_mv_setup_t (char **);
typedef void ia64_mv_cpu_init_t (void);
typedef void ia64_mv_irq_init_t (void);
typedef void ia64_mv_send_ipi_t (int, int, int, int);
typedef void ia64_mv_timer_interrupt_t (int, void *);
typedef void ia64_mv_global_tlb_purge_t (struct mm_struct *, unsigned long, unsigned long, unsigned long);
typedef void ia64_mv_tlb_migrate_finish_t (struct mm_struct *);
typedef u8 ia64_mv_irq_to_vector (int);
typedef unsigned int ia64_mv_local_vector_to_irq (u8);
typedef char *ia64_mv_pci_get_legacy_mem_t (struct pci_bus *);
typedef int ia64_mv_pci_legacy_read_t (struct pci_bus *, u16 port, u32 *val,
				       u8 size);
typedef int ia64_mv_pci_legacy_write_t (struct pci_bus *, u16 port, u32 val,
					u8 size);
typedef void ia64_mv_migrate_t(struct task_struct * task);
typedef void ia64_mv_pci_fixup_bus_t (struct pci_bus *);
typedef void ia64_mv_kernel_launch_event_t(void);

/* DMA-mapping interface: platform hooks for DMA init and dma_map_ops lookup. */
typedef void ia64_mv_dma_init (void);
typedef u64 ia64_mv_dma_get_required_mask (struct device *);
typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
| 49 | |
| 50 | /* |
| 51 | * WARNING: The legacy I/O space is _architected_. Platforms are |
| 52 | * expected to follow this architected model (see Section 10.7 in the |
| 53 | * IA-64 Architecture Software Developer's Manual). Unfortunately, |
| 54 | * some broken machines do not follow that model, which is why we have |
| 55 | * to make the inX/outX operations part of the machine vector. |
| 56 | * Platform designers should follow the architected model whenever |
| 57 | * possible. |
| 58 | */ |
/* Legacy I/O-port accessors (see the architected-model warning above): */
typedef unsigned int ia64_mv_inb_t (unsigned long);
typedef unsigned int ia64_mv_inw_t (unsigned long);
typedef unsigned int ia64_mv_inl_t (unsigned long);
typedef void ia64_mv_outb_t (unsigned char, unsigned long);
typedef void ia64_mv_outw_t (unsigned short, unsigned long);
typedef void ia64_mv_outl_t (unsigned int, unsigned long);
typedef void ia64_mv_mmiowb_t (void);
/* MMIO read accessors, plus "_relaxed" variants with weaker ordering: */
typedef unsigned char ia64_mv_readb_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_t (const volatile void __iomem *);
typedef unsigned char ia64_mv_readb_relaxed_t (const volatile void __iomem *);
typedef unsigned short ia64_mv_readw_relaxed_t (const volatile void __iomem *);
typedef unsigned int ia64_mv_readl_relaxed_t (const volatile void __iomem *);
typedef unsigned long ia64_mv_readq_relaxed_t (const volatile void __iomem *);

/* Per-platform MSI interrupt setup/teardown hooks: */
typedef int ia64_mv_setup_msi_irq_t (struct pci_dev *pdev, struct msi_desc *);
typedef void ia64_mv_teardown_msi_irq_t (unsigned int irq);
| 77 | |
/*
 * Do-nothing default for argument-less machine-vector hooks; used below
 * as the fallback for platform_cpu_init, platform_irq_init and
 * platform_kernel_launch_event.
 */
static inline void
machvec_noop (void)
{
}
| 82 | |
/*
 * Do-nothing default for hooks taking an mm_struct; used below as the
 * fallback for platform_tlb_migrate_finish.
 */
static inline void
machvec_noop_mm (struct mm_struct *mm)
{
}
| 87 | |
/*
 * Do-nothing default for hooks taking a task_struct; used below as the
 * fallback for platform_migrate.
 */
static inline void
machvec_noop_task (struct task_struct *task)
{
}
| 92 | |
/*
 * Do-nothing default for hooks taking a pci_bus; used below as the
 * fallback for platform_pci_fixup_bus.
 */
static inline void
machvec_noop_bus (struct pci_bus *bus)
{
}
| 97 | |
/*
 * Generic implementations (defined elsewhere in arch code) used below as
 * defaults for platform_setup and platform_timer_interrupt.
 */
extern void machvec_setup (char **);
extern void machvec_timer_interrupt (int, void *);
extern void machvec_tlb_migrate_finish (struct mm_struct *);
| 101 | |
/*
 * Select the machine vector.  A kernel built for one specific platform
 * includes that platform's machvec_*.h, which defines the platform_*
 * operations statically.  A CONFIG_IA64_GENERIC kernel instead routes
 * every platform_* operation through the global 'ia64_mv' structure,
 * which is filled in at boot (see machvec_init/machvec_init_from_cmdline
 * below).
 */
# if defined (CONFIG_IA64_HP_SIM)
# include <asm/machvec_hpsim.h>
# elif defined (CONFIG_IA64_DIG)
# include <asm/machvec_dig.h>
# elif defined(CONFIG_IA64_DIG_VTD)
# include <asm/machvec_dig_vtd.h>
# elif defined (CONFIG_IA64_HP_ZX1)
# include <asm/machvec_hpzx1.h>
# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
# include <asm/machvec_hpzx1_swiotlb.h>
# elif defined (CONFIG_IA64_SGI_SN2)
# include <asm/machvec_sn2.h>
# elif defined (CONFIG_IA64_SGI_UV)
# include <asm/machvec_uv.h>
# elif defined (CONFIG_IA64_GENERIC)

/*
 * NOTE(review): MACHVEC_PLATFORM_HEADER is presumably defined by the
 * generic machvec instantiation code to pull in a single platform's
 * header — confirm against arch/ia64/kernel/machvec.c.  Otherwise all
 * platform_* names dispatch through the runtime-selected ia64_mv.
 */
# ifdef MACHVEC_PLATFORM_HEADER
#  include MACHVEC_PLATFORM_HEADER
# else
#  define ia64_platform_name	ia64_mv.name
#  define platform_setup	ia64_mv.setup
#  define platform_cpu_init	ia64_mv.cpu_init
#  define platform_irq_init	ia64_mv.irq_init
#  define platform_send_ipi	ia64_mv.send_ipi
#  define platform_timer_interrupt	ia64_mv.timer_interrupt
#  define platform_global_tlb_purge	ia64_mv.global_tlb_purge
#  define platform_tlb_migrate_finish	ia64_mv.tlb_migrate_finish
#  define platform_dma_init		ia64_mv.dma_init
#  define platform_dma_get_required_mask ia64_mv.dma_get_required_mask
#  define platform_dma_get_ops		ia64_mv.dma_get_ops
#  define platform_irq_to_vector	ia64_mv.irq_to_vector
#  define platform_local_vector_to_irq	ia64_mv.local_vector_to_irq
#  define platform_pci_get_legacy_mem	ia64_mv.pci_get_legacy_mem
#  define platform_pci_legacy_read	ia64_mv.pci_legacy_read
#  define platform_pci_legacy_write	ia64_mv.pci_legacy_write
#  define platform_inb		ia64_mv.inb
#  define platform_inw		ia64_mv.inw
#  define platform_inl		ia64_mv.inl
#  define platform_outb		ia64_mv.outb
#  define platform_outw		ia64_mv.outw
#  define platform_outl		ia64_mv.outl
#  define platform_mmiowb	ia64_mv.mmiowb
#  define platform_readb	ia64_mv.readb
#  define platform_readw	ia64_mv.readw
#  define platform_readl	ia64_mv.readl
#  define platform_readq	ia64_mv.readq
#  define platform_readb_relaxed	ia64_mv.readb_relaxed
#  define platform_readw_relaxed	ia64_mv.readw_relaxed
#  define platform_readl_relaxed	ia64_mv.readl_relaxed
#  define platform_readq_relaxed	ia64_mv.readq_relaxed
#  define platform_migrate		ia64_mv.migrate
#  define platform_setup_msi_irq	ia64_mv.setup_msi_irq
#  define platform_teardown_msi_irq	ia64_mv.teardown_msi_irq
#  define platform_pci_fixup_bus	ia64_mv.pci_fixup_bus
#  define platform_kernel_launch_event	ia64_mv.kernel_launch_event
# endif
| 158 | |
/* __attribute__((__aligned__(16))) is required to make size of the
 * structure multiple of 16 bytes.
 * This will fillup the holes created because of section 3.3.1 in
 * Software Conventions guide.
 *
 * One function pointer per machine-vector operation (signatures are the
 * ia64_mv_*_t typedefs above).  IMPORTANT: the member order here must
 * stay in sync with the positional initializer in MACHVEC_INIT() below.
 */
struct ia64_machine_vector {
	const char *name;
	ia64_mv_setup_t *setup;
	ia64_mv_cpu_init_t *cpu_init;
	ia64_mv_irq_init_t *irq_init;
	ia64_mv_send_ipi_t *send_ipi;
	ia64_mv_timer_interrupt_t *timer_interrupt;
	ia64_mv_global_tlb_purge_t *global_tlb_purge;
	ia64_mv_tlb_migrate_finish_t *tlb_migrate_finish;
	ia64_mv_dma_init *dma_init;
	ia64_mv_dma_get_required_mask *dma_get_required_mask;
	ia64_mv_dma_get_ops *dma_get_ops;
	ia64_mv_irq_to_vector *irq_to_vector;
	ia64_mv_local_vector_to_irq *local_vector_to_irq;
	ia64_mv_pci_get_legacy_mem_t *pci_get_legacy_mem;
	ia64_mv_pci_legacy_read_t *pci_legacy_read;
	ia64_mv_pci_legacy_write_t *pci_legacy_write;
	ia64_mv_inb_t *inb;
	ia64_mv_inw_t *inw;
	ia64_mv_inl_t *inl;
	ia64_mv_outb_t *outb;
	ia64_mv_outw_t *outw;
	ia64_mv_outl_t *outl;
	ia64_mv_mmiowb_t *mmiowb;
	ia64_mv_readb_t *readb;
	ia64_mv_readw_t *readw;
	ia64_mv_readl_t *readl;
	ia64_mv_readq_t *readq;
	ia64_mv_readb_relaxed_t *readb_relaxed;
	ia64_mv_readw_relaxed_t *readw_relaxed;
	ia64_mv_readl_relaxed_t *readl_relaxed;
	ia64_mv_readq_relaxed_t *readq_relaxed;
	ia64_mv_migrate_t *migrate;
	ia64_mv_setup_msi_irq_t *setup_msi_irq;
	ia64_mv_teardown_msi_irq_t *teardown_msi_irq;
	ia64_mv_pci_fixup_bus_t *pci_fixup_bus;
	ia64_mv_kernel_launch_event_t *kernel_launch_event;
} __attribute__((__aligned__(16))); /* align attrib? see above comment */
| 202 | |
/*
 * Build a complete, positionally-ordered initializer for a struct
 * ia64_machine_vector.  #name is stringized to become the .name member;
 * every subsequent entry resolves to either a platform-provided
 * platform_* definition or one of the defaults defined later in this
 * file.  The entry order must exactly match the member order of
 * struct ia64_machine_vector above.
 */
#define MACHVEC_INIT(name)			\
{						\
	#name,					\
	platform_setup,				\
	platform_cpu_init,			\
	platform_irq_init,			\
	platform_send_ipi,			\
	platform_timer_interrupt,		\
	platform_global_tlb_purge,		\
	platform_tlb_migrate_finish,		\
	platform_dma_init,			\
	platform_dma_get_required_mask,		\
	platform_dma_get_ops,			\
	platform_irq_to_vector,			\
	platform_local_vector_to_irq,		\
	platform_pci_get_legacy_mem,		\
	platform_pci_legacy_read,		\
	platform_pci_legacy_write,		\
	platform_inb,				\
	platform_inw,				\
	platform_inl,				\
	platform_outb,				\
	platform_outw,				\
	platform_outl,				\
	platform_mmiowb,			\
	platform_readb,				\
	platform_readw,				\
	platform_readl,				\
	platform_readq,				\
	platform_readb_relaxed,			\
	platform_readw_relaxed,			\
	platform_readl_relaxed,			\
	platform_readq_relaxed,			\
	platform_migrate,			\
	platform_setup_msi_irq,			\
	platform_teardown_msi_irq,		\
	platform_pci_fixup_bus,			\
	platform_kernel_launch_event		\
}
| 242 | |
/*
 * The single global machine vector used by CONFIG_IA64_GENERIC kernels,
 * selected at boot by name or from the kernel command line.
 */
extern struct ia64_machine_vector ia64_mv;
extern void machvec_init (const char *name);
extern void machvec_init_from_cmdline(const char *cmdline);

# else
# error Unknown configuration. Update arch/ia64/include/asm/machvec.h.
# endif /* CONFIG_IA64_GENERIC */

/* Generic DMA helpers used as defaults below: */
extern void swiotlb_dma_init(void);
extern struct dma_map_ops *dma_get_ops(struct device *);
| 253 | |
| 254 | /* |
| 255 | * Define default versions so we can extend machvec for new platforms without having |
| 256 | * to update the machvec files for all existing platforms. |
| 257 | */ |
| 258 | #ifndef platform_setup |
| 259 | # define platform_setup machvec_setup |
| 260 | #endif |
| 261 | #ifndef platform_cpu_init |
| 262 | # define platform_cpu_init machvec_noop |
| 263 | #endif |
| 264 | #ifndef platform_irq_init |
| 265 | # define platform_irq_init machvec_noop |
| 266 | #endif |
| 267 | |
| 268 | #ifndef platform_send_ipi |
| 269 | # define platform_send_ipi ia64_send_ipi /* default to architected version */ |
| 270 | #endif |
| 271 | #ifndef platform_timer_interrupt |
| 272 | # define platform_timer_interrupt machvec_timer_interrupt |
| 273 | #endif |
| 274 | #ifndef platform_global_tlb_purge |
| 275 | # define platform_global_tlb_purge ia64_global_tlb_purge /* default to architected version */ |
| 276 | #endif |
| 277 | #ifndef platform_tlb_migrate_finish |
| 278 | # define platform_tlb_migrate_finish machvec_noop_mm |
| 279 | #endif |
| 280 | #ifndef platform_kernel_launch_event |
| 281 | # define platform_kernel_launch_event machvec_noop |
| 282 | #endif |
| 283 | #ifndef platform_dma_init |
| 284 | # define platform_dma_init swiotlb_dma_init |
| 285 | #endif |
| 286 | #ifndef platform_dma_get_ops |
| 287 | # define platform_dma_get_ops dma_get_ops |
| 288 | #endif |
| 289 | #ifndef platform_dma_get_required_mask |
| 290 | # define platform_dma_get_required_mask ia64_dma_get_required_mask |
| 291 | #endif |
| 292 | #ifndef platform_irq_to_vector |
| 293 | # define platform_irq_to_vector __ia64_irq_to_vector |
| 294 | #endif |
| 295 | #ifndef platform_local_vector_to_irq |
| 296 | # define platform_local_vector_to_irq __ia64_local_vector_to_irq |
| 297 | #endif |
| 298 | #ifndef platform_pci_get_legacy_mem |
| 299 | # define platform_pci_get_legacy_mem ia64_pci_get_legacy_mem |
| 300 | #endif |
| 301 | #ifndef platform_pci_legacy_read |
| 302 | # define platform_pci_legacy_read ia64_pci_legacy_read |
| 303 | extern int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size); |
| 304 | #endif |
| 305 | #ifndef platform_pci_legacy_write |
| 306 | # define platform_pci_legacy_write ia64_pci_legacy_write |
| 307 | extern int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size); |
| 308 | #endif |
| 309 | #ifndef platform_inb |
| 310 | # define platform_inb __ia64_inb |
| 311 | #endif |
| 312 | #ifndef platform_inw |
| 313 | # define platform_inw __ia64_inw |
| 314 | #endif |
| 315 | #ifndef platform_inl |
| 316 | # define platform_inl __ia64_inl |
| 317 | #endif |
| 318 | #ifndef platform_outb |
| 319 | # define platform_outb __ia64_outb |
| 320 | #endif |
| 321 | #ifndef platform_outw |
| 322 | # define platform_outw __ia64_outw |
| 323 | #endif |
| 324 | #ifndef platform_outl |
| 325 | # define platform_outl __ia64_outl |
| 326 | #endif |
| 327 | #ifndef platform_mmiowb |
| 328 | # define platform_mmiowb __ia64_mmiowb |
| 329 | #endif |
| 330 | #ifndef platform_readb |
| 331 | # define platform_readb __ia64_readb |
| 332 | #endif |
| 333 | #ifndef platform_readw |
| 334 | # define platform_readw __ia64_readw |
| 335 | #endif |
| 336 | #ifndef platform_readl |
| 337 | # define platform_readl __ia64_readl |
| 338 | #endif |
| 339 | #ifndef platform_readq |
| 340 | # define platform_readq __ia64_readq |
| 341 | #endif |
| 342 | #ifndef platform_readb_relaxed |
| 343 | # define platform_readb_relaxed __ia64_readb_relaxed |
| 344 | #endif |
| 345 | #ifndef platform_readw_relaxed |
| 346 | # define platform_readw_relaxed __ia64_readw_relaxed |
| 347 | #endif |
| 348 | #ifndef platform_readl_relaxed |
| 349 | # define platform_readl_relaxed __ia64_readl_relaxed |
| 350 | #endif |
| 351 | #ifndef platform_readq_relaxed |
| 352 | # define platform_readq_relaxed __ia64_readq_relaxed |
| 353 | #endif |
| 354 | #ifndef platform_migrate |
| 355 | # define platform_migrate machvec_noop_task |
| 356 | #endif |
| 357 | #ifndef platform_setup_msi_irq |
| 358 | # define platform_setup_msi_irq ((ia64_mv_setup_msi_irq_t*)NULL) |
| 359 | #endif |
| 360 | #ifndef platform_teardown_msi_irq |
| 361 | # define platform_teardown_msi_irq ((ia64_mv_teardown_msi_irq_t*)NULL) |
| 362 | #endif |
| 363 | #ifndef platform_pci_fixup_bus |
| 364 | # define platform_pci_fixup_bus machvec_noop_bus |
| 365 | #endif |
| 366 | |
| 367 | #endif /* _ASM_IA64_MACHVEC_H */ |