/*
 * ARM GIC v2m MSI(-X) support
 * Support for Message Signaled Interrupts for systems that
 * implement ARM Generic Interrupt Controller: GICv2m.
 *
 * Copyright (C) 2014 Advanced Micro Devices, Inc.
 * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 *          Harish Kasiviswanathan <harish.kasiviswanathan@amd.com>
 *          Brandon Anderson <brandon.anderson@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#define pr_fmt(fmt) "GICv2m: " fmt

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/*
 * MSI_TYPER:
 *	[31:26]	Reserved
 *	[25:16]	lowest SPI assigned to MSI
 *	[15:10]	Reserved
 *	[9:0]	Number of SPIs assigned to MSI
 */
#define V2M_MSI_TYPER			0x008
#define V2M_MSI_TYPER_BASE_SHIFT	16
#define V2M_MSI_TYPER_BASE_MASK		0x3FF
#define V2M_MSI_TYPER_NUM_MASK		0x3FF
#define V2M_MSI_SETSPI_NS		0x040
#define V2M_MIN_SPI			32
#define V2M_MAX_SPI			1019
#define V2M_MSI_IIDR			0xFCC

#define V2M_MSI_TYPER_BASE_SPI(x)	\
	(((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK)

#define V2M_MSI_TYPER_NUM_SPI(x)	((x) & V2M_MSI_TYPER_NUM_MASK)
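
/*
 * Worked example (hypothetical register value, for illustration only):
 * an MSI_TYPER reading of 0x00400040 decodes through the macros above to
 * V2M_MSI_TYPER_BASE_SPI() = 64 (bits [25:16]) and
 * V2M_MSI_TYPER_NUM_SPI() = 64 (bits [9:0]), i.e. SPIs 64..127 are backed
 * by that frame.
 */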

/* APM X-Gene with GICv2m MSI_IIDR register value */
#define XGENE_GICV2M_MSI_IIDR		0x06000170

/* List of flags for specific v2m implementation */
#define GICV2M_NEEDS_SPI_OFFSET		0x00000001

static LIST_HEAD(v2m_nodes);
static DEFINE_SPINLOCK(v2m_lock);

struct v2m_data {
	struct list_head entry;
	struct device_node *node;
	struct resource res;	/* GICv2m resource */
	void __iomem *base;	/* GICv2m virt address */
	u32 spi_start;		/* The SPI number that MSIs start from */
	u32 nr_spis;		/* The number of SPIs for MSIs */
	unsigned long *bm;	/* MSI vector bitmap */
	u32 flags;		/* v2m flags for specific implementation */
};

static void gicv2m_mask_msi_irq(struct irq_data *d)
{
	pci_msi_mask_irq(d);
	irq_chip_mask_parent(d);
}

static void gicv2m_unmask_msi_irq(struct irq_data *d)
{
	pci_msi_unmask_irq(d);
	irq_chip_unmask_parent(d);
}

static struct irq_chip gicv2m_msi_irq_chip = {
	.name			= "MSI",
	.irq_mask		= gicv2m_mask_msi_irq,
	.irq_unmask		= gicv2m_unmask_msi_irq,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_write_msi_msg	= pci_msi_domain_write_msg,
};

static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		   MSI_FLAG_PCI_MSIX),
	.chip	= &gicv2m_msi_irq_chip,
};

static int gicv2m_set_affinity(struct irq_data *irq_data,
			       const struct cpumask *mask, bool force)
{
	int ret;

	ret = irq_chip_set_affinity_parent(irq_data, mask, force);
	if (ret == IRQ_SET_MASK_OK)
		ret = IRQ_SET_MASK_OK_DONE;

	return ret;
}

static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct v2m_data *v2m = irq_data_get_irq_chip_data(data);
	phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS;

	msg->address_hi = upper_32_bits(addr);
	msg->address_lo = lower_32_bits(addr);
	msg->data = data->hwirq;

	if (v2m->flags & GICV2M_NEEDS_SPI_OFFSET)
		msg->data -= v2m->spi_start;
}
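
/*
 * Example with made-up numbers: for a frame whose register region starts at
 * physical address 0x08020000, the doorbell written by the endpoint is
 * 0x08020000 + V2M_MSI_SETSPI_NS = 0x08020040, so an MSI mapped to hwirq 67
 * is composed as address_hi = 0x0, address_lo = 0x08020040, data = 67
 * (minus spi_start when GICV2M_NEEDS_SPI_OFFSET is set, see below).
 */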

static struct irq_chip gicv2m_irq_chip = {
	.name			= "GICv2m",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_affinity	= gicv2m_set_affinity,
	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
};

static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
				       unsigned int virq,
				       irq_hw_number_t hwirq)
{
	struct irq_fwspec fwspec;
	struct irq_data *d;
	int err;

	if (is_of_node(domain->parent->fwnode)) {
		fwspec.fwnode = domain->parent->fwnode;
		fwspec.param_count = 3;
		fwspec.param[0] = 0;
		fwspec.param[1] = hwirq - 32;
		fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
	} else {
		return -EINVAL;
	}

	err = irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
	if (err)
		return err;

	/* Configure the interrupt line as edge-triggered */
	d = irq_domain_get_irq_data(domain->parent, virq);
	d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING);
	return 0;
}

static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
{
	int pos;

	pos = hwirq - v2m->spi_start;
	if (pos < 0 || pos >= v2m->nr_spis) {
		pr_err("Failed to tear down MSI: invalid hwirq %d\n", hwirq);
		return;
	}

	spin_lock(&v2m_lock);
	__clear_bit(pos, v2m->bm);
	spin_unlock(&v2m_lock);
}

static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct v2m_data *v2m = NULL, *tmp;
	int hwirq, offset, err = 0;

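	/*
	 * Scan the registered frames and claim the first free SPI slot;
	 * the per-frame allocation bitmaps are protected by v2m_lock.
	 */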
	spin_lock(&v2m_lock);
	list_for_each_entry(tmp, &v2m_nodes, entry) {
		offset = find_first_zero_bit(tmp->bm, tmp->nr_spis);
		if (offset < tmp->nr_spis) {
			__set_bit(offset, tmp->bm);
			v2m = tmp;
			break;
		}
	}
	spin_unlock(&v2m_lock);

	if (!v2m)
		return -ENOSPC;

	hwirq = v2m->spi_start + offset;

	err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
	if (err) {
		gicv2m_unalloc_msi(v2m, hwirq);
		return err;
	}

	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
				      &gicv2m_irq_chip, v2m);

	return 0;
}

static void gicv2m_irq_domain_free(struct irq_domain *domain,
				   unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

	BUG_ON(nr_irqs != 1);
	gicv2m_unalloc_msi(v2m, d->hwirq);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}

static const struct irq_domain_ops gicv2m_domain_ops = {
	.alloc			= gicv2m_irq_domain_alloc,
	.free			= gicv2m_irq_domain_free,
};

static bool is_msi_spi_valid(u32 base, u32 num)
{
	if (base < V2M_MIN_SPI) {
		pr_err("Invalid MSI base SPI (base:%u)\n", base);
		return false;
	}

	if ((num == 0) || (base + num > V2M_MAX_SPI)) {
		pr_err("Number of SPIs (%u) exceeds maximum (%u)\n",
		       num, V2M_MAX_SPI - V2M_MIN_SPI + 1);
		return false;
	}

	return true;
}

static struct irq_chip gicv2m_pmsi_irq_chip = {
	.name			= "pMSI",
};

static struct msi_domain_ops gicv2m_pmsi_ops = {
};

static struct msi_domain_info gicv2m_pmsi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS),
	.ops	= &gicv2m_pmsi_ops,
	.chip	= &gicv2m_pmsi_irq_chip,
};

static void gicv2m_teardown(void)
{
	struct v2m_data *v2m, *tmp;

	list_for_each_entry_safe(v2m, tmp, &v2m_nodes, entry) {
		list_del(&v2m->entry);
		kfree(v2m->bm);
		iounmap(v2m->base);
		of_node_put(v2m->node);
		kfree(v2m);
	}
}

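/*
 * Build the MSI irq domain hierarchy on top of the GIC domain:
 *
 *	parent (GIC) <-- GICv2m nexus domain <-- PCI/MSI and platform-MSI domains
 *
 * The domains are created against the first registered frame's fwnode,
 * while SPI allocation in gicv2m_irq_domain_alloc() spans every frame on
 * the v2m_nodes list.
 */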
static int gicv2m_allocate_domains(struct irq_domain *parent)
{
	struct irq_domain *inner_domain, *pci_domain, *plat_domain;
	struct v2m_data *v2m;

	v2m = list_first_entry_or_null(&v2m_nodes, struct v2m_data, entry);
	if (!v2m)
		return 0;

	inner_domain = irq_domain_create_tree(of_node_to_fwnode(v2m->node),
					      &gicv2m_domain_ops, v2m);
	if (!inner_domain) {
		pr_err("Failed to create GICv2m domain\n");
		return -ENOMEM;
	}

	inner_domain->bus_token = DOMAIN_BUS_NEXUS;
	inner_domain->parent = parent;
	pci_domain = pci_msi_create_irq_domain(of_node_to_fwnode(v2m->node),
					       &gicv2m_msi_domain_info,
					       inner_domain);
	plat_domain = platform_msi_create_irq_domain(of_node_to_fwnode(v2m->node),
						     &gicv2m_pmsi_domain_info,
						     inner_domain);
	if (!pci_domain || !plat_domain) {
		pr_err("Failed to create MSI domains\n");
		if (plat_domain)
			irq_domain_remove(plat_domain);
		if (pci_domain)
			irq_domain_remove(pci_domain);
		irq_domain_remove(inner_domain);
		return -ENOMEM;
	}

	return 0;
}

static int __init gicv2m_init_one(struct device_node *node,
				  struct irq_domain *parent)
{
	int ret;
	struct v2m_data *v2m;

	v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL);
	if (!v2m) {
		pr_err("Failed to allocate struct v2m_data.\n");
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&v2m->entry);
	v2m->node = node;

	ret = of_address_to_resource(node, 0, &v2m->res);
	if (ret) {
		pr_err("Failed to get v2m resource.\n");
		goto err_free_v2m;
	}

	v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res));
	if (!v2m->base) {
		pr_err("Failed to map GICv2m resource\n");
		ret = -ENOMEM;
		goto err_free_v2m;
	}

	if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) &&
	    !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) {
		pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n",
			v2m->spi_start, v2m->nr_spis);
	} else {
		u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER);

		v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer);
		v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer);
	}

	if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) {
		ret = -EINVAL;
		goto err_iounmap;
	}

	/*
	 * APM X-Gene GICv2m implementation has an erratum where
	 * the MSI data needs to be the offset from the spi_start
	 * in order to trigger the correct MSI interrupt. This is
	 * different from the standard GICv2m implementation where
	 * the MSI data is the absolute value within the range from
	 * spi_start to (spi_start + num_spis).
	 */
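	/*
	 * For example (hypothetical numbers): with spi_start = 64, an MSI
	 * bound to SPI 67 carries data 67 on a standard frame but data 3
	 * (67 - 64) on X-Gene; see gicv2m_compose_msi_msg() above.
	 */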
	if (readl_relaxed(v2m->base + V2M_MSI_IIDR) == XGENE_GICV2M_MSI_IIDR)
		v2m->flags |= GICV2M_NEEDS_SPI_OFFSET;

	v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis),
			  GFP_KERNEL);
	if (!v2m->bm) {
		ret = -ENOMEM;
		goto err_iounmap;
	}

	list_add_tail(&v2m->entry, &v2m_nodes);
	pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name,
		(unsigned long)v2m->res.start, (unsigned long)v2m->res.end,
		v2m->spi_start, (v2m->spi_start + v2m->nr_spis));

	return 0;

err_iounmap:
	iounmap(v2m->base);
err_free_v2m:
	kfree(v2m);
	return ret;
}

static struct of_device_id gicv2m_device_id[] = {
	{ .compatible = "arm,gic-v2m-frame", },
	{},
};
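
/*
 * Illustrative device tree fragment (the node name and addresses are made
 * up for this example, but the compatible string and properties match what
 * gicv2m_init_one() parses above). A v2m frame is typically described as a
 * child of the GIC node:
 *
 *	v2m: v2m@8020000 {
 *		compatible = "arm,gic-v2m-frame";
 *		msi-controller;
 *		reg = <0x08020000 0x1000>;
 *		// Optional overrides for a missing/broken MSI_TYPER:
 *		arm,msi-base-spi = <64>;
 *		arm,msi-num-spis = <64>;
 *	};
 */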

int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent)
{
	int ret = 0;
	struct device_node *child;

	for (child = of_find_matching_node(node, gicv2m_device_id); child;
	     child = of_find_matching_node(child, gicv2m_device_id)) {
		if (!of_find_property(child, "msi-controller", NULL))
			continue;

		ret = gicv2m_init_one(child, parent);
		if (ret) {
			of_node_put(node);
			break;
		}
	}

	if (!ret)
		ret = gicv2m_allocate_domains(parent);
	if (ret)
		gicv2m_teardown();
	return ret;
}