/*
 * Wrapper for the DMA channel allocator: reserves arbiter bandwidth,
 * enables the channel clocks and routes the stream multiplexer.
 */

#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <mach/dma.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/marb_defs.h>
#include <hwregs/clkgen_defs.h>
#include <hwregs/strmux_defs.h>
#include <arbiter.h>

static char used_dma_channels[MAX_DMA_CHANNELS];
static const char *used_dma_channels_users[MAX_DMA_CHANNELS];

static DEFINE_SPINLOCK(dma_lock);

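/*
 * Request exclusive use of DMA channel dmanr for device_id, enable the
 * channel's clock and route the stream multiplexer for the given owner.
 * Returns 0 on success, -ENOMEM if the arbiter cannot grant the
 * requested bandwidth, -EBUSY if the channel is already taken and
 * -EINVAL if dmanr is not a valid channel.
 */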
int crisv32_request_dma(unsigned int dmanr, const char *device_id,
	unsigned options, unsigned int bandwidth, enum dma_owner owner)
{
	unsigned long flags;
	reg_clkgen_rw_clk_ctrl clk_ctrl;
	reg_strmux_rw_cfg strmux_cfg;

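	/* Ask the memory arbiter for the requested bandwidth before
	 * claiming the channel. */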
	if (crisv32_arbiter_allocate_bandwidth(dmanr,
			options & DMA_INT_MEM ? INT_REGION : EXT_REGION,
			bandwidth))
		return -ENOMEM;

	spin_lock_irqsave(&dma_lock, flags);

	if (used_dma_channels[dmanr]) {
		spin_unlock_irqrestore(&dma_lock, flags);
		if (options & DMA_VERBOSE_ON_ERROR)
			printk(KERN_ERR "Failed to request DMA %i for %s, "
				"already allocated by %s\n",
				dmanr,
				device_id,
				used_dma_channels_users[dmanr]);

		if (options & DMA_PANIC_ON_ERROR)
			panic("request_dma error!");
		return -EBUSY;
	}
	clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
	strmux_cfg = REG_RD(strmux, regi_strmux, rw_cfg);

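	/* Clocks are gated per channel pair (shared with the attached
	 * peripheral, as the field names suggest); enable the pair that
	 * dmanr belongs to. */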
	switch (dmanr) {
	case 0:
	case 1:
		clk_ctrl.dma0_1_eth = 1;
		break;
	case 2:
	case 3:
		clk_ctrl.dma2_3_strcop = 1;
		break;
	case 4:
	case 5:
		clk_ctrl.dma4_5_iop = 1;
		break;
	case 6:
	case 7:
		clk_ctrl.sser_ser_dma6_7 = 1;
		break;
	case 9:
	case 11:
		clk_ctrl.dma9_11 = 1;
		break;
#if MAX_DMA_CHANNELS-1 != 11
#error Check dma.c
#endif
	default:
		spin_unlock_irqrestore(&dma_lock, flags);
		if (options & DMA_VERBOSE_ON_ERROR)
			printk(KERN_ERR "Failed to request DMA %i for %s, "
				"only 0-%i valid\n",
				dmanr, device_id, MAX_DMA_CHANNELS-1);

		if (options & DMA_PANIC_ON_ERROR)
			panic("request_dma error!");
		return -EINVAL;
	}

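	/* Point the stream multiplexer so the owner's data stream
	 * reaches the chosen channel. */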
	switch (owner) {
	case dma_eth:
		if (dmanr == 0)
			strmux_cfg.dma0 = regk_strmux_eth;
		else if (dmanr == 1)
			strmux_cfg.dma1 = regk_strmux_eth;
		else
			panic("Invalid DMA channel for eth\n");
		break;
	case dma_ser0:
		if (dmanr == 0)
			strmux_cfg.dma0 = regk_strmux_ser0;
		else if (dmanr == 1)
			strmux_cfg.dma1 = regk_strmux_ser0;
		else
			panic("Invalid DMA channel for ser0\n");
		break;
	case dma_ser3:
		if (dmanr == 2)
			strmux_cfg.dma2 = regk_strmux_ser3;
		else if (dmanr == 3)
			strmux_cfg.dma3 = regk_strmux_ser3;
		else
			panic("Invalid DMA channel for ser3\n");
		break;
	case dma_strp:
		if (dmanr == 2)
			strmux_cfg.dma2 = regk_strmux_strcop;
		else if (dmanr == 3)
			strmux_cfg.dma3 = regk_strmux_strcop;
		else
			panic("Invalid DMA channel for strp\n");
		break;
	case dma_ser1:
		if (dmanr == 4)
			strmux_cfg.dma4 = regk_strmux_ser1;
		else if (dmanr == 5)
			strmux_cfg.dma5 = regk_strmux_ser1;
		else
			panic("Invalid DMA channel for ser1\n");
		break;
	case dma_iop:
		if (dmanr == 4)
			strmux_cfg.dma4 = regk_strmux_iop;
		else if (dmanr == 5)
			strmux_cfg.dma5 = regk_strmux_iop;
		else
			panic("Invalid DMA channel for iop\n");
		break;
	case dma_ser2:
		if (dmanr == 6)
			strmux_cfg.dma6 = regk_strmux_ser2;
		else if (dmanr == 7)
			strmux_cfg.dma7 = regk_strmux_ser2;
		else
			panic("Invalid DMA channel for ser2\n");
		break;
	case dma_sser:
		if (dmanr == 6)
			strmux_cfg.dma6 = regk_strmux_sser;
		else if (dmanr == 7)
			strmux_cfg.dma7 = regk_strmux_sser;
		else
			panic("Invalid DMA channel for sser\n");
		break;
	case dma_ser4:
		if (dmanr == 9)
			strmux_cfg.dma9 = regk_strmux_ser4;
		else
			panic("Invalid DMA channel for ser4\n");
		break;
	case dma_jpeg:
		if (dmanr == 9)
			strmux_cfg.dma9 = regk_strmux_jpeg;
		else
			panic("Invalid DMA channel for JPEG\n");
		break;
	case dma_h264:
		if (dmanr == 11)
			strmux_cfg.dma11 = regk_strmux_h264;
		else
			panic("Invalid DMA channel for H264\n");
		break;
	}
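	/* Commit: mark the channel as taken and write back the new
	 * clock and mux settings. */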
	used_dma_channels[dmanr] = 1;
	used_dma_channels_users[dmanr] = device_id;
	REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl);
	REG_WR(strmux, regi_strmux, rw_cfg, strmux_cfg);
	spin_unlock_irqrestore(&dma_lock, flags);
	return 0;
}
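/*
 * Release a channel requested earlier; the clock and stream mux
 * settings are left as programmed.
 */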
void crisv32_free_dma(unsigned int dmanr)
{
	unsigned long flags;

	spin_lock_irqsave(&dma_lock, flags);
	used_dma_channels[dmanr] = 0;
	used_dma_channels_users[dmanr] = NULL;
	spin_unlock_irqrestore(&dma_lock, flags);
}
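
/*
 * Illustrative usage sketch (not from an actual driver; the device
 * name, channel number and bandwidth below are hypothetical):
 *
 *	if (crisv32_request_dma(4, "example-ser1", DMA_VERBOSE_ON_ERROR,
 *			0, dma_ser1))
 *		return -EBUSY;
 *	... transfer data on channel 4 ...
 *	crisv32_free_dma(4);
 */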