/*
 * Provide TDMA helper functions used by cipher and hash algorithm
 * implementations.
 *
 * Author: Boris Brezillon <boris.brezillon@free-electrons.com>
 * Author: Arnaud Ebalard <arno@natisbad.org>
 *
 * This work is based on an initial version written by
 * Sebastian Andrzej Siewior < sebastian at breakpoint dot cc >
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 */

#include "cesa.h"

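/*
 * Advance the scatterlist iterator by @len bytes, stepping to the next
 * scatterlist entry when the current one is exhausted. Returns false when
 * there is nothing left to transfer: either the last entry has been
 * consumed or the whole operation payload (iter->op_len) has been covered.
 */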
bool mv_cesa_req_dma_iter_next_transfer(struct mv_cesa_dma_iter *iter,
					struct mv_cesa_sg_dma_iter *sgiter,
					unsigned int len)
{
	if (!sgiter->sg)
		return false;

	sgiter->op_offset += len;
	sgiter->offset += len;
	if (sgiter->offset == sg_dma_len(sgiter->sg)) {
		if (sg_is_last(sgiter->sg))
			return false;
		sgiter->offset = 0;
		sgiter->sg = sg_next(sgiter->sg);
	}

	if (sgiter->op_offset == iter->op_len)
		return false;

	return true;
}

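/*
 * Start execution of a TDMA descriptor chain: unmask the accelerator 0
 * IDMA-done interrupt, program burst sizes and enable the TDMA engine,
 * configure the SA block for IDMA-driven multi-packet operation, point the
 * engine at the first descriptor, and finally kick accelerator 0. The last
 * write uses writel() rather than the relaxed variant, ensuring all the
 * setup above is observed by the device before the engine starts.
 */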
void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq)
{
	struct mv_cesa_engine *engine = dreq->base.engine;

	writel_relaxed(0, engine->regs + CESA_SA_CFG);

	mv_cesa_set_int_mask(engine, CESA_SA_INT_ACC0_IDMA_DONE);
	writel_relaxed(CESA_TDMA_DST_BURST_128B | CESA_TDMA_SRC_BURST_128B |
		       CESA_TDMA_NO_BYTE_SWAP | CESA_TDMA_EN,
		       engine->regs + CESA_TDMA_CONTROL);

	writel_relaxed(CESA_SA_CFG_ACT_CH0_IDMA | CESA_SA_CFG_MULTI_PKT |
		       CESA_SA_CFG_CH0_W_IDMA | CESA_SA_CFG_PARA_DIS,
		       engine->regs + CESA_SA_CFG);
	writel_relaxed(dreq->chain.first->cur_dma,
		       engine->regs + CESA_TDMA_NEXT_ADDR);
	writel(CESA_SA_CMD_EN_CESA_SA_ACCL0, engine->regs + CESA_SA_CMD);
}

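/*
 * Release a descriptor chain: walk the list, returning each descriptor to
 * the descriptor dma_pool and, for CESA_TDMA_OP descriptors, returning the
 * attached operation context to the op pool. For op descriptors, ->src
 * holds the little-endian DMA address of that context, hence the
 * le32_to_cpu() on free. The current descriptor is kept in old_tdma so
 * ->next can be read before it is freed.
 */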
void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma;) {
		struct mv_cesa_tdma_desc *old_tdma = tdma;

		if (tdma->flags & CESA_TDMA_OP)
			dma_pool_free(cesa_dev->dma->op_pool, tdma->op,
				      le32_to_cpu(tdma->src));

		tdma = tdma->next;
		dma_pool_free(cesa_dev->dma->tdma_desc_pool, old_tdma,
			      old_tdma->cur_dma);
	}

	dreq->chain.first = NULL;
	dreq->chain.last = NULL;
}

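/*
 * Attach a descriptor chain to @engine: sources and destinations that were
 * recorded as SRAM-relative offsets are converted to absolute DMA
 * addresses by adding the engine's SRAM base, and any operation contexts
 * are fixed up for this engine through mv_cesa_adjust_op().
 */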
void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
			 struct mv_cesa_engine *engine)
{
	struct mv_cesa_tdma_desc *tdma;

	for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
		if (tdma->flags & CESA_TDMA_DST_IN_SRAM)
			tdma->dst = cpu_to_le32(tdma->dst + engine->sram_dma);

		if (tdma->flags & CESA_TDMA_SRC_IN_SRAM)
			tdma->src = cpu_to_le32(tdma->src + engine->sram_dma);

		if (tdma->flags & CESA_TDMA_OP)
			mv_cesa_adjust_op(engine, tdma->op);
	}
}

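/*
 * Allocate a zeroed descriptor from the TDMA dma_pool and link it at the
 * tail of @chain, both on the CPU side (->next) and on the hardware side
 * (->next_dma, stored little endian).
 */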
static struct mv_cesa_tdma_desc *
mv_cesa_dma_add_desc(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *new_tdma = NULL;
	dma_addr_t dma_handle;

	new_tdma = dma_pool_alloc(cesa_dev->dma->tdma_desc_pool, flags,
				  &dma_handle);
	if (!new_tdma)
		return ERR_PTR(-ENOMEM);

	memset(new_tdma, 0, sizeof(*new_tdma));
	new_tdma->cur_dma = dma_handle;
	if (chain->last) {
		chain->last->next_dma = cpu_to_le32(dma_handle);
		chain->last->next = new_tdma;
	} else {
		chain->first = new_tdma;
	}

	chain->last = new_tdma;

	return new_tdma;
}

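/*
 * Append a descriptor that copies an operation context into the engine's
 * SRAM. The context is cloned from @op_templ into a dma_pool buffer; with
 * @skip_ctx set, only the descriptor part of the context is transferred.
 * Every transferring descriptor in this file sets bit 31 of byte_cnt,
 * which looks like an ownership/valid flag consumed by the TDMA engine
 * (inferred from usage here, not from the datasheet).
 */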
struct mv_cesa_op_ctx *mv_cesa_dma_add_op(struct mv_cesa_tdma_chain *chain,
					  const struct mv_cesa_op_ctx *op_templ,
					  bool skip_ctx,
					  gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;
	struct mv_cesa_op_ctx *op;
	dma_addr_t dma_handle;
	unsigned int size;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return ERR_CAST(tdma);

	op = dma_pool_alloc(cesa_dev->dma->op_pool, flags, &dma_handle);
	if (!op)
		return ERR_PTR(-ENOMEM);

	*op = *op_templ;

	size = skip_ctx ? sizeof(op->desc) : sizeof(*op);

	tdma = chain->last;
	tdma->op = op;
	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = cpu_to_le32(dma_handle);
	tdma->flags = CESA_TDMA_DST_IN_SRAM | CESA_TDMA_OP;

	return op;
}

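/*
 * Append a plain data-transfer descriptor moving @size bytes from @src to
 * @dst. Only the *_IN_SRAM bits are kept from @flags; addresses given as
 * SRAM offsets are converted to absolute DMA addresses later, in
 * mv_cesa_dma_prepare().
 */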
int mv_cesa_dma_add_data_transfer(struct mv_cesa_tdma_chain *chain,
				  dma_addr_t dst, dma_addr_t src, u32 size,
				  u32 flags, gfp_t gfp_flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, gfp_flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(size | BIT(31));
	tdma->src = src;
	tdma->dst = dst;

	flags &= (CESA_TDMA_DST_IN_SRAM | CESA_TDMA_SRC_IN_SRAM);
	tdma->flags = flags | CESA_TDMA_DATA;

	return 0;
}

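/*
 * Append an empty descriptor: byte_cnt, src and dst all stay zero, so the
 * engine treats it as a no-op. Judging by the name, callers use it as a
 * placeholder from which a chain can be launched.
 */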
int mv_cesa_dma_add_dummy_launch(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	return 0;
}

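/*
 * Append a descriptor with a zero byte count but bit 31 set. Together with
 * the dummy-launch helper above, this reads like an end-of-chain marker
 * for the TDMA engine (again inferred from usage rather than documented
 * here).
 */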
int mv_cesa_dma_add_dummy_end(struct mv_cesa_tdma_chain *chain, gfp_t flags)
{
	struct mv_cesa_tdma_desc *tdma;

	tdma = mv_cesa_dma_add_desc(chain, flags);
	if (IS_ERR(tdma))
		return PTR_ERR(tdma);

	tdma->byte_cnt = cpu_to_le32(BIT(31));

	return 0;
}

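/*
 * Queue one data-transfer descriptor per scatterlist chunk until the
 * current operation's payload has been covered. The direction decides
 * which side lives in SRAM: for DMA_TO_DEVICE, data flows from the
 * scatterlist into the engine's data area at CESA_SA_DATA_SRAM_OFFSET;
 * for DMA_FROM_DEVICE, it flows back out.
 */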
int mv_cesa_dma_add_op_transfers(struct mv_cesa_tdma_chain *chain,
				 struct mv_cesa_dma_iter *dma_iter,
				 struct mv_cesa_sg_dma_iter *sgiter,
				 gfp_t gfp_flags)
{
	u32 flags = sgiter->dir == DMA_TO_DEVICE ?
		    CESA_TDMA_DST_IN_SRAM : CESA_TDMA_SRC_IN_SRAM;
	unsigned int len;

	do {
		dma_addr_t dst, src;
		int ret;

		len = mv_cesa_req_dma_iter_transfer_len(dma_iter, sgiter);
		if (sgiter->dir == DMA_TO_DEVICE) {
			dst = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
			src = sg_dma_address(sgiter->sg) + sgiter->offset;
		} else {
			dst = sg_dma_address(sgiter->sg) + sgiter->offset;
			src = CESA_SA_DATA_SRAM_OFFSET + sgiter->op_offset;
		}

		ret = mv_cesa_dma_add_data_transfer(chain, dst, src, len,
						    flags, gfp_flags);
		if (ret)
			return ret;

	} while (mv_cesa_req_dma_iter_next_transfer(dma_iter, sgiter, len));

	return 0;
}