/*
 * Driver for the Conexant CX23885 PCIe bridge
 *
 * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>

#include "cx23885.h"

static unsigned int vbibufs = 4;
module_param(vbibufs, int, 0644);
MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32");

static unsigned int vbi_debug;
module_param(vbi_debug, int, 0644);
MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]");

#define dprintk(level, fmt, arg...)\
	do { if (vbi_debug >= level)\
		printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
	} while (0)
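
/* Note: dprintk() relies on a local 'struct cx23885_dev *dev' being in
 * scope at the call site, since it prints dev->name in every message. */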

/* ------------------------------------------------------------------ */

#define VBI_LINE_LENGTH 1440
#define VBI_NTSC_LINE_COUNT 12
#define VBI_PAL_LINE_COUNT 18

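/*
 * The raw VBI window reported to userspace: 1440 greyscale samples per line
 * at 27 MHz (roughly 53 us, i.e. a full active line). With the start offsets
 * used below this captures lines 10-21 of each field for NTSC (12 lines)
 * and lines 6-23 of each field for PAL (18 lines).
 */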
int cx23885_vbi_fmt(struct file *file, void *priv,
	struct v4l2_format *f)
{
	struct cx23885_dev *dev = video_drvdata(file);

	f->fmt.vbi.sampling_rate = 27000000;
	f->fmt.vbi.samples_per_line = VBI_LINE_LENGTH;
	f->fmt.vbi.sample_format = V4L2_PIX_FMT_GREY;
	f->fmt.vbi.offset = 0;
	f->fmt.vbi.flags = 0;
	if (dev->tvnorm & V4L2_STD_525_60) {
		/* ntsc */
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_525_F1_START + 9;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_525_F2_START + 9;
		f->fmt.vbi.count[0] = VBI_NTSC_LINE_COUNT;
		f->fmt.vbi.count[1] = VBI_NTSC_LINE_COUNT;
	} else if (dev->tvnorm & V4L2_STD_625_50) {
		/* pal */
		f->fmt.vbi.start[0] = V4L2_VBI_ITU_625_F1_START + 5;
		f->fmt.vbi.start[1] = V4L2_VBI_ITU_625_F2_START + 5;
		f->fmt.vbi.count[0] = VBI_PAL_LINE_COUNT;
		f->fmt.vbi.count[1] = VBI_PAL_LINE_COUNT;
	}

	return 0;
}

/* We're given the Video Interrupt status register.
 * cx23885_video_irq() has already validated the
 * potential error bits; we only need to handle the
 * VBI payload and indicate whether we actually
 * processed any.
 */
int cx23885_vbi_irq(struct cx23885_dev *dev, u32 status)
{
	u32 count;
	int handled = 0;

	if (status & VID_BC_MSK_VBI_RISCI1) {
		dprintk(1, "%s() VID_BC_MSK_VBI_RISCI1\n", __func__);
		spin_lock(&dev->slock);
		count = cx_read(VID_A_GPCNT);
		cx23885_video_wakeup(dev, &dev->vbiq, count);
		spin_unlock(&dev->slock);
		handled++;
	}

	return handled;
}

static int cx23885_start_vbi_dma(struct cx23885_dev *dev,
			 struct cx23885_dmaqueue *q,
			 struct cx23885_buffer *buf)
{
	dprintk(1, "%s()\n", __func__);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02],
				   VBI_LINE_LENGTH, buf->risc.dma);

	/* reset counter */
	cx_write(VID_A_GPCNT_CTL, 3);
	cx_write(VID_A_VBI_CTRL, 3);
	cx_write(VBI_A_GPCNT_CTL, 3);
	q->count = 0;

	/* enable irq */
	cx23885_irq_add_enable(dev, 0x01);
	cx_set(VID_A_INT_MSK, 0x000022);

	/* start dma */
	cx_set(DEV_CNTRL2, (1<<5));
	cx_set(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */

	return 0;
}

/* ------------------------------------------------------------------ */

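/* Each buffer holds both fields, hence lines * VBI_LINE_LENGTH * 2 bytes. */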
static int queue_setup(struct vb2_queue *q, const void *parg,
			   unsigned int *num_buffers, unsigned int *num_planes,
			   unsigned int sizes[], void *alloc_ctxs[])
{
	struct cx23885_dev *dev = q->drv_priv;
	unsigned lines = VBI_PAL_LINE_COUNT;

	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;
	*num_planes = 1;
	sizes[0] = lines * VBI_LINE_LENGTH * 2;
	alloc_ctxs[0] = dev->alloc_ctx;
	return 0;
}

static int buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	unsigned lines = VBI_PAL_LINE_COUNT;

	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;

	if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);

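	/* cx23885_risc_vbibuffer() builds the RISC program for both fields;
	 * from the offsets passed below, the top field appears to land at
	 * buffer offset 0 and the bottom field right after it, matching the
	 * lines * VBI_LINE_LENGTH * 2 sizing above. */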
	cx23885_risc_vbibuffer(dev->pci, &buf->risc,
			 sgt->sgl,
			 0, VBI_LINE_LENGTH * lines,
			 VBI_LINE_LENGTH, 0,
			 lines);
	return 0;
}

static void buffer_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);

	cx23885_free_buffer(vb->vb2_queue->drv_priv, buf);
}

/*
 * The risc program for each buffer works as follows: it starts with a simple
 * 'JUMP to addr + 12', which is effectively a NOP. Then the code to DMA the
 * buffer follows and at the end we have a JUMP back to the start + 12 (skipping
 * the initial JUMP).
 *
 * This is the risc program of the first buffer to be queued if the active list
 * is empty and it just keeps DMAing this buffer without generating any
 * interrupts.
 *
 * If a new buffer is added then the initial JUMP in the code for that buffer
 * will generate an interrupt which signals that the previous buffer has been
 * DMAed successfully and that it can be returned to userspace.
 *
 * It also sets the final jump of the previous buffer to the start of the new
 * buffer, thus chaining the new buffer into the DMA chain. This is a single
 * atomic u32 write, so there is no race condition.
 *
 * The end result of all this is that you only get an interrupt when a buffer
 * is ready, so the control flow is very easy.
 */
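/*
 * In buffer_queue() below, the initial JUMP lives at the start of risc.cpu
 * (cpu[1] is the 'start + 12' address) and risc.jmp[] holds the final JUMP
 * back; OR-ing RISC_IRQ1 into cpu[0] is what makes that initial JUMP raise
 * the "previous buffer is done" interrupt described above.
 */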
static void buffer_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf = container_of(vbuf,
		struct cx23885_buffer, vb);
	struct cx23885_buffer *prev;
	struct cx23885_dmaqueue *q = &dev->vbiq;
	unsigned long flags;

	buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 12);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&q->active)) {
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		dprintk(2, "[%p/%d] vbi_queue - first active\n",
			buf, buf->vb.vb2_buf.index);

	} else {
		buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1);
		prev = list_entry(q->active.prev, struct cx23885_buffer,
				  queue);
		spin_lock_irqsave(&dev->slock, flags);
		list_add_tail(&buf->queue, &q->active);
		spin_unlock_irqrestore(&dev->slock, flags);
		prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		dprintk(2, "[%p/%d] buffer_queue - append to active\n",
			buf, buf->vb.vb2_buf.index);
	}
}

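/*
 * Start streaming: kick off DMA from the first buffer on the active list
 * (the list is expected to be non-empty by the time vb2 calls this).
 */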
static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct cx23885_dev *dev = q->drv_priv;
	struct cx23885_dmaqueue *dmaq = &dev->vbiq;
	struct cx23885_buffer *buf = list_entry(dmaq->active.next,
		struct cx23885_buffer, queue);

	cx23885_start_vbi_dma(dev, dmaq, buf);
	return 0;
}

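/*
 * Stop streaming: disable the DMA engine and hand every buffer still on the
 * active list back to vb2 marked as errored.
 */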
static void cx23885_stop_streaming(struct vb2_queue *q)
{
	struct cx23885_dev *dev = q->drv_priv;
	struct cx23885_dmaqueue *dmaq = &dev->vbiq;
	unsigned long flags;

	cx_clear(VID_A_DMA_CTL, 0x22); /* FIFO and RISC enable */
	spin_lock_irqsave(&dev->slock, flags);
	while (!list_empty(&dmaq->active)) {
		struct cx23885_buffer *buf = list_entry(dmaq->active.next,
			struct cx23885_buffer, queue);

		list_del(&buf->queue);
		vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	spin_unlock_irqrestore(&dev->slock, flags);
}

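/* vb2 queue operations for the raw VBI capture node; wired up by the video
 * node setup code in cx23885-video.c. */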
struct vb2_ops cx23885_vbi_qops = {
	.queue_setup     = queue_setup,
	.buf_prepare     = buffer_prepare,
	.buf_finish      = buffer_finish,
	.buf_queue       = buffer_queue,
	.wait_prepare    = vb2_ops_wait_prepare,
	.wait_finish     = vb2_ops_wait_finish,
	.start_streaming = cx23885_start_streaming,
	.stop_streaming  = cx23885_stop_streaming,
};