/*
 * qdio queue initialization
 *
 * Copyright IBM Corp. 2008
 * Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <asm/qdio.h>

#include "cio.h"
#include "css.h"
#include "device.h"
#include "ioasm.h"
#include "chsc.h"
#include "qdio.h"
#include "qdio_debug.h"

#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))

static struct kmem_cache *qdio_q_cache;
static struct kmem_cache *qdio_aob_cache;

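/**
 * qdio_allocate_aob() - allocate an asynchronous operation block (aob)
 *
 * Returns a zero-initialized aob from the aob cache, or NULL on failure.
 */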
struct qaob *qdio_allocate_aob(void)
{
        return kmem_cache_zalloc(qdio_aob_cache, GFP_ATOMIC);
}
EXPORT_SYMBOL_GPL(qdio_allocate_aob);

void qdio_release_aob(struct qaob *aob)
{
        kmem_cache_free(qdio_aob_cache, aob);
}
EXPORT_SYMBOL_GPL(qdio_release_aob);

/**
 * qdio_free_buffers() - free qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers to free
 */
void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
{
        int pos;

        for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
                free_page((unsigned long) buf[pos]);
}
EXPORT_SYMBOL_GPL(qdio_free_buffers);

/**
 * qdio_alloc_buffers() - allocate qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers to allocate
 */
int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
{
        int pos;

        for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
                buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
                if (!buf[pos]) {
                        qdio_free_buffers(buf, count);
                        return -ENOMEM;
                }
        }
        for (pos = 0; pos < count; pos++)
                if (pos % QBUFF_PER_PAGE)
                        buf[pos] = buf[pos - 1] + 1;
        return 0;
}
EXPORT_SYMBOL_GPL(qdio_alloc_buffers);

/**
 * qdio_reset_buffers() - reset qdio buffers
 * @buf: array of pointers to qdio buffers
 * @count: number of qdio buffers that will be zeroed
 */
void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
{
        int pos;

        for (pos = 0; pos < count; pos++)
                memset(buf[pos], 0, sizeof(struct qdio_buffer));
}
EXPORT_SYMBOL_GPL(qdio_reset_buffers);

/*
 * qebsm is only available under 64bit but the adapter sets the feature
 * flag anyway, so we manually override it.
 */
static inline int qebsm_possible(void)
{
        return css_general_characteristics.qebsm;
}

/*
 * qib_param_field: pointer to 128 bytes or NULL, if no param field
 * nr_input_qs: pointer to nr_queues*128 words of data or NULL
 */
static void set_impl_params(struct qdio_irq *irq_ptr,
                            unsigned int qib_param_field_format,
                            unsigned char *qib_param_field,
                            unsigned long *input_slib_elements,
                            unsigned long *output_slib_elements)
{
        struct qdio_q *q;
        int i, j;

        if (!irq_ptr)
                return;

        irq_ptr->qib.pfmt = qib_param_field_format;
        if (qib_param_field)
                memcpy(irq_ptr->qib.parm, qib_param_field,
                       QDIO_MAX_BUFFERS_PER_Q);

        if (!input_slib_elements)
                goto output;

        for_each_input_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                input_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
output:
        if (!output_slib_elements)
                return;

        for_each_output_queue(irq_ptr, q, i) {
                for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                        q->slib->slibe[j].parms =
                                output_slib_elements[i * QDIO_MAX_BUFFERS_PER_Q + j];
        }
}

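/*
 * Allocate a queue struct plus one page for its storage-list-information
 * block (slib) for each of the nr_queues queues.
 */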
static int __qdio_allocate_qs(struct qdio_q **irq_ptr_qs, int nr_queues)
{
        struct qdio_q *q;
        int i;

        for (i = 0; i < nr_queues; i++) {
                q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL);
                if (!q)
                        return -ENOMEM;

                q->slib = (struct slib *) __get_free_page(GFP_KERNEL);
                if (!q->slib) {
                        kmem_cache_free(qdio_q_cache, q);
                        return -ENOMEM;
                }
                irq_ptr_qs[i] = q;
        }
        return 0;
}

int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, int nr_output_qs)
{
        int rc;

        rc = __qdio_allocate_qs(irq_ptr->input_qs, nr_input_qs);
        if (rc)
                return rc;
        rc = __qdio_allocate_qs(irq_ptr->output_qs, nr_output_qs);
        return rc;
}

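/*
 * Clear the queue struct and its slib page and wire the queue up to its
 * irq, queue number, mask and handler.
 */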
static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr,
                              qdio_handler_t *handler, int i)
{
        struct slib *slib = q->slib;

        /* queue must be cleared for qdio_establish */
        memset(q, 0, sizeof(*q));
        memset(slib, 0, PAGE_SIZE);
        q->slib = slib;
        q->irq_ptr = irq_ptr;
        q->mask = 1 << (31 - i);
        q->nr = i;
        q->handler = handler;
}

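/*
 * Set up the storage list (sl), slib and slsb addresses of queue i and copy
 * the caller-provided sbal pointers into the queue. Queues of the same
 * direction are chained together via the slib's nsliba field.
 */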
static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
                                void **sbals_array, int i)
{
        struct qdio_q *prev;
        int j;

        DBF_HEX(&q, sizeof(void *));
        q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2);

        /* fill in sbal */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                q->sbal[j] = *sbals_array++;

        /* fill in slib */
        if (i > 0) {
                prev = (q->is_input_q) ? irq_ptr->input_qs[i - 1]
                        : irq_ptr->output_qs[i - 1];
                prev->slib->nsliba = (unsigned long)q->slib;
        }

        q->slib->sla = (unsigned long)q->sl;
        q->slib->slsba = (unsigned long)&q->slsb.val[0];

        /* fill in sl */
        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
                q->sl->element[j].sbal = (unsigned long)q->sbal[j];
}

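/*
 * Initialize all input and output queues from the qdio_initialize data:
 * handlers, storage lists, tasklets and, for output queues, the outbound
 * timer.
 */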
static void setup_queues(struct qdio_irq *irq_ptr,
                         struct qdio_initialize *qdio_init)
{
        struct qdio_q *q;
        void **input_sbal_array = qdio_init->input_sbal_addr_array;
        void **output_sbal_array = qdio_init->output_sbal_addr_array;
        struct qdio_outbuf_state *output_sbal_state_array =
                qdio_init->output_sbal_state_array;
        int i;

        for_each_input_queue(irq_ptr, q, i) {
                DBF_EVENT("inq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);

                q->is_input_q = 1;
                q->u.in.queue_start_poll = qdio_init->queue_start_poll_array ?
                        qdio_init->queue_start_poll_array[i] : NULL;

                setup_storage_lists(q, irq_ptr, input_sbal_array, i);
                input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                if (is_thinint_irq(irq_ptr)) {
                        tasklet_init(&q->tasklet, tiqdio_inbound_processing,
                                     (unsigned long) q);
                } else {
                        tasklet_init(&q->tasklet, qdio_inbound_processing,
                                     (unsigned long) q);
                }
        }

        for_each_output_queue(irq_ptr, q, i) {
                DBF_EVENT("outq:%1d", i);
                setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);

                q->u.out.sbal_state = output_sbal_state_array;
                output_sbal_state_array += QDIO_MAX_BUFFERS_PER_Q;

                q->is_input_q = 0;
                q->u.out.scan_threshold = qdio_init->scan_threshold;
                setup_storage_lists(q, irq_ptr, output_sbal_array, i);
                output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;

                tasklet_init(&q->tasklet, qdio_outbound_processing,
                             (unsigned long) q);
                setup_timer(&q->u.out.timer, (void(*)(unsigned long))
                            &qdio_outbound_timer, (unsigned long)q);
        }
}

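/* Derive the SIGA requirements of the device from the qdioac1 characteristics. */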
static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
{
        if (qdioac & AC1_SIGA_INPUT_NEEDED)
                irq_ptr->siga_flag.input = 1;
        if (qdioac & AC1_SIGA_OUTPUT_NEEDED)
                irq_ptr->siga_flag.output = 1;
        if (qdioac & AC1_SIGA_SYNC_NEEDED)
                irq_ptr->siga_flag.sync = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
                irq_ptr->siga_flag.sync_after_ai = 1;
        if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
                irq_ptr->siga_flag.sync_out_after_pci = 1;
}

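/*
 * Keep QEBSM enabled only if it was requested in the qib and the subchannel
 * reports it as available and enabled; otherwise fall back to non-QEBSM
 * operation.
 */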
static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
                                  unsigned char qdioac, unsigned long token)
{
        if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM))
                goto no_qebsm;
        if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) ||
            (!(qdioac & AC1_SC_QEBSM_ENABLED)))
                goto no_qebsm;

        irq_ptr->sch_token = token;

        DBF_EVENT("V=V:1");
        DBF_EVENT("%8lx", irq_ptr->sch_token);
        return;

no_qebsm:
        irq_ptr->sch_token = 0;
        irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
        DBF_EVENT("noV=V");
}

/*
 * If there is a qdio_irq we use the chsc_page and store the information
 * in the qdio_irq, otherwise we copy it to the specified structure.
 */
int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr,
                        struct subchannel_id *schid,
                        struct qdio_ssqd_desc *data)
{
        struct chsc_ssqd_area *ssqd;
        int rc;

        DBF_EVENT("getssqd:%4x", schid->sch_no);
        if (!irq_ptr) {
                ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL);
                if (!ssqd)
                        return -ENOMEM;
        } else {
                ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page;
        }

        rc = chsc_ssqd(*schid, ssqd);
        if (rc)
                goto out;

        if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) ||
            !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) ||
            (ssqd->qdio_ssqd.sch != schid->sch_no))
                rc = -EINVAL;

        if (!rc)
                memcpy(data, &ssqd->qdio_ssqd, sizeof(*data));

out:
        if (!irq_ptr)
                free_page((unsigned long)ssqd);

        return rc;
}

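/*
 * Query the subchannel's QDIO characteristics and set up the SIGA and QEBSM
 * handling accordingly. If the query fails, assume the worst case and
 * require SIGA for input, output and sync.
 */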
void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
{
        unsigned char qdioac;
        int rc;

        rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, &irq_ptr->ssqd_desc);
        if (rc) {
                DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no);
                DBF_ERROR("rc:%x", rc);
                /* all flags set, worst case */
                qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED |
                         AC1_SIGA_SYNC_NEEDED;
        } else
                qdioac = irq_ptr->ssqd_desc.qdioac1;

        check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token);
        process_ac_flags(irq_ptr, qdioac);
        DBF_EVENT("ac 1:%2x 2:%4x", qdioac, irq_ptr->ssqd_desc.qdioac2);
        DBF_EVENT("3:%4x qib:%4x", irq_ptr->ssqd_desc.qdioac3, irq_ptr->qib.ac);
}

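/*
 * Free everything that was allocated for this irq: queue structs, slib
 * pages, pending aobs, the qdr, the chsc page and the qdio_irq itself.
 */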
void qdio_release_memory(struct qdio_irq *irq_ptr)
{
        struct qdio_q *q;
        int i;

        /*
         * Must check queue array manually since irq_ptr->nr_input_qs /
         * irq_ptr->nr_output_qs may not yet be set.
         */
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->input_qs[i];
                if (q) {
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }
        for (i = 0; i < QDIO_MAX_QUEUES_PER_IRQ; i++) {
                q = irq_ptr->output_qs[i];
                if (q) {
                        if (q->u.out.use_cq) {
                                int n;

                                for (n = 0; n < QDIO_MAX_BUFFERS_PER_Q; ++n) {
                                        struct qaob *aob = q->u.out.aobs[n];
                                        if (aob) {
                                                qdio_release_aob(aob);
                                                q->u.out.aobs[n] = NULL;
                                        }
                                }

                                qdio_disable_async_operation(&q->u.out);
                        }
                        free_page((unsigned long) q->slib);
                        kmem_cache_free(qdio_q_cache, q);
                }
        }
        free_page((unsigned long) irq_ptr->qdr);
        free_page(irq_ptr->chsc_page);
        free_page((unsigned long) irq_ptr);
}

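/* Fill one queue-descriptor record (qdesfmt0) in the qdr for queue i. */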
static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
                                     struct qdio_q **irq_ptr_qs,
                                     int i, int nr)
{
        irq_ptr->qdr->qdf0[i + nr].sliba =
                (unsigned long)irq_ptr_qs[i]->slib;

        irq_ptr->qdr->qdf0[i + nr].sla =
                (unsigned long)irq_ptr_qs[i]->sl;

        irq_ptr->qdr->qdf0[i + nr].slsba =
                (unsigned long)&irq_ptr_qs[i]->slsb.val[0];

        irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
        irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
}

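/*
 * Fill the queue-descriptor record (qdr) with the queue counts, the qib
 * address and one descriptor per input and output queue.
 */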
static void setup_qdr(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *qdio_init)
{
        int i;

        irq_ptr->qdr->qfmt = qdio_init->q_format;
        irq_ptr->qdr->ac = qdio_init->qdr_ac;
        irq_ptr->qdr->iqdcnt = qdio_init->no_input_qs;
        irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
        irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
        irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
        irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
        irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;

        for (i = 0; i < qdio_init->no_input_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);

        for (i = 0; i < qdio_init->no_output_qs; i++)
                __qdio_allocate_fill_qdr(irq_ptr, irq_ptr->output_qs, i,
                                         qdio_init->no_input_qs);
}

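/*
 * Fill the queue-information block (qib): request QEBSM if available, set
 * the queue format, the slib addresses of the first queues and the adapter
 * name.
 */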
static void setup_qib(struct qdio_irq *irq_ptr,
                      struct qdio_initialize *init_data)
{
        if (qebsm_possible())
                irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

        irq_ptr->qib.rflags |= init_data->qib_rflags;

        irq_ptr->qib.qfmt = init_data->q_format;
        if (init_data->no_input_qs)
                irq_ptr->qib.isliba =
                        (unsigned long)(irq_ptr->input_qs[0]->slib);
        if (init_data->no_output_qs)
                irq_ptr->qib.osliba =
                        (unsigned long)(irq_ptr->output_qs[0]->slib);
        memcpy(irq_ptr->qib.ebcnam, init_data->adapter_name, 8);
}

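/*
 * Set up the qdio_irq for a subchannel: initialize the queues, qib and qdr,
 * fetch the establish-queues and activate-queues CIWs and install the qdio
 * interrupt handler.
 */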
int qdio_setup_irq(struct qdio_initialize *init_data)
{
        struct ciw *ciw;
        struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
        int rc;

        memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
        memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
        memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
        memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
        memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));

        irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
        irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;

        /* wipes qib.ac, required by ar7063 */
        memset(irq_ptr->qdr, 0, sizeof(struct qdr));

        irq_ptr->int_parm = init_data->int_parm;
        irq_ptr->nr_input_qs = init_data->no_input_qs;
        irq_ptr->nr_output_qs = init_data->no_output_qs;
        irq_ptr->cdev = init_data->cdev;
        ccw_device_get_schid(irq_ptr->cdev, &irq_ptr->schid);
        setup_queues(irq_ptr, init_data);

        setup_qib(irq_ptr, init_data);
        qdio_setup_thinint(irq_ptr);
        set_impl_params(irq_ptr, init_data->qib_param_field_format,
                        init_data->qib_param_field,
                        init_data->input_slib_elements,
                        init_data->output_slib_elements);

        /* fill input and output descriptors */
        setup_qdr(irq_ptr, init_data);

        /* qdr, qib, sls, slsbs, slibs, sbales are filled now */

        /* get qdio commands */
        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->equeue = *ciw;

        ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
        if (!ciw) {
                DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no);
                rc = -EINVAL;
                goto out_err;
        }
        irq_ptr->aqueue = *ciw;

        /* set new interrupt handler */
        irq_ptr->orig_handler = init_data->cdev->handler;
        init_data->cdev->handler = qdio_int_handler;
        return 0;
out_err:
        qdio_release_memory(irq_ptr);
        return rc;
}

void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
                                struct ccw_device *cdev)
{
        char s[80];

        snprintf(s, 80, "qdio: %s %s on SC %x using "
                 "AI:%d QEBSM:%d PRI:%d TDD:%d SIGA:%s%s%s%s%s\n",
                 dev_name(&cdev->dev),
                 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
                 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
                 irq_ptr->schid.sch_no,
                 is_thinint_irq(irq_ptr),
                 (irq_ptr->sch_token) ? 1 : 0,
                 (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0,
                 css_general_characteristics.aif_tdd,
                 (irq_ptr->siga_flag.input) ? "R" : " ",
                 (irq_ptr->siga_flag.output) ? "W" : " ",
                 (irq_ptr->siga_flag.sync) ? "S" : " ",
                 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
                 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
        printk(KERN_INFO "%s", s);
}

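/*
 * Allocate the aob pointer array needed for asynchronous completion
 * handling (use_cq) on an output queue.
 */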
int qdio_enable_async_operation(struct qdio_output_q *outq)
{
        outq->aobs = kzalloc(sizeof(struct qaob *) * QDIO_MAX_BUFFERS_PER_Q,
                             GFP_ATOMIC);
        if (!outq->aobs) {
                outq->use_cq = 0;
                return -ENOMEM;
        }
        outq->use_cq = 1;
        return 0;
}

void qdio_disable_async_operation(struct qdio_output_q *q)
{
        kfree(q->aobs);
        q->aobs = NULL;
        q->use_cq = 0;
}

int __init qdio_setup_init(void)
{
        int rc;

        qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q),
                                         256, 0, NULL);
        if (!qdio_q_cache)
                return -ENOMEM;

        qdio_aob_cache = kmem_cache_create("qdio_aob",
                                           sizeof(struct qaob),
                                           sizeof(struct qaob),
                                           0,
                                           NULL);
        if (!qdio_aob_cache) {
                rc = -ENOMEM;
                goto free_qdio_q_cache;
        }

        /* Check for OSA/FCP thin interrupts (bit 67). */
        DBF_EVENT("thinint:%1d",
                  (css_general_characteristics.aif_osa) ? 1 : 0);

        /* Check for QEBSM support in general (bit 58). */
        DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0);
        rc = 0;
out:
        return rc;
free_qdio_q_cache:
        kmem_cache_destroy(qdio_q_cache);
        goto out;
}

void qdio_setup_exit(void)
{
        kmem_cache_destroy(qdio_aob_cache);
        kmem_cache_destroy(qdio_q_cache);
}