Pull acpi_os_free into release branch
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / s390 / cio / qdio.c
1 /*
2 *
3 * linux/drivers/s390/cio/qdio.c
4 *
5 * Linux for S/390 QDIO base support, Hipersocket base support
6 * version 2
7 *
8 * Copyright 2000,2002 IBM Corporation
9 * Author(s): Utz Bacher <utz.bacher@de.ibm.com>
10 * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
11 *
12 * Restriction: only 63 iqdio subchannels would have its own indicator,
13 * after that, subsequent subchannels share one indicator
14 *
15 *
16 *
17 *
18 * This program is free software; you can redistribute it and/or modify
19 * it under the terms of the GNU General Public License as published by
20 * the Free Software Foundation; either version 2, or (at your option)
21 * any later version.
22 *
23 * This program is distributed in the hope that it will be useful,
24 * but WITHOUT ANY WARRANTY; without even the implied warranty of
25 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
26 * GNU General Public License for more details.
27 *
28 * You should have received a copy of the GNU General Public License
29 * along with this program; if not, write to the Free Software
30 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31 */
32
33 #include <linux/module.h>
34 #include <linux/init.h>
35
36 #include <linux/slab.h>
37 #include <linux/kernel.h>
38 #include <linux/proc_fs.h>
39 #include <linux/timer.h>
40 #include <linux/mempool.h>
41
42 #include <asm/ccwdev.h>
43 #include <asm/io.h>
44 #include <asm/atomic.h>
45 #include <asm/semaphore.h>
46 #include <asm/timex.h>
47
48 #include <asm/debug.h>
49 #include <asm/qdio.h>
50
51 #include "cio.h"
52 #include "css.h"
53 #include "device.h"
54 #include "airq.h"
55 #include "qdio.h"
56 #include "ioasm.h"
57 #include "chsc.h"
58
59 /****************** MODULE PARAMETER VARIABLES ********************/
60 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
61 MODULE_DESCRIPTION("QDIO base support version 2, " \
62 "Copyright 2000 IBM Corporation");
63 MODULE_LICENSE("GPL");
64
65 /******************** HERE WE GO ***********************************/
66
67 static const char version[] = "QDIO base support version 2";
68
69 #ifdef QDIO_PERFORMANCE_STATS
70 static int proc_perf_file_registration;
71 static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
72 static struct qdio_perf_stats perf_stats;
73 #endif /* QDIO_PERFORMANCE_STATS */
74
75 static int hydra_thinints;
76 static int is_passthrough = 0;
77 static int omit_svs;
78
79 static int indicator_used[INDICATORS_PER_CACHELINE];
80 static __u32 * volatile indicators;
81 static __u32 volatile spare_indicator;
82 static atomic_t spare_indicator_usecount;
83 #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
84 static mempool_t *qdio_mempool_scssc;
85
86 static debug_info_t *qdio_dbf_setup;
87 static debug_info_t *qdio_dbf_sbal;
88 static debug_info_t *qdio_dbf_trace;
89 static debug_info_t *qdio_dbf_sense;
90 #ifdef CONFIG_QDIO_DEBUG
91 static debug_info_t *qdio_dbf_slsb_out;
92 static debug_info_t *qdio_dbf_slsb_in;
93 #endif /* CONFIG_QDIO_DEBUG */
94
95 /* iQDIO stuff: */
96 static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
97 during a while loop */
98 static DEFINE_SPINLOCK(ttiq_list_lock);
99 static int register_thinint_result;
100 static void tiqdio_tl(unsigned long);
101 static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
102
103 /* not a macro, as one of the arguments is atomic_read */
/*
 * Return the smaller of two ints.  Kept as a function rather than a
 * macro because callers pass atomic_read() results, which must be
 * evaluated exactly once.
 */
static inline int
qdio_min(int a, int b)
{
	return (a < b) ? a : b;
}
112
113 /***************** SCRUBBER HELPER ROUTINES **********************/
114
/*
 * Timestamp helper backing the NOW macro.  NOTE(review): the TOD clock
 * presumably carries one microsecond at bit value 1<<12, so shifting by
 * 10 yields quarter-microsecond units even though the trailing comment
 * talks about microseconds — the QDIO timing constants appear to be
 * scaled to this resolution; confirm before changing the shift.
 */
static inline __u64
qdio_get_micros(void)
{
	return (get_clock() >> 10); /* time>>12 is microseconds */
}
120
121 /*
122 * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
123 * the q in any case, so that we'll not be interrupted when we are in
124 * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
125 * ever works (last famous words)
126 */
127 static inline int
128 qdio_reserve_q(struct qdio_q *q)
129 {
130 return atomic_add_return(1,&q->use_count) - 1;
131 }
132
/* Drop one reference taken by qdio_reserve_q. */
static inline void
qdio_release_q(struct qdio_q *q)
{
	atomic_dec(&q->use_count);
}
138
139 /*check ccq */
140 static inline int
141 qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
142 {
143 char dbf_text[15];
144
145 if (ccq == 0 || ccq == 32 || ccq == 96)
146 return 0;
147 if (ccq == 97)
148 return 1;
149 /*notify devices immediately*/
150 sprintf(dbf_text,"%d", ccq);
151 QDIO_DBF_TEXT2(1,trace,dbf_text);
152 return -EIO;
153 }
154 /* EQBS: extract buffer states */
155 static inline int
156 qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
157 unsigned int *start, unsigned int *cnt)
158 {
159 struct qdio_irq *irq;
160 unsigned int tmp_cnt, q_no, ccq;
161 int rc ;
162 char dbf_text[15];
163
164 ccq = 0;
165 tmp_cnt = *cnt;
166 irq = (struct qdio_irq*)q->irq_ptr;
167 q_no = q->q_no;
168 if(!q->is_input_q)
169 q_no += irq->no_input_qs;
170 again:
171 ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
172 rc = qdio_check_ccq(q, ccq);
173 if (rc == 1) {
174 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
175 goto again;
176 }
177 if (rc < 0) {
178 QDIO_DBF_TEXT2(1,trace,"eqberr");
179 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
180 QDIO_DBF_TEXT2(1,trace,dbf_text);
181 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
182 QDIO_STATUS_LOOK_FOR_ERROR,
183 0, 0, 0, -1, -1, q->int_parm);
184 return 0;
185 }
186 return (tmp_cnt - *cnt);
187 }
188
189 /* SQBS: set buffer states */
190 static inline int
191 qdio_do_sqbs(struct qdio_q *q, unsigned char state,
192 unsigned int *start, unsigned int *cnt)
193 {
194 struct qdio_irq *irq;
195 unsigned int tmp_cnt, q_no, ccq;
196 int rc;
197 char dbf_text[15];
198
199 ccq = 0;
200 tmp_cnt = *cnt;
201 irq = (struct qdio_irq*)q->irq_ptr;
202 q_no = q->q_no;
203 if(!q->is_input_q)
204 q_no += irq->no_input_qs;
205 again:
206 ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
207 rc = qdio_check_ccq(q, ccq);
208 if (rc == 1) {
209 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
210 goto again;
211 }
212 if (rc < 0) {
213 QDIO_DBF_TEXT3(1,trace,"sqberr");
214 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt,*cnt,ccq,q_no);
215 QDIO_DBF_TEXT3(1,trace,dbf_text);
216 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
217 QDIO_STATUS_LOOK_FOR_ERROR,
218 0, 0, 0, -1, -1, q->int_parm);
219 return 0;
220 }
221 return (tmp_cnt - *cnt);
222 }
223
224 static inline int
225 qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
226 unsigned char state, unsigned int *count)
227 {
228 volatile char *slsb;
229 struct qdio_irq *irq;
230
231 irq = (struct qdio_irq*)q->irq_ptr;
232 if (!irq->is_qebsm) {
233 slsb = (char *)&q->slsb.acc.val[(*bufno)];
234 xchg(slsb, state);
235 return 1;
236 }
237 return qdio_do_sqbs(q, state, bufno, count);
238 }
239
#ifdef CONFIG_QDIO_DEBUG
/* Dump the queue's whole SLSB into the slsb_in/slsb_out debug areas,
 * but only for queues of the traced queue type. */
static inline void
qdio_trace_slsb(struct qdio_q *q)
{
	if (q->queue_type != QDIO_TRACE_QTYPE)
		return;
	if (q->is_input_q)
		QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
			      QDIO_MAX_BUFFERS_PER_Q);
	else
		QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
			      QDIO_MAX_BUFFERS_PER_Q);
}
#endif
254
/*
 * Wrapper around qdio_set_slsb that, with CONFIG_QDIO_DEBUG, records
 * the SLSB contents both before and after the state change.
 */
static inline int
set_slsb(struct qdio_q *q, unsigned int *bufno,
	 unsigned char state, unsigned int *count)
{
	int changed;

#ifdef CONFIG_QDIO_DEBUG
	qdio_trace_slsb(q);
#endif
	changed = qdio_set_slsb(q, bufno, state, count);
#ifdef CONFIG_QDIO_DEBUG
	qdio_trace_slsb(q);
#endif
	return changed;
}
/*
 * Issue SIGA-s (synchronize) for the queue's subchannel; gpr2/gpr3
 * carry the output resp. input queue masks.  Returns the condition
 * code from do_siga_sync (non-zero codes are traced).
 */
static inline int
qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
	       unsigned int gpr3)
{
	int cc;

	QDIO_DBF_TEXT4(0,trace,"sigasync");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_syncs++;
#endif /* QDIO_PERFORMANCE_STATS */

	cc = do_siga_sync(q->schid, gpr2, gpr3);
	if (cc)
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}
288
289 static inline int
290 qdio_siga_sync_q(struct qdio_q *q)
291 {
292 if (q->is_input_q)
293 return qdio_siga_sync(q, 0, q->mask);
294 return qdio_siga_sync(q, q->mask, 0);
295 }
296
297 static int
298 __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
299 {
300 struct qdio_irq *irq;
301 unsigned int fc = 0;
302 unsigned long schid;
303
304 irq = (struct qdio_irq *) q->irq_ptr;
305 if (!irq->is_qebsm)
306 schid = *((u32 *)&q->schid);
307 else {
308 schid = irq->sch_token;
309 fc |= 0x80;
310 }
311 return do_siga_output(schid, q->mask, busy_bit, fc);
312 }
313
/*
 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
 * an access exception
 *
 * For iqdio queues a cc=2/busy-bit condition is retried until
 * QDIO_BUSY_BIT_PATIENCE expires; if it persists,
 * QDIO_SIGA_ERROR_B_BIT_SET is or'ed into the returned cc so the
 * caller can distinguish it from a plain cc=2.
 */
static inline int
qdio_siga_output(struct qdio_q *q)
{
	int cc;
	__u32 busy_bit;
	__u64 start_time=0;

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_outs++;
#endif /* QDIO_PERFORMANCE_STATS */

	QDIO_DBF_TEXT4(0,trace,"sigaout");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	for (;;) {
		cc = __do_siga_output(q, &busy_bit);
//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
		/* only retry for iqdio queues; see the story in
		 * qdio_kick_outbound_q about VM Guest LANs */
		if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
			if (!start_time)
				start_time=NOW;
			if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
				break;
		} else
			break;
	}

	if ((cc==2) && (busy_bit))
		cc |= QDIO_SIGA_ERROR_B_BIT_SET;

	if (cc)
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}
352
/*
 * Issue SIGA-r (read) for the queue.  Returns the condition code from
 * do_siga_input (non-zero codes are traced).
 */
static inline int
qdio_siga_input(struct qdio_q *q)
{
	int cc;

	QDIO_DBF_TEXT4(0,trace,"sigain");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_ins++;
#endif /* QDIO_PERFORMANCE_STATS */

	cc = do_siga_input(q->schid, q->mask);

	if (cc)
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}
372
373 /* locked by the locks in qdio_activate and qdio_cleanup */
374 static __u32 *
375 qdio_get_indicator(void)
376 {
377 int i;
378
379 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
380 if (!indicator_used[i]) {
381 indicator_used[i]=1;
382 return indicators+i;
383 }
384 atomic_inc(&spare_indicator_usecount);
385 return (__u32 * volatile) &spare_indicator;
386 }
387
388 /* locked by the locks in qdio_activate and qdio_cleanup */
389 static void
390 qdio_put_indicator(__u32 *addr)
391 {
392 int i;
393
394 if ( (addr) && (addr!=&spare_indicator) ) {
395 i=addr-indicators;
396 indicator_used[i]=0;
397 }
398 if (addr == &spare_indicator)
399 atomic_dec(&spare_indicator_usecount);
400 }
401
/* Atomically reset a (summary or device state change) indicator. */
static inline void
tiqdio_clear_summary_bit(__u32 *location)
{
	QDIO_DBF_TEXT5(0,trace,"clrsummb");
	QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));

	xchg(location,0);
}
410
/* Atomically set a (summary or device state change) indicator to
 * all-ones. */
static inline void
tiqdio_set_summary_bit(__u32 *location)
{
	QDIO_DBF_TEXT5(0,trace,"setsummb");
	QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));

	xchg(location,-1);
}
419
/* Schedule the thin-interrupt tasklet with high priority. */
static inline void
tiqdio_sched_tl(void)
{
	tasklet_hi_schedule(&tiqdio_tasklet);
}
425
/*
 * Insert an input queue into the circular tiq_list (under
 * ttiq_list_lock) and kick the thin-interrupt tasklet so the queue
 * gets processed.  No-op for output queues, queues in shutdown, and
 * queues that are already on the list (list_prev/list_next non-NULL).
 */
static inline void
qdio_mark_tiq(struct qdio_q *q)
{
	unsigned long flags;

	QDIO_DBF_TEXT4(0,trace,"mark iq");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	spin_lock_irqsave(&ttiq_list_lock,flags);
	if (unlikely(atomic_read(&q->is_in_shutdown)))
		goto out_unlock;

	if (!q->is_input_q)
		goto out_unlock;

	/* already linked in */
	if ((q->list_prev) || (q->list_next))
		goto out_unlock;

	if (!tiq_list) {
		/* first entry: the list is a single-element ring */
		tiq_list=q;
		q->list_prev=q;
		q->list_next=q;
	} else {
		/* insert q in front of the current list head */
		q->list_next=tiq_list;
		q->list_prev=tiq_list->list_prev;
		tiq_list->list_prev->list_next=q;
		tiq_list->list_prev=q;
	}
	spin_unlock_irqrestore(&ttiq_list_lock,flags);

	/* raise the device indicator and run the tasklet */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	return;
out_unlock:
	spin_unlock_irqrestore(&ttiq_list_lock,flags);
	return;
}
463
/* Schedule the queue's own tasklet, unless the queue is shutting
 * down. */
static inline void
qdio_mark_q(struct qdio_q *q)
{
	QDIO_DBF_TEXT4(0,trace,"mark q");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(atomic_read(&q->is_in_shutdown)))
		return;

	tasklet_schedule(&q->tasklet);
}
475
/*
 * Leave the PROCESSING (polling) state on an input queue.  Returns 1
 * when polling was already off or nothing more is primed; returns 0
 * when the next buffer turned PRIMED meanwhile, in which case the
 * summary bit is re-raised so the window between resetting the
 * indicator and this check cannot lose an interrupt.  Without
 * QDIO_USE_PROCESSING_STATE this is a trivial "1".
 */
static inline int
qdio_stop_polling(struct qdio_q *q)
{
#ifdef QDIO_USE_PROCESSING_STATE
	unsigned int tmp, gsf, count = 1;
	unsigned char state = 0;
	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;

	if (!atomic_swap(&q->polling,0))
		return 1;

	QDIO_DBF_TEXT4(0,trace,"stoppoll");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/* show the card that we are not polling anymore */
	if (!q->is_input_q)
		return 1;

	/* tmp = buffer just before the saved frontier (mod ring size):
	 * that is the one left in PROCESSING state */
	tmp = gsf = GET_SAVED_FRONTIER(q);
	tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
	set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);

	/*
	 * we don't issue this SYNC_MEMORY, as we trust Rick T and
	 * moreover will not use the PROCESSING state under VM, so
	 * q->polling was 0 anyway
	 */
	/*SYNC_MEMORY;*/
	if (irq->is_qebsm) {
		count = 1;
		qdio_do_eqbs(q, &state, &gsf, &count);
	} else
		state = q->slsb.acc.val[gsf];
	if (state != SLSB_P_INPUT_PRIMED)
		return 1;
	/*
	 * set our summary bit again, as otherwise there is a
	 * small window we can miss between resetting it and
	 * checking for PRIMED state
	 */
	if (q->is_thinint_q)
		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	return 0;

#else /* QDIO_USE_PROCESSING_STATE */
	return 1;
#endif /* QDIO_USE_PROCESSING_STATE */
}
524
/*
 * see the comment in do_QDIO and before qdio_reserve_q about the
 * sophisticated locking outside of unmark_q, so that we don't need to
 * disable the interrupts :-)
 *
 * Remove a thin-interrupt input queue from the circular tiq_list
 * (under ttiq_list_lock).  Safe to call for queues not on the list.
 */
static inline void
qdio_unmark_q(struct qdio_q *q)
{
	unsigned long flags;

	QDIO_DBF_TEXT4(0,trace,"unmark q");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/* not linked in: nothing to do (unlocked fast path) */
	if ((!q->list_prev)||(!q->list_next))
		return;

	if ((q->is_thinint_q)&&(q->is_input_q)) {
		/* iQDIO */
		spin_lock_irqsave(&ttiq_list_lock,flags);
		/* in case cleanup has done this already and simultanously
		 * qdio_unmark_q is called from the interrupt handler, we've
		 * got to check this in this specific case again */
		if ((!q->list_prev)||(!q->list_next))
			goto out;
		if (q->list_next==q) {
			/* q was the only interesting q */
			tiq_list=NULL;
			q->list_next=NULL;
			q->list_prev=NULL;
		} else {
			/* splice q out of the ring and move the list
			 * head past it */
			q->list_next->list_prev=q->list_prev;
			q->list_prev->list_next=q->list_next;
			tiq_list=q->list_next;
			q->list_next=NULL;
			q->list_prev=NULL;
		}
out:
		spin_unlock_irqrestore(&ttiq_list_lock,flags);
	}
}
565
566 static inline unsigned long
567 tiqdio_clear_global_summary(void)
568 {
569 unsigned long time;
570
571 QDIO_DBF_TEXT5(0,trace,"clrglobl");
572
573 time = do_clear_global_summary();
574
575 QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
576
577 return time;
578 }
579
580
581 /************************* OUTBOUND ROUTINES *******************************/
/*
 * QEBSM variant of the outbound frontier scan: use EQBS to extract the
 * state of up to "count" buffers starting at first_to_check, advance
 * first_to_check past ERROR/EMPTY buffers and record error state.
 * Returns the (possibly updated) first_to_check.
 */
static int
qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
{
	struct qdio_irq *irq;
	unsigned char state;
	unsigned int cnt, count, ftc;

	irq = (struct qdio_irq *) q->irq_ptr;
	if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
		SYNC_MEMORY;

	ftc = q->first_to_check;
	/* never scan all 128 buffers, see the comment in
	 * qdio_get_outbound_buffer_frontier */
	count = qdio_min(atomic_read(&q->number_of_buffers_used),
			(QDIO_MAX_BUFFERS_PER_Q-1));
	if (count == 0)
		return q->first_to_check;
	cnt = qdio_do_eqbs(q, &state, &ftc, &count);
	if (cnt == 0)
		return q->first_to_check;
	switch (state) {
	case SLSB_P_OUTPUT_ERROR:
		QDIO_DBF_TEXT3(0,trace,"outperr");
		atomic_sub(cnt , &q->number_of_buffers_used);
		if (q->qdio_error)
			q->error_status_flags |=
				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
		q->qdio_error = SLSB_P_OUTPUT_ERROR;
		q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
		q->first_to_check = ftc;
		break;
	case SLSB_P_OUTPUT_EMPTY:
		/* the adapter consumed these buffers */
		QDIO_DBF_TEXT5(0,trace,"outpempt");
		atomic_sub(cnt, &q->number_of_buffers_used);
		q->first_to_check = ftc;
		break;
	case SLSB_CU_OUTPUT_PRIMED:
		/* all buffers primed */
		QDIO_DBF_TEXT5(0,trace,"outpprim");
		break;
	default:
		break;
	}
	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
	return q->first_to_check;
}
627
/*
 * QEBSM variant of the inbound frontier scan: use EQBS to extract
 * buffer states, advance first_to_check over PRIMED/ERROR buffers
 * (marking them NOT_INIT resp. PROCESSING) and record error state.
 * Returns the (possibly updated) first_to_check.
 */
static int
qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
{
	struct qdio_irq *irq;
	unsigned char state;
	int tmp, ftc, count, cnt;
	char dbf_text[15];


	irq = (struct qdio_irq *) q->irq_ptr;
	ftc = q->first_to_check;
	/* never scan all 128 buffers, otherwise
	 * qdio_has_inbound_q_moved would report "not moved" */
	count = qdio_min(atomic_read(&q->number_of_buffers_used),
			(QDIO_MAX_BUFFERS_PER_Q-1));
	if (count == 0)
		return q->first_to_check;
	cnt = qdio_do_eqbs(q, &state, &ftc, &count);
	if (cnt == 0)
		return q->first_to_check;
	switch (state) {
	case SLSB_P_INPUT_ERROR :
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT3(1,trace,"inperr");
		sprintf(dbf_text,"%2x,%2x",ftc,count);
		QDIO_DBF_TEXT3(1,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		if (q->qdio_error)
			q->error_status_flags |=
				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
		q->qdio_error = SLSB_P_INPUT_ERROR;
		q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
		atomic_sub(cnt, &q->number_of_buffers_used);
		q->first_to_check = ftc;
		break;
	case SLSB_P_INPUT_PRIMED :
		QDIO_DBF_TEXT3(0,trace,"inptprim");
		sprintf(dbf_text,"%2x,%2x",ftc,count);
		QDIO_DBF_TEXT3(1,trace,dbf_text);
		tmp = 0;
		ftc = q->first_to_check;
#ifdef QDIO_USE_PROCESSING_STATE
		/* all but the last primed buffer go to NOT_INIT; the
		 * last one stays PROCESSING to suppress interrupts */
		if (cnt > 1) {
			cnt -= 1;
			tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
			if (!tmp)
				break;
		}
		cnt = 1;
		tmp += set_slsb(q, &ftc,
			       SLSB_P_INPUT_PROCESSING, &cnt);
		atomic_set(&q->polling, 1);
#else
		tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
#endif
		atomic_sub(tmp, &q->number_of_buffers_used);
		q->first_to_check = ftc;
		break;
	case SLSB_CU_INPUT_EMPTY:
	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_PROCESSING:
		/* frontier not moved */
		QDIO_DBF_TEXT5(0,trace,"inpnipro");
		break;
	default:
		break;
	}
	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
	return q->first_to_check;
}
695
/*
 * Walk the outbound SLSB from first_to_check, advancing over EMPTY and
 * ERROR buffers (decrementing number_of_buffers_used) and stopping at
 * PRIMED or any other state.  Updates and returns q->first_to_check.
 * Delegates to the QEBSM variant when available.
 */
static inline int
qdio_get_outbound_buffer_frontier(struct qdio_q *q)
{
	struct qdio_irq *irq;
	volatile char *slsb;
	unsigned int count = 1;
	int first_not_to_check, f, f_mod_no;
	char dbf_text[15];

	QDIO_DBF_TEXT4(0,trace,"getobfro");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	irq = (struct qdio_irq *) q->irq_ptr;
	if (irq->is_qebsm)
		return qdio_qebsm_get_outbound_buffer_frontier(q);

	slsb=&q->slsb.acc.val[0];
	f_mod_no=f=q->first_to_check;
	/*
	 * f points to already processed elements, so f+no_used is correct...
	 * ... but: we don't check 128 buffers, as otherwise
	 * qdio_has_outbound_q_moved would return 0
	 */
	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
				      (QDIO_MAX_BUFFERS_PER_Q-1));

	if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
		SYNC_MEMORY;

check_next:
	if (f==first_not_to_check)
		goto out;

	switch(slsb[f_mod_no]) {

	/* the adapter has not fetched the output yet */
	case SLSB_CU_OUTPUT_PRIMED:
		QDIO_DBF_TEXT5(0,trace,"outpprim");
		break;

	/* the adapter got it */
	case SLSB_P_OUTPUT_EMPTY:
		atomic_dec(&q->number_of_buffers_used);
		f++;
		f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
		QDIO_DBF_TEXT5(0,trace,"outpempt");
		goto check_next;

	case SLSB_P_OUTPUT_ERROR:
		QDIO_DBF_TEXT3(0,trace,"outperr");
		/* log buffer number and the SBAL flag bytes */
		sprintf(dbf_text,"%x-%x-%x",f_mod_no,
			q->sbal[f_mod_no]->element[14].sbalf.value,
			q->sbal[f_mod_no]->element[15].sbalf.value);
		QDIO_DBF_TEXT3(1,trace,dbf_text);
		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);

		/* kind of process the buffer */
		set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);

		/*
		 * we increment the frontier, as this buffer
		 * was processed obviously
		 */
		atomic_dec(&q->number_of_buffers_used);
		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);

		if (q->qdio_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
		q->qdio_error=SLSB_P_OUTPUT_ERROR;
		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;

		break;

	/* no new buffers */
	default:
		QDIO_DBF_TEXT5(0,trace,"outpni");
	}
out:
	return (q->first_to_check=f_mod_no);
}
777
778 /* all buffers are processed */
779 static inline int
780 qdio_is_outbound_q_done(struct qdio_q *q)
781 {
782 int no_used;
783 #ifdef CONFIG_QDIO_DEBUG
784 char dbf_text[15];
785 #endif
786
787 no_used=atomic_read(&q->number_of_buffers_used);
788
789 #ifdef CONFIG_QDIO_DEBUG
790 if (no_used) {
791 sprintf(dbf_text,"oqisnt%02x",no_used);
792 QDIO_DBF_TEXT4(0,trace,dbf_text);
793 } else {
794 QDIO_DBF_TEXT4(0,trace,"oqisdone");
795 }
796 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
797 #endif /* CONFIG_QDIO_DEBUG */
798 return (no_used==0);
799 }
800
801 static inline int
802 qdio_has_outbound_q_moved(struct qdio_q *q)
803 {
804 int i;
805
806 i=qdio_get_outbound_buffer_frontier(q);
807
808 if ( (i!=GET_SAVED_FRONTIER(q)) ||
809 (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
810 SAVE_FRONTIER(q,i);
811 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
812 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
813 return 1;
814 } else {
815 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
816 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
817 return 0;
818 }
819 }
820
/*
 * Issue SIGA-w for an outbound queue and deal with the cc=2/busy-bit
 * condition (see the big comment inside).  Persistent failures are
 * recorded in qdio_error/siga_error/error_status_flags so the next
 * qdio_kick_outbound_handler run reports them upstream.
 */
static inline void
qdio_kick_outbound_q(struct qdio_q *q)
{
	int result;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT4(0,trace,"kickoutq");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* CONFIG_QDIO_DEBUG */

	if (!q->siga_out)
		return;

	/* here's the story with cc=2 and busy bit set (thanks, Rick):
	 * VM's CP could present us cc=2 and busy bit set on SIGA-write
	 * during reconfiguration of their Guest LAN (only in HIPERS mode,
	 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
	 * the queues down immediately; and not being under VM we have a
	 * problem on cc=2 and busy bit set right away).
	 *
	 * Therefore qdio_siga_output will try for a short time constantly,
	 * if such a condition occurs. If it doesn't change, it will
	 * increase the busy_siga_counter and save the timestamp, and
	 * schedule the queue for later processing (via mark_q, using the
	 * queue tasklet). __qdio_outbound_processing will check out the
	 * counter. If non-zero, it will call qdio_kick_outbound_q as often
	 * as the value of the counter. This will attempt further SIGA
	 * instructions. For each successful SIGA, the counter is
	 * decreased, for failing SIGAs the counter remains the same, after
	 * all.
	 * After some time of no movement, qdio_kick_outbound_q will
	 * finally fail and reflect corresponding error codes to call
	 * the upper layer module and have it take the queues down.
	 *
	 * Note that this is a change from the original HiperSockets design
	 * (saying cc=2 and busy bit means take the queues down), but in
	 * these days Guest LAN didn't exist... excessive cc=2 with busy bit
	 * conditions will still take the queues down, but the threshold is
	 * higher due to the Guest LAN environment.
	 */


	result=qdio_siga_output(q);

	switch (result) {
	case 0:
		/* went smooth this time, reset timestamp */
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT3(0,trace,"cc2reslv");
		sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
			atomic_read(&q->busy_siga_counter));
		QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		q->timing.busy_start=0;
		break;
	case (2|QDIO_SIGA_ERROR_B_BIT_SET):
		/* cc=2 and busy bit: */
		atomic_inc(&q->busy_siga_counter);

		/* if the last siga was successful, save
		 * timestamp here */
		if (!q->timing.busy_start)
			q->timing.busy_start=NOW;

		/* if we're in time, don't touch error_status_flags
		 * and siga_error */
		if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
			qdio_mark_q(q);
			break;
		}
		QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
			atomic_read(&q->busy_siga_counter));
		QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		/* else fallthrough and report error */
	default:
		/* for plain cc=1, 2 or 3: */
		if (q->siga_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
		q->error_status_flags|=
			QDIO_STATUS_LOOK_FOR_ERROR;
		q->siga_error=result;
	}
}
909
/*
 * Call the upper layer's handler for all outbound buffers between
 * first_element_to_kick and the saved frontier (modular arithmetic on
 * the 128-buffer ring), then reset the accumulated error state for the
 * next round.
 */
static inline void
qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, real_end, count;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	start = q->first_element_to_kick;
	/* last_move_ftc was just updated */
	real_end = GET_SAVED_FRONTIER(q);
	/* end = last processed buffer (one before the frontier) */
	end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
		(QDIO_MAX_BUFFERS_PER_Q-1);
	count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
		(QDIO_MAX_BUFFERS_PER_Q-1);

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0,trace,"kickouth");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	sprintf(dbf_text,"s=%2xc=%2x",start,count);
	QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (q->state==QDIO_IRQ_STATE_ACTIVE)
		q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
			   q->error_status_flags,
			   q->qdio_error,q->siga_error,q->q_no,start,count,
			   q->int_parm);

	/* for the next time: */
	q->first_element_to_kick=real_end;
	q->qdio_error=0;
	q->siga_error=0;
	q->error_status_flags=0;
}
946
/*
 * Core outbound processing: retry pending busy-bit SIGAs, report any
 * frontier movement to the upper layer, and reschedule ourselves when
 * the queue still needs attention.  Backs off (and re-marks the queue)
 * when it cannot get the queue reservation.
 */
static inline void
__qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	QDIO_DBF_TEXT4(0,trace,"qoutproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		o_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/* as we're sissies, we'll check next time */
		if (likely(!atomic_read(&q->is_in_shutdown))) {
			qdio_mark_q(q);
			QDIO_DBF_TEXT4(0,trace,"busy,agn");
		}
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	o_p_nc++;
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

	/* see comment in qdio_kick_outbound_q */
	siga_attempts=atomic_read(&q->busy_siga_counter);
	while (siga_attempts) {
		atomic_dec(&q->busy_siga_counter);
		qdio_kick_outbound_q(q);
		siga_attempts--;
	}

	if (qdio_has_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	if (q->is_iqdio_q) {
		/*
		 * for asynchronous queues, we better check, if the fill
		 * level is too high. for synchronous queues, the fill
		 * level will never be that high.
		 */
		if (atomic_read(&q->number_of_buffers_used)>
		    IQDIO_FILL_LEVEL_TO_POLL)
			qdio_mark_q(q);

	} else if (!q->hydra_gives_outbound_pcis)
		if (!qdio_is_outbound_q_done(q))
			qdio_mark_q(q);

	qdio_release_q(q);
}
999
/* Tasklet entry point for outbound queue processing. */
static void
qdio_outbound_processing(struct qdio_q *q)
{
	__qdio_outbound_processing(q);
}
1005
1006 /************************* INBOUND ROUTINES *******************************/
1007
1008
/*
 * Walk the inbound SLSB from first_to_check, consuming PRIMED buffers
 * (optionally leaving the newest one in PROCESSING state to suppress
 * interrupts) and recording ERROR buffers, until a buffer in any other
 * state is reached.  Updates and returns q->first_to_check.  Delegates
 * to the QEBSM variant when available.
 */
static inline int
qdio_get_inbound_buffer_frontier(struct qdio_q *q)
{
	struct qdio_irq *irq;
	int f,f_mod_no;
	volatile char *slsb;
	unsigned int count = 1;
	int first_not_to_check;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif /* CONFIG_QDIO_DEBUG */
#ifdef QDIO_USE_PROCESSING_STATE
	int last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_TEXT4(0,trace,"getibfro");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	irq = (struct qdio_irq *) q->irq_ptr;
	if (irq->is_qebsm)
		return qdio_qebsm_get_inbound_buffer_frontier(q);

	slsb=&q->slsb.acc.val[0];
	f_mod_no=f=q->first_to_check;
	/*
	 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
	 * would return 0
	 */
	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
				      (QDIO_MAX_BUFFERS_PER_Q-1));

	/*
	 * we don't use this one, as a PCI or we after a thin interrupt
	 * will sync the queues
	 */
	/* SYNC_MEMORY;*/

check_next:
	f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
	if (f==first_not_to_check)
		goto out;
	switch (slsb[f_mod_no]) {

	/* CU_EMPTY means frontier is reached */
	case SLSB_CU_INPUT_EMPTY:
		QDIO_DBF_TEXT5(0,trace,"inptempt");
		break;

	/* P_PRIMED means set slsb to P_PROCESSING and move on */
	case SLSB_P_INPUT_PRIMED:
		QDIO_DBF_TEXT5(0,trace,"inptprim");

#ifdef QDIO_USE_PROCESSING_STATE
		/*
		 * as soon as running under VM, polling the input queues will
		 * kill VM in terms of CP overhead
		 */
		if (q->siga_sync) {
			set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
		} else {
			/* set the previous buffer to NOT_INIT. The current
			 * buffer will be set to PROCESSING at the end of
			 * this function to avoid further interrupts. */
			if (last_position>=0)
				set_slsb(q, &last_position,
					 SLSB_P_INPUT_NOT_INIT, &count);
			atomic_set(&q->polling,1);
			last_position=f_mod_no;
		}
#else /* QDIO_USE_PROCESSING_STATE */
		set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
#endif /* QDIO_USE_PROCESSING_STATE */
		/*
		 * not needed, as the inbound queue will be synced on the next
		 * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
		 */
		/*SYNC_MEMORY;*/
		f++;
		atomic_dec(&q->number_of_buffers_used);
		goto check_next;

	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_PROCESSING:
		/* frontier not moved by the adapter yet */
		QDIO_DBF_TEXT5(0,trace,"inpnipro");
		break;

	/* P_ERROR means frontier is reached, break and report error */
	case SLSB_P_INPUT_ERROR:
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text,"inperr%2x",f_mod_no);
		QDIO_DBF_TEXT3(1,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);

		/* kind of process the buffer */
		set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);

		if (q->qdio_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
		q->qdio_error=SLSB_P_INPUT_ERROR;
		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;

		/* we increment the frontier, as this buffer
		 * was processed obviously */
		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
		atomic_dec(&q->number_of_buffers_used);

#ifdef QDIO_USE_PROCESSING_STATE
		last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */

		break;

	/* everything else means frontier not changed (HALTED or so) */
	default:
		break;
	}
out:
	q->first_to_check=f_mod_no;

#ifdef QDIO_USE_PROCESSING_STATE
	/* NOTE(review): the last consumed buffer is set to NOT_INIT here,
	 * not PROCESSING as the comment above announces — presumably the
	 * PROCESSING transition happens elsewhere; confirm against the
	 * callers before relying on either reading. */
	if (last_position>=0)
		set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count);
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));

	return q->first_to_check;
}
1139
/*
 * Re-scan the inbound frontier; returns 1 (and saves the new frontier
 * and, for non-synced queues, a timestamp) when it moved or an error
 * was flagged, 0 otherwise.
 */
static inline int
qdio_has_inbound_q_moved(struct qdio_q *q)
{
	int i;

#ifdef QDIO_PERFORMANCE_STATS
	static int old_pcis=0;
	static int old_thinints=0;

	/* NOTE(review): old_thinints is initialized but never updated
	 * afterwards (only old_pcis is refreshed below) — looks
	 * intentional-by-neglect in this perf-stats-only path; confirm. */
	if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
		perf_stats.start_time_inbound=NOW;
	else
		old_pcis=perf_stats.pcis;
#endif /* QDIO_PERFORMANCE_STATS */

	i=qdio_get_inbound_buffer_frontier(q);
	if ( (i!=GET_SAVED_FRONTIER(q)) ||
	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
		SAVE_FRONTIER(q,i);
		if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
			SAVE_TIMESTAMP(q);

		QDIO_DBF_TEXT4(0,trace,"inhasmvd");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 1;
	} else {
		QDIO_DBF_TEXT4(0,trace,"inhsntmv");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 0;
	}
}
1171
/*
 * means, no more buffers to be filled: returns 1 if the thinint inbound
 * queue is fully processed, 0 if there is (or may be) more work.  Side
 * effect: if the next buffer is already PRIMED again, the summary bit is
 * set and the tasklet rescheduled so the queue gets scanned once more.
 */
static inline int
tiqdio_is_inbound_q_done(struct qdio_q *q)
{
	int no_used;
	unsigned int start_buf, count;
	unsigned char state = 0;
	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;

#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	no_used=atomic_read(&q->number_of_buffers_used);

	/* propagate the change from 82 to 80 through VM */
	SYNC_MEMORY;

#ifdef CONFIG_QDIO_DEBUG
	if (no_used) {
		sprintf(dbf_text,"iqisnt%02x",no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
	} else {
		QDIO_DBF_TEXT4(0,trace,"iniqisdo");
	}
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* CONFIG_QDIO_DEBUG */

	if (!no_used)
		return 1;
	if (!q->siga_sync && !irq->is_qebsm)
		/* we'll check for more primed buffers in qeth_stop_polling */
		return 0;
	if (irq->is_qebsm) {
		/* QEBSM mode: extract the next buffer's state via EQBS */
		count = 1;
		start_buf = q->first_to_check;
		qdio_do_eqbs(q, &state, &start_buf, &count);
	} else
		state = q->slsb.acc.val[q->first_to_check];
	if (state != SLSB_P_INPUT_PRIMED)
		/*
		 * nothing more to do, if next buffer is not PRIMED.
		 * note that we did a SYNC_MEMORY before, that there
		 * has been a sychnronization.
		 * we will return 0 below, as there is nothing to do
		 * (stop_polling not necessary, as we have not been
		 * using the PROCESSING state
		 */
		return 0;

	/*
	 * ok, the next input buffer is primed. that means, that device state
	 * change indicator and adapter local summary are set, so we will find
	 * it next time.
	 * we will return 0 below, as there is nothing to do, except scheduling
	 * ourselves for the next time.
	 */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	return 0;
}
1233
1234 static inline int
1235 qdio_is_inbound_q_done(struct qdio_q *q)
1236 {
1237 int no_used;
1238 unsigned int start_buf, count;
1239 unsigned char state = 0;
1240 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1241
1242 #ifdef CONFIG_QDIO_DEBUG
1243 char dbf_text[15];
1244 #endif
1245
1246 no_used=atomic_read(&q->number_of_buffers_used);
1247
1248 /*
1249 * we need that one for synchronization with the adapter, as it
1250 * does a kind of PCI avoidance
1251 */
1252 SYNC_MEMORY;
1253
1254 if (!no_used) {
1255 QDIO_DBF_TEXT4(0,trace,"inqisdnA");
1256 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1257 QDIO_DBF_TEXT4(0,trace,dbf_text);
1258 return 1;
1259 }
1260 if (irq->is_qebsm) {
1261 count = 1;
1262 start_buf = q->first_to_check;
1263 qdio_do_eqbs(q, &state, &start_buf, &count);
1264 } else
1265 state = q->slsb.acc.val[q->first_to_check];
1266 if (state == SLSB_P_INPUT_PRIMED) {
1267 /* we got something to do */
1268 QDIO_DBF_TEXT4(0,trace,"inqisntA");
1269 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1270 return 0;
1271 }
1272
1273 /* on VM, we don't poll, so the q is always done here */
1274 if (q->siga_sync)
1275 return 1;
1276 if (q->hydra_gives_outbound_pcis)
1277 return 1;
1278
1279 /*
1280 * at this point we know, that inbound first_to_check
1281 * has (probably) not moved (see qdio_inbound_processing)
1282 */
1283 if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
1284 #ifdef CONFIG_QDIO_DEBUG
1285 QDIO_DBF_TEXT4(0,trace,"inqisdon");
1286 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1287 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1288 QDIO_DBF_TEXT4(0,trace,dbf_text);
1289 #endif /* CONFIG_QDIO_DEBUG */
1290 return 1;
1291 } else {
1292 #ifdef CONFIG_QDIO_DEBUG
1293 QDIO_DBF_TEXT4(0,trace,"inqisntd");
1294 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1295 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1296 QDIO_DBF_TEXT4(0,trace,dbf_text);
1297 #endif /* CONFIG_QDIO_DEBUG */
1298 return 0;
1299 }
1300 }
1301
1302 static inline void
1303 qdio_kick_inbound_handler(struct qdio_q *q)
1304 {
1305 int count, start, end, real_end, i;
1306 #ifdef CONFIG_QDIO_DEBUG
1307 char dbf_text[15];
1308 #endif
1309
1310 QDIO_DBF_TEXT4(0,trace,"kickinh");
1311 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1312
1313 start=q->first_element_to_kick;
1314 real_end=q->first_to_check;
1315 end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1316
1317 i=start;
1318 count=0;
1319 while (1) {
1320 count++;
1321 if (i==end)
1322 break;
1323 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1324 }
1325
1326 #ifdef CONFIG_QDIO_DEBUG
1327 sprintf(dbf_text,"s=%2xc=%2x",start,count);
1328 QDIO_DBF_TEXT4(0,trace,dbf_text);
1329 #endif /* CONFIG_QDIO_DEBUG */
1330
1331 if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
1332 q->handler(q->cdev,
1333 QDIO_STATUS_INBOUND_INT|q->error_status_flags,
1334 q->qdio_error,q->siga_error,q->q_no,start,count,
1335 q->int_parm);
1336
1337 /* for the next time: */
1338 q->first_element_to_kick=real_end;
1339 q->qdio_error=0;
1340 q->siga_error=0;
1341 q->error_status_flags=0;
1342
1343 #ifdef QDIO_PERFORMANCE_STATS
1344 perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
1345 perf_stats.inbound_cnt++;
1346 #endif /* QDIO_PERFORMANCE_STATS */
1347 }
1348
/*
 * Core processing for a thinint inbound queue: reserve the queue,
 * honour the device-state-change / spare indicator, synchronize state
 * with the adapter, service outbound-PCI-capable output queues, and
 * finally kick the inbound handler if the frontier moved.
 * spare_ind_was_set tells us whether the shared spare indicator was
 * set when the scan started.
 */
static inline void
__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
{
	struct qdio_irq *irq_ptr;
	struct qdio_q *oq;
	int i;

	QDIO_DBF_TEXT4(0,trace,"iqinproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/*
	 * we first want to reserve the q, so that we know, that we don't
	 * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
	 * be set
	 */
	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		ii_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/*
		 * as we might just be about to stop polling, we make
		 * sure that we check again at least once more
		 */
		tiqdio_sched_tl();
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	ii_p_nc++;
#endif /* QDIO_PERFORMANCE_STATS */
	if (unlikely(atomic_read(&q->is_in_shutdown))) {
		qdio_unmark_q(q);
		goto out;
	}

	/*
	 * we reset spare_ind_was_set, when the queue does not use the
	 * spare indicator
	 */
	if (spare_ind_was_set)
		spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);

	/* neither our own indicator nor the spare one fired: no work */
	if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
		goto out;
	/*
	 * q->dev_st_chg_ind is the indicator, be it shared or not.
	 * only clear it, if indicator is non-shared
	 */
	if (!spare_ind_was_set)
		tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);

	/* pick the cheapest SIGA-sync variant the adapter requires */
	if (q->hydra_gives_outbound_pcis) {
		if (!q->siga_sync_done_on_thinints) {
			SYNC_MEMORY_ALL;
		} else if ((!q->siga_sync_done_on_outb_tis)&&
			   (q->hydra_gives_outbound_pcis)) {
			SYNC_MEMORY_ALL_OUTB;
		}
	} else {
		SYNC_MEMORY;
	}
	/*
	 * maybe we have to do work on our outbound queues... at least
	 * we have to check the outbound-int-capable thinint-capable
	 * queues
	 */
	if (q->hydra_gives_outbound_pcis) {
		irq_ptr = (struct qdio_irq*)q->irq_ptr;
		for (i=0;i<irq_ptr->no_output_qs;i++) {
			oq = irq_ptr->output_qs[i];
#ifdef QDIO_PERFORMANCE_STATS
			perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
			if (!qdio_is_outbound_q_done(oq))
				__qdio_outbound_processing(oq);
		}
	}

	if (!qdio_has_inbound_q_moved(q))
		goto out;

	qdio_kick_inbound_handler(q);
	if (tiqdio_is_inbound_q_done(q))
		if (!qdio_stop_polling(q)) {
			/*
			 * we set the flags to get into the stuff next time,
			 * see also comment in qdio_stop_polling
			 */
			tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
			tiqdio_sched_tl();
		}
out:
	qdio_release_q(q);
}
1443
1444 static void
1445 tiqdio_inbound_processing(struct qdio_q *q)
1446 {
1447 __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
1448 }
1449
/*
 * Core processing for a non-thinint inbound queue: reserve the queue,
 * then repeatedly scan for new buffers and kick the handler, up to
 * QDIO_Q_LAPS rounds, re-marking the queue for later polling where
 * necessary.
 */
static inline void
__qdio_inbound_processing(struct qdio_q *q)
{
	int q_laps=0;

	QDIO_DBF_TEXT4(0,trace,"qinproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		i_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/* as we're sissies, we'll check next time */
		if (likely(!atomic_read(&q->is_in_shutdown))) {
			qdio_mark_q(q);
			QDIO_DBF_TEXT4(0,trace,"busy,agn");
		}
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	i_p_nc++;
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

again:
	if (qdio_has_inbound_q_moved(q)) {
		qdio_kick_inbound_handler(q);
		if (!qdio_stop_polling(q)) {
			/* still polling: loop a bounded number of times */
			q_laps++;
			if (q_laps<QDIO_Q_LAPS)
				goto again;
		}
		qdio_mark_q(q);
	} else {
		if (!qdio_is_inbound_q_done(q))
			/* means poll time is not yet over */
			qdio_mark_q(q);
	}

	qdio_release_q(q);
}
1492
/* tasklet entry point for non-thinint inbound queues */
static void
qdio_inbound_processing(struct qdio_q *q)
{
	__qdio_inbound_processing(q);
}
1498
1499 /************************* MAIN ROUTINES *******************************/
1500
1501 #ifdef QDIO_USE_PROCESSING_STATE
/*
 * Leave the PROCESSING state on a thinint queue after scanning.
 * Return-code protocol (consumed by tiqdio_inbound_checks):
 *   0 - caller must stop (queue busy or list empty), tasklet rescheduled
 *   1 - polling stopped would lose work; indicator re-set, count a lap
 *   2 - done with this queue, advance to the next one
 *   3 - (via qdio_stop_polling failing with laps left) rescan from start
 */
static inline int
tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
{
	if (!q) {
		tiqdio_sched_tl();
		return 0;
	}

	/*
	 * under VM, we have not used the PROCESSING state, so no
	 * need to stop polling
	 */
	if (q->siga_sync)
		return 2;

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		ii_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/*
		 * as we might just be about to stop polling, we make
		 * sure that we check again at least once more
		 */

		/*
		 * sanity -- we'd get here without setting the
		 * dev st chg ind
		 */
		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
		tiqdio_sched_tl();
		return 0;
	}
	if (qdio_stop_polling(q)) {
		qdio_release_q(q);
		return 2;
	}
	if (q_laps<QDIO_Q_LAPS-1) {
		qdio_release_q(q);
		return 3;
	}
	/*
	 * we set the flags to get into the stuff
	 * next time, see also comment in qdio_stop_polling
	 */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	qdio_release_q(q);
	return 1;

}
1553 #endif /* QDIO_USE_PROCESSING_STATE */
1554
/*
 * Scan all thinint inbound queues once (driven by the thinint handler
 * or the tiqdio tasklet), then -- if the PROCESSING state is in use --
 * walk the list again to reset that state on every queue.
 */
static inline void
tiqdio_inbound_checks(void)
{
	struct qdio_q *q;
	int spare_ind_was_set=0;
#ifdef QDIO_USE_PROCESSING_STATE
	int q_laps=0;
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_TEXT4(0,trace,"iqdinbck");
	QDIO_DBF_TEXT5(0,trace,"iqlocsum");

#ifdef QDIO_USE_PROCESSING_STATE
again:
#endif /* QDIO_USE_PROCESSING_STATE */

	/* when the spare indicator is used and set, save that and clear it */
	if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
		spare_ind_was_set = 1;
		tiqdio_clear_summary_bit((__u32*)&spare_indicator);
	}

	/* first pass: process every queue on the circular tiq_list */
	q=(struct qdio_q*)tiq_list;
	do {
		if (!q)
			break;
		__tiqdio_inbound_processing(q, spare_ind_was_set);
		q=(struct qdio_q*)q->list_next;
	} while (q!=(struct qdio_q*)tiq_list);

#ifdef QDIO_USE_PROCESSING_STATE
	/* second pass: leave the PROCESSING state on each queue; see the
	 * return-code protocol of tiqdio_reset_processing_state */
	q=(struct qdio_q*)tiq_list;
	do {
		int ret;

		ret = tiqdio_reset_processing_state(q, q_laps);
		switch (ret) {
		case 0:
			return;
		case 1:
			q_laps++;
			/* fallthrough -- count the lap, then advance */
		case 2:
			q = (struct qdio_q*)q->list_next;
			break;
		default:
			q_laps++;
			goto again;
		}
	} while (q!=(struct qdio_q*)tiq_list);
#endif /* QDIO_USE_PROCESSING_STATE */
}
1606
/*
 * Tasklet body for thinint interrupt processing; @data is unused.
 */
static void
tiqdio_tl(unsigned long data)
{
	QDIO_DBF_TEXT4(0,trace,"iqdio_tl");

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

	tiqdio_inbound_checks();
}
1618
1619 /********************* GENERAL HELPER_ROUTINES ***********************/
1620
1621 static void
1622 qdio_release_irq_memory(struct qdio_irq *irq_ptr)
1623 {
1624 int i;
1625
1626 for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
1627 if (!irq_ptr->input_qs[i])
1628 goto next;
1629
1630 kfree(irq_ptr->input_qs[i]->slib);
1631 kfree(irq_ptr->input_qs[i]);
1632
1633 next:
1634 if (!irq_ptr->output_qs[i])
1635 continue;
1636
1637 kfree(irq_ptr->output_qs[i]->slib);
1638 kfree(irq_ptr->output_qs[i]);
1639
1640 }
1641 kfree(irq_ptr->qdr);
1642 free_page((unsigned long) irq_ptr);
1643 }
1644
1645 static void
1646 qdio_set_impl_params(struct qdio_irq *irq_ptr,
1647 unsigned int qib_param_field_format,
1648 /* pointer to 128 bytes or NULL, if no param field */
1649 unsigned char *qib_param_field,
1650 /* pointer to no_queues*128 words of data or NULL */
1651 unsigned int no_input_qs,
1652 unsigned int no_output_qs,
1653 unsigned long *input_slib_elements,
1654 unsigned long *output_slib_elements)
1655 {
1656 int i,j;
1657
1658 if (!irq_ptr)
1659 return;
1660
1661 irq_ptr->qib.pfmt=qib_param_field_format;
1662 if (qib_param_field)
1663 memcpy(irq_ptr->qib.parm,qib_param_field,
1664 QDIO_MAX_BUFFERS_PER_Q);
1665
1666 if (input_slib_elements)
1667 for (i=0;i<no_input_qs;i++) {
1668 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1669 irq_ptr->input_qs[i]->slib->slibe[j].parms=
1670 input_slib_elements[
1671 i*QDIO_MAX_BUFFERS_PER_Q+j];
1672 }
1673 if (output_slib_elements)
1674 for (i=0;i<no_output_qs;i++) {
1675 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1676 irq_ptr->output_qs[i]->slib->slibe[j].parms=
1677 output_slib_elements[
1678 i*QDIO_MAX_BUFFERS_PER_Q+j];
1679 }
1680 }
1681
1682 static int
1683 qdio_alloc_qs(struct qdio_irq *irq_ptr,
1684 int no_input_qs, int no_output_qs)
1685 {
1686 int i;
1687 struct qdio_q *q;
1688 int result=-ENOMEM;
1689
1690 for (i=0;i<no_input_qs;i++) {
1691 q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
1692
1693 if (!q) {
1694 QDIO_PRINT_ERR("kmalloc of q failed!\n");
1695 goto out;
1696 }
1697
1698 q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL);
1699 if (!q->slib) {
1700 QDIO_PRINT_ERR("kmalloc of slib failed!\n");
1701 goto out;
1702 }
1703
1704 irq_ptr->input_qs[i]=q;
1705 }
1706
1707 for (i=0;i<no_output_qs;i++) {
1708 q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
1709
1710 if (!q) {
1711 goto out;
1712 }
1713
1714 q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
1715 if (!q->slib) {
1716 QDIO_PRINT_ERR("kmalloc of slib failed!\n");
1717 goto out;
1718 }
1719
1720 irq_ptr->output_qs[i]=q;
1721 }
1722
1723 result=0;
1724 out:
1725 return result;
1726 }
1727
/*
 * Initialize all queue structures for an irq: wire up the SBALs handed
 * in by the caller, fill the slib/sl/slsb control blocks the adapter
 * reads, and set up the per-queue tasklets and handler callbacks.
 * Must run after qdio_alloc_qs has allocated the queue structs/slibs.
 */
static void
qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
	     int no_input_qs, int no_output_qs,
	     qdio_handler_t *input_handler,
	     qdio_handler_t *output_handler,
	     unsigned long int_parm,int q_format,
	     unsigned long flags,
	     void **inbound_sbals_array,
	     void **outbound_sbals_array)
{
	struct qdio_q *q;
	int i,j;
	char dbf_text[20]; /* see qdio_initialize */
	void *ptr;
	int available;

	sprintf(dbf_text,"qfqs%4x",cdev->private->sch_no);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	for (i=0;i<no_input_qs;i++) {
		q=irq_ptr->input_qs[i];

		/* zero everything up to (not including) the slib pointer,
		 * which was set up by qdio_alloc_qs */
		memset(q,0,((char*)&q->slib)-((char*)q));
		sprintf(dbf_text,"in-q%4x",i);
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));

		/* slib occupies the first half of the page, sl the second */
		memset(q->slib,0,PAGE_SIZE);
		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);

		available=0;

		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sbal[j]=*(inbound_sbals_array++);

		q->queue_type=q_format;
		q->int_parm=int_parm;
		q->schid = irq_ptr->schid;
		q->irq_ptr = irq_ptr;
		q->cdev = cdev;
		q->mask=1<<(31-i);
		q->q_no=i;
		q->is_input_q=1;
		q->first_to_check=0;
		q->last_move_ftc=0;
		q->handler=input_handler;
		q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;

		q->tasklet.data=(unsigned long)q;
		/* q->is_thinint_q isn't valid at this time, but
		 * irq_ptr->is_thinint_irq is */
		q->tasklet.func=(void(*)(unsigned long))
			((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
			 &qdio_inbound_processing);

		/* actually this is not used for inbound queues. yet. */
		atomic_set(&q->busy_siga_counter,0);
		q->timing.busy_start=0;

/*		for (j=0;j<QDIO_STATS_NUMBER;j++)
			q->timing.last_transfer_times[j]=(qdio_get_micros()/
							  QDIO_STATS_NUMBER)*j;
		q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
*/

		/* fill in slib */
		if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
				 (unsigned long)(q->slib);
		q->slib->sla=(unsigned long)(q->sl);
		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);

		/* fill in sl */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);

		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
		ptr=(void*)q->sl;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)&q->slsb;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)q->sbal[0];
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));

		/* fill in slsb */
		if (!irq_ptr->is_qebsm) {
			unsigned int count = 1;
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
				set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
		}
	}

	for (i=0;i<no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
		memset(q,0,((char*)&q->slib)-((char*)q));

		sprintf(dbf_text,"outq%4x",i);
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));

		memset(q->slib,0,PAGE_SIZE);
		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);

		available=0;

		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sbal[j]=*(outbound_sbals_array++);

		q->queue_type=q_format;
		q->int_parm=int_parm;
		q->is_input_q=0;
		q->schid = irq_ptr->schid;
		q->cdev = cdev;
		q->irq_ptr = irq_ptr;
		q->mask=1<<(31-i);
		q->q_no=i;
		q->first_to_check=0;
		q->last_move_ftc=0;
		q->handler=output_handler;

		q->tasklet.data=(unsigned long)q;
		q->tasklet.func=(void(*)(unsigned long))
			&qdio_outbound_processing;

		atomic_set(&q->busy_siga_counter,0);
		q->timing.busy_start=0;

		/* fill in slib */
		if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
				 (unsigned long)(q->slib);
		q->slib->sla=(unsigned long)(q->sl);
		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);

		/* fill in sl */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);

		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
		ptr=(void*)q->sl;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)&q->slsb;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)q->sbal[0];
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));

		/* fill in slsb */
		if (!irq_ptr->is_qebsm) {
			unsigned int count = 1;
			for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
				set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
		}
	}
}
1879
1880 static void
1881 qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1882 unsigned int no_input_qs,
1883 unsigned int no_output_qs,
1884 unsigned int min_input_threshold,
1885 unsigned int max_input_threshold,
1886 unsigned int min_output_threshold,
1887 unsigned int max_output_threshold)
1888 {
1889 int i;
1890 struct qdio_q *q;
1891
1892 for (i=0;i<no_input_qs;i++) {
1893 q=irq_ptr->input_qs[i];
1894 q->timing.threshold=max_input_threshold;
1895 /* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1896 q->threshold_classes[j].threshold=
1897 min_input_threshold+
1898 (max_input_threshold-min_input_threshold)/
1899 QDIO_STATS_CLASSES;
1900 }
1901 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1902 }
1903 for (i=0;i<no_output_qs;i++) {
1904 q=irq_ptr->output_qs[i];
1905 q->timing.threshold=max_output_threshold;
1906 /* for (j=0;j<QDIO_STATS_CLASSES;j++) {
1907 q->threshold_classes[j].threshold=
1908 min_output_threshold+
1909 (max_output_threshold-min_output_threshold)/
1910 QDIO_STATS_CLASSES;
1911 }
1912 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1913 }
1914 }
1915
/*
 * Adapter (thin) interrupt handler: update statistics, clear the global
 * summary if required, and scan all thinint inbound queues.
 * Always returns 0.
 */
static int
tiqdio_thinint_handler(void)
{
	QDIO_DBF_TEXT4(0,trace,"thin_int");

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.thinints++;
	perf_stats.start_time_inbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */

	/* SVS only when needed:
	 * issue SVS to benefit from iqdio interrupt avoidance
	 * (SVS clears AISOI)*/
	if (!omit_svs)
		tiqdio_clear_global_summary();

	tiqdio_inbound_checks();
	return 0;
}
1935
/*
 * Propagate a new irq state to the irq structure and all of its input
 * and output queues.  The final mb() makes the new state visible before
 * the caller proceeds.
 */
static void
qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
{
	int i;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT5(0,trace,"newstate");
	sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
	QDIO_DBF_TEXT5(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	irq_ptr->state=state;
	for (i=0;i<irq_ptr->no_input_qs;i++)
		irq_ptr->input_qs[i]->state=state;
	for (i=0;i<irq_ptr->no_output_qs;i++)
		irq_ptr->output_qs[i]->state=state;
	/* ensure the state change is globally visible */
	mb();
}
1955
1956 static inline void
1957 qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1958 {
1959 char dbf_text[15];
1960
1961 if (irb->esw.esw0.erw.cons) {
1962 sprintf(dbf_text,"sens%4x",schid.sch_no);
1963 QDIO_DBF_TEXT2(1,trace,dbf_text);
1964 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1965
1966 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1967 HEXDUMP16(WARN,"irb: ",irb);
1968 HEXDUMP16(WARN,"sense data: ",irb->ecw);
1969 }
1970
1971 }
1972
/*
 * Handle a PCI (program-controlled interrupt): process all input
 * queues (inline or via tasklet, depending on the queue's flags) and,
 * if the adapter signals outbound state via PCIs, the output queues
 * that still have work.
 */
static inline void
qdio_handle_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.pcis++;
	perf_stats.start_time_inbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */
	for (i=0;i<irq_ptr->no_input_qs;i++) {
		q=irq_ptr->input_qs[i];
		/* defer to the tasklet when processing in interrupt
		 * context was not requested for this queue */
		if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
			qdio_mark_q(q);
		else {
#ifdef QDIO_PERFORMANCE_STATS
			perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
			__qdio_inbound_processing(q);
		}
	}
	if (!irq_ptr->hydra_gives_outbound_pcis)
		return;
	for (i=0;i<irq_ptr->no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
#ifdef QDIO_PERFORMANCE_STATS
		perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
		if (qdio_is_outbound_q_done(q))
			continue;
		if (!irq_ptr->sync_done_on_outb_pcis)
			SYNC_MEMORY;
		__qdio_outbound_processing(q);
	}
}
2008
2009 static void qdio_establish_handle_irq(struct ccw_device*, int, int);
2010
/*
 * React to a check condition while the queues are active: log the
 * status, notify the device driver through the first registered queue
 * handler, and put the irq into STOPPED state.
 */
static inline void
qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
			   int cstat, int dstat)
{
	struct qdio_irq *irq_ptr;
	struct qdio_q *q;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;

	QDIO_DBF_TEXT2(1, trace, "ick2");
	sprintf(dbf_text,"%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(1,trace,dbf_text);
	QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
	QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
	QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
	QDIO_PRINT_ERR("received check condition on activate " \
		       "queues on device %s (cs=x%x, ds=x%x).\n",
		       cdev->dev.bus_id, cstat, dstat);
	/* report through whichever queue exists -- the handler and
	 * int_parm are the same for all queues of this device */
	if (irq_ptr->no_input_qs) {
		q=irq_ptr->input_qs[0];
	} else if (irq_ptr->no_output_qs) {
		q=irq_ptr->output_qs[0];
	} else {
		QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
			       cdev->dev.bus_id);
		goto omit_handler_call;
	}
	q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
		   QDIO_STATUS_LOOK_FOR_ERROR,
		   0,0,0,-1,-1,q->int_parm);
omit_handler_call:
	qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);

}
2046
2047 static void
2048 qdio_call_shutdown(void *data)
2049 {
2050 struct ccw_device *cdev;
2051
2052 cdev = (struct ccw_device *)data;
2053 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2054 put_device(&cdev->dev);
2055 }
2056
/*
 * Handle an I/O timeout reported by the common I/O layer, depending on
 * the irq state: a timed-out establish or cleanup just moves the irq to
 * the error state; for an active device, a shutdown is scheduled via
 * the ccw work queue (it cannot run in interrupt context).
 */
static void
qdio_timeout_handler(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	QDIO_DBF_TEXT2(0, trace, "qtoh");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	sprintf(dbf_text, "state:%d", irq_ptr->state);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
			       irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1,setup,"eq:timeo");
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
				"irq=0.%x.%x.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		/* I/O has been terminated by common I/O layer. */
		QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1, trace, "cio:term");
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown, (void *)cdev);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		BUG();
	}
	ccw_device_set_timeout(cdev, 0);
	wake_up(&cdev->private->wait_q);
}
2104
/*
 * Main interrupt handler for qdio subchannels: validates intparm and
 * the irq pointer, deals with error IRBs (-EIO/-ETIMEDOUT), records
 * sense data, and dispatches on the current irq state (establish
 * completion, cleanup completion, or PCI/error during operation).
 */
static void
qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct qdio_irq *irq_ptr;
	int cstat,dstat;
	char dbf_text[15];

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0, trace, "qint");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (!intparm) {
		QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
			       "handler, device %s\n", cdev->dev.bus_id);
		return;
	}

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr) {
		QDIO_DBF_TEXT2(1, trace, "uint");
		sprintf(dbf_text,"%s", cdev->dev.bus_id);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
			       cdev->dev.bus_id);
		return;
	}

	if (IS_ERR(irb)) {
		/* Currently running i/o is in error. */
		switch (PTR_ERR(irb)) {
		case -EIO:
			QDIO_PRINT_ERR("i/o error on device %s\n",
				       cdev->dev.bus_id);
			return;
		case -ETIMEDOUT:
			qdio_timeout_handler(cdev);
			return;
		default:
			QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
				       PTR_ERR(irb), cdev->dev.bus_id);
			return;
		}
	}

	qdio_irq_check_sense(irq_ptr->schid, irb);

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text, "state:%d", irq_ptr->state);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	cstat = irb->scsw.cstat;
	dstat = irb->scsw.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;

	case QDIO_IRQ_STATE_CLEANUP:
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;

	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_handle_pci(irq_ptr);
			break;
		}

		if ((cstat&~SCHN_STAT_PCI)||dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat, dstat);
			break;
		}
		/* NOTE(review): when neither PCI nor an error bit is set,
		 * control falls through to default and the message below
		 * is printed -- presumably intentional for an unexpected
		 * interrupt; confirm */
	default:
		QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
			       "device %s?!\n",
			       irq_ptr->state, cdev->dev.bus_id);
	}
	wake_up(&cdev->private->wait_q);

}
2189
/*
 * Issue a SIGA-sync for one input or output queue of the device
 * (selected via QDIO_FLAG_SYNC_INPUT / QDIO_FLAG_SYNC_OUTPUT).
 * Not needed (and skipped) on QEBSM subchannels.
 * Returns the SIGA condition code, -ENODEV if the device has no qdio
 * data, or -EINVAL for a bad flag/queue number.
 */
int
qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
		 unsigned int queue_number)
{
	int cc = 0;
	struct qdio_q *q;
	struct qdio_irq *irq_ptr;
	void *ptr;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15]="SyncXXXX";
#endif

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

#ifdef CONFIG_QDIO_DEBUG
	*((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
	*((int*)(&dbf_text[0]))=flags;
	*((int*)(&dbf_text[4]))=queue_number;
	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
#endif /* CONFIG_QDIO_DEBUG */

	if (flags&QDIO_FLAG_SYNC_INPUT) {
		q=irq_ptr->input_qs[queue_number];
		if (!q)
			return -EINVAL;
		if (!(irq_ptr->is_qebsm))
			cc = do_siga_sync(q->schid, 0, q->mask);
	} else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
		q=irq_ptr->output_qs[queue_number];
		if (!q)
			return -EINVAL;
		if (!(irq_ptr->is_qebsm))
			cc = do_siga_sync(q->schid, q->mask, 0);
	} else
		return -EINVAL;

	/* trace a non-zero condition code */
	ptr=&cc;
	if (cc)
		QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));

	return cc;
}
2235
/*
 * Evaluate QEBSM availability for the subchannel: if QEBSM is disabled
 * or not indicated by qdioac, clear the QEBSM setup; otherwise store
 * the subchannel token and (re)initialize all queue SLSB states via
 * SQBS/EQBS-backed set_slsb.
 */
static inline void
qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
			    unsigned long token)
{
	struct qdio_q *q;
	int i;
	unsigned int count, start_buf;
	char dbf_text[15];

	/*check if QEBSM is disabled */
	if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
		irq_ptr->is_qebsm  = 0;
		irq_ptr->sch_token = 0;
		irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
		QDIO_DBF_TEXT0(0,setup,"noV=V");
		return;
	}
	irq_ptr->sch_token = token;
	/*input queue*/
	for (i = 0; i < irq_ptr->no_input_qs;i++) {
		q = irq_ptr->input_qs[i];
		count = QDIO_MAX_BUFFERS_PER_Q;
		start_buf = 0;
		set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
	}
	sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	/*output queue*/
	for (i = 0; i < irq_ptr->no_output_qs; i++) {
		q = irq_ptr->output_qs[i];
		count = QDIO_MAX_BUFFERS_PER_Q;
		start_buf = 0;
		set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
	}
}
2273
2274 static void
2275 qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
2276 {
2277 int result;
2278 unsigned char qdioac;
2279 struct {
2280 struct chsc_header request;
2281 u16 reserved1:10;
2282 u16 ssid:2;
2283 u16 fmt:4;
2284 u16 first_sch;
2285 u16 reserved2;
2286 u16 last_sch;
2287 u32 reserved3;
2288 struct chsc_header response;
2289 u32 reserved4;
2290 u8 flags;
2291 u8 reserved5;
2292 u16 sch;
2293 u8 qfmt;
2294 u8 parm;
2295 u8 qdioac1;
2296 u8 sch_class;
2297 u8 reserved7;
2298 u8 icnt;
2299 u8 reserved8;
2300 u8 ocnt;
2301 u8 reserved9;
2302 u8 mbccnt;
2303 u16 qdioac2;
2304 u64 sch_token;
2305 } *ssqd_area;
2306
2307 QDIO_DBF_TEXT0(0,setup,"getssqd");
2308 qdioac = 0;
2309 ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2310 if (!ssqd_area) {
2311 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
2312 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
2313 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
2314 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
2315 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2316 irq_ptr->is_qebsm = 0;
2317 irq_ptr->sch_token = 0;
2318 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2319 return;
2320 }
2321
2322 ssqd_area->request = (struct chsc_header) {
2323 .length = 0x0010,
2324 .code = 0x0024,
2325 };
2326 ssqd_area->first_sch = irq_ptr->schid.sch_no;
2327 ssqd_area->last_sch = irq_ptr->schid.sch_no;
2328 ssqd_area->ssid = irq_ptr->schid.ssid;
2329 result = chsc(ssqd_area);
2330
2331 if (result) {
2332 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
2333 "SIGAs for sch 0.%x.%x.\n", result,
2334 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2335 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
2336 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
2337 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2338 irq_ptr->is_qebsm = 0;
2339 goto out;
2340 }
2341
2342 if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2343 QDIO_PRINT_WARN("response upon checking SIGA needs " \
2344 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
2345 ssqd_area->response.code,
2346 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2347 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY ||
2348 CHSC_FLAG_SIGA_OUTPUT_NECESSARY ||
2349 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2350 irq_ptr->is_qebsm = 0;
2351 goto out;
2352 }
2353 if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2354 !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2355 (ssqd_area->sch != irq_ptr->schid.sch_no)) {
2356 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2357 "using all SIGAs.\n",
2358 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2359 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2360 CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2361 CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2362 irq_ptr->is_qebsm = 0;
2363 goto out;
2364 }
2365 qdioac = ssqd_area->qdioac1;
2366 out:
2367 qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2368 ssqd_area->sch_token);
2369 mempool_free(ssqd_area, qdio_mempool_scssc);
2370 irq_ptr->qdioac = qdioac;
2371 }
2372
/*
 * Check that the channel subsystem offers everything thin interrupts
 * need (adapter interrupts, the set-chsc-characteristics commands) and
 * record optional facilities in the module-global flags
 * hydra_thinints, is_passthrough and omit_svs.
 *
 * Returns 0 on success, a negative errno if a required facility is
 * missing.
 *
 * NOTE(review): the return type is unsigned int although negative
 * errnos (-EIO/-ENOENT) are returned; callers presumably only test
 * for non-zero — confirm, and consider changing the type to int.
 */
static unsigned int
tiqdio_check_chsc_availability(void)
{
	char dbf_text[15];

	if (!css_characteristics_avail)
		return -EIO;

	/* Check for bit 41. */
	if (!css_general_characteristics.aif) {
		QDIO_PRINT_WARN("Adapter interruption facility not " \
				"installed.\n");
		return -ENOENT;
	}

	/* Check for bits 107 and 108. */
	if (!css_chsc_characteristics.scssc ||
	    !css_chsc_characteristics.scsscf) {
		QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
				"not available.\n");
		return -ENOENT;
	}

	/* Check for OSA/FCP thin interrupts (bit 67). */
	hydra_thinints = css_general_characteristics.aif_osa;
	sprintf(dbf_text,"hydrati%1x", hydra_thinints);
	QDIO_DBF_TEXT0(0,setup,dbf_text);

#ifdef CONFIG_64BIT
	/* Check for QEBSM support in general (bit 58). */
	is_passthrough = css_general_characteristics.qebsm;
#endif
	sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
	QDIO_DBF_TEXT0(0,setup,dbf_text);

	/* Check for aif time delay disablement fac (bit 56). If installed,
	 * omit svs even under lpar (good point by rick again) */
	omit_svs = css_general_characteristics.aif_tdd;
	sprintf(dbf_text,"omitsvs%1x", omit_svs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	return 0;
}
2415
2416
/*
 * Tell the channel subsystem (via the set-channel-subsystem-
 * characteristics CHSC, opcode 0x0021) where the summary indicator
 * and this subchannel's device state change indicator live, so that
 * adapter (thin) interrupts can be delivered for it.
 *
 * @reset_to_zero: if non-zero, both indicator addresses are set to 0,
 *	i.e. the subchannel is detached from thin interrupt delivery
 *	(used during shutdown).
 *
 * Returns 0 on success, -ENODEV if the irq is not a thinint irq,
 * -ENOMEM or -EIO on failure.
 *
 * NOTE(review): return type is unsigned int but negative errnos are
 * returned; callers only test for non-zero — consider int.
 */
static unsigned int
tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
{
	unsigned long real_addr_local_summary_bit;
	unsigned long real_addr_dev_st_chg_ind;
	void *ptr;
	char dbf_text[15];

	unsigned int resp_code;
	int result;

	/* layout of the scssc command/response block (one 4k page) */
	struct {
		struct chsc_header request;
		u16 operation_code;
		u16 reserved1;
		u32 reserved2;
		u32 reserved3;
		u64 summary_indicator_addr;
		u64 subchannel_indicator_addr;
		u32 ks:4;
		u32 kc:4;
		u32 reserved4:21;
		u32 isc:3;
		u32 word_with_d_bit;
		/* set to 0x10000000 to enable
		 * time delay disablement facility */
		u32 reserved5;
		struct subchannel_id schid;
		u32 reserved6[1004];
		struct chsc_header response;
		u32 reserved7;
	} *scssc_area;

	if (!irq_ptr->is_thinint_irq)
		return -ENODEV;

	if (reset_to_zero) {
		real_addr_local_summary_bit=0;
		real_addr_dev_st_chg_ind=0;
	} else {
		/* the hardware needs absolute addresses of the indicators */
		real_addr_local_summary_bit=
			virt_to_phys((volatile void *)indicators);
		real_addr_dev_st_chg_ind=
			virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
	}

	scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
	if (!scssc_area) {
		QDIO_PRINT_WARN("No memory for setting indicators on " \
				"subchannel 0.%x.%x.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		return -ENOMEM;
	}
	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code   = 0x0021,
	};
	scssc_area->operation_code = 0;

	scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
	scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
	scssc_area->ks = QDIO_STORAGE_KEY;
	scssc_area->kc = QDIO_STORAGE_KEY;
	scssc_area->isc = TIQDIO_THININT_ISC;
	scssc_area->schid = irq_ptr->schid;
	/* enables the time delay disablement facility. Don't care
	 * whether it is really there (i.e. we haven't checked for
	 * it) */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;
	else
		QDIO_PRINT_WARN("Time delay disablement facility " \
				"not available\n");

	result = chsc(scssc_area);
	if (result) {
		QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
				"cc=%i.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
		result = -EIO;
		goto out;
	}

	resp_code = scssc_area->response.code;
	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
		QDIO_PRINT_WARN("response upon setting indicators " \
				"is 0x%x.\n",resp_code);
		sprintf(dbf_text,"sidR%4x",resp_code);
		QDIO_DBF_TEXT1(0,trace,dbf_text);
		QDIO_DBF_TEXT1(0,setup,dbf_text);
		ptr=&scssc_area->response;
		QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
		result = -EIO;
		goto out;
	}

	QDIO_DBF_TEXT2(0,setup,"setscind");
	QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
		      sizeof(unsigned long));
	QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
	result = 0;
out:
	mempool_free(scssc_area, qdio_mempool_scssc);
	return result;

}
2523
/*
 * Program the adapter's interrupt delay target for this thinint
 * subchannel via the set-chsc-characteristics-fast CHSC (opcode
 * 0x1027). A failure here is not fatal for qdio operation, so any
 * response-code problem is only logged and 0 is returned; only a
 * missing thinint irq (-ENODEV), allocation failure (-ENOMEM) or a
 * non-zero condition code from chsc (-EIO) are reported.
 *
 * NOTE(review): return type is unsigned int but negative errnos are
 * returned; callers appear to ignore the result — consider int.
 */
static unsigned int
tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
{
	unsigned int resp_code;
	int result;
	void *ptr;
	char dbf_text[15];

	/* layout of the scsscf command/response block (one 4k page) */
	struct {
		struct chsc_header request;
		u16 operation_code;
		u16 reserved1;
		u32 reserved2;
		u32 reserved3;
		u32 reserved4[2];
		u32 delay_target;
		u32 reserved5[1009];
		struct chsc_header response;
		u32 reserved6;
	} *scsscf_area;

	if (!irq_ptr->is_thinint_irq)
		return -ENODEV;

	scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
	if (!scsscf_area) {
		QDIO_PRINT_WARN("No memory for setting delay target on " \
				"subchannel 0.%x.%x.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
		return -ENOMEM;
	}
	scsscf_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code   = 0x1027,
	};

	/* the delay target lives in the upper halfword of the field */
	scsscf_area->delay_target = delay_target<<16;

	result=chsc(scsscf_area);
	if (result) {
		QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
				"cc=%i. Continuing.\n",
				irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
				result);
		result = -EIO;
		goto out;
	}

	resp_code = scsscf_area->response.code;
	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
		QDIO_PRINT_WARN("response upon setting delay target " \
				"is 0x%x. Continuing.\n",resp_code);
		sprintf(dbf_text,"sdtR%4x",resp_code);
		QDIO_DBF_TEXT1(0,trace,dbf_text);
		QDIO_DBF_TEXT1(0,setup,dbf_text);
		ptr=&scsscf_area->response;
		QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
	}
	QDIO_DBF_TEXT2(0,trace,"delytrgt");
	QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
	result = 0; /* not critical */
out:
	mempool_free(scsscf_area, qdio_mempool_scssc);
	return result;
}
2589
2590 int
2591 qdio_cleanup(struct ccw_device *cdev, int how)
2592 {
2593 struct qdio_irq *irq_ptr;
2594 char dbf_text[15];
2595 int rc;
2596
2597 irq_ptr = cdev->private->qdio_data;
2598 if (!irq_ptr)
2599 return -ENODEV;
2600
2601 sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2602 QDIO_DBF_TEXT1(0,trace,dbf_text);
2603 QDIO_DBF_TEXT0(0,setup,dbf_text);
2604
2605 rc = qdio_shutdown(cdev, how);
2606 if ((rc == 0) || (rc == -EINPROGRESS))
2607 rc = qdio_free(cdev);
2608 return rc;
2609 }
2610
2611 int
2612 qdio_shutdown(struct ccw_device *cdev, int how)
2613 {
2614 struct qdio_irq *irq_ptr;
2615 int i;
2616 int result = 0;
2617 int rc;
2618 unsigned long flags;
2619 int timeout;
2620 char dbf_text[15];
2621
2622 irq_ptr = cdev->private->qdio_data;
2623 if (!irq_ptr)
2624 return -ENODEV;
2625
2626 down(&irq_ptr->setting_up_sema);
2627
2628 sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2629 QDIO_DBF_TEXT1(0,trace,dbf_text);
2630 QDIO_DBF_TEXT0(0,setup,dbf_text);
2631
2632 /* mark all qs as uninteresting */
2633 for (i=0;i<irq_ptr->no_input_qs;i++)
2634 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
2635
2636 for (i=0;i<irq_ptr->no_output_qs;i++)
2637 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
2638
2639 tasklet_kill(&tiqdio_tasklet);
2640
2641 for (i=0;i<irq_ptr->no_input_qs;i++) {
2642 qdio_unmark_q(irq_ptr->input_qs[i]);
2643 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
2644 wait_event_interruptible_timeout(cdev->private->wait_q,
2645 !atomic_read(&irq_ptr->
2646 input_qs[i]->
2647 use_count),
2648 QDIO_NO_USE_COUNT_TIMEOUT);
2649 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
2650 result=-EINPROGRESS;
2651 }
2652
2653 for (i=0;i<irq_ptr->no_output_qs;i++) {
2654 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
2655 wait_event_interruptible_timeout(cdev->private->wait_q,
2656 !atomic_read(&irq_ptr->
2657 output_qs[i]->
2658 use_count),
2659 QDIO_NO_USE_COUNT_TIMEOUT);
2660 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
2661 result=-EINPROGRESS;
2662 }
2663
2664 /* cleanup subchannel */
2665 spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
2666 if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
2667 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
2668 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
2669 } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
2670 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2671 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2672 } else { /* default behaviour */
2673 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2674 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2675 }
2676 if (rc == -ENODEV) {
2677 /* No need to wait for device no longer present. */
2678 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2679 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2680 } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
2681 /*
2682 * Whoever put another handler there, has to cope with the
2683 * interrupt theirself. Might happen if qdio_shutdown was
2684 * called on already shutdown queues, but this shouldn't have
2685 * bad side effects.
2686 */
2687 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2688 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2689 } else if (rc == 0) {
2690 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2691 ccw_device_set_timeout(cdev, timeout);
2692 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2693
2694 wait_event(cdev->private->wait_q,
2695 irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2696 irq_ptr->state == QDIO_IRQ_STATE_ERR);
2697 } else {
2698 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2699 "device %s\n", result, cdev->dev.bus_id);
2700 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2701 result = rc;
2702 goto out;
2703 }
2704 if (irq_ptr->is_thinint_irq) {
2705 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
2706 tiqdio_set_subchannel_ind(irq_ptr,1);
2707 /* reset adapter interrupt indicators */
2708 }
2709
2710 /* exchange int handlers, if necessary */
2711 if ((void*)cdev->handler == (void*)qdio_handler)
2712 cdev->handler=irq_ptr->original_int_handler;
2713
2714 /* Ignore errors. */
2715 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2716 ccw_device_set_timeout(cdev, 0);
2717 out:
2718 up(&irq_ptr->setting_up_sema);
2719 return result;
2720 }
2721
2722 int
2723 qdio_free(struct ccw_device *cdev)
2724 {
2725 struct qdio_irq *irq_ptr;
2726 char dbf_text[15];
2727
2728 irq_ptr = cdev->private->qdio_data;
2729 if (!irq_ptr)
2730 return -ENODEV;
2731
2732 down(&irq_ptr->setting_up_sema);
2733
2734 sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2735 QDIO_DBF_TEXT1(0,trace,dbf_text);
2736 QDIO_DBF_TEXT0(0,setup,dbf_text);
2737
2738 cdev->private->qdio_data = 0;
2739
2740 up(&irq_ptr->setting_up_sema);
2741
2742 qdio_release_irq_memory(irq_ptr);
2743 module_put(THIS_MODULE);
2744 return 0;
2745 }
2746
/*
 * Dump all fields of the caller-supplied qdio_initialize parameter
 * block into the setup debug feature, one entry per field. Pure
 * tracing helper; no side effects beyond the dbf entries.
 */
static inline void
qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{
	char dbf_text[20]; /* if a printf printed out more than 8 chars */

	sprintf(dbf_text,"qfmt:%x",init_data->q_format);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
	sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
	QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
	sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
	QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
	QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
}
2779
2780 static inline void
2781 qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2782 {
2783 irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
2784 irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2785
2786 irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
2787
2788 irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
2789
2790 irq_ptr->qdr->qdf0[i].slsba=
2791 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
2792
2793 irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
2794 irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
2795 irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
2796 irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2797 }
2798
2799 static inline void
2800 qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2801 int j, int iqfmt)
2802 {
2803 irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
2804 irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2805
2806 irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
2807
2808 irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
2809
2810 irq_ptr->qdr->qdf0[i+j].slsba=
2811 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
2812
2813 irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
2814 irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
2815 irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
2816 irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
2817 }
2818
2819
2820 static inline void
2821 qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2822 {
2823 int i;
2824
2825 for (i=0;i<irq_ptr->no_input_qs;i++) {
2826 irq_ptr->input_qs[i]->siga_sync=
2827 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2828 irq_ptr->input_qs[i]->siga_in=
2829 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2830 irq_ptr->input_qs[i]->siga_out=
2831 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2832 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2833 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2834 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2835 irq_ptr->hydra_gives_outbound_pcis;
2836 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2837 ((irq_ptr->qdioac&
2838 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2839 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2840 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2841 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2842
2843 }
2844 }
2845
2846 static inline void
2847 qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2848 {
2849 int i;
2850
2851 for (i=0;i<irq_ptr->no_output_qs;i++) {
2852 irq_ptr->output_qs[i]->siga_sync=
2853 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2854 irq_ptr->output_qs[i]->siga_in=
2855 irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2856 irq_ptr->output_qs[i]->siga_out=
2857 irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2858 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
2859 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2860 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
2861 irq_ptr->hydra_gives_outbound_pcis;
2862 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
2863 ((irq_ptr->qdioac&
2864 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2865 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2866 (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2867 CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2868
2869 }
2870 }
2871
/*
 * Inspect the channel/device status presented by the establish-queues
 * interrupt. On any problem the irq state is set to ERR.
 *
 * Returns 1 if establishing must be considered failed (no device end,
 * or unexpected devstat bits), 0 otherwise.
 *
 * NOTE(review): the first check already covers unexpected dstat bits
 * (without returning); the third check repeats that dstat test and
 * returns 1 — looks intentional (log + continue vs. fail), but the
 * overlap is worth confirming.
 */
static inline int
qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
				    int dstat)
{
	char dbf_text[15];
	struct qdio_irq *irq_ptr;

	irq_ptr = cdev->private->qdio_data;

	/* any channel status, or device status beyond channel/device end,
	 * is a check condition; log it and mark the irq as broken */
	if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
		sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
		QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
		QDIO_PRINT_ERR("received check condition on establish " \
			       "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
			       irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
			       cstat,dstat);
		qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
	}

	/* establish is only complete with device end */
	if (!(dstat & DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1,setup,"eq:no de");
		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
		QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
			       "device end: dstat=%02x, cstat=%02x\n",
			       irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
			       dstat, cstat);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		return 1;
	}

	if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1,setup,"eq:badio");
		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
		QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
			       "the following devstat: dstat=%02x, "
			       "cstat=%02x\n", irq_ptr->schid.ssid,
			       irq_ptr->schid.sch_no, dstat, cstat);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		return 1;
	}
	return 0;
}
2918
2919 static void
2920 qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2921 {
2922 struct qdio_irq *irq_ptr;
2923 char dbf_text[15];
2924
2925 irq_ptr = cdev->private->qdio_data;
2926
2927 sprintf(dbf_text,"qehi%4x",cdev->private->sch_no);
2928 QDIO_DBF_TEXT0(0,setup,dbf_text);
2929 QDIO_DBF_TEXT0(0,trace,dbf_text);
2930
2931 if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
2932 ccw_device_set_timeout(cdev, 0);
2933 return;
2934 }
2935
2936 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2937 ccw_device_set_timeout(cdev, 0);
2938 }
2939
2940 int
2941 qdio_initialize(struct qdio_initialize *init_data)
2942 {
2943 int rc;
2944 char dbf_text[15];
2945
2946 sprintf(dbf_text,"qini%4x",init_data->cdev->private->sch_no);
2947 QDIO_DBF_TEXT0(0,setup,dbf_text);
2948 QDIO_DBF_TEXT0(0,trace,dbf_text);
2949
2950 rc = qdio_allocate(init_data);
2951 if (rc == 0) {
2952 rc = qdio_establish(init_data);
2953 if (rc != 0)
2954 qdio_free(init_data->cdev);
2955 }
2956
2957 return rc;
2958 }
2959
2960
2961 int
2962 qdio_allocate(struct qdio_initialize *init_data)
2963 {
2964 struct qdio_irq *irq_ptr;
2965 char dbf_text[15];
2966
2967 sprintf(dbf_text,"qalc%4x",init_data->cdev->private->sch_no);
2968 QDIO_DBF_TEXT0(0,setup,dbf_text);
2969 QDIO_DBF_TEXT0(0,trace,dbf_text);
2970 if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2971 (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2972 ((init_data->no_input_qs) && (!init_data->input_handler)) ||
2973 ((init_data->no_output_qs) && (!init_data->output_handler)) )
2974 return -EINVAL;
2975
2976 if (!init_data->input_sbal_addr_array)
2977 return -EINVAL;
2978
2979 if (!init_data->output_sbal_addr_array)
2980 return -EINVAL;
2981
2982 qdio_allocate_do_dbf(init_data);
2983
2984 /* create irq */
2985 irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2986
2987 QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2988 QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
2989
2990 if (!irq_ptr) {
2991 QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n");
2992 return -ENOMEM;
2993 }
2994
2995 init_MUTEX(&irq_ptr->setting_up_sema);
2996
2997 /* QDR must be in DMA area since CCW data address is only 32 bit */
2998 irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
2999 if (!(irq_ptr->qdr)) {
3000 free_page((unsigned long) irq_ptr);
3001 QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
3002 return -ENOMEM;
3003 }
3004 QDIO_DBF_TEXT0(0,setup,"qdr:");
3005 QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
3006
3007 if (qdio_alloc_qs(irq_ptr,
3008 init_data->no_input_qs,
3009 init_data->no_output_qs)) {
3010 qdio_release_irq_memory(irq_ptr);
3011 return -ENOMEM;
3012 }
3013
3014 init_data->cdev->private->qdio_data = irq_ptr;
3015
3016 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
3017
3018 return 0;
3019 }
3020
/*
 * Fill the qdio_irq structure (allocated by qdio_allocate() and
 * attached to the ccw device) with the per-subchannel setup from
 * init_data: qdr/qib contents, queue descriptors, thin interrupt
 * indicator, thresholds, and the qdio interrupt handler.
 *
 * Returns 0 on success or a negative errno; on failure the irq
 * memory is released again.
 */
int qdio_fill_irq(struct qdio_initialize *init_data)
{
	int i;
	char dbf_text[15];
	struct ciw *ciw;
	int is_iqdio;
	struct qdio_irq *irq_ptr;

	irq_ptr = init_data->cdev->private->qdio_data;

	/* wipe everything up to (not including) the qdr pointer, which
	 * was set up by qdio_allocate() and must survive */
	memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));

	/* wipes qib.ac, required by ar7063 */
	memset(irq_ptr->qdr,0,sizeof(struct qdr));

	irq_ptr->int_parm=init_data->int_parm;

	irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
	irq_ptr->no_input_qs=init_data->no_input_qs;
	irq_ptr->no_output_qs=init_data->no_output_qs;

	/* iqdio (hipersocket) format queues are always thinint; other
	 * formats use thinint only if the OSA/FCP facility is there */
	if (init_data->q_format==QDIO_IQDIO_QFMT) {
		irq_ptr->is_iqdio_irq=1;
		irq_ptr->is_thinint_irq=1;
	} else {
		irq_ptr->is_iqdio_irq=0;
		irq_ptr->is_thinint_irq=hydra_thinints;
	}
	sprintf(dbf_text,"is_i_t%1x%1x",
		irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
	QDIO_DBF_TEXT2(0,setup,dbf_text);

	if (irq_ptr->is_thinint_irq) {
		irq_ptr->dev_st_chg_ind = qdio_get_indicator();
		QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
		if (!irq_ptr->dev_st_chg_ind) {
			QDIO_PRINT_WARN("no indicator location available " \
					"for irq 0.%x.%x\n",
					irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
			qdio_release_irq_memory(irq_ptr);
			return -ENOBUFS;
		}
	}

	/* defaults */
	irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
	irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
	irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
	irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;

	qdio_fill_qs(irq_ptr, init_data->cdev,
		     init_data->no_input_qs,
		     init_data->no_output_qs,
		     init_data->input_handler,
		     init_data->output_handler,init_data->int_parm,
		     init_data->q_format,init_data->flags,
		     init_data->input_sbal_addr_array,
		     init_data->output_sbal_addr_array);

	if (!try_module_get(THIS_MODULE)) {
		QDIO_PRINT_CRIT("try_module_get() failed!\n");
		qdio_release_irq_memory(irq_ptr);
		return -EINVAL;
	}

	qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
			     init_data->no_output_qs,
			     init_data->min_input_threshold,
			     init_data->max_input_threshold,
			     init_data->min_output_threshold,
			     init_data->max_output_threshold);

	/* fill in qdr */
	irq_ptr->qdr->qfmt=init_data->q_format;
	irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
	irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
	irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
	irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;

	irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
	irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;

	/* fill in qib */
	irq_ptr->is_qebsm = is_passthrough;
	if (irq_ptr->is_qebsm)
		irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;

	irq_ptr->qib.qfmt=init_data->q_format;
	if (init_data->no_input_qs)
		irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
	if (init_data->no_output_qs)
		irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
	memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);

	qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
			     init_data->qib_param_field,
			     init_data->no_input_qs,
			     init_data->no_output_qs,
			     init_data->input_slib_elements,
			     init_data->output_slib_elements);

	/* first input descriptors, then output descriptors */
	is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
	for (i=0;i<init_data->no_input_qs;i++)
		qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);

	for (i=0;i<init_data->no_output_qs;i++)
		qdio_allocate_fill_output_desc(irq_ptr, i,
					       init_data->no_input_qs,
					       is_iqdio);

	/* qdr, qib, sls, slsbs, slibs, sbales filled. */

	/* get qdio commands */
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1,setup,"no eq");
		QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
				"Trying to use default.\n");
	} else
		irq_ptr->equeue = *ciw;
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1,setup,"no aq");
		QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
				"Trying to use default.\n");
	} else
		irq_ptr->aqueue = *ciw;

	/* Set new interrupt handler. */
	irq_ptr->original_int_handler = init_data->cdev->handler;
	init_data->cdev->handler = qdio_handler;

	return 0;
}
3156
3157 int
3158 qdio_establish(struct qdio_initialize *init_data)
3159 {
3160 struct qdio_irq *irq_ptr;
3161 unsigned long saveflags;
3162 int result, result2;
3163 struct ccw_device *cdev;
3164 char dbf_text[20];
3165
3166 cdev=init_data->cdev;
3167 irq_ptr = cdev->private->qdio_data;
3168 if (!irq_ptr)
3169 return -EINVAL;
3170
3171 if (cdev->private->state != DEV_STATE_ONLINE)
3172 return -EINVAL;
3173
3174 down(&irq_ptr->setting_up_sema);
3175
3176 qdio_fill_irq(init_data);
3177
3178 /* the thinint CHSC stuff */
3179 if (irq_ptr->is_thinint_irq) {
3180
3181 result = tiqdio_set_subchannel_ind(irq_ptr,0);
3182 if (result) {
3183 up(&irq_ptr->setting_up_sema);
3184 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3185 return result;
3186 }
3187 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
3188 }
3189
3190 sprintf(dbf_text,"qest%4x",cdev->private->sch_no);
3191 QDIO_DBF_TEXT0(0,setup,dbf_text);
3192 QDIO_DBF_TEXT0(0,trace,dbf_text);
3193
3194 /* establish q */
3195 irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
3196 irq_ptr->ccw.flags=CCW_FLAG_SLI;
3197 irq_ptr->ccw.count=irq_ptr->equeue.count;
3198 irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
3199
3200 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3201
3202 ccw_device_set_options(cdev, 0);
3203 result=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
3204 QDIO_DOING_ESTABLISH,0, 0,
3205 QDIO_ESTABLISH_TIMEOUT);
3206 if (result) {
3207 result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
3208 QDIO_DOING_ESTABLISH,0,0,
3209 QDIO_ESTABLISH_TIMEOUT);
3210 sprintf(dbf_text,"eq:io%4x",result);
3211 QDIO_DBF_TEXT2(1,setup,dbf_text);
3212 if (result2) {
3213 sprintf(dbf_text,"eq:io%4x",result);
3214 QDIO_DBF_TEXT2(1,setup,dbf_text);
3215 }
3216 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
3217 "returned %i, next try returned %i\n",
3218 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3219 result, result2);
3220 result=result2;
3221 if (result)
3222 ccw_device_set_timeout(cdev, 0);
3223 }
3224
3225 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3226
3227 if (result) {
3228 up(&irq_ptr->setting_up_sema);
3229 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
3230 return result;
3231 }
3232
3233 /* Timeout is cared for already by using ccw_device_start_timeout(). */
3234 wait_event_interruptible(cdev->private->wait_q,
3235 irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
3236 irq_ptr->state == QDIO_IRQ_STATE_ERR);
3237
3238 if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
3239 result = 0;
3240 else {
3241 up(&irq_ptr->setting_up_sema);
3242 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3243 return -EIO;
3244 }
3245
3246 qdio_get_ssqd_information(irq_ptr);
3247 /* if this gets set once, we're running under VM and can omit SVSes */
3248 if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3249 omit_svs=1;
3250
3251 sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
3252 QDIO_DBF_TEXT2(0,setup,dbf_text);
3253
3254 sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
3255 QDIO_DBF_TEXT2(0,setup,dbf_text);
3256
3257 irq_ptr->hydra_gives_outbound_pcis=
3258 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
3259 irq_ptr->sync_done_on_outb_pcis=
3260 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
3261
3262 qdio_initialize_set_siga_flags_input(irq_ptr);
3263 qdio_initialize_set_siga_flags_output(irq_ptr);
3264
3265 up(&irq_ptr->setting_up_sema);
3266
3267 return result;
3268
3269 }
3270
3271 int
3272 qdio_activate(struct ccw_device *cdev, int flags)
3273 {
3274 struct qdio_irq *irq_ptr;
3275 int i,result=0,result2;
3276 unsigned long saveflags;
3277 char dbf_text[20]; /* see qdio_initialize */
3278
3279 irq_ptr = cdev->private->qdio_data;
3280 if (!irq_ptr)
3281 return -ENODEV;
3282
3283 if (cdev->private->state != DEV_STATE_ONLINE)
3284 return -EINVAL;
3285
3286 down(&irq_ptr->setting_up_sema);
3287 if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
3288 result=-EBUSY;
3289 goto out;
3290 }
3291
3292 sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
3293 QDIO_DBF_TEXT2(0,setup,dbf_text);
3294 QDIO_DBF_TEXT2(0,trace,dbf_text);
3295
3296 /* activate q */
3297 irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
3298 irq_ptr->ccw.flags=CCW_FLAG_SLI;
3299 irq_ptr->ccw.count=irq_ptr->aqueue.count;
3300 irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
3301
3302 spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3303
3304 ccw_device_set_timeout(cdev, 0);
3305 ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3306 result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3307 0, DOIO_DENY_PREFETCH);
3308 if (result) {
3309 result2=ccw_device_start(cdev,&irq_ptr->ccw,
3310 QDIO_DOING_ACTIVATE,0,0);
3311 sprintf(dbf_text,"aq:io%4x",result);
3312 QDIO_DBF_TEXT2(1,setup,dbf_text);
3313 if (result2) {
3314 sprintf(dbf_text,"aq:io%4x",result);
3315 QDIO_DBF_TEXT2(1,setup,dbf_text);
3316 }
3317 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
3318 "returned %i, next try returned %i\n",
3319 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3320 result, result2);
3321 result=result2;
3322 }
3323
3324 spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3325 if (result)
3326 goto out;
3327
3328 for (i=0;i<irq_ptr->no_input_qs;i++) {
3329 if (irq_ptr->is_thinint_irq) {
3330 /*
3331 * that way we know, that, if we will get interrupted
3332 * by tiqdio_inbound_processing, qdio_unmark_q will
3333 * not be called
3334 */
3335 qdio_reserve_q(irq_ptr->input_qs[i]);
3336 qdio_mark_tiq(irq_ptr->input_qs[i]);
3337 qdio_release_q(irq_ptr->input_qs[i]);
3338 }
3339 }
3340
3341 if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
3342 for (i=0;i<irq_ptr->no_input_qs;i++) {
3343 irq_ptr->input_qs[i]->is_input_q|=
3344 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
3345 }
3346 }
3347
3348 wait_event_interruptible_timeout(cdev->private->wait_q,
3349 ((irq_ptr->state ==
3350 QDIO_IRQ_STATE_STOPPED) ||
3351 (irq_ptr->state ==
3352 QDIO_IRQ_STATE_ERR)),
3353 QDIO_ACTIVATE_TIMEOUT);
3354
3355 switch (irq_ptr->state) {
3356 case QDIO_IRQ_STATE_STOPPED:
3357 case QDIO_IRQ_STATE_ERR:
3358 up(&irq_ptr->setting_up_sema);
3359 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3360 down(&irq_ptr->setting_up_sema);
3361 result = -EIO;
3362 break;
3363 default:
3364 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
3365 result = 0;
3366 }
3367 out:
3368 up(&irq_ptr->setting_up_sema);
3369
3370 return result;
3371 }
3372
3373 /* buffers filled forwards again to make Rick happy */
3374 static inline void
3375 qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3376 unsigned int count, struct qdio_buffer *buffers)
3377 {
3378 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3379 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3380 if (irq->is_qebsm) {
3381 while (count)
3382 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3383 return;
3384 }
3385 for (;;) {
3386 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3387 count--;
3388 if (!count) break;
3389 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3390 }
3391 }
3392
3393 static inline void
3394 qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3395 unsigned int count, struct qdio_buffer *buffers)
3396 {
3397 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3398
3399 qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3400 if (irq->is_qebsm) {
3401 while (count)
3402 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3403 return;
3404 }
3405
3406 for (;;) {
3407 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3408 count--;
3409 if (!count) break;
3410 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3411 }
3412 }
3413
3414 static inline void
3415 do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3416 unsigned int qidx, unsigned int count,
3417 struct qdio_buffer *buffers)
3418 {
3419 int used_elements;
3420
3421 /* This is the inbound handling of queues */
3422 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3423
3424 qdio_do_qdio_fill_input(q,qidx,count,buffers);
3425
3426 if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3427 (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3428 atomic_swap(&q->polling,0);
3429
3430 if (used_elements)
3431 return;
3432 if (callflags&QDIO_FLAG_DONT_SIGA)
3433 return;
3434 if (q->siga_in) {
3435 int result;
3436
3437 result=qdio_siga_input(q);
3438 if (result) {
3439 if (q->siga_error)
3440 q->error_status_flags|=
3441 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
3442 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
3443 q->siga_error=result;
3444 }
3445 }
3446
3447 qdio_mark_q(q);
3448 }
3449
/*
 * do_qdio_handle_outbound - outbound half of do_QDIO: prime 'count'
 * buffers starting at qidx and trigger transmission.
 *
 * iqdio queues get one SIGA per SBAL; otherwise either a SIGA sync is
 * done (via the SYNC_MEMORY macro) or, if the previously used buffer
 * is still primed, the SIGA is skipped entirely ("fast requeue").
 */
static inline void
do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
			unsigned int qidx, unsigned int count,
			struct qdio_buffer *buffers)
{
	int used_elements;
	unsigned int cnt, start_buf;
	unsigned char state = 0;
	struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;

	/* This is the outbound handling of queues */
#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.start_time_outbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */

	qdio_do_qdio_fill_output(q,qidx,count,buffers);

	/* number of buffers that were in use before this call */
	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;

	if (callflags&QDIO_FLAG_DONT_SIGA) {
		/* caller asked for no SIGA: only account the time spent */
#ifdef QDIO_PERFORMANCE_STATS
		perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
		perf_stats.outbound_cnt++;
#endif /* QDIO_PERFORMANCE_STATS */
		return;
	}
	if (q->is_iqdio_q) {
		/* one siga for every sbal */
		while (count--)
			qdio_kick_outbound_q(q);
			
		__qdio_outbound_processing(q);
	} else {
		/* under VM, we do a SIGA sync unconditionally */
		SYNC_MEMORY;
		/*
		 * CAUTION: SYNC_MEMORY expands to an if statement; the
		 * 'else' below binds to that hidden if (taken when no
		 * SIGA sync is required) -- see the comment inside.
		 */
		else {
			/*
			 * w/o shadow queues (else branch of
			 * SYNC_MEMORY :-/ ), we try to
			 * fast-requeue buffers
			 */
			if (irq->is_qebsm) {
				cnt = 1;
				/* inspect the buffer just before qidx */
				start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
					     (QDIO_MAX_BUFFERS_PER_Q-1));
				qdio_do_eqbs(q, &state, &start_buf, &cnt);
			} else
				state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
					&(QDIO_MAX_BUFFERS_PER_Q-1) ];
			if (state != SLSB_CU_OUTPUT_PRIMED) {
				qdio_kick_outbound_q(q);
			} else {
				/* previous buffer still primed: skip SIGA */
				QDIO_DBF_TEXT3(0,trace, "fast-req");
#ifdef QDIO_PERFORMANCE_STATS
				perf_stats.fast_reqs++;
#endif /* QDIO_PERFORMANCE_STATS */
			}
		}
		/*
		 * only marking the q could take too long,
		 * the upper layer module could do a lot of
		 * traffic in that time
		 */
		__qdio_outbound_processing(q);
	}

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
	perf_stats.outbound_cnt++;
#endif /* QDIO_PERFORMANCE_STATS */
}
3521
3522 /* count must be 1 in iqdio */
3523 int
3524 do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3525 unsigned int queue_number, unsigned int qidx,
3526 unsigned int count,struct qdio_buffer *buffers)
3527 {
3528 struct qdio_irq *irq_ptr;
3529 #ifdef CONFIG_QDIO_DEBUG
3530 char dbf_text[20];
3531
3532 sprintf(dbf_text,"doQD%04x",cdev->private->sch_no);
3533 QDIO_DBF_TEXT3(0,trace,dbf_text);
3534 #endif /* CONFIG_QDIO_DEBUG */
3535
3536 if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
3537 (count>QDIO_MAX_BUFFERS_PER_Q) ||
3538 (queue_number>QDIO_MAX_QUEUES_PER_IRQ) )
3539 return -EINVAL;
3540
3541 if (count==0)
3542 return 0;
3543
3544 irq_ptr = cdev->private->qdio_data;
3545 if (!irq_ptr)
3546 return -ENODEV;
3547
3548 #ifdef CONFIG_QDIO_DEBUG
3549 if (callflags&QDIO_FLAG_SYNC_INPUT)
3550 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
3551 sizeof(void*));
3552 else
3553 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
3554 sizeof(void*));
3555 sprintf(dbf_text,"flag%04x",callflags);
3556 QDIO_DBF_TEXT3(0,trace,dbf_text);
3557 sprintf(dbf_text,"qi%02xct%02x",qidx,count);
3558 QDIO_DBF_TEXT3(0,trace,dbf_text);
3559 #endif /* CONFIG_QDIO_DEBUG */
3560
3561 if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
3562 return -EBUSY;
3563
3564 if (callflags&QDIO_FLAG_SYNC_INPUT)
3565 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
3566 callflags, qidx, count, buffers);
3567 else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
3568 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
3569 callflags, qidx, count, buffers);
3570 else {
3571 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
3572 return -EINVAL;
3573 }
3574 return 0;
3575 }
3576
#ifdef QDIO_PERFORMANCE_STATS
/*
 * Procfs read handler for the qdio performance file: dumps all
 * collected counters as text.  The whole report is emitted on the
 * first read (offset 0); later reads return 0 (EOF).
 */
static int
qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
			int buffer_length, int *eof, void *data)
{
	int c=0;

	/* we are always called with buffer_length=4k, so we always
	   deliver on the first read */
	if (offset>0)
		return 0;

/* append a formatted line to the output buffer */
#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
	_OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
	_OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
	_OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
	_OUTP_IT("Number of tasklet runs (total)                  : %u\n",
		 perf_stats.tl_runs);
	_OUTP_IT("\n");
	_OUTP_IT("Number of SIGA sync's issued                    : %u\n",
		 perf_stats.siga_syncs);
	_OUTP_IT("Number of SIGA in's issued                      : %u\n",
		 perf_stats.siga_ins);
	_OUTP_IT("Number of SIGA out's issued                     : %u\n",
		 perf_stats.siga_outs);
	_OUTP_IT("Number of PCIs caught                           : %u\n",
		 perf_stats.pcis);
	_OUTP_IT("Number of adapter interrupts caught             : %u\n",
		 perf_stats.thinints);
	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %u\n",
		 perf_stats.fast_reqs);
	_OUTP_IT("\n");
	_OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n",
		 perf_stats.inbound_time);
	_OUTP_IT("Number of inbound transfers                     : %u\n",
		 perf_stats.inbound_cnt);
	_OUTP_IT("Total time of all outbound do_QDIOs (us)        : %u\n",
		 perf_stats.outbound_time);
	_OUTP_IT("Number of do_QDIOs outbound                     : %u\n",
		 perf_stats.outbound_cnt);
	_OUTP_IT("\n");

	return c;
}

/* handle of the registered proc entry, NULL if registration failed */
static struct proc_dir_entry *qdio_perf_proc_file;
#endif /* QDIO_PERFORMANCE_STATS */
3624
/*
 * Create the qdio performance proc entry (no-op unless
 * QDIO_PERFORMANCE_STATS is built in).  Failure is remembered in
 * proc_perf_file_registration and only warned about; module init
 * continues regardless.
 */
static void
qdio_add_procfs_entry(void)
{
#ifdef QDIO_PERFORMANCE_STATS
	proc_perf_file_registration = 0;

	qdio_perf_proc_file = create_proc_entry(QDIO_PERF,
						S_IFREG|0444, &proc_root);
	if (!qdio_perf_proc_file)
		proc_perf_file_registration = -1;
	else
		qdio_perf_proc_file->read_proc = &qdio_perf_procfile_read;

	if (proc_perf_file_registration)
		QDIO_PRINT_WARN("was not able to register perf. " \
				"proc-file (%i).\n",
				proc_perf_file_registration);
#endif /* QDIO_PERFORMANCE_STATS */
}
3642
/*
 * Remove the qdio performance proc entry and reset the tasklet-run
 * counter (no-op unless QDIO_PERFORMANCE_STATS is built in).
 */
static void
qdio_remove_procfs_entry(void)
{
#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.tl_runs = 0;

	/* zero means registration succeeded earlier */
	if (proc_perf_file_registration == 0)
		remove_proc_entry(QDIO_PERF, &proc_root);
#endif /* QDIO_PERFORMANCE_STATS */
}
3653
3654 static void
3655 tiqdio_register_thinints(void)
3656 {
3657 char dbf_text[20];
3658 register_thinint_result=
3659 s390_register_adapter_interrupt(&tiqdio_thinint_handler);
3660 if (register_thinint_result) {
3661 sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff));
3662 QDIO_DBF_TEXT0(0,setup,dbf_text);
3663 QDIO_PRINT_ERR("failed to register adapter handler " \
3664 "(rc=%i).\nAdapter interrupts might " \
3665 "not work. Continuing.\n",
3666 register_thinint_result);
3667 }
3668 }
3669
3670 static void
3671 tiqdio_unregister_thinints(void)
3672 {
3673 if (!register_thinint_result)
3674 s390_unregister_adapter_interrupt(&tiqdio_thinint_handler);
3675 }
3676
3677 static int
3678 qdio_get_qdio_memory(void)
3679 {
3680 int i;
3681 indicator_used[0]=1;
3682
3683 for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3684 indicator_used[i]=0;
3685 indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3686 GFP_KERNEL);
3687 if (!indicators)
3688 return -ENOMEM;
3689 return 0;
3690 }
3691
3692 static void
3693 qdio_release_qdio_memory(void)
3694 {
3695 kfree(indicators);
3696 }
3697
/*
 * Unregister all qdio debug feature areas.  Each pointer may be NULL
 * when qdio_register_dbf_views() failed partway through (it calls this
 * function on its oom path), hence the guards.
 */
static void
qdio_unregister_dbf_views(void)
{
	if (qdio_dbf_setup)
		debug_unregister(qdio_dbf_setup);
	if (qdio_dbf_sbal)
		debug_unregister(qdio_dbf_sbal);
	if (qdio_dbf_sense)
		debug_unregister(qdio_dbf_sense);
	if (qdio_dbf_trace)
		debug_unregister(qdio_dbf_trace);
#ifdef CONFIG_QDIO_DEBUG
	if (qdio_dbf_slsb_out)
		debug_unregister(qdio_dbf_slsb_out);
	if (qdio_dbf_slsb_in)
		debug_unregister(qdio_dbf_slsb_in);
#endif /* CONFIG_QDIO_DEBUG */
}
3716
3717 static int
3718 qdio_register_dbf_views(void)
3719 {
3720 qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
3721 QDIO_DBF_SETUP_PAGES,
3722 QDIO_DBF_SETUP_NR_AREAS,
3723 QDIO_DBF_SETUP_LEN);
3724 if (!qdio_dbf_setup)
3725 goto oom;
3726 debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
3727 debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
3728
3729 qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
3730 QDIO_DBF_SBAL_PAGES,
3731 QDIO_DBF_SBAL_NR_AREAS,
3732 QDIO_DBF_SBAL_LEN);
3733 if (!qdio_dbf_sbal)
3734 goto oom;
3735
3736 debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
3737 debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
3738
3739 qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
3740 QDIO_DBF_SENSE_PAGES,
3741 QDIO_DBF_SENSE_NR_AREAS,
3742 QDIO_DBF_SENSE_LEN);
3743 if (!qdio_dbf_sense)
3744 goto oom;
3745
3746 debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
3747 debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
3748
3749 qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
3750 QDIO_DBF_TRACE_PAGES,
3751 QDIO_DBF_TRACE_NR_AREAS,
3752 QDIO_DBF_TRACE_LEN);
3753 if (!qdio_dbf_trace)
3754 goto oom;
3755
3756 debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
3757 debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
3758
3759 #ifdef CONFIG_QDIO_DEBUG
3760 qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
3761 QDIO_DBF_SLSB_OUT_PAGES,
3762 QDIO_DBF_SLSB_OUT_NR_AREAS,
3763 QDIO_DBF_SLSB_OUT_LEN);
3764 if (!qdio_dbf_slsb_out)
3765 goto oom;
3766 debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
3767 debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
3768
3769 qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
3770 QDIO_DBF_SLSB_IN_PAGES,
3771 QDIO_DBF_SLSB_IN_NR_AREAS,
3772 QDIO_DBF_SLSB_IN_LEN);
3773 if (!qdio_dbf_slsb_in)
3774 goto oom;
3775 debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
3776 debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
3777 #endif /* CONFIG_QDIO_DEBUG */
3778 return 0;
3779 oom:
3780 QDIO_PRINT_ERR("not enough memory for dbf.\n");
3781 qdio_unregister_dbf_views();
3782 return -ENOMEM;
3783 }
3784
/*
 * Element constructor for qdio_mempool_scssc: each element is one
 * zeroed page allocated with GFP_DMA forced on (NOTE(review):
 * presumably because CHSC requires 31-bit addressable memory --
 * confirm against the chsc callers).
 */
static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
{
	return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
}
3789
/* Element destructor for qdio_mempool_scssc: release one page. */
static void qdio_mempool_free(void *element, void *size)
{
	free_page((unsigned long) element);
}
3794
3795 static int __init
3796 init_QDIO(void)
3797 {
3798 int res;
3799 #ifdef QDIO_PERFORMANCE_STATS
3800 void *ptr;
3801 #endif /* QDIO_PERFORMANCE_STATS */
3802
3803 printk("qdio: loading %s\n",version);
3804
3805 res=qdio_get_qdio_memory();
3806 if (res)
3807 return res;
3808
3809 res = qdio_register_dbf_views();
3810 if (res)
3811 return res;
3812
3813 QDIO_DBF_TEXT0(0,setup,"initQDIO");
3814
3815 #ifdef QDIO_PERFORMANCE_STATS
3816 memset((void*)&perf_stats,0,sizeof(perf_stats));
3817 QDIO_DBF_TEXT0(0,setup,"perfstat");
3818 ptr=&perf_stats;
3819 QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3820 #endif /* QDIO_PERFORMANCE_STATS */
3821
3822 qdio_add_procfs_entry();
3823
3824 qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
3825 qdio_mempool_alloc,
3826 qdio_mempool_free, NULL);
3827
3828 if (tiqdio_check_chsc_availability())
3829 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3830
3831 tiqdio_register_thinints();
3832
3833 return 0;
3834 }
3835
/*
 * Module exit: tear down everything set up by init_QDIO -- thinint
 * handler, proc entry, indicator memory, debug areas and the CHSC
 * mempool.
 */
static void __exit
cleanup_QDIO(void)
{
	tiqdio_unregister_thinints();
	qdio_remove_procfs_entry();
	qdio_release_qdio_memory();
	qdio_unregister_dbf_views();
	mempool_destroy(qdio_mempool_scssc);

	printk("qdio: %s: module removed\n",version);
}
3847
module_init(init_QDIO);
module_exit(cleanup_QDIO);

/* public entry points of the qdio base support */
EXPORT_SYMBOL(qdio_allocate);
EXPORT_SYMBOL(qdio_establish);
EXPORT_SYMBOL(qdio_initialize);
EXPORT_SYMBOL(qdio_activate);
EXPORT_SYMBOL(do_QDIO);
EXPORT_SYMBOL(qdio_shutdown);
EXPORT_SYMBOL(qdio_free);
EXPORT_SYMBOL(qdio_cleanup);
EXPORT_SYMBOL(qdio_synchronize);