1 /*
2  *
3  * linux/drivers/s390/cio/qdio.c
4  *
5  * Linux for S/390 QDIO base support, Hipersocket base support
6  * version 2
7  *
8  * Copyright 2000,2002 IBM Corporation
9  * Author(s):             Utz Bacher <utz.bacher@de.ibm.com>
10  * 2.6 cio integration by Cornelia Huck <cornelia.huck@de.ibm.com>
11  *
12  * Restriction: only 63 iqdio subchannels can have their own indicator;
13  * after that, subsequent subchannels share one indicator
14  *
15  *
16  *
17  *
18  * This program is free software; you can redistribute it and/or modify
19  * it under the terms of the GNU General Public License as published by
20  * the Free Software Foundation; either version 2, or (at your option)
21  * any later version.
22  *
23  * This program is distributed in the hope that it will be useful,
24  * but WITHOUT ANY WARRANTY; without even the implied warranty of
25  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
26  * GNU General Public License for more details.
27  *
28  * You should have received a copy of the GNU General Public License
29  * along with this program; if not, write to the Free Software
30  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
31  */
32
33 #include <linux/module.h>
34 #include <linux/init.h>
35
36 #include <linux/slab.h>
37 #include <linux/kernel.h>
38 #include <linux/proc_fs.h>
39 #include <linux/timer.h>
40 #include <linux/mempool.h>
41
42 #include <asm/ccwdev.h>
43 #include <asm/io.h>
44 #include <asm/atomic.h>
45 #include <asm/semaphore.h>
46 #include <asm/timex.h>
47
48 #include <asm/debug.h>
49 #include <asm/s390_rdev.h>
50 #include <asm/qdio.h>
51
52 #include "cio.h"
53 #include "css.h"
54 #include "device.h"
55 #include "airq.h"
56 #include "qdio.h"
57 #include "ioasm.h"
58 #include "chsc.h"
59
60 /****************** MODULE PARAMETER VARIABLES ********************/
61 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>");
62 MODULE_DESCRIPTION("QDIO base support version 2, " \
63                    "Copyright 2000 IBM Corporation");
64 MODULE_LICENSE("GPL");
65
66 /******************** HERE WE GO ***********************************/
67
68 static const char version[] = "QDIO base support version 2";
69
70 static int qdio_performance_stats = 0;
71 static int proc_perf_file_registration;
72 static struct qdio_perf_stats perf_stats;
73
74 static int hydra_thinints;
75 static int is_passthrough = 0;
76 static int omit_svs;
77
78 static int indicator_used[INDICATORS_PER_CACHELINE];
79 static __u32 * volatile indicators;
80 static __u32 volatile spare_indicator;
81 static atomic_t spare_indicator_usecount;
82 #define QDIO_MEMPOOL_SCSSC_ELEMENTS 2
83 static mempool_t *qdio_mempool_scssc;
84
85 static debug_info_t *qdio_dbf_setup;
86 static debug_info_t *qdio_dbf_sbal;
87 static debug_info_t *qdio_dbf_trace;
88 static debug_info_t *qdio_dbf_sense;
89 #ifdef CONFIG_QDIO_DEBUG
90 static debug_info_t *qdio_dbf_slsb_out;
91 static debug_info_t *qdio_dbf_slsb_in;
92 #endif /* CONFIG_QDIO_DEBUG */
93
94 /* iQDIO stuff: */
95 static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change
96                                                  during a while loop */
97 static DEFINE_SPINLOCK(ttiq_list_lock);
98 static int register_thinint_result;
99 static void tiqdio_tl(unsigned long);
100 static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0);
101
102 /* not a macro, as one of the arguments is atomic_read */
103 static inline int 
104 qdio_min(int a,int b)
105 {
106         if (a<b)
107                 return a;
108         else
109                 return b;
110 }
111
112 /***************** SCRUBBER HELPER ROUTINES **********************/
113 #ifdef CONFIG_64BIT
114 static inline void qdio_perf_stat_inc(atomic64_t *count)
115 {
116         if (qdio_performance_stats)
117                 atomic64_inc(count);
118 }
119
120 static inline void qdio_perf_stat_dec(atomic64_t *count)
121 {
122         if (qdio_performance_stats)
123                 atomic64_dec(count);
124 }
125 #else /* CONFIG_64BIT */
126 static inline void qdio_perf_stat_inc(atomic_t *count)
127 {
128         if (qdio_performance_stats)
129                 atomic_inc(count);
130 }
131
132 static inline void qdio_perf_stat_dec(atomic_t *count)
133 {
134         if (qdio_performance_stats)
135                 atomic_dec(count);
136 }
137 #endif /* CONFIG_64BIT */
138
139 static inline __u64 
140 qdio_get_micros(void)
141 {
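        /* the S/390 TOD clock counts 4096 (2^12) units per microsecond
         * (bit 51 == 1 microsecond), hence the shift below */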
142         return (get_clock() >> 12); /* time>>12 is microseconds */
143 }
144
145 /* 
146  * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve
147  * the q in any case, so that we'll not be interrupted when we are in
148  * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost
149  * always works (famous last words)
150  */
151 static inline int 
152 qdio_reserve_q(struct qdio_q *q)
153 {
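        /* returns the previous use count; non-zero means the queue is
         * already held by someone else */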
154         return atomic_add_return(1,&q->use_count) - 1;
155 }
156
157 static inline void 
158 qdio_release_q(struct qdio_q *q)
159 {
160         atomic_dec(&q->use_count);
161 }
162
163 /* check the condition code qualifier (ccq) returned by EQBS/SQBS */
164 static int
165 qdio_check_ccq(struct qdio_q *q, unsigned int ccq)
166 {
167         char dbf_text[15];
168
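        /* ccq 0, 32 and 96 are treated as done; 97 means the instruction
         * should be issued again; anything else is reported as an error */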
169         if (ccq == 0 || ccq == 32 || ccq == 96)
170                 return 0;
171         if (ccq == 97)
172                 return 1;
173         /*notify devices immediately*/
174         sprintf(dbf_text,"%d", ccq);
175         QDIO_DBF_TEXT2(1,trace,dbf_text);
176         return -EIO;
177 }
178 /* EQBS: extract buffer states */
179 static int
180 qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
181              unsigned int *start, unsigned int *cnt)
182 {
183         struct qdio_irq *irq;
184         unsigned int tmp_cnt, q_no, ccq;
185         int rc ;
186         char dbf_text[15];
187
188         ccq = 0;
189         tmp_cnt = *cnt;
190         irq = (struct qdio_irq*)q->irq_ptr;
191         q_no = q->q_no;
192         if(!q->is_input_q)
193                 q_no += irq->no_input_qs;
194 again:
195         ccq = do_eqbs(irq->sch_token, state, q_no, start, cnt);
196         rc = qdio_check_ccq(q, ccq);
197         if (rc == 1) {
198                 QDIO_DBF_TEXT5(1,trace,"eqAGAIN");
199                 goto again;
200         }
201         if (rc < 0) {
202                 QDIO_DBF_TEXT2(1,trace,"eqberr");
203                 sprintf(dbf_text,"%2x,%2x,%d,%d",tmp_cnt, *cnt, ccq, q_no);
204                 QDIO_DBF_TEXT2(1,trace,dbf_text);
205                 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
206                                 QDIO_STATUS_LOOK_FOR_ERROR,
207                                 0, 0, 0, -1, -1, q->int_parm);
208                 return 0;
209         }
210         return (tmp_cnt - *cnt);
211 }
212
213 /* SQBS: set buffer states */
214 static int
215 qdio_do_sqbs(struct qdio_q *q, unsigned char state,
216              unsigned int *start, unsigned int *cnt)
217 {
218         struct qdio_irq *irq;
219         unsigned int tmp_cnt, q_no, ccq;
220         int rc;
221         char dbf_text[15];
222
223         ccq = 0;
224         tmp_cnt = *cnt;
225         irq = (struct qdio_irq*)q->irq_ptr;
226         q_no = q->q_no;
227         if(!q->is_input_q)
228                 q_no += irq->no_input_qs;
229 again:
230         ccq = do_sqbs(irq->sch_token, state, q_no, start, cnt);
231         rc = qdio_check_ccq(q, ccq);
232         if (rc == 1) {
233                 QDIO_DBF_TEXT5(1,trace,"sqAGAIN");
234                 goto again;
235         }
236         if (rc < 0) {
237                 QDIO_DBF_TEXT3(1,trace,"sqberr");
238                 sprintf(dbf_text,"%2x,%2x",tmp_cnt,*cnt);
239                 QDIO_DBF_TEXT3(1,trace,dbf_text);
240                 sprintf(dbf_text,"%d,%d",ccq,q_no);
241                 QDIO_DBF_TEXT3(1,trace,dbf_text);
242                 q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
243                                 QDIO_STATUS_LOOK_FOR_ERROR,
244                                 0, 0, 0, -1, -1, q->int_parm);
245                 return 0;
246         }
247         return (tmp_cnt - *cnt);
248 }
249
250 static inline int
251 qdio_set_slsb(struct qdio_q *q, unsigned int *bufno,
252               unsigned char state, unsigned int *count)
253 {
254         volatile char *slsb;
255         struct qdio_irq *irq;
256
257         irq = (struct qdio_irq*)q->irq_ptr;
258         if (!irq->is_qebsm) {
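                /* without QEBSM we update the SLSB entry directly in storage */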
259                 slsb = (char *)&q->slsb.acc.val[(*bufno)];
260                 xchg(slsb, state);
261                 return 1;
262         }
263         return qdio_do_sqbs(q, state, bufno, count);
264 }
265
266 #ifdef CONFIG_QDIO_DEBUG
267 static inline void
268 qdio_trace_slsb(struct qdio_q *q)
269 {
270         if (q->queue_type==QDIO_TRACE_QTYPE) {
271                 if (q->is_input_q)
272                         QDIO_DBF_HEX2(0,slsb_in,&q->slsb,
273                                       QDIO_MAX_BUFFERS_PER_Q);
274                 else
275                         QDIO_DBF_HEX2(0,slsb_out,&q->slsb,
276                                       QDIO_MAX_BUFFERS_PER_Q);
277         }
278 }
279 #endif
280
281 static inline int
282 set_slsb(struct qdio_q *q, unsigned int *bufno,
283          unsigned char state, unsigned int *count)
284 {
285         int rc;
286 #ifdef CONFIG_QDIO_DEBUG
287         qdio_trace_slsb(q);
288 #endif
289         rc = qdio_set_slsb(q, bufno, state, count);
290 #ifdef CONFIG_QDIO_DEBUG
291         qdio_trace_slsb(q);
292 #endif
293         return rc;
294 }
295 static inline int 
296 qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
297                unsigned int gpr3)
298 {
299         int cc;
300
301         QDIO_DBF_TEXT4(0,trace,"sigasync");
302         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
303
304         qdio_perf_stat_inc(&perf_stats.siga_syncs);
305
306         cc = do_siga_sync(q->schid, gpr2, gpr3);
307         if (cc)
308                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
309
310         return cc;
311 }
312
313 static inline int
314 qdio_siga_sync_q(struct qdio_q *q)
315 {
316         if (q->is_input_q)
317                 return qdio_siga_sync(q, 0, q->mask);
318         return qdio_siga_sync(q, q->mask, 0);
319 }
320
321 static int
322 __do_siga_output(struct qdio_q *q, unsigned int *busy_bit)
323 {
324        struct qdio_irq *irq;
325        unsigned int fc = 0;
326        unsigned long schid;
327
328        irq = (struct qdio_irq *) q->irq_ptr;
329        if (!irq->is_qebsm)
330                schid = *((u32 *)&q->schid);
331        else {
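               /* with QEBSM the queue is addressed by the subchannel token;
                * the extra bit in the function code indicates this */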
332                schid = irq->sch_token;
333                fc |= 0x80;
334        }
335        return do_siga_output(schid, q->mask, busy_bit, fc);
336 }
337
338 /* 
339  * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
340  * an access exception 
341  */
342 static int
343 qdio_siga_output(struct qdio_q *q)
344 {
345         int cc;
346         __u32 busy_bit;
347         __u64 start_time=0;
348
349         qdio_perf_stat_inc(&perf_stats.siga_outs);
350
351         QDIO_DBF_TEXT4(0,trace,"sigaout");
352         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
353
354         for (;;) {
355                 cc = __do_siga_output(q, &busy_bit);
356 //QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
357                 if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
358                         if (!start_time) 
359                                 start_time=NOW;
360                         if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
361                                 break;
362                 } else
363                         break;
364         }
365         
366         if ((cc==2) && (busy_bit)) 
367                 cc |= QDIO_SIGA_ERROR_B_BIT_SET;
368
369         if (cc)
370                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
371
372         return cc;
373 }
374
375 static int
376 qdio_siga_input(struct qdio_q *q)
377 {
378         int cc;
379
380         QDIO_DBF_TEXT4(0,trace,"sigain");
381         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
382
383         qdio_perf_stat_inc(&perf_stats.siga_ins);
384
385         cc = do_siga_input(q->schid, q->mask);
386         
387         if (cc)
388                 QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));
389
390         return cc;
391 }
392
393 /* locked by the locks in qdio_activate and qdio_cleanup */
394 static __u32 *
395 qdio_get_indicator(void)
396 {
397         int i;
398
399         for (i=1;i<INDICATORS_PER_CACHELINE;i++)
400                 if (!indicator_used[i]) {
401                         indicator_used[i]=1;
402                         return indicators+i;
403                 }
404         atomic_inc(&spare_indicator_usecount);
405         return (__u32 * volatile) &spare_indicator;
406 }
407
408 /* locked by the locks in qdio_activate and qdio_cleanup */
409 static void 
410 qdio_put_indicator(__u32 *addr)
411 {
412         int i;
413
414         if ( (addr) && (addr!=&spare_indicator) ) {
415                 i=addr-indicators;
416                 indicator_used[i]=0;
417         }
418         if (addr == &spare_indicator)
419                 atomic_dec(&spare_indicator_usecount);
420 }
421
422 static inline void
423 tiqdio_clear_summary_bit(__u32 *location)
424 {
425         QDIO_DBF_TEXT5(0,trace,"clrsummb");
426         QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
427
428         xchg(location,0);
429 }
430
431 static inline  void
432 tiqdio_set_summary_bit(__u32 *location)
433 {
434         QDIO_DBF_TEXT5(0,trace,"setsummb");
435         QDIO_DBF_HEX5(0,trace,&location,sizeof(void*));
436
437         xchg(location,-1);
438 }
439
440 static inline void 
441 tiqdio_sched_tl(void)
442 {
443         tasklet_hi_schedule(&tiqdio_tasklet);
444 }
445
446 static void
447 qdio_mark_tiq(struct qdio_q *q)
448 {
449         unsigned long flags;
450
451         QDIO_DBF_TEXT4(0,trace,"mark iq");
452         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
453
454         spin_lock_irqsave(&ttiq_list_lock,flags);
455         if (unlikely(atomic_read(&q->is_in_shutdown)))
456                 goto out_unlock;
457
458         if (!q->is_input_q)
459                 goto out_unlock;
460
461         if ((q->list_prev) || (q->list_next)) 
462                 goto out_unlock;
463
464         if (!tiq_list) {
465                 tiq_list=q;
466                 q->list_prev=q;
467                 q->list_next=q;
468         } else {
469                 q->list_next=tiq_list;
470                 q->list_prev=tiq_list->list_prev;
471                 tiq_list->list_prev->list_next=q;
472                 tiq_list->list_prev=q;
473         }
474         spin_unlock_irqrestore(&ttiq_list_lock,flags);
475
476         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
477         tiqdio_sched_tl();
478         return;
479 out_unlock:
480         spin_unlock_irqrestore(&ttiq_list_lock,flags);
481         return;
482 }
483
484 static inline void
485 qdio_mark_q(struct qdio_q *q)
486 {
487         QDIO_DBF_TEXT4(0,trace,"mark q");
488         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
489
490         if (unlikely(atomic_read(&q->is_in_shutdown)))
491                 return;
492
493         tasklet_schedule(&q->tasklet);
494 }
495
496 static int
497 qdio_stop_polling(struct qdio_q *q)
498 {
499 #ifdef QDIO_USE_PROCESSING_STATE
500        unsigned int tmp, gsf, count = 1;
501        unsigned char state = 0;
502        struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
503
504         if (!atomic_xchg(&q->polling,0))
505                 return 1;
506
507         QDIO_DBF_TEXT4(0,trace,"stoppoll");
508         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
509
510         /* show the card that we are not polling anymore */
511         if (!q->is_input_q)
512                 return 1;
513
514        tmp = gsf = GET_SAVED_FRONTIER(q);
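       /* step back one buffer (modulo the ring size): that buffer was left
        * in the PROCESSING state */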
515        tmp = ((tmp + QDIO_MAX_BUFFERS_PER_Q-1) & (QDIO_MAX_BUFFERS_PER_Q-1) );
516        set_slsb(q, &tmp, SLSB_P_INPUT_NOT_INIT, &count);
517
518         /* 
519          * we don't issue this SYNC_MEMORY, as we trust Rick T and
520          * moreover will not use the PROCESSING state under VM, so
521          * q->polling was 0 anyway
522          */
523         /*SYNC_MEMORY;*/
524        if (irq->is_qebsm) {
525                count = 1;
526                qdio_do_eqbs(q, &state, &gsf, &count);
527        } else
528                state = q->slsb.acc.val[gsf];
529        if (state != SLSB_P_INPUT_PRIMED)
530                 return 1;
531         /* 
532          * set our summary bit again, as otherwise there is a
533          * small window between resetting it and checking for
534          * PRIMED state during which we could miss an interrupt
535          */
536         if (q->is_thinint_q)
537                 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
538         return 0;
539
540 #else /* QDIO_USE_PROCESSING_STATE */
541         return 1;
542 #endif /* QDIO_USE_PROCESSING_STATE */
543 }
544
545 /* 
546  * see the comment in do_QDIO and before qdio_reserve_q about the
547  * sophisticated locking outside of unmark_q, so that we don't need to
548  * disable the interrupts :-) 
549  */
550 static void
551 qdio_unmark_q(struct qdio_q *q)
552 {
553         unsigned long flags;
554
555         QDIO_DBF_TEXT4(0,trace,"unmark q");
556         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
557
558         if ((!q->list_prev)||(!q->list_next))
559                 return;
560
561         if ((q->is_thinint_q)&&(q->is_input_q)) {
562                 /* iQDIO */
563                 spin_lock_irqsave(&ttiq_list_lock,flags);
564                 /* in case cleanup has done this already and simultaneously
565                  * qdio_unmark_q is called from the interrupt handler, we've
566                  * got to check this in this specific case again */
567                 if ((!q->list_prev)||(!q->list_next))
568                         goto out;
569                 if (q->list_next==q) {
570                         /* q was the only interesting q */
571                         tiq_list=NULL;
572                         q->list_next=NULL;
573                         q->list_prev=NULL;
574                 } else {
575                         q->list_next->list_prev=q->list_prev;
576                         q->list_prev->list_next=q->list_next;
577                         tiq_list=q->list_next;
578                         q->list_next=NULL;
579                         q->list_prev=NULL;
580                 }
581 out:
582                 spin_unlock_irqrestore(&ttiq_list_lock,flags);
583         }
584 }
585
586 static inline unsigned long 
587 tiqdio_clear_global_summary(void)
588 {
589         unsigned long time;
590
591         QDIO_DBF_TEXT5(0,trace,"clrglobl");
592         
593         time = do_clear_global_summary();
594
595         QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long));
596
597         return time;
598 }
599
600
601 /************************* OUTBOUND ROUTINES *******************************/
602 static int
603 qdio_qebsm_get_outbound_buffer_frontier(struct qdio_q *q)
604 {
605         struct qdio_irq *irq;
606         unsigned char state;
607         unsigned int cnt, count, ftc;
608
609         irq = (struct qdio_irq *) q->irq_ptr;
610         if ((!q->is_iqdio_q) && (!q->hydra_gives_outbound_pcis))
611                 SYNC_MEMORY;
612
613         ftc = q->first_to_check;
614         count = qdio_min(atomic_read(&q->number_of_buffers_used),
615                         (QDIO_MAX_BUFFERS_PER_Q-1));
616         if (count == 0)
617                 return q->first_to_check;
618         cnt = qdio_do_eqbs(q, &state, &ftc, &count);
619         if (cnt == 0)
620                 return q->first_to_check;
621         switch (state) {
622         case SLSB_P_OUTPUT_ERROR:
623                 QDIO_DBF_TEXT3(0,trace,"outperr");
624                 atomic_sub(cnt , &q->number_of_buffers_used);
625                 if (q->qdio_error)
626                         q->error_status_flags |=
627                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
628                 q->qdio_error = SLSB_P_OUTPUT_ERROR;
629                 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
630                 q->first_to_check = ftc;
631                 break;
632         case SLSB_P_OUTPUT_EMPTY:
633                 QDIO_DBF_TEXT5(0,trace,"outpempt");
634                 atomic_sub(cnt, &q->number_of_buffers_used);
635                 q->first_to_check = ftc;
636                 break;
637         case SLSB_CU_OUTPUT_PRIMED:
638                 /* all buffers primed */
639                 QDIO_DBF_TEXT5(0,trace,"outpprim");
640                 break;
641         default:
642                 break;
643         }
644         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
645         return q->first_to_check;
646 }
647
648 static int
649 qdio_qebsm_get_inbound_buffer_frontier(struct qdio_q *q)
650 {
651         struct qdio_irq *irq;
652         unsigned char state;
653         int tmp, ftc, count, cnt;
654         char dbf_text[15];
655
656
657         irq = (struct qdio_irq *) q->irq_ptr;
658         ftc = q->first_to_check;
659         count = qdio_min(atomic_read(&q->number_of_buffers_used),
660                         (QDIO_MAX_BUFFERS_PER_Q-1));
661         if (count == 0)
662                  return q->first_to_check;
663         cnt = qdio_do_eqbs(q, &state, &ftc, &count);
664         if (cnt == 0)
665                  return q->first_to_check;
666         switch (state) {
667         case SLSB_P_INPUT_ERROR :
668 #ifdef CONFIG_QDIO_DEBUG
669                 QDIO_DBF_TEXT3(1,trace,"inperr");
670                 sprintf(dbf_text,"%2x,%2x",ftc,count);
671                 QDIO_DBF_TEXT3(1,trace,dbf_text);
672 #endif /* CONFIG_QDIO_DEBUG */
673                 if (q->qdio_error)
674                         q->error_status_flags |=
675                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
676                 q->qdio_error = SLSB_P_INPUT_ERROR;
677                 q->error_status_flags |= QDIO_STATUS_LOOK_FOR_ERROR;
678                 atomic_sub(cnt, &q->number_of_buffers_used);
679                 q->first_to_check = ftc;
680                 break;
681         case SLSB_P_INPUT_PRIMED :
682                 QDIO_DBF_TEXT3(0,trace,"inptprim");
683                 sprintf(dbf_text,"%2x,%2x",ftc,count);
684                 QDIO_DBF_TEXT3(1,trace,dbf_text);
685                 tmp = 0;
686                 ftc = q->first_to_check;
687 #ifdef QDIO_USE_PROCESSING_STATE
688                 if (cnt > 1) {
689                         cnt -= 1;
690                         tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
691                         if (!tmp)
692                                 break;
693                 }
694                 cnt = 1;
695                 tmp += set_slsb(q, &ftc,
696                                SLSB_P_INPUT_PROCESSING, &cnt);
697                 atomic_set(&q->polling, 1);
698 #else
699                 tmp = set_slsb(q, &ftc, SLSB_P_INPUT_NOT_INIT, &cnt);
700 #endif
701                 atomic_sub(tmp, &q->number_of_buffers_used);
702                 q->first_to_check = ftc;
703                 break;
704         case SLSB_CU_INPUT_EMPTY:
705         case SLSB_P_INPUT_NOT_INIT:
706         case SLSB_P_INPUT_PROCESSING:
707                 QDIO_DBF_TEXT5(0,trace,"inpnipro");
708                 break;
709         default:
710                 break;
711         }
712         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
713         return q->first_to_check;
714 }
715
716 static int
717 qdio_get_outbound_buffer_frontier(struct qdio_q *q)
718 {
719         struct qdio_irq *irq;
720         volatile char *slsb;
721         unsigned int count = 1;
722         int first_not_to_check, f, f_mod_no;
723         char dbf_text[15];
724
725         QDIO_DBF_TEXT4(0,trace,"getobfro");
726         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
727
728         irq = (struct qdio_irq *) q->irq_ptr;
729         if (irq->is_qebsm)
730                 return qdio_qebsm_get_outbound_buffer_frontier(q);
731
732         slsb=&q->slsb.acc.val[0];
733         f_mod_no=f=q->first_to_check;
734         /* 
735          * f points to already processed elements, so f+no_used is correct...
736          * ... but: we don't check 128 buffers, as otherwise
737          * qdio_has_outbound_q_moved would return 0 
738          */
739         first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
740                                       (QDIO_MAX_BUFFERS_PER_Q-1));
741
742         if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
743                 SYNC_MEMORY;
744
745 check_next:
746         if (f==first_not_to_check) 
747                 goto out;
748
749         switch(slsb[f_mod_no]) {
750
751         /* the adapter has not fetched the output yet */
752         case SLSB_CU_OUTPUT_PRIMED:
753                 QDIO_DBF_TEXT5(0,trace,"outpprim");
754                 break;
755
756         /* the adapter got it */
757         case SLSB_P_OUTPUT_EMPTY:
758                 atomic_dec(&q->number_of_buffers_used);
759                 f++;
760                 f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
761                 QDIO_DBF_TEXT5(0,trace,"outpempt");
762                 goto check_next;
763
764         case SLSB_P_OUTPUT_ERROR:
765                 QDIO_DBF_TEXT3(0,trace,"outperr");
766                 sprintf(dbf_text,"%x-%x-%x",f_mod_no,
767                         q->sbal[f_mod_no]->element[14].sbalf.value,
768                         q->sbal[f_mod_no]->element[15].sbalf.value);
769                 QDIO_DBF_TEXT3(1,trace,dbf_text);
770                 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
771
772                 /* kind of process the buffer */
773                 set_slsb(q, &f_mod_no, SLSB_P_OUTPUT_NOT_INIT, &count);
774
775                 /* 
776                  * we increment the frontier, as this buffer
777                  * was obviously processed
778                  */
779                 atomic_dec(&q->number_of_buffers_used);
780                 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
781
782                 if (q->qdio_error)
783                         q->error_status_flags|=
784                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
785                 q->qdio_error=SLSB_P_OUTPUT_ERROR;
786                 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
787
788                 break;
789
790         /* no new buffers */
791         default:
792                 QDIO_DBF_TEXT5(0,trace,"outpni");
793         }
794 out:
795         return (q->first_to_check=f_mod_no);
796 }
797
798 /* all buffers are processed */
799 static int
800 qdio_is_outbound_q_done(struct qdio_q *q)
801 {
802         int no_used;
803 #ifdef CONFIG_QDIO_DEBUG
804         char dbf_text[15];
805 #endif
806
807         no_used=atomic_read(&q->number_of_buffers_used);
808
809 #ifdef CONFIG_QDIO_DEBUG
810         if (no_used) {
811                 sprintf(dbf_text,"oqisnt%02x",no_used);
812                 QDIO_DBF_TEXT4(0,trace,dbf_text);
813         } else {
814                 QDIO_DBF_TEXT4(0,trace,"oqisdone");
815         }
816         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
817 #endif /* CONFIG_QDIO_DEBUG */
818         return (no_used==0);
819 }
820
821 static int
822 qdio_has_outbound_q_moved(struct qdio_q *q)
823 {
824         int i;
825
826         i=qdio_get_outbound_buffer_frontier(q);
827
828         if ( (i!=GET_SAVED_FRONTIER(q)) ||
829              (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
830                 SAVE_FRONTIER(q,i);
831                 QDIO_DBF_TEXT4(0,trace,"oqhasmvd");
832                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
833                 return 1;
834         } else {
835                 QDIO_DBF_TEXT4(0,trace,"oqhsntmv");
836                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
837                 return 0;
838         }
839 }
840
841 static void
842 qdio_kick_outbound_q(struct qdio_q *q)
843 {
844         int result;
845 #ifdef CONFIG_QDIO_DEBUG
846         char dbf_text[15];
847
848         QDIO_DBF_TEXT4(0,trace,"kickoutq");
849         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
850 #endif /* CONFIG_QDIO_DEBUG */
851
852         if (!q->siga_out)
853                 return;
854
855         /* here's the story with cc=2 and busy bit set (thanks, Rick):
856          * VM's CP could present us cc=2 and busy bit set on SIGA-write
857          * during reconfiguration of their Guest LAN (only in HIPERS mode,
858          * QDIO mode is asynchronous -- cc=2 and busy bit there will take
859          * the queues down immediately; and not being under VM we have a
860          * problem on cc=2 and busy bit set right away).
861          *
862          * Therefore qdio_siga_output will keep retrying for a short time
863          * if such a condition occurs. If it doesn't change, it will
864          * increase the busy_siga_counter and save the timestamp, and
865          * schedule the queue for later processing (via mark_q, using the
866          * queue tasklet). __qdio_outbound_processing will check out the
867          * counter. If non-zero, it will call qdio_kick_outbound_q as often
868          * as the value of the counter. This will attempt further SIGA
869          * instructions. For each successful SIGA, the counter is
870          * decreased; for failing SIGAs the counter remains the
871          * same.
872          * After some time of no movement, qdio_kick_outbound_q will
873          * finally fail and reflect corresponding error codes to call
874          * the upper layer module and have it take the queues down.
875          *
876          * Note that this is a change from the original HiperSockets design
877          * (saying cc=2 and busy bit means take the queues down), but in
878          * those days Guest LAN didn't exist... excessive cc=2 with busy bit
879          * conditions will still take the queues down, but the threshold is
880          * higher due to the Guest LAN environment.
881          */
882
883
884         result=qdio_siga_output(q);
885
886         switch (result) {
887         case 0:
888                 /* went smooth this time, reset timestamp */
889 #ifdef CONFIG_QDIO_DEBUG
890                 QDIO_DBF_TEXT3(0,trace,"cc2reslv");
891                 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
892                         atomic_read(&q->busy_siga_counter));
893                 QDIO_DBF_TEXT3(0,trace,dbf_text);
894 #endif /* CONFIG_QDIO_DEBUG */
895                 q->timing.busy_start=0;
896                 break;
897         case (2|QDIO_SIGA_ERROR_B_BIT_SET):
898                 /* cc=2 and busy bit: */
899                 atomic_inc(&q->busy_siga_counter);
900
901                 /* if the last siga was successful, save
902                  * timestamp here */
903                 if (!q->timing.busy_start)
904                         q->timing.busy_start=NOW;
905
906                 /* if we're in time, don't touch error_status_flags
907                  * and siga_error */
908                 if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
909                         qdio_mark_q(q);
910                         break;
911                 }
912                 QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
913 #ifdef CONFIG_QDIO_DEBUG
914                 sprintf(dbf_text,"%4x%2x%2x",q->schid.sch_no,q->q_no,
915                         atomic_read(&q->busy_siga_counter));
916                 QDIO_DBF_TEXT3(0,trace,dbf_text);
917 #endif /* CONFIG_QDIO_DEBUG */
918                 /* else fallthrough and report error */
919         default:
920                 /* for plain cc=1, 2 or 3: */
921                 if (q->siga_error)
922                         q->error_status_flags|=
923                                 QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
924                 q->error_status_flags|=
925                         QDIO_STATUS_LOOK_FOR_ERROR;
926                 q->siga_error=result;
927         }
928 }
929
930 static void
931 qdio_kick_outbound_handler(struct qdio_q *q)
932 {
933         int start, end, real_end, count;
934 #ifdef CONFIG_QDIO_DEBUG
935         char dbf_text[15];
936 #endif
937
938         start = q->first_element_to_kick;
939         /* last_move_ftc was just updated */
940         real_end = GET_SAVED_FRONTIER(q);
941         end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
942                 (QDIO_MAX_BUFFERS_PER_Q-1);
943         count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
944                 (QDIO_MAX_BUFFERS_PER_Q-1);
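        /* end and count are computed modulo QDIO_MAX_BUFFERS_PER_Q so ring
         * wrap-around is handled; count covers the buffers from start up to
         * and including end */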
945
946 #ifdef CONFIG_QDIO_DEBUG
947         QDIO_DBF_TEXT4(0,trace,"kickouth");
948         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
949
950         sprintf(dbf_text,"s=%2xc=%2x",start,count);
951         QDIO_DBF_TEXT4(0,trace,dbf_text);
952 #endif /* CONFIG_QDIO_DEBUG */
953
954         if (q->state==QDIO_IRQ_STATE_ACTIVE)
955                 q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
956                            q->error_status_flags,
957                            q->qdio_error,q->siga_error,q->q_no,start,count,
958                            q->int_parm);
959
960         /* for the next time: */
961         q->first_element_to_kick=real_end;
962         q->qdio_error=0;
963         q->siga_error=0;
964         q->error_status_flags=0;
965 }
966
967 static void
968 __qdio_outbound_processing(struct qdio_q *q)
969 {
970         int siga_attempts;
971
972         QDIO_DBF_TEXT4(0,trace,"qoutproc");
973         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
974
975         if (unlikely(qdio_reserve_q(q))) {
976                 qdio_release_q(q);
977                 qdio_perf_stat_inc(&perf_stats.outbound_tl_runs_resched);
978                 /* as we're sissies, we'll check next time */
979                 if (likely(!atomic_read(&q->is_in_shutdown))) {
980                         qdio_mark_q(q);
981                         QDIO_DBF_TEXT4(0,trace,"busy,agn");
982                 }
983                 return;
984         }
985         qdio_perf_stat_inc(&perf_stats.outbound_tl_runs);
986         qdio_perf_stat_inc(&perf_stats.tl_runs);
987
988         /* see comment in qdio_kick_outbound_q */
989         siga_attempts=atomic_read(&q->busy_siga_counter);
990         while (siga_attempts) {
991                 atomic_dec(&q->busy_siga_counter);
992                 qdio_kick_outbound_q(q);
993                 siga_attempts--;
994         }
995
996         if (qdio_has_outbound_q_moved(q))
997                 qdio_kick_outbound_handler(q);
998
999         if (q->is_iqdio_q) {
1000                 /* 
1001                  * for asynchronous queues, we had better check whether the
1002                  * sent buffer has already been switched from PRIMED to EMPTY.
1003                  */
1004                 if ((q->queue_type == QDIO_IQDIO_QFMT_ASYNCH) &&
1005                     !qdio_is_outbound_q_done(q))
1006                         qdio_mark_q(q);
1007
1008         } else if (!q->hydra_gives_outbound_pcis)
1009                 if (!qdio_is_outbound_q_done(q))
1010                         qdio_mark_q(q);
1011
1012         qdio_release_q(q);
1013 }
1014
1015 static void
1016 qdio_outbound_processing(struct qdio_q *q)
1017 {
1018         __qdio_outbound_processing(q);
1019 }
1020
1021 /************************* INBOUND ROUTINES *******************************/
1022
1023
1024 static int
1025 qdio_get_inbound_buffer_frontier(struct qdio_q *q)
1026 {
1027         struct qdio_irq *irq;
1028         int f,f_mod_no;
1029         volatile char *slsb;
1030         unsigned int count = 1;
1031         int first_not_to_check;
1032 #ifdef CONFIG_QDIO_DEBUG
1033         char dbf_text[15];
1034 #endif /* CONFIG_QDIO_DEBUG */
1035 #ifdef QDIO_USE_PROCESSING_STATE
1036         int last_position=-1;
1037 #endif /* QDIO_USE_PROCESSING_STATE */
1038
1039         QDIO_DBF_TEXT4(0,trace,"getibfro");
1040         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1041
1042         irq = (struct qdio_irq *) q->irq_ptr;
1043         if (irq->is_qebsm)
1044                 return qdio_qebsm_get_inbound_buffer_frontier(q);
1045
1046         slsb=&q->slsb.acc.val[0];
1047         f_mod_no=f=q->first_to_check;
1048         /* 
1049          * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
1050          * would return 0 
1051          */
1052         first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
1053                                       (QDIO_MAX_BUFFERS_PER_Q-1));
1054
1055         /* 
1056          * we don't use this one, since either a PCI or our processing
1057          * after a thin interrupt will sync the queues
1058          */
1059         /* SYNC_MEMORY;*/
1060
1061 check_next:
1062         f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
1063         if (f==first_not_to_check) 
1064                 goto out;
1065         switch (slsb[f_mod_no]) {
1066
1067         /* CU_EMPTY means frontier is reached */
1068         case SLSB_CU_INPUT_EMPTY:
1069                 QDIO_DBF_TEXT5(0,trace,"inptempt");
1070                 break;
1071
1072         /* P_PRIMED means set slsb to P_PROCESSING and move on */
1073         case SLSB_P_INPUT_PRIMED:
1074                 QDIO_DBF_TEXT5(0,trace,"inptprim");
1075
1076 #ifdef QDIO_USE_PROCESSING_STATE
1077                 /* 
1078                  * when running under VM, polling the input queues will
1079                  * kill VM in terms of CP overhead 
1080                  */
1081                 if (q->siga_sync) {
1082                         set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1083                 } else {
1084                         /* set the previous buffer to NOT_INIT. The current
1085                          * buffer will be set to PROCESSING at the end of
1086                          * this function to avoid further interrupts. */
1087                         if (last_position>=0)
1088                                 set_slsb(q, &last_position,
1089                                          SLSB_P_INPUT_NOT_INIT, &count);
1090                         atomic_set(&q->polling,1);
1091                         last_position=f_mod_no;
1092                 }
1093 #else /* QDIO_USE_PROCESSING_STATE */
1094                 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1095 #endif /* QDIO_USE_PROCESSING_STATE */
1096                 /* 
1097                  * not needed, as the inbound queue will be synced on the next
1098                  * siga-r, or else tiqdio_is_inbound_q_done will do the siga-s
1099                  */
1100                 /*SYNC_MEMORY;*/
1101                 f++;
1102                 atomic_dec(&q->number_of_buffers_used);
1103                 goto check_next;
1104
1105         case SLSB_P_INPUT_NOT_INIT:
1106         case SLSB_P_INPUT_PROCESSING:
1107                 QDIO_DBF_TEXT5(0,trace,"inpnipro");
1108                 break;
1109
1110         /* P_ERROR means frontier is reached, break and report error */
1111         case SLSB_P_INPUT_ERROR:
1112 #ifdef CONFIG_QDIO_DEBUG
1113                 sprintf(dbf_text,"inperr%2x",f_mod_no);
1114                 QDIO_DBF_TEXT3(1,trace,dbf_text);
1115 #endif /* CONFIG_QDIO_DEBUG */
1116                 QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);
1117
1118                 /* kind of process the buffer */
1119                 set_slsb(q, &f_mod_no, SLSB_P_INPUT_NOT_INIT, &count);
1120
1121                 if (q->qdio_error)
1122                         q->error_status_flags|=
1123                                 QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
1124                 q->qdio_error=SLSB_P_INPUT_ERROR;
1125                 q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
1126
1127                 /* we increment the frontier, as this buffer
1128                  * was obviously processed */
1129                 f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1130                 atomic_dec(&q->number_of_buffers_used);
1131
1132 #ifdef QDIO_USE_PROCESSING_STATE
1133                 last_position=-1;
1134 #endif /* QDIO_USE_PROCESSING_STATE */
1135
1136                 break;
1137
1138         /* everything else means frontier not changed (HALTED or so) */
1139         default: 
1140                 break;
1141         }
1142 out:
1143         q->first_to_check=f_mod_no;
1144
1145 #ifdef QDIO_USE_PROCESSING_STATE
1146         if (last_position>=0)
1147                 set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
1148 #endif /* QDIO_USE_PROCESSING_STATE */
1149
1150         QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
1151
1152         return q->first_to_check;
1153 }
1154
1155 static int
1156 qdio_has_inbound_q_moved(struct qdio_q *q)
1157 {
1158         int i;
1159
1160         i=qdio_get_inbound_buffer_frontier(q);
1161         if ( (i!=GET_SAVED_FRONTIER(q)) ||
1162              (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
1163                 SAVE_FRONTIER(q,i);
1164                 if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
1165                         SAVE_TIMESTAMP(q);
1166
1167                 QDIO_DBF_TEXT4(0,trace,"inhasmvd");
1168                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1169                 return 1;
1170         } else {
1171                 QDIO_DBF_TEXT4(0,trace,"inhsntmv");
1172                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1173                 return 0;
1174         }
1175 }
1176
1177 /* means, no more buffers to be filled */
1178 static int
1179 tiqdio_is_inbound_q_done(struct qdio_q *q)
1180 {
1181         int no_used;
1182         unsigned int start_buf, count;
1183         unsigned char state = 0;
1184         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1185
1186 #ifdef CONFIG_QDIO_DEBUG
1187         char dbf_text[15];
1188 #endif
1189
1190         no_used=atomic_read(&q->number_of_buffers_used);
1191
1192         /* propagate the change from 82 to 80 through VM */
1193         SYNC_MEMORY;
1194
1195 #ifdef CONFIG_QDIO_DEBUG
1196         if (no_used) {
1197                 sprintf(dbf_text,"iqisnt%02x",no_used);
1198                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1199         } else {
1200                 QDIO_DBF_TEXT4(0,trace,"iniqisdo");
1201         }
1202         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1203 #endif /* CONFIG_QDIO_DEBUG */
1204
1205         if (!no_used)
1206                 return 1;
1207         if (!q->siga_sync && !irq->is_qebsm)
1208                 /* we'll check for more primed buffers in qdio_stop_polling */
1209                 return 0;
1210         if (irq->is_qebsm) {
1211                 count = 1;
1212                 start_buf = q->first_to_check;
1213                 qdio_do_eqbs(q, &state, &start_buf, &count);
1214         } else
1215                 state = q->slsb.acc.val[q->first_to_check];
1216         if (state != SLSB_P_INPUT_PRIMED)
1217                 /* 
1218                  * nothing more to do if the next buffer is not PRIMED.
1219                  * note that we did a SYNC_MEMORY before, so a
1220                  * synchronization has taken place.
1221                  * we will return 0 below, as there is nothing to do
1222                  * (stop_polling is not necessary, as we have not been
1223                  * using the PROCESSING state)
1224                  */
1225                 return 0;
1226
1227         /* 
1228          * ok, the next input buffer is primed. that means that the device state
1229          * change indicator and adapter local summary are set, so we will find
1230          * it next time.
1231          * we will return 0 below, as there is nothing to do, except scheduling
1232          * ourselves for the next time. 
1233          */
1234         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1235         tiqdio_sched_tl();
1236         return 0;
1237 }
1238
1239 static int
1240 qdio_is_inbound_q_done(struct qdio_q *q)
1241 {
1242         int no_used;
1243         unsigned int start_buf, count;
1244         unsigned char state = 0;
1245         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
1246
1247 #ifdef CONFIG_QDIO_DEBUG
1248         char dbf_text[15];
1249 #endif
1250
1251         no_used=atomic_read(&q->number_of_buffers_used);
1252
1253         /* 
1254          * we need that one for synchronization with the adapter, as it
1255          * does a kind of PCI avoidance 
1256          */
1257         SYNC_MEMORY;
1258
1259         if (!no_used) {
1260                 QDIO_DBF_TEXT4(0,trace,"inqisdnA");
1261                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1262                 return 1;
1263         }
1264         if (irq->is_qebsm) {
1265                 count = 1;
1266                 start_buf = q->first_to_check;
1267                 qdio_do_eqbs(q, &state, &start_buf, &count);
1268         } else
1269                 state = q->slsb.acc.val[q->first_to_check];
1270         if (state == SLSB_P_INPUT_PRIMED) {
1271                 /* we got something to do */
1272                 QDIO_DBF_TEXT4(0,trace,"inqisntA");
1273                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1274                 return 0;
1275         }
1276
1277         /* on VM, we don't poll, so the q is always done here */
1278         if (q->siga_sync)
1279                 return 1;
1280         if (q->hydra_gives_outbound_pcis)
1281                 return 1;
1282
1283         /* 
1284          * at this point we know that inbound first_to_check
1285          * has (probably) not moved (see qdio_inbound_processing) 
1286          */
1287         if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) {
1288 #ifdef CONFIG_QDIO_DEBUG
1289                 QDIO_DBF_TEXT4(0,trace,"inqisdon");
1290                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1291                 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1292                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1293 #endif /* CONFIG_QDIO_DEBUG */
1294                 return 1;
1295         } else {
1296 #ifdef CONFIG_QDIO_DEBUG
1297                 QDIO_DBF_TEXT4(0,trace,"inqisntd");
1298                 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1299                 sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used);
1300                 QDIO_DBF_TEXT4(0,trace,dbf_text);
1301 #endif /* CONFIG_QDIO_DEBUG */
1302                 return 0;
1303         }
1304 }
1305
1306 static void
1307 qdio_kick_inbound_handler(struct qdio_q *q)
1308 {
1309         int count, start, end, real_end, i;
1310 #ifdef CONFIG_QDIO_DEBUG
1311         char dbf_text[15];
1312 #endif
1313
1314         QDIO_DBF_TEXT4(0,trace,"kickinh");
1315         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1316
1317         start=q->first_element_to_kick;
1318         real_end=q->first_to_check;
1319         end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);
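        /* end is the buffer right before the new frontier, i.e. the last one processed */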
1320  
1321         i=start;
1322         count=0;
1323         while (1) {
1324                 count++;
1325                 if (i==end)
1326                         break;
1327                 i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
1328         }
1329
1330 #ifdef CONFIG_QDIO_DEBUG
1331         sprintf(dbf_text,"s=%2xc=%2x",start,count);
1332         QDIO_DBF_TEXT4(0,trace,dbf_text);
1333 #endif /* CONFIG_QDIO_DEBUG */
1334
1335         if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
1336                 q->handler(q->cdev,
1337                            QDIO_STATUS_INBOUND_INT|q->error_status_flags,
1338                            q->qdio_error,q->siga_error,q->q_no,start,count,
1339                            q->int_parm);
1340
1341         /* for the next time: */
1342         q->first_element_to_kick=real_end;
1343         q->qdio_error=0;
1344         q->siga_error=0;
1345         q->error_status_flags=0;
1346
1347         qdio_perf_stat_inc(&perf_stats.inbound_cnt);
1348 }
1349
1350 static void
1351 __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1352 {
1353         struct qdio_irq *irq_ptr;
1354         struct qdio_q *oq;
1355         int i;
1356
1357         QDIO_DBF_TEXT4(0,trace,"iqinproc");
1358         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1359
1360         /* 
1361          * we first want to reserve the q, so that we know that we don't
1362          * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
1363          * be set 
1364          */
1365         if (unlikely(qdio_reserve_q(q))) {
1366                 qdio_release_q(q);
1367                 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
1368                 /* 
1369                  * as we might just be about to stop polling, we make
1370                  * sure that we check again at least once more 
1371                  */
1372                 tiqdio_sched_tl();
1373                 return;
1374         }
1375         qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs);
1376         if (unlikely(atomic_read(&q->is_in_shutdown))) {
1377                 qdio_unmark_q(q);
1378                 goto out;
1379         }
1380
1381         /* 
1382          * we reset spare_ind_was_set if the queue does not use the
1383          * spare indicator
1384          */
1385         if (spare_ind_was_set)
1386                 spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);
1387
1388         if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
1389                 goto out;
1390         /*
1391          * q->dev_st_chg_ind is the indicator, be it shared or not.
1392          * only clear it, if indicator is non-shared
1393          */
1394         if (!spare_ind_was_set)
1395                 tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);
1396
1397         if (q->hydra_gives_outbound_pcis) {
1398                 if (!q->siga_sync_done_on_thinints) {
1399                         SYNC_MEMORY_ALL;
1400                 } else if ((!q->siga_sync_done_on_outb_tis)&&
1401                          (q->hydra_gives_outbound_pcis)) {
1402                         SYNC_MEMORY_ALL_OUTB;
1403                 }
1404         } else {
1405                 SYNC_MEMORY;
1406         }
1407         /*
1408          * maybe we have to do work on our outbound queues... at least
1409          * we have to check the outbound-int-capable thinint-capable
1410          * queues
1411          */
1412         if (q->hydra_gives_outbound_pcis) {
1413                 irq_ptr = (struct qdio_irq*)q->irq_ptr;
1414                 for (i=0;i<irq_ptr->no_output_qs;i++) {
1415                         oq = irq_ptr->output_qs[i];
1416                         if (!qdio_is_outbound_q_done(oq)) {
1417                                 qdio_perf_stat_dec(&perf_stats.tl_runs);
1418                                 __qdio_outbound_processing(oq);
1419                         }
1420                 }
1421         }
1422
1423         if (!qdio_has_inbound_q_moved(q))
1424                 goto out;
1425
1426         qdio_kick_inbound_handler(q);
1427         if (tiqdio_is_inbound_q_done(q))
1428                 if (!qdio_stop_polling(q)) {
1429                         /* 
1430                          * we set the flags to get into the stuff next time,
1431                          * see also comment in qdio_stop_polling 
1432                          */
1433                         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1434                         tiqdio_sched_tl();
1435                 }
1436 out:
1437         qdio_release_q(q);
1438 }
1439
1440 static void
1441 tiqdio_inbound_processing(struct qdio_q *q)
1442 {
1443         __tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
1444 }
1445
1446 static void
1447 __qdio_inbound_processing(struct qdio_q *q)
1448 {
1449         int q_laps=0;
1450
1451         QDIO_DBF_TEXT4(0,trace,"qinproc");
1452         QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
1453
1454         if (unlikely(qdio_reserve_q(q))) {
1455                 qdio_release_q(q);
1456                 qdio_perf_stat_inc(&perf_stats.inbound_tl_runs_resched);
1457                 /* as we're sissies, we'll check next time */
1458                 if (likely(!atomic_read(&q->is_in_shutdown))) {
1459                         qdio_mark_q(q);
1460                         QDIO_DBF_TEXT4(0,trace,"busy,agn");
1461                 }
1462                 return;
1463         }
1464         qdio_perf_stat_inc(&perf_stats.inbound_tl_runs);
1465         qdio_perf_stat_inc(&perf_stats.tl_runs);
1466
1467 again:
1468         if (qdio_has_inbound_q_moved(q)) {
1469                 qdio_kick_inbound_handler(q);
1470                 if (!qdio_stop_polling(q)) {
1471                         q_laps++;
1472                         if (q_laps<QDIO_Q_LAPS) 
1473                                 goto again;
1474                 }
1475                 qdio_mark_q(q);
1476         } else {
1477                 if (!qdio_is_inbound_q_done(q)) 
1478                         /* means poll time is not yet over */
1479                         qdio_mark_q(q);
1480         }
1481
1482         qdio_release_q(q);
1483 }
1484
1485 static void
1486 qdio_inbound_processing(struct qdio_q *q)
1487 {
1488         __qdio_inbound_processing(q);
1489 }
1490
1491 /************************* MAIN ROUTINES *******************************/
1492
1493 #ifdef QDIO_USE_PROCESSING_STATE
1494 static int
1495 tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1496 {
1497         if (!q) {
1498                 tiqdio_sched_tl();
1499                 return 0;
1500         }
1501
1502         /* 
1503          * under VM, we have not used the PROCESSING state, so no
1504          * need to stop polling 
1505          */
1506         if (q->siga_sync)
1507                 return 2;
1508
1509         if (unlikely(qdio_reserve_q(q))) {
1510                 qdio_release_q(q);
1511                 qdio_perf_stat_inc(&perf_stats.inbound_thin_tl_runs_resched);
1512                 /* 
1513                  * as we might just be about to stop polling, we make
1514                  * sure that we check again at least once more 
1515                  */
1516                 
1517                 /* 
1518                  * sanity -- we'd get here without setting the
1519                  * dev st chg ind 
1520                  */
1521                 tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1522                 tiqdio_sched_tl();
1523                 return 0;
1524         }
1525         if (qdio_stop_polling(q)) {
1526                 qdio_release_q(q);
1527                 return 2;
1528         }               
1529         if (q_laps<QDIO_Q_LAPS-1) {
1530                 qdio_release_q(q);
1531                 return 3;
1532         }
1533         /* 
1534          * we set the flags to get into the stuff
1535          * next time, see also comment in qdio_stop_polling 
1536          */
1537         tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
1538         tiqdio_sched_tl();
1539         qdio_release_q(q);
1540         return 1;
1541         
1542 }
1543 #endif /* QDIO_USE_PROCESSING_STATE */
1544
1545 static void
1546 tiqdio_inbound_checks(void)
1547 {
1548         struct qdio_q *q;
1549         int spare_ind_was_set=0;
1550 #ifdef QDIO_USE_PROCESSING_STATE
1551         int q_laps=0;
1552 #endif /* QDIO_USE_PROCESSING_STATE */
1553
1554         QDIO_DBF_TEXT4(0,trace,"iqdinbck");
1555         QDIO_DBF_TEXT5(0,trace,"iqlocsum");
1556
1557 #ifdef QDIO_USE_PROCESSING_STATE
1558 again:
1559 #endif /* QDIO_USE_PROCESSING_STATE */
1560
1561         /* when the spare indicator is used and set, save that and clear it */
1562         if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
1563                 spare_ind_was_set = 1;
1564                 tiqdio_clear_summary_bit((__u32*)&spare_indicator);
1565         }
1566
1567         q=(struct qdio_q*)tiq_list;
1568         do {
1569                 if (!q)
1570                         break;
1571                 __tiqdio_inbound_processing(q, spare_ind_was_set);
1572                 q=(struct qdio_q*)q->list_next;
1573         } while (q!=(struct qdio_q*)tiq_list);
1574
1575 #ifdef QDIO_USE_PROCESSING_STATE
1576         q=(struct qdio_q*)tiq_list;
1577         do {
1578                 int ret;
1579
1580                 ret = tiqdio_reset_processing_state(q, q_laps);
1581                 switch (ret) {
1582                 case 0:
1583                         return;
1584                 case 1:
1585                         q_laps++;
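                        /* fall through: advance to the next queue just like case 2 */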
1586                 case 2:
1587                         q = (struct qdio_q*)q->list_next;
1588                         break;
1589                 default:
1590                         q_laps++;
1591                         goto again;
1592                 }
1593         } while (q!=(struct qdio_q*)tiq_list);
1594 #endif /* QDIO_USE_PROCESSING_STATE */
1595 }
1596
1597 static void
1598 tiqdio_tl(unsigned long data)
1599 {
1600         QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
1601
1602         qdio_perf_stat_inc(&perf_stats.tl_runs);
1603
1604         tiqdio_inbound_checks();
1605 }
1606
1607 /********************* GENERAL HELPER_ROUTINES ***********************/
1608
1609 static void
1610 qdio_release_irq_memory(struct qdio_irq *irq_ptr)
1611 {
1612         int i;
1613
1614         for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) {
1615                 if (!irq_ptr->input_qs[i])
1616                         goto next;
1617
1618                 kfree(irq_ptr->input_qs[i]->slib);
1619                 kfree(irq_ptr->input_qs[i]);
1620
1621 next:
1622                 if (!irq_ptr->output_qs[i])
1623                         continue;
1624
1625                 kfree(irq_ptr->output_qs[i]->slib);
1626                 kfree(irq_ptr->output_qs[i]);
1627
1628         }
1629         kfree(irq_ptr->qdr);
1630         free_page((unsigned long) irq_ptr);
1631 }
1632
1633 static void
1634 qdio_set_impl_params(struct qdio_irq *irq_ptr,
1635                      unsigned int qib_param_field_format,
1636                      /* pointer to 128 bytes or NULL, if no param field */
1637                      unsigned char *qib_param_field,
1638                      /* pointer to no_queues*128 words of data or NULL */
1639                      unsigned int no_input_qs,
1640                      unsigned int no_output_qs,
1641                      unsigned long *input_slib_elements,
1642                      unsigned long *output_slib_elements)
1643 {
1644         int i,j;
1645
1646         if (!irq_ptr)
1647                 return;
1648
1649         irq_ptr->qib.pfmt=qib_param_field_format;
1650         if (qib_param_field)
1651                 memcpy(irq_ptr->qib.parm,qib_param_field,
1652                        QDIO_MAX_BUFFERS_PER_Q);
1653
1654         if (input_slib_elements)
1655                 for (i=0;i<no_input_qs;i++) {
1656                         for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1657                                 irq_ptr->input_qs[i]->slib->slibe[j].parms=
1658                                         input_slib_elements[
1659                                                 i*QDIO_MAX_BUFFERS_PER_Q+j];
1660                 }
1661         if (output_slib_elements)
1662                 for (i=0;i<no_output_qs;i++) {
1663                         for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1664                                 irq_ptr->output_qs[i]->slib->slibe[j].parms=
1665                                         output_slib_elements[
1666                                                 i*QDIO_MAX_BUFFERS_PER_Q+j];
1667                 }
1668 }
1669
1670 static int
1671 qdio_alloc_qs(struct qdio_irq *irq_ptr,
1672               int no_input_qs, int no_output_qs)
1673 {
1674         int i;
1675         struct qdio_q *q;
1676         int result=-ENOMEM;
1677
1678         for (i=0;i<no_input_qs;i++) {
1679                 q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
1680
1681                 if (!q) {
1682                         QDIO_PRINT_ERR("kzalloc of q failed!\n");
1683                         goto out;
1684                 }
1685
1686                 q->slib = kmalloc(PAGE_SIZE, GFP_KERNEL);
1687                 if (!q->slib) {
1688                         QDIO_PRINT_ERR("kmalloc of slib failed!\n");
1689                         goto out;
1690                 }
1691
1692                 irq_ptr->input_qs[i]=q;
1693         }
1694
1695         for (i=0;i<no_output_qs;i++) {
1696                 q = kzalloc(sizeof(struct qdio_q), GFP_KERNEL);
1697
1698                 if (!q) {
1699                         goto out;
1700                 }
1701
1702                 q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL);
1703                 if (!q->slib) {
1704                         QDIO_PRINT_ERR("kmalloc of slib failed!\n");
1705                         goto out;
1706                 }
1707
1708                 irq_ptr->output_qs[i]=q;
1709         }
1710
1711         result=0;
1712 out:
1713         return result;
1714 }
1715
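     /*
      * (Re)initialize the previously allocated queue structures: copy in the
      * caller's SBAL pointers, wire up the SLIB/SL/SLSB areas and the per
      * queue tasklets, and (unless QEBSM is used) set every buffer to the
      * *_NOT_INIT state.
      */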
1716 static void
1717 qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
1718              int no_input_qs, int no_output_qs,
1719              qdio_handler_t *input_handler,
1720              qdio_handler_t *output_handler,
1721              unsigned long int_parm,int q_format,
1722              unsigned long flags,
1723              void **inbound_sbals_array,
1724              void **outbound_sbals_array)
1725 {
1726         struct qdio_q *q;
1727         int i,j;
1728         char dbf_text[20]; /* see qdio_initialize */
1729         void *ptr;
1730         int available;
1731
1732         sprintf(dbf_text,"qfqs%4x",cdev->private->schid.sch_no);
1733         QDIO_DBF_TEXT0(0,setup,dbf_text);
1734         for (i=0;i<no_input_qs;i++) {
1735                 q=irq_ptr->input_qs[i];
1736
1737                 memset(q,0,((char*)&q->slib)-((char*)q));
1738                 sprintf(dbf_text,"in-q%4x",i);
1739                 QDIO_DBF_TEXT0(0,setup,dbf_text);
1740                 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1741
1742                 memset(q->slib,0,PAGE_SIZE);
1743                 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1744
1745                 available=0;
1746
1747                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1748                         q->sbal[j]=*(inbound_sbals_array++);
1749
1750                 q->queue_type=q_format;
1751                 q->int_parm=int_parm;
1752                 q->schid = irq_ptr->schid;
1753                 q->irq_ptr = irq_ptr;
1754                 q->cdev = cdev;
1755                 q->mask=1<<(31-i);
1756                 q->q_no=i;
1757                 q->is_input_q=1;
1758                 q->first_to_check=0;
1759                 q->last_move_ftc=0;
1760                 q->handler=input_handler;
1761                 q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;
1762
1763                 q->tasklet.data=(unsigned long)q;
1764                 /* q->is_thinint_q isn't valid at this time, but
1765                  * irq_ptr->is_thinint_irq is */
1766                 q->tasklet.func=(void(*)(unsigned long))
1767                         ((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
1768                          &qdio_inbound_processing);
1769
1770                 /* actually this is not used for inbound queues. yet. */
1771                 atomic_set(&q->busy_siga_counter,0);
1772                 q->timing.busy_start=0;
1773
1774 /*              for (j=0;j<QDIO_STATS_NUMBER;j++)
1775                         q->timing.last_transfer_times[j]=(qdio_get_micros()/
1776                                                           QDIO_STATS_NUMBER)*j;
1777                 q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
1778 */
1779
1780                 /* fill in slib */
1781                 if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
1782                                  (unsigned long)(q->slib);
1783                 q->slib->sla=(unsigned long)(q->sl);
1784                 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1785
1786                 /* fill in sl */
1787                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1788                         q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1789
1790                 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1791                 ptr=(void*)q->sl;
1792                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1793                 ptr=(void*)&q->slsb;
1794                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1795                 ptr=(void*)q->sbal[0];
1796                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1797
1798                 /* fill in slsb */
1799                 if (!irq_ptr->is_qebsm) {
1800                         unsigned int count = 1;
1801                         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1802                                 set_slsb(q, &j, SLSB_P_INPUT_NOT_INIT, &count);
1803                 }
1804         }
1805
1806         for (i=0;i<no_output_qs;i++) {
1807                 q=irq_ptr->output_qs[i];
1808                 memset(q,0,((char*)&q->slib)-((char*)q));
1809
1810                 sprintf(dbf_text,"outq%4x",i);
1811                 QDIO_DBF_TEXT0(0,setup,dbf_text);
1812                 QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));
1813
1814                 memset(q->slib,0,PAGE_SIZE);
1815                 q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);
1816
1817                 available=0;
1818                 
1819                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1820                         q->sbal[j]=*(outbound_sbals_array++);
1821
1822                 q->queue_type=q_format;
1823                 if ((q->queue_type == QDIO_IQDIO_QFMT) &&
1824                     (no_output_qs > 1) &&
1825                     (i == no_output_qs-1))
1826                         q->queue_type = QDIO_IQDIO_QFMT_ASYNCH;
1827                 q->int_parm=int_parm;
1828                 q->is_input_q=0;
1829                 q->schid = irq_ptr->schid;
1830                 q->cdev = cdev;
1831                 q->irq_ptr = irq_ptr;
1832                 q->mask=1<<(31-i);
1833                 q->q_no=i;
1834                 q->first_to_check=0;
1835                 q->last_move_ftc=0;
1836                 q->handler=output_handler;
1837
1838                 q->tasklet.data=(unsigned long)q;
1839                 q->tasklet.func=(void(*)(unsigned long))
1840                         &qdio_outbound_processing;
1841
1842                 atomic_set(&q->busy_siga_counter,0);
1843                 q->timing.busy_start=0;
1844
1845                 /* fill in slib */
1846                 if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
1847                                  (unsigned long)(q->slib);
1848                 q->slib->sla=(unsigned long)(q->sl);
1849                 q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);
1850
1851                 /* fill in sl */
1852                 for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
1853                         q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);
1854
1855                 QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
1856                 ptr=(void*)q->sl;
1857                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1858                 ptr=(void*)&q->slsb;
1859                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1860                 ptr=(void*)q->sbal[0];
1861                 QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
1862
1863                 /* fill in slsb */
1864                 if (!irq_ptr->is_qebsm) {
1865                         unsigned int count = 1;
1866                         for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
1867                                 set_slsb(q, &j, SLSB_P_OUTPUT_NOT_INIT, &count);
1868                 }
1869         }
1870 }
1871
1872 static void
1873 qdio_fill_thresholds(struct qdio_irq *irq_ptr,
1874                      unsigned int no_input_qs,
1875                      unsigned int no_output_qs,
1876                      unsigned int min_input_threshold,
1877                      unsigned int max_input_threshold,
1878                      unsigned int min_output_threshold,
1879                      unsigned int max_output_threshold)
1880 {
1881         int i;
1882         struct qdio_q *q;
1883
1884         for (i=0;i<no_input_qs;i++) {
1885                 q=irq_ptr->input_qs[i];
1886                 q->timing.threshold=max_input_threshold;
1887 /*              for (j=0;j<QDIO_STATS_CLASSES;j++) {
1888                         q->threshold_classes[j].threshold=
1889                                 min_input_threshold+
1890                                 (max_input_threshold-min_input_threshold)/
1891                                 QDIO_STATS_CLASSES;
1892                 }
1893                 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1894         }
1895         for (i=0;i<no_output_qs;i++) {
1896                 q=irq_ptr->output_qs[i];
1897                 q->timing.threshold=max_output_threshold;
1898 /*              for (j=0;j<QDIO_STATS_CLASSES;j++) {
1899                         q->threshold_classes[j].threshold=
1900                                 min_output_threshold+
1901                                 (max_output_threshold-min_output_threshold)/
1902                                 QDIO_STATS_CLASSES;
1903                 }
1904                 qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/
1905         }
1906 }
1907
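     /*
      * Adapter (thin) interrupt entry point: account the interrupt, clear
      * the global summary indicator unless SVS can be omitted, and check
      * all thinint queues for new work.
      */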
1908 static int
1909 tiqdio_thinint_handler(void)
1910 {
1911         QDIO_DBF_TEXT4(0,trace,"thin_int");
1912
1913         qdio_perf_stat_inc(&perf_stats.thinints);
1914
1915         /* SVS only when needed:
1916          * issue SVS to benefit from iqdio interrupt avoidance
1917          * (SVS clears AISOI)*/
1918         if (!omit_svs)
1919                 tiqdio_clear_global_summary();
1920
1921         tiqdio_inbound_checks();
1922         return 0;
1923 }
1924
1925 static void
1926 qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state)
1927 {
1928         int i;
1929 #ifdef CONFIG_QDIO_DEBUG
1930         char dbf_text[15];
1931
1932         QDIO_DBF_TEXT5(0,trace,"newstate");
1933         sprintf(dbf_text,"%4x%4x",irq_ptr->schid.sch_no,state);
1934         QDIO_DBF_TEXT5(0,trace,dbf_text);
1935 #endif /* CONFIG_QDIO_DEBUG */
1936
1937         irq_ptr->state=state;
1938         for (i=0;i<irq_ptr->no_input_qs;i++)
1939                 irq_ptr->input_qs[i]->state=state;
1940         for (i=0;i<irq_ptr->no_output_qs;i++)
1941                 irq_ptr->output_qs[i]->state=state;
1942         mb();
1943 }
1944
1945 static void
1946 qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb)
1947 {
1948         char dbf_text[15];
1949
1950         if (irb->esw.esw0.erw.cons) {
1951                 sprintf(dbf_text,"sens%4x",schid.sch_no);
1952                 QDIO_DBF_TEXT2(1,trace,dbf_text);
1953                 QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN);
1954
1955                 QDIO_PRINT_WARN("sense data available on qdio channel.\n");
1956                 QDIO_HEXDUMP16(WARN,"irb: ",irb);
1957                 QDIO_HEXDUMP16(WARN,"sense data: ",irb->ecw);
1958         }
1959                 
1960 }
1961
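     /*
      * A PCI (program controlled interruption) arrived: process all input
      * queues, via their tasklets if QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT is
      * set and directly otherwise, and, if the adapter also raises outbound
      * PCIs, kick every output queue that is not yet done.
      */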
1962 static void
1963 qdio_handle_pci(struct qdio_irq *irq_ptr)
1964 {
1965         int i;
1966         struct qdio_q *q;
1967
1968         qdio_perf_stat_inc(&perf_stats.pcis);
1969         for (i=0;i<irq_ptr->no_input_qs;i++) {
1970                 q=irq_ptr->input_qs[i];
1971                 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1972                         qdio_mark_q(q);
1973                 else {
1974                         __qdio_inbound_processing(q);
1975                 }
1976         }
1977         if (!irq_ptr->hydra_gives_outbound_pcis)
1978                 return;
1979         for (i=0;i<irq_ptr->no_output_qs;i++) {
1980                 q=irq_ptr->output_qs[i];
1981                 if (qdio_is_outbound_q_done(q))
1982                         continue;
1983                 qdio_perf_stat_dec(&perf_stats.tl_runs);
1984                 if (!irq_ptr->sync_done_on_outb_pcis)
1985                         SYNC_MEMORY;
1986                 __qdio_outbound_processing(q);
1987         }
1988 }
1989
1990 static void qdio_establish_handle_irq(struct ccw_device*, int, int);
1991
1992 static void
1993 qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
1994                            int cstat, int dstat)
1995 {
1996         struct qdio_irq *irq_ptr;
1997         struct qdio_q *q;
1998         char dbf_text[15];
1999
2000         irq_ptr = cdev->private->qdio_data;
2001
2002         QDIO_DBF_TEXT2(1, trace, "ick2");
2003         sprintf(dbf_text,"%s", cdev->dev.bus_id);
2004         QDIO_DBF_TEXT2(1,trace,dbf_text);
2005         QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
2006         QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2007         QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2008         QDIO_PRINT_ERR("received check condition on activate " \
2009                        "queues on device %s (cs=x%x, ds=x%x).\n",
2010                        cdev->dev.bus_id, cstat, dstat);
2011         if (irq_ptr->no_input_qs) {
2012                 q=irq_ptr->input_qs[0];
2013         } else if (irq_ptr->no_output_qs) {
2014                 q=irq_ptr->output_qs[0];
2015         } else {
2016                 QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
2017                                cdev->dev.bus_id);
2018                 goto omit_handler_call;
2019         }
2020         q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
2021                    QDIO_STATUS_LOOK_FOR_ERROR,
2022                    0,0,0,-1,-1,q->int_parm);
2023 omit_handler_call:
2024         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);
2025
2026 }
2027
2028 static void
2029 qdio_call_shutdown(struct work_struct *work)
2030 {
2031         struct ccw_device_private *priv;
2032         struct ccw_device *cdev;
2033
2034         priv = container_of(work, struct ccw_device_private, kick_work);
2035         cdev = priv->cdev;
2036         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2037         put_device(&cdev->dev);
2038 }
2039
2040 static void
2041 qdio_timeout_handler(struct ccw_device *cdev)
2042 {
2043         struct qdio_irq *irq_ptr;
2044         char dbf_text[15];
2045
2046         QDIO_DBF_TEXT2(0, trace, "qtoh");
2047         sprintf(dbf_text, "%s", cdev->dev.bus_id);
2048         QDIO_DBF_TEXT2(0, trace, dbf_text);
2049
2050         irq_ptr = cdev->private->qdio_data;
2051         sprintf(dbf_text, "state:%d", irq_ptr->state);
2052         QDIO_DBF_TEXT2(0, trace, dbf_text);
2053
2054         switch (irq_ptr->state) {
2055         case QDIO_IRQ_STATE_INACTIVE:
2056                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: timed out\n",
2057                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2058                 QDIO_DBF_TEXT2(1,setup,"eq:timeo");
2059                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2060                 break;
2061         case QDIO_IRQ_STATE_CLEANUP:
2062                 QDIO_PRINT_INFO("Did not get interrupt on cleanup, "
2063                                 "irq=0.%x.%x.\n",
2064                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2065                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2066                 break;
2067         case QDIO_IRQ_STATE_ESTABLISHED:
2068         case QDIO_IRQ_STATE_ACTIVE:
2069                 /* I/O has been terminated by common I/O layer. */
2070                 QDIO_PRINT_INFO("Queues on irq 0.%x.%04x killed by cio.\n",
2071                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2072                 QDIO_DBF_TEXT2(1, trace, "cio:term");
2073                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
2074                 if (get_device(&cdev->dev)) {
2075                         /* Can't call shutdown from interrupt context. */
2076                         PREPARE_WORK(&cdev->private->kick_work,
2077                                      qdio_call_shutdown);
2078                         queue_work(ccw_device_work, &cdev->private->kick_work);
2079                 }
2080                 break;
2081         default:
2082                 BUG();
2083         }
2084         ccw_device_set_timeout(cdev, 0);
2085         wake_up(&cdev->private->wait_q);
2086 }
2087
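     /*
      * Interrupt handler installed on the ccw device while it is used for
      * QDIO. Dispatches on the irq state: completion of the establish I/O,
      * confirmation of the cleanup I/O, or PCIs and check conditions while
      * the queues are established/active.
      */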
2088 static void
2089 qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
2090 {
2091         struct qdio_irq *irq_ptr;
2092         int cstat,dstat;
2093         char dbf_text[15];
2094
2095 #ifdef CONFIG_QDIO_DEBUG
2096         QDIO_DBF_TEXT4(0, trace, "qint");
2097         sprintf(dbf_text, "%s", cdev->dev.bus_id);
2098         QDIO_DBF_TEXT4(0, trace, dbf_text);
2099 #endif /* CONFIG_QDIO_DEBUG */
2100         
2101         if (!intparm) {
2102                 QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
2103                                   "handler, device %s\n", cdev->dev.bus_id);
2104                 return;
2105         }
2106
2107         irq_ptr = cdev->private->qdio_data;
2108         if (!irq_ptr) {
2109                 QDIO_DBF_TEXT2(1, trace, "uint");
2110                 sprintf(dbf_text,"%s", cdev->dev.bus_id);
2111                 QDIO_DBF_TEXT2(1,trace,dbf_text);
2112                 QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
2113                                cdev->dev.bus_id);
2114                 return;
2115         }
2116
2117         if (IS_ERR(irb)) {
2118                 /* Currently running i/o is in error. */
2119                 switch (PTR_ERR(irb)) {
2120                 case -EIO:
2121                         QDIO_PRINT_ERR("i/o error on device %s\n",
2122                                        cdev->dev.bus_id);
2123                         return;
2124                 case -ETIMEDOUT:
2125                         qdio_timeout_handler(cdev);
2126                         return;
2127                 default:
2128                         QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
2129                                        PTR_ERR(irb), cdev->dev.bus_id);
2130                         return;
2131                 }
2132         }
2133
2134         qdio_irq_check_sense(irq_ptr->schid, irb);
2135
2136 #ifdef CONFIG_QDIO_DEBUG
2137         sprintf(dbf_text, "state:%d", irq_ptr->state);
2138         QDIO_DBF_TEXT4(0, trace, dbf_text);
2139 #endif /* CONFIG_QDIO_DEBUG */
2140
2141         cstat = irb->scsw.cstat;
2142         dstat = irb->scsw.dstat;
2143
2144         switch (irq_ptr->state) {
2145         case QDIO_IRQ_STATE_INACTIVE:
2146                 qdio_establish_handle_irq(cdev, cstat, dstat);
2147                 break;
2148
2149         case QDIO_IRQ_STATE_CLEANUP:
2150                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2151                 break;
2152
2153         case QDIO_IRQ_STATE_ESTABLISHED:
2154         case QDIO_IRQ_STATE_ACTIVE:
2155                 if (cstat & SCHN_STAT_PCI) {
2156                         qdio_handle_pci(irq_ptr);
2157                         break;
2158                 }
2159
2160                 if ((cstat&~SCHN_STAT_PCI)||dstat) {
2161                         qdio_handle_activate_check(cdev, intparm, cstat, dstat);
2162                         break;
2163                 }
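                      /* neither PCI nor check condition pending: fall
                       * through to the error message below */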
2164         default:
2165                 QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
2166                                "device %s?!\n",
2167                                irq_ptr->state, cdev->dev.bus_id);
2168         }
2169         wake_up(&cdev->private->wait_q);
2170
2171 }
2172
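     /*
      * Issue SIGA-sync for one input or output queue of the device, unless
      * QEBSM is in use, in which case the SIGA is skipped.
      */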
2173 int
2174 qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
2175                  unsigned int queue_number)
2176 {
2177         int cc = 0;
2178         struct qdio_q *q;
2179         struct qdio_irq *irq_ptr;
2180         void *ptr;
2181 #ifdef CONFIG_QDIO_DEBUG
2182         char dbf_text[15]="SyncXXXX";
2183 #endif
2184
2185         irq_ptr = cdev->private->qdio_data;
2186         if (!irq_ptr)
2187                 return -ENODEV;
2188
2189 #ifdef CONFIG_QDIO_DEBUG
2190         *((int*)(&dbf_text[4])) = irq_ptr->schid.sch_no;
2191         QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2192         *((int*)(&dbf_text[0]))=flags;
2193         *((int*)(&dbf_text[4]))=queue_number;
2194         QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
2195 #endif /* CONFIG_QDIO_DEBUG */
2196
2197         if (flags&QDIO_FLAG_SYNC_INPUT) {
2198                 q=irq_ptr->input_qs[queue_number];
2199                 if (!q)
2200                         return -EINVAL;
2201                 if (!(irq_ptr->is_qebsm))
2202                         cc = do_siga_sync(q->schid, 0, q->mask);
2203         } else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
2204                 q=irq_ptr->output_qs[queue_number];
2205                 if (!q)
2206                         return -EINVAL;
2207                 if (!(irq_ptr->is_qebsm))
2208                         cc = do_siga_sync(q->schid, q->mask, 0);
2209         } else 
2210                 return -EINVAL;
2211
2212         ptr=&cc;
2213         if (cc)
2214                 QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));
2215
2216         return cc;
2217 }
2218
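     /*
      * Evaluate the QEBSM capability reported by the SSQD chsc: if QEBSM is
      * not usable, clear the enablement flag in the QIB; otherwise store the
      * subchannel token and reset the buffer states of all queues via
      * set_slsb().
      */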
2219 static void
2220 qdio_check_subchannel_qebsm(struct qdio_irq *irq_ptr, unsigned char qdioac,
2221                             unsigned long token)
2222 {
2223         struct qdio_q *q;
2224         int i;
2225         unsigned int count, start_buf;
2226         char dbf_text[15];
2227
2228         /*check if QEBSM is disabled */
2229         if (!(irq_ptr->is_qebsm) || !(qdioac & 0x01)) {
2230                 irq_ptr->is_qebsm  = 0;
2231                 irq_ptr->sch_token = 0;
2232                 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2233                 QDIO_DBF_TEXT0(0,setup,"noV=V");
2234                 return;
2235         }
2236         irq_ptr->sch_token = token;
2237         /*input queue*/
2238         for (i = 0; i < irq_ptr->no_input_qs;i++) {
2239                 q = irq_ptr->input_qs[i];
2240                 count = QDIO_MAX_BUFFERS_PER_Q;
2241                 start_buf = 0;
2242                 set_slsb(q, &start_buf, SLSB_P_INPUT_NOT_INIT, &count);
2243         }
2244         sprintf(dbf_text,"V=V:%2x",irq_ptr->is_qebsm);
2245         QDIO_DBF_TEXT0(0,setup,dbf_text);
2246         sprintf(dbf_text,"%8lx",irq_ptr->sch_token);
2247         QDIO_DBF_TEXT0(0,setup,dbf_text);
2248         /*output queue*/
2249         for (i = 0; i < irq_ptr->no_output_qs; i++) {
2250                 q = irq_ptr->output_qs[i];
2251                 count = QDIO_MAX_BUFFERS_PER_Q;
2252                 start_buf = 0;
2253                 set_slsb(q, &start_buf, SLSB_P_OUTPUT_NOT_INIT, &count);
2254         }
2255 }
2256
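     /*
      * Issue the "store subchannel QDIO data" chsc (command code 0x0024) to
      * find out which SIGA instructions are really needed for this subchannel
      * and whether QEBSM can be used. Any failure falls back to the safe
      * default of requiring all SIGAs and disabling QEBSM.
      */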
2257 static void
2258 qdio_get_ssqd_information(struct qdio_irq *irq_ptr)
2259 {
2260         int result;
2261         unsigned char qdioac;
2262         struct {
2263                 struct chsc_header request;
2264                 u16 reserved1:10;
2265                 u16 ssid:2;
2266                 u16 fmt:4;
2267                 u16 first_sch;
2268                 u16 reserved2;
2269                 u16 last_sch;
2270                 u32 reserved3;
2271                 struct chsc_header response;
2272                 u32 reserved4;
2273                 u8  flags;
2274                 u8  reserved5;
2275                 u16 sch;
2276                 u8  qfmt;
2277                 u8  parm;
2278                 u8  qdioac1;
2279                 u8  sch_class;
2280                 u8  reserved7;
2281                 u8  icnt;
2282                 u8  reserved8;
2283                 u8  ocnt;
2284                 u8 reserved9;
2285                 u8 mbccnt;
2286                 u16 qdioac2;
2287                 u64 sch_token;
2288         } *ssqd_area;
2289
2290         QDIO_DBF_TEXT0(0,setup,"getssqd");
2291         qdioac = 0;
2292         ssqd_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2293         if (!ssqd_area) {
2294                 QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \
2295                                 "SIGAs for sch x%x.\n", irq_ptr->schid.sch_no);
2296                 irq_ptr->qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2297                                   CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2298                                   CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2299                 irq_ptr->is_qebsm = 0;
2300                 irq_ptr->sch_token = 0;
2301                 irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM;
2302                 return;
2303         }
2304
2305         ssqd_area->request = (struct chsc_header) {
2306                 .length = 0x0010,
2307                 .code   = 0x0024,
2308         };
2309         ssqd_area->first_sch = irq_ptr->schid.sch_no;
2310         ssqd_area->last_sch = irq_ptr->schid.sch_no;
2311         ssqd_area->ssid = irq_ptr->schid.ssid;
2312         result = chsc(ssqd_area);
2313
2314         if (result) {
2315                 QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \
2316                                 "SIGAs for sch 0.%x.%x.\n", result,
2317                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2318                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2319                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2320                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2321                 irq_ptr->is_qebsm  = 0;
2322                 goto out;
2323         }
2324
2325         if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) {
2326                 QDIO_PRINT_WARN("response upon checking SIGA needs " \
2327                                 "is 0x%x. Using all SIGAs for sch 0.%x.%x.\n",
2328                                 ssqd_area->response.code,
2329                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2330                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2331                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2332                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */
2333                 irq_ptr->is_qebsm  = 0;
2334                 goto out;
2335         }
2336         if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) ||
2337             !(ssqd_area->flags & CHSC_FLAG_VALIDITY) ||
2338             (ssqd_area->sch != irq_ptr->schid.sch_no)) {
2339                 QDIO_PRINT_WARN("huh? problems checking out sch 0.%x.%x... " \
2340                                 "using all SIGAs.\n",
2341                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2342                 qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY |
2343                         CHSC_FLAG_SIGA_OUTPUT_NECESSARY |
2344                         CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */
2345                 irq_ptr->is_qebsm  = 0;
2346                 goto out;
2347         }
2348         qdioac = ssqd_area->qdioac1;
2349 out:
2350         qdio_check_subchannel_qebsm(irq_ptr, qdioac,
2351                                     ssqd_area->sch_token);
2352         mempool_free(ssqd_area, qdio_mempool_scssc);
2353         irq_ptr->qdioac = qdioac;
2354 }
2355
2356 static int
2357 tiqdio_check_chsc_availability(void)
2358 {
2359         char dbf_text[15];
2360
2361         if (!css_characteristics_avail)
2362                 return -EIO;
2363
2364         /* Check for bit 41. */
2365         if (!css_general_characteristics.aif) {
2366                 QDIO_PRINT_WARN("Adapter interruption facility not " \
2367                                 "installed.\n");
2368                 return -ENOENT;
2369         }
2370
2371         /* Check for bits 107 and 108. */
2372         if (!css_chsc_characteristics.scssc ||
2373             !css_chsc_characteristics.scsscf) {
2374                 QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
2375                                 "not available.\n");
2376                 return -ENOENT;
2377         }
2378
2379         /* Check for OSA/FCP thin interrupts (bit 67). */
2380         hydra_thinints = css_general_characteristics.aif_osa;
2381         sprintf(dbf_text,"hydrati%1x", hydra_thinints);
2382         QDIO_DBF_TEXT0(0,setup,dbf_text);
2383
2384 #ifdef CONFIG_64BIT
2385         /* Check for QEBSM support in general (bit 58). */
2386         is_passthrough = css_general_characteristics.qebsm;
2387 #endif
2388         sprintf(dbf_text,"cssQBS:%1x", is_passthrough);
2389         QDIO_DBF_TEXT0(0,setup,dbf_text);
2390
2391         /* Check for aif time delay disablement fac (bit 56). If installed,
2392          * omit svs even under lpar (good point by rick again) */
2393         omit_svs = css_general_characteristics.aif_tdd;
2394         sprintf(dbf_text,"omitsvs%1x", omit_svs);
2395         QDIO_DBF_TEXT0(0,setup,dbf_text);
2396         return 0;
2397 }
2398
2399
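     /*
      * Set (or, with reset_to_zero, clear) the addresses of the global
      * summary indicator and of this subchannel's device state change
      * indicator using the "set channel subsystem characteristics" chsc
      * (command code 0x0021). Only meaningful for thinint subchannels.
      */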
2400 static int
2401 tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero)
2402 {
2403         unsigned long real_addr_local_summary_bit;
2404         unsigned long real_addr_dev_st_chg_ind;
2405         void *ptr;
2406         char dbf_text[15];
2407
2408         unsigned int resp_code;
2409         int result;
2410
2411         struct {
2412                 struct chsc_header request;
2413                 u16 operation_code;
2414                 u16 reserved1;
2415                 u32 reserved2;
2416                 u32 reserved3;
2417                 u64 summary_indicator_addr;
2418                 u64 subchannel_indicator_addr;
2419                 u32 ks:4;
2420                 u32 kc:4;
2421                 u32 reserved4:21;
2422                 u32 isc:3;
2423                 u32 word_with_d_bit;
2424                 /* set to 0x10000000 to enable
2425                  * time delay disablement facility */
2426                 u32 reserved5;
2427                 struct subchannel_id schid;
2428                 u32 reserved6[1004];
2429                 struct chsc_header response;
2430                 u32 reserved7;
2431         } *scssc_area;
2432
2433         if (!irq_ptr->is_thinint_irq)
2434                 return -ENODEV;
2435
2436         if (reset_to_zero) {
2437                 real_addr_local_summary_bit=0;
2438                 real_addr_dev_st_chg_ind=0;
2439         } else {
2440                 real_addr_local_summary_bit=
2441                         virt_to_phys((volatile void *)indicators);
2442                 real_addr_dev_st_chg_ind=
2443                         virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind);
2444         }
2445
2446         scssc_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2447         if (!scssc_area) {
2448                 QDIO_PRINT_WARN("No memory for setting indicators on " \
2449                                 "subchannel 0.%x.%x.\n",
2450                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2451                 return -ENOMEM;
2452         }
2453         scssc_area->request = (struct chsc_header) {
2454                 .length = 0x0fe0,
2455                 .code   = 0x0021,
2456         };
2457         scssc_area->operation_code = 0;
2458
2459         scssc_area->summary_indicator_addr = real_addr_local_summary_bit;
2460         scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind;
2461         scssc_area->ks = QDIO_STORAGE_KEY;
2462         scssc_area->kc = QDIO_STORAGE_KEY;
2463         scssc_area->isc = TIQDIO_THININT_ISC;
2464         scssc_area->schid = irq_ptr->schid;
2465         /* enable the time delay disablement facility only if the
2466          * general characteristics say it is installed (see the
2467          * check below) */
2468         if (css_general_characteristics.aif_tdd)
2469                 scssc_area->word_with_d_bit = 0x10000000;
2470         else
2471                 QDIO_PRINT_WARN("Time delay disablement facility " \
2472                                 "not available\n");
2473
2474         result = chsc(scssc_area);
2475         if (result) {
2476                 QDIO_PRINT_WARN("could not set indicators on irq 0.%x.%x, " \
2477                                 "cc=%i.\n",
2478                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,result);
2479                 result = -EIO;
2480                 goto out;
2481         }
2482
2483         resp_code = scssc_area->response.code;
2484         if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2485                 QDIO_PRINT_WARN("response upon setting indicators " \
2486                                 "is 0x%x.\n",resp_code);
2487                 sprintf(dbf_text,"sidR%4x",resp_code);
2488                 QDIO_DBF_TEXT1(0,trace,dbf_text);
2489                 QDIO_DBF_TEXT1(0,setup,dbf_text);
2490                 ptr=&scssc_area->response;
2491                 QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN);
2492                 result = -EIO;
2493                 goto out;
2494         }
2495
2496         QDIO_DBF_TEXT2(0,setup,"setscind");
2497         QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit,
2498                       sizeof(unsigned long));
2499         QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long));
2500         result = 0;
2501 out:
2502         mempool_free(scssc_area, qdio_mempool_scssc);
2503         return result;
2504
2505 }
2506
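     /*
      * Program the adapter interruption delay target via chsc command
      * code 0x1027. Failures are logged but treated as non-critical.
      */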
2507 static int
2508 tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
2509 {
2510         unsigned int resp_code;
2511         int result;
2512         void *ptr;
2513         char dbf_text[15];
2514
2515         struct {
2516                 struct chsc_header request;
2517                 u16 operation_code;
2518                 u16 reserved1;
2519                 u32 reserved2;
2520                 u32 reserved3;
2521                 u32 reserved4[2];
2522                 u32 delay_target;
2523                 u32 reserved5[1009];
2524                 struct chsc_header response;
2525                 u32 reserved6;
2526         } *scsscf_area;
2527
2528         if (!irq_ptr->is_thinint_irq)
2529                 return -ENODEV;
2530
2531         scsscf_area = mempool_alloc(qdio_mempool_scssc, GFP_ATOMIC);
2532         if (!scsscf_area) {
2533                 QDIO_PRINT_WARN("No memory for setting delay target on " \
2534                                 "subchannel 0.%x.%x.\n",
2535                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
2536                 return -ENOMEM;
2537         }
2538         scsscf_area->request = (struct chsc_header) {
2539                 .length = 0x0fe0,
2540                 .code   = 0x1027,
2541         };
2542
2543         scsscf_area->delay_target = delay_target<<16;
2544
2545         result=chsc(scsscf_area);
2546         if (result) {
2547                 QDIO_PRINT_WARN("could not set delay target on irq 0.%x.%x, " \
2548                                 "cc=%i. Continuing.\n",
2549                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2550                                 result);
2551                 result = -EIO;
2552                 goto out;
2553         }
2554
2555         resp_code = scsscf_area->response.code;
2556         if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
2557                 QDIO_PRINT_WARN("response upon setting delay target " \
2558                                 "is 0x%x. Continuing.\n",resp_code);
2559                 sprintf(dbf_text,"sdtR%4x",resp_code);
2560                 QDIO_DBF_TEXT1(0,trace,dbf_text);
2561                 QDIO_DBF_TEXT1(0,setup,dbf_text);
2562                 ptr=&scsscf_area->response;
2563                 QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
2564         }
2565         QDIO_DBF_TEXT2(0,trace,"delytrgt");
2566         QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
2567         result = 0; /* not critical */
2568 out:
2569         mempool_free(scsscf_area, qdio_mempool_scssc);
2570         return result;
2571 }
2572
2573 int
2574 qdio_cleanup(struct ccw_device *cdev, int how)
2575 {
2576         struct qdio_irq *irq_ptr;
2577         char dbf_text[15];
2578         int rc;
2579
2580         irq_ptr = cdev->private->qdio_data;
2581         if (!irq_ptr)
2582                 return -ENODEV;
2583
2584         sprintf(dbf_text,"qcln%4x",irq_ptr->schid.sch_no);
2585         QDIO_DBF_TEXT1(0,trace,dbf_text);
2586         QDIO_DBF_TEXT0(0,setup,dbf_text);
2587
2588         rc = qdio_shutdown(cdev, how);
2589         if ((rc == 0) || (rc == -EINPROGRESS))
2590                 rc = qdio_free(cdev);
2591         return rc;
2592 }
2593
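     /*
      * Take the queues out of service: mark them as shutting down, kill the
      * tasklets, wait until no processing is in flight any more, then halt
      * or clear the subchannel (depending on 'how') and wait for the
      * resulting interrupt or a timeout. Finally reset the thinint
      * indicators and restore the original ccw interrupt handler.
      */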
2594 int
2595 qdio_shutdown(struct ccw_device *cdev, int how)
2596 {
2597         struct qdio_irq *irq_ptr;
2598         int i;
2599         int result = 0;
2600         int rc;
2601         unsigned long flags;
2602         int timeout;
2603         char dbf_text[15];
2604
2605         irq_ptr = cdev->private->qdio_data;
2606         if (!irq_ptr)
2607                 return -ENODEV;
2608
2609         down(&irq_ptr->setting_up_sema);
2610
2611         sprintf(dbf_text,"qsqs%4x",irq_ptr->schid.sch_no);
2612         QDIO_DBF_TEXT1(0,trace,dbf_text);
2613         QDIO_DBF_TEXT0(0,setup,dbf_text);
2614
2615         /* mark all qs as uninteresting */
2616         for (i=0;i<irq_ptr->no_input_qs;i++)
2617                 atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1);
2618
2619         for (i=0;i<irq_ptr->no_output_qs;i++)
2620                 atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1);
2621
2622         tasklet_kill(&tiqdio_tasklet);
2623
2624         for (i=0;i<irq_ptr->no_input_qs;i++) {
2625                 qdio_unmark_q(irq_ptr->input_qs[i]);
2626                 tasklet_kill(&irq_ptr->input_qs[i]->tasklet);
2627                 wait_event_interruptible_timeout(cdev->private->wait_q,
2628                                                  !atomic_read(&irq_ptr->
2629                                                               input_qs[i]->
2630                                                               use_count),
2631                                                  QDIO_NO_USE_COUNT_TIMEOUT);
2632                 if (atomic_read(&irq_ptr->input_qs[i]->use_count))
2633                         result=-EINPROGRESS;
2634         }
2635
2636         for (i=0;i<irq_ptr->no_output_qs;i++) {
2637                 tasklet_kill(&irq_ptr->output_qs[i]->tasklet);
2638                 wait_event_interruptible_timeout(cdev->private->wait_q,
2639                                                  !atomic_read(&irq_ptr->
2640                                                               output_qs[i]->
2641                                                               use_count),
2642                                                  QDIO_NO_USE_COUNT_TIMEOUT);
2643                 if (atomic_read(&irq_ptr->output_qs[i]->use_count))
2644                         result=-EINPROGRESS;
2645         }
2646
2647         /* cleanup subchannel */
2648         spin_lock_irqsave(get_ccwdev_lock(cdev),flags);
2649         if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) {
2650                 rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP);
2651                 timeout=QDIO_CLEANUP_CLEAR_TIMEOUT;
2652         } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) {
2653                 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2654                 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2655         } else { /* default behaviour */
2656                 rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP);
2657                 timeout=QDIO_CLEANUP_HALT_TIMEOUT;
2658         }
2659         if (rc == -ENODEV) {
2660                 /* No need to wait for device no longer present. */
2661                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2662                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2663         } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) {
2664                 /*
2665                  * Whoever put another handler there, has to cope with the
2666                  * interrupt theirself. Might happen if qdio_shutdown was
2667                  * called on already shutdown queues, but this shouldn't have
2668                  * bad side effects.
2669                  */
2670                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2671                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2672         } else if (rc == 0) {
2673                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP);
2674                 ccw_device_set_timeout(cdev, timeout);
2675                 spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags);
2676
2677                 wait_event(cdev->private->wait_q,
2678                            irq_ptr->state == QDIO_IRQ_STATE_INACTIVE ||
2679                            irq_ptr->state == QDIO_IRQ_STATE_ERR);
2680         } else {
2681                 QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for "
2682                                 "device %s\n", rc, cdev->dev.bus_id);
2683                 spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
2684                 result = rc;
2685                 goto out;
2686         }
2687         if (irq_ptr->is_thinint_irq) {
2688                 qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind);
2689                 tiqdio_set_subchannel_ind(irq_ptr,1); 
2690                 /* reset adapter interrupt indicators */
2691         }
2692
2693         /* exchange int handlers, if necessary */
2694         if ((void*)cdev->handler == (void*)qdio_handler)
2695                 cdev->handler=irq_ptr->original_int_handler;
2696
2697         /* Ignore errors. */
2698         qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
2699         ccw_device_set_timeout(cdev, 0);
2700 out:
2701         up(&irq_ptr->setting_up_sema);
2702         return result;
2703 }
2704
2705 int
2706 qdio_free(struct ccw_device *cdev)
2707 {
2708         struct qdio_irq *irq_ptr;
2709         char dbf_text[15];
2710
2711         irq_ptr = cdev->private->qdio_data;
2712         if (!irq_ptr)
2713                 return -ENODEV;
2714
2715         down(&irq_ptr->setting_up_sema);
2716
2717         sprintf(dbf_text,"qfqs%4x",irq_ptr->schid.sch_no);
2718         QDIO_DBF_TEXT1(0,trace,dbf_text);
2719         QDIO_DBF_TEXT0(0,setup,dbf_text);
2720
2721         cdev->private->qdio_data = NULL;
2722
2723         up(&irq_ptr->setting_up_sema);
2724
2725         qdio_release_irq_memory(irq_ptr);
2726         module_put(THIS_MODULE);
2727         return 0;
2728 }
2729
2730 static void
2731 qdio_allocate_do_dbf(struct qdio_initialize *init_data)
2732 {
2733         char dbf_text[20]; /* if a printf printed out more than 8 chars */
2734
2735         sprintf(dbf_text,"qfmt:%x",init_data->q_format);
2736         QDIO_DBF_TEXT0(0,setup,dbf_text);
2737         QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
2738         sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
2739         QDIO_DBF_TEXT0(0,setup,dbf_text);
2740         QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
2741         QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
2742         QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
2743         sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
2744         QDIO_DBF_TEXT0(0,setup,dbf_text);
2745         sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
2746         QDIO_DBF_TEXT0(0,setup,dbf_text);
2747         sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
2748         QDIO_DBF_TEXT0(0,setup,dbf_text);
2749         sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
2750         QDIO_DBF_TEXT0(0,setup,dbf_text);
2751         sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
2752         QDIO_DBF_TEXT0(0,setup,dbf_text);
2753         sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
2754         QDIO_DBF_TEXT0(0,setup,dbf_text);
2755         QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
2756         QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
2757         QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
2758         QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
2759         QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
2760         QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
2761 }
2762
2763 static void
2764 qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt)
2765 {
2766         irq_ptr->input_qs[i]->is_iqdio_q = iqfmt;
2767         irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2768
2769         irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib);
2770
2771         irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl);
2772
2773         irq_ptr->qdr->qdf0[i].slsba=
2774                 (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]);
2775
2776         irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY;
2777         irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY;
2778         irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY;
2779         irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY;
2780 }
2781
2782 static void
2783 qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i,
2784                                int j, int iqfmt)
2785 {
2786         irq_ptr->output_qs[i]->is_iqdio_q = iqfmt;
2787         irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq;
2788
2789         irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib);
2790
2791         irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl);
2792
2793         irq_ptr->qdr->qdf0[i+j].slsba=
2794                 (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]);
2795
2796         irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY;
2797         irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY;
2798         irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY;
2799         irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY;
2800 }
2801
2802
2803 static void
2804 qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr)
2805 {
2806         int i;
2807
2808         for (i=0;i<irq_ptr->no_input_qs;i++) {
2809                 irq_ptr->input_qs[i]->siga_sync=
2810                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2811                 irq_ptr->input_qs[i]->siga_in=
2812                         irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2813                 irq_ptr->input_qs[i]->siga_out=
2814                         irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2815                 irq_ptr->input_qs[i]->siga_sync_done_on_thinints=
2816                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2817                 irq_ptr->input_qs[i]->hydra_gives_outbound_pcis=
2818                         irq_ptr->hydra_gives_outbound_pcis;
2819                 irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis=
2820                         ((irq_ptr->qdioac&
2821                           (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2822                            CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2823                          (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2824                           CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2825
2826         }
2827 }
2828
2829 static void
2830 qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr)
2831 {
2832         int i;
2833
2834         for (i=0;i<irq_ptr->no_output_qs;i++) {
2835                 irq_ptr->output_qs[i]->siga_sync=
2836                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY;
2837                 irq_ptr->output_qs[i]->siga_in=
2838                         irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY;
2839                 irq_ptr->output_qs[i]->siga_out=
2840                         irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY;
2841                 irq_ptr->output_qs[i]->siga_sync_done_on_thinints=
2842                         irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS;
2843                 irq_ptr->output_qs[i]->hydra_gives_outbound_pcis=
2844                         irq_ptr->hydra_gives_outbound_pcis;
2845                 irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis=
2846                         ((irq_ptr->qdioac&
2847                           (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2848                            CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))==
2849                          (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS|
2850                           CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS));
2851
2852         }
2853 }
2854
2855 static int
2856 qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
2857                                     int dstat)
2858 {
2859         char dbf_text[15];
2860         struct qdio_irq *irq_ptr;
2861
2862         irq_ptr = cdev->private->qdio_data;
2863
2864         if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
2865                 sprintf(dbf_text,"ick1%4x",irq_ptr->schid.sch_no);
2866                 QDIO_DBF_TEXT2(1,trace,dbf_text);
2867                 QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
2868                 QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
2869                 QDIO_PRINT_ERR("received check condition on establish " \
2870                                "queues on irq 0.%x.%x (cs=x%x, ds=x%x).\n",
2871                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2872                                cstat,dstat);
2873                 qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
2874         }
2875         
2876         if (!(dstat & DEV_STAT_DEV_END)) {
2877                 QDIO_DBF_TEXT2(1,setup,"eq:no de");
2878                 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2879                 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2880                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: didn't get "
2881                                "device end: dstat=%02x, cstat=%02x\n",
2882                                irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
2883                                dstat, cstat);
2884                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2885                 return 1;
2886         }
2887
2888         if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
2889                 QDIO_DBF_TEXT2(1,setup,"eq:badio");
2890                 QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
2891                 QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
2892                 QDIO_PRINT_ERR("establish queues on irq 0.%x.%04x: got "
2893                                "the following devstat: dstat=%02x, "
2894                                "cstat=%02x\n", irq_ptr->schid.ssid,
2895                                irq_ptr->schid.sch_no, dstat, cstat);
2896                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
2897                 return 1;
2898         }
2899         return 0;
2900 }
2901
2902 static void
2903 qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat)
2904 {
2905         struct qdio_irq *irq_ptr;
2906         char dbf_text[15];
2907
2908         irq_ptr = cdev->private->qdio_data;
2909
2910         sprintf(dbf_text,"qehi%4x",cdev->private->schid.sch_no);
2911         QDIO_DBF_TEXT0(0,setup,dbf_text);
2912         QDIO_DBF_TEXT0(0,trace,dbf_text);
2913
2914         if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) {
2915                 ccw_device_set_timeout(cdev, 0);
2916                 return;
2917         }
2918
2919         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED);
2920         ccw_device_set_timeout(cdev, 0);
2921 }
2922
2923 int
2924 qdio_initialize(struct qdio_initialize *init_data)
2925 {
2926         int rc;
2927         char dbf_text[15];
2928
2929         sprintf(dbf_text,"qini%4x",init_data->cdev->private->schid.sch_no);
2930         QDIO_DBF_TEXT0(0,setup,dbf_text);
2931         QDIO_DBF_TEXT0(0,trace,dbf_text);
2932
2933         rc = qdio_allocate(init_data);
2934         if (rc == 0) {
2935                 rc = qdio_establish(init_data);
2936                 if (rc != 0)
2937                         qdio_free(init_data->cdev);
2938         }
2939
2940         return rc;
2941 }
2942
2943
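     /*
      * First half of qdio_initialize(): validate the caller's parameters and
      * allocate the irq structure (a zeroed page), the QDR (with GFP_DMA, as
      * the ccw data address is only 32 bits wide) and the queue structures.
      */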
2944 int
2945 qdio_allocate(struct qdio_initialize *init_data)
2946 {
2947         struct qdio_irq *irq_ptr;
2948         char dbf_text[15];
2949
2950         sprintf(dbf_text,"qalc%4x",init_data->cdev->private->schid.sch_no);
2951         QDIO_DBF_TEXT0(0,setup,dbf_text);
2952         QDIO_DBF_TEXT0(0,trace,dbf_text);
2953         if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2954              (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) ||
2955              ((init_data->no_input_qs) && (!init_data->input_handler)) ||
2956              ((init_data->no_output_qs) && (!init_data->output_handler)) )
2957                 return -EINVAL;
2958
2959         if (!init_data->input_sbal_addr_array)
2960                 return -EINVAL;
2961
2962         if (!init_data->output_sbal_addr_array)
2963                 return -EINVAL;
2964
2965         qdio_allocate_do_dbf(init_data);
2966
2967         /* create irq */
2968         irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2969
2970         QDIO_DBF_TEXT0(0,setup,"irq_ptr:");
2971         QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*));
2972
2973         if (!irq_ptr) {
2974                 QDIO_PRINT_ERR("allocation of irq_ptr failed!\n");
2975                 return -ENOMEM;
2976         }
2977
2978         init_MUTEX(&irq_ptr->setting_up_sema);
2979
2980         /* QDR must be in DMA area since CCW data address is only 32 bit */
2981         irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA);
2982         if (!(irq_ptr->qdr)) {
2983                 free_page((unsigned long) irq_ptr);
2984                 QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n");
2985                 return -ENOMEM;
2986         }
2987         QDIO_DBF_TEXT0(0,setup,"qdr:");
2988         QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*));
2989
2990         if (qdio_alloc_qs(irq_ptr,
2991                           init_data->no_input_qs,
2992                           init_data->no_output_qs)) {
2993                 qdio_release_irq_memory(irq_ptr);
2994                 return -ENOMEM;
2995         }
2996
2997         init_data->cdev->private->qdio_data = irq_ptr;
2998
2999         qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE);
3000
3001         return 0;
3002 }
3003
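/*
 * qdio_fill_irq - fill the previously allocated qdio_irq from init_data:
 * queue counts, thinint/QEBSM properties, thresholds, QDR and QIB
 * contents and the queue descriptors.  A thinint indicator is obtained if
 * needed, the EQUEUE/AQUEUE CIWs are fetched from the device (falling
 * back to the defaults set above), a module reference is taken and
 * qdio_handler is installed as the subchannel's interrupt handler, with
 * the original handler saved so it can be restored later.
 */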
3004 static int qdio_fill_irq(struct qdio_initialize *init_data)
3005 {
3006         int i;
3007         char dbf_text[15];
3008         struct ciw *ciw;
3009         int is_iqdio;
3010         struct qdio_irq *irq_ptr;
3011
3012         irq_ptr = init_data->cdev->private->qdio_data;
3013
3014         memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));
3015
3016         /* wipes qib.ac, required by ar7063 */
3017         memset(irq_ptr->qdr,0,sizeof(struct qdr));
3018
3019         irq_ptr->int_parm=init_data->int_parm;
3020
3021         irq_ptr->schid = ccw_device_get_subchannel_id(init_data->cdev);
3022         irq_ptr->no_input_qs=init_data->no_input_qs;
3023         irq_ptr->no_output_qs=init_data->no_output_qs;
3024
3025         if (init_data->q_format==QDIO_IQDIO_QFMT) {
3026                 irq_ptr->is_iqdio_irq=1;
3027                 irq_ptr->is_thinint_irq=1;
3028         } else {
3029                 irq_ptr->is_iqdio_irq=0;
3030                 irq_ptr->is_thinint_irq=hydra_thinints;
3031         }
3032         sprintf(dbf_text,"is_i_t%1x%1x",
3033                 irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
3034         QDIO_DBF_TEXT2(0,setup,dbf_text);
3035
3036         if (irq_ptr->is_thinint_irq) {
3037                 irq_ptr->dev_st_chg_ind = qdio_get_indicator();
3038                 QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
3039                 if (!irq_ptr->dev_st_chg_ind) {
3040                         QDIO_PRINT_WARN("no indicator location available " \
3041                                         "for irq 0.%x.%x\n",
3042                                         irq_ptr->schid.ssid, irq_ptr->schid.sch_no);
3043                         qdio_release_irq_memory(irq_ptr);
3044                         return -ENOBUFS;
3045                 }
3046         }
3047
3048         /* defaults */
3049         irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
3050         irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
3051         irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
3052         irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;
3053
3054         qdio_fill_qs(irq_ptr, init_data->cdev,
3055                      init_data->no_input_qs,
3056                      init_data->no_output_qs,
3057                      init_data->input_handler,
3058                      init_data->output_handler,init_data->int_parm,
3059                      init_data->q_format,init_data->flags,
3060                      init_data->input_sbal_addr_array,
3061                      init_data->output_sbal_addr_array);
3062
3063         if (!try_module_get(THIS_MODULE)) {
3064                 QDIO_PRINT_CRIT("try_module_get() failed!\n");
3065                 qdio_release_irq_memory(irq_ptr);
3066                 return -EINVAL;
3067         }
3068
3069         qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
3070                              init_data->no_output_qs,
3071                              init_data->min_input_threshold,
3072                              init_data->max_input_threshold,
3073                              init_data->min_output_threshold,
3074                              init_data->max_output_threshold);
3075
3076         /* fill in qdr */
3077         irq_ptr->qdr->qfmt=init_data->q_format;
3078         irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
3079         irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
3080         irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
3081         irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;
3082
3083         irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
3084         irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;
3085
3086         /* fill in qib */
3087         irq_ptr->is_qebsm = is_passthrough;
3088         if (irq_ptr->is_qebsm)
3089                 irq_ptr->qib.rflags |= QIB_RFLAGS_ENABLE_QEBSM;
3090
3091         irq_ptr->qib.qfmt=init_data->q_format;
3092         if (init_data->no_input_qs)
3093                 irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
3094         if (init_data->no_output_qs)
3095                 irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
3096         memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);
3097
3098         qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
3099                              init_data->qib_param_field,
3100                              init_data->no_input_qs,
3101                              init_data->no_output_qs,
3102                              init_data->input_slib_elements,
3103                              init_data->output_slib_elements);
3104
3105         /* first input descriptors, then output descriptors */
3106         is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
3107         for (i=0;i<init_data->no_input_qs;i++)
3108                 qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);
3109
3110         for (i=0;i<init_data->no_output_qs;i++)
3111                 qdio_allocate_fill_output_desc(irq_ptr, i,
3112                                                init_data->no_input_qs,
3113                                                is_iqdio);
3114
3115         /* qdr, qib, sls, slsbs, slibs, sbales filled. */
3116
3117         /* get qdio commands */
3118         ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
3119         if (!ciw) {
3120                 QDIO_DBF_TEXT2(1,setup,"no eq");
3121                 QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
3122                                 "Trying to use default.\n");
3123         } else
3124                 irq_ptr->equeue = *ciw;
3125         ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
3126         if (!ciw) {
3127                 QDIO_DBF_TEXT2(1,setup,"no aq");
3128                 QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
3129                                 "Trying to use default.\n");
3130         } else
3131                 irq_ptr->aqueue = *ciw;
3132
3133         /* Set new interrupt handler. */
3134         irq_ptr->original_int_handler = init_data->cdev->handler;
3135         init_data->cdev->handler = qdio_handler;
3136
3137         return 0;
3138 }
3139
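/*
 * qdio_establish - fill the qdio_irq via qdio_fill_irq(), set the thinint
 * subchannel indicator and delay target where applicable, issue the
 * establish-queues CCW under the ccwdev lock (retrying once on failure)
 * and wait until the interrupt handler moves the subchannel to
 * QDIO_IRQ_STATE_ESTABLISHED; on error the subchannel is shut down again.
 * Afterwards the SSQD information is read and the SIGA flags for the
 * input and output queues are derived from it.
 */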
3140 int
3141 qdio_establish(struct qdio_initialize *init_data)
3142 {
3143         struct qdio_irq *irq_ptr;
3144         unsigned long saveflags;
3145         int result, result2;
3146         struct ccw_device *cdev;
3147         char dbf_text[20];
3148
3149         cdev=init_data->cdev;
3150         irq_ptr = cdev->private->qdio_data;
3151         if (!irq_ptr)
3152                 return -EINVAL;
3153
3154         if (cdev->private->state != DEV_STATE_ONLINE)
3155                 return -EINVAL;
3156         
3157         down(&irq_ptr->setting_up_sema);
3158
3159         qdio_fill_irq(init_data);
3160
3161         /* the thinint CHSC stuff */
3162         if (irq_ptr->is_thinint_irq) {
3163
3164                 result = tiqdio_set_subchannel_ind(irq_ptr,0);
3165                 if (result) {
3166                         up(&irq_ptr->setting_up_sema);
3167                         qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3168                         return result;
3169                 }
3170                 tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET);
3171         }
3172
3173         sprintf(dbf_text,"qest%4x",cdev->private->schid.sch_no);
3174         QDIO_DBF_TEXT0(0,setup,dbf_text);
3175         QDIO_DBF_TEXT0(0,trace,dbf_text);
3176
3177         /* establish q */
3178         irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd;
3179         irq_ptr->ccw.flags=CCW_FLAG_SLI;
3180         irq_ptr->ccw.count=irq_ptr->equeue.count;
3181         irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr);
3182
3183         spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3184
3185         ccw_device_set_options_mask(cdev, 0);
3186         result=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
3187                                         QDIO_DOING_ESTABLISH,0, 0,
3188                                         QDIO_ESTABLISH_TIMEOUT);
3189         if (result) {
3190                 result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw,
3191                                                  QDIO_DOING_ESTABLISH,0,0,
3192                                                  QDIO_ESTABLISH_TIMEOUT);
3193                 sprintf(dbf_text,"eq:io%4x",result);
3194                 QDIO_DBF_TEXT2(1,setup,dbf_text);
3195                 if (result2) {
3196                         sprintf(dbf_text,"eq:io%4x",result2);
3197                         QDIO_DBF_TEXT2(1,setup,dbf_text);
3198                 }
3199                 QDIO_PRINT_WARN("establish queues on irq 0.%x.%04x: do_IO " \
3200                                 "returned %i, next try returned %i\n",
3201                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3202                                 result, result2);
3203                 result=result2;
3204                 if (result)
3205                         ccw_device_set_timeout(cdev, 0);
3206         }
3207
3208         spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3209
3210         if (result) {
3211                 up(&irq_ptr->setting_up_sema);
3212                 qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR);
3213                 return result;
3214         }
3215         
3216         /* Timeout is cared for already by using ccw_device_start_timeout(). */
3217         wait_event_interruptible(cdev->private->wait_q,
3218                  irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED ||
3219                  irq_ptr->state == QDIO_IRQ_STATE_ERR);
3220
3221         if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED)
3222                 result = 0;
3223         else {
3224                 up(&irq_ptr->setting_up_sema);
3225                 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3226                 return -EIO;
3227         }
3228
3229         qdio_get_ssqd_information(irq_ptr);
3230         /* if this gets set once, we're running under VM and can omit SVSes */
3231         if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY)
3232                 omit_svs=1;
3233
3234         sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac);
3235         QDIO_DBF_TEXT2(0,setup,dbf_text);
3236
3237         sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac);
3238         QDIO_DBF_TEXT2(0,setup,dbf_text);
3239
3240         irq_ptr->hydra_gives_outbound_pcis=
3241                 irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED;
3242         irq_ptr->sync_done_on_outb_pcis=
3243                 irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS;
3244
3245         qdio_initialize_set_siga_flags_input(irq_ptr);
3246         qdio_initialize_set_siga_flags_output(irq_ptr);
3247
3248         up(&irq_ptr->setting_up_sema);
3249
3250         return result;
3251         
3252 }
3253
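/*
 * qdio_activate - issue the activate-queues CCW for an established
 * subchannel (retrying once without DOIO_DENY_PREFETCH), mark the thinint
 * input queues, honour QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT and wait with
 * QDIO_ACTIVATE_TIMEOUT to see whether activation fails (state STOPPED or
 * ERR).  Otherwise the subchannel is set to QDIO_IRQ_STATE_ACTIVE.
 */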
3254 int
3255 qdio_activate(struct ccw_device *cdev, int flags)
3256 {
3257         struct qdio_irq *irq_ptr;
3258         int i,result=0,result2;
3259         unsigned long saveflags;
3260         char dbf_text[20]; /* see qdio_initialize */
3261
3262         irq_ptr = cdev->private->qdio_data;
3263         if (!irq_ptr)
3264                 return -ENODEV;
3265
3266         if (cdev->private->state != DEV_STATE_ONLINE)
3267                 return -EINVAL;
3268
3269         down(&irq_ptr->setting_up_sema);
3270         if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) {
3271                 result=-EBUSY;
3272                 goto out;
3273         }
3274
3275         sprintf(dbf_text,"qact%4x", irq_ptr->schid.sch_no);
3276         QDIO_DBF_TEXT2(0,setup,dbf_text);
3277         QDIO_DBF_TEXT2(0,trace,dbf_text);
3278
3279         /* activate q */
3280         irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd;
3281         irq_ptr->ccw.flags=CCW_FLAG_SLI;
3282         irq_ptr->ccw.count=irq_ptr->aqueue.count;
3283         irq_ptr->ccw.cda=QDIO_GET_ADDR(0);
3284
3285         spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags);
3286
3287         ccw_device_set_timeout(cdev, 0);
3288         ccw_device_set_options(cdev, CCWDEV_REPORT_ALL);
3289         result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE,
3290                                 0, DOIO_DENY_PREFETCH);
3291         if (result) {
3292                 result2=ccw_device_start(cdev,&irq_ptr->ccw,
3293                                          QDIO_DOING_ACTIVATE,0,0);
3294                 sprintf(dbf_text,"aq:io%4x",result);
3295                 QDIO_DBF_TEXT2(1,setup,dbf_text);
3296                 if (result2) {
3297                         sprintf(dbf_text,"aq:io%4x",result2);
3298                         QDIO_DBF_TEXT2(1,setup,dbf_text);
3299                 }
3300                 QDIO_PRINT_WARN("activate queues on irq 0.%x.%04x: do_IO " \
3301                                 "returned %i, next try returned %i\n",
3302                                 irq_ptr->schid.ssid, irq_ptr->schid.sch_no,
3303                                 result, result2);
3304                 result=result2;
3305         }
3306
3307         spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags);
3308         if (result)
3309                 goto out;
3310
3311         for (i=0;i<irq_ptr->no_input_qs;i++) {
3312                 if (irq_ptr->is_thinint_irq) {
3313                         /*
3314                          * that way we know that, if we get interrupted by
3315                          * tiqdio_inbound_processing, qdio_unmark_q will
3316                          * not be called
3317                          */
3318                         qdio_reserve_q(irq_ptr->input_qs[i]);
3319                         qdio_mark_tiq(irq_ptr->input_qs[i]);
3320                         qdio_release_q(irq_ptr->input_qs[i]);
3321                 }
3322         }
3323
3324         if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) {
3325                 for (i=0;i<irq_ptr->no_input_qs;i++) {
3326                         irq_ptr->input_qs[i]->is_input_q|=
3327                                 QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT;
3328                 }
3329         }
3330
3331         wait_event_interruptible_timeout(cdev->private->wait_q,
3332                                          ((irq_ptr->state ==
3333                                           QDIO_IRQ_STATE_STOPPED) ||
3334                                           (irq_ptr->state ==
3335                                            QDIO_IRQ_STATE_ERR)),
3336                                          QDIO_ACTIVATE_TIMEOUT);
3337
3338         switch (irq_ptr->state) {
3339         case QDIO_IRQ_STATE_STOPPED:
3340         case QDIO_IRQ_STATE_ERR:
3341                 up(&irq_ptr->setting_up_sema);
3342                 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
3343                 down(&irq_ptr->setting_up_sema);
3344                 result = -EIO;
3345                 break;
3346         default:
3347                 qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE);
3348                 result = 0;
3349         }
3350  out:
3351         up(&irq_ptr->setting_up_sema);
3352
3353         return result;
3354 }
3355
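/*
 * The two fill helpers below hand buffers (back) to the adapter by
 * setting the SLSB entries starting at qidx: input buffers become
 * SLSB_CU_INPUT_EMPTY, output buffers SLSB_CU_OUTPUT_PRIMED.  With QEBSM
 * set_slsb() can mark several buffers per call; otherwise the ring is
 * walked one buffer at a time, wrapping at QDIO_MAX_BUFFERS_PER_Q.
 */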
3356 /* buffers filled forwards again to make Rick happy */
3357 static void
3358 qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx,
3359                         unsigned int count, struct qdio_buffer *buffers)
3360 {
3361         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3362         int tmp = 0;
3363
3364         qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3365         if (irq->is_qebsm) {
3366                 while (count) {
3367                         tmp = set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3368                         if (!tmp)
3369                                 return;
3370                 }
3371                 return;
3372         }
3373         for (;;) {
3374                 set_slsb(q, &qidx, SLSB_CU_INPUT_EMPTY, &count);
3375                 count--;
3376                 if (!count) break;
3377                 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3378         }
3379 }
3380
3381 static void
3382 qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx,
3383                          unsigned int count, struct qdio_buffer *buffers)
3384 {
3385         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3386         int tmp = 0;
3387
3388         qidx &= (QDIO_MAX_BUFFERS_PER_Q - 1);
3389         if (irq->is_qebsm) {
3390                 while (count) {
3391                         tmp = set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3392                         if (!tmp)
3393                                 return;
3394                 }
3395                 return;
3396         }
3397
3398         for (;;) {
3399                 set_slsb(q, &qidx, SLSB_CU_OUTPUT_PRIMED, &count);
3400                 count--;
3401                 if (!count) break;
3402                 qidx = (qidx + 1) & (QDIO_MAX_BUFFERS_PER_Q - 1);
3403         }
3404 }
3405
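/*
 * do_qdio_handle_inbound - give count empty buffers starting at qidx back
 * to the adapter.  Only if the queue was previously empty (and the caller
 * did not pass QDIO_FLAG_DONT_SIGA) is a SIGA-input issued, if required,
 * and the queue marked for the inbound tasklet.
 */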
3406 static void
3407 do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
3408                        unsigned int qidx, unsigned int count,
3409                        struct qdio_buffer *buffers)
3410 {
3411         int used_elements;
3412
3413         /* This is the inbound handling of queues */
3414         used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3415         
3416         qdio_do_qdio_fill_input(q,qidx,count,buffers);
3417         
3418         if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
3419             (callflags&QDIO_FLAG_UNDER_INTERRUPT))
3420                 atomic_xchg(&q->polling,0);
3421         
3422         if (used_elements) 
3423                 return;
3424         if (callflags&QDIO_FLAG_DONT_SIGA)
3425                 return;
3426         if (q->siga_in) {
3427                 int result;
3428                 
3429                 result=qdio_siga_input(q);
3430                 if (result) {
3431                         if (q->siga_error)
3432                                 q->error_status_flags|=
3433                                         QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
3434                         q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
3435                         q->siga_error=result;
3436                 }
3437         }
3438                 
3439         qdio_mark_q(q);
3440 }
3441
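/*
 * do_qdio_handle_outbound - hand count primed buffers starting at qidx to
 * the adapter.  iqdio queues get one SIGA per SBAL; for other queues the
 * SLSB state of the previously filled buffer decides whether the SIGA can
 * be skipped ("fast requeue").  Outbound processing is kicked in both
 * cases so the buffers are reaped in time.
 */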
3442 static void
3443 do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3444                         unsigned int qidx, unsigned int count,
3445                         struct qdio_buffer *buffers)
3446 {
3447         int used_elements;
3448         unsigned int cnt, start_buf;
3449         unsigned char state = 0;
3450         struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3451
3452         /* This is the outbound handling of queues */
3453         qdio_do_qdio_fill_output(q,qidx,count,buffers);
3454
3455         used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3456
3457         if (callflags&QDIO_FLAG_DONT_SIGA) {
3458                 qdio_perf_stat_inc(&perf_stats.outbound_cnt);
3459                 return;
3460         }
3461         if (q->is_iqdio_q) {
3462                 /* one siga for every sbal */
3463                 while (count--)
3464                         qdio_kick_outbound_q(q);
3465                         
3466                 __qdio_outbound_processing(q);
3467         } else {
3468                 /* under VM, we do a SIGA sync unconditionally */
3469                 SYNC_MEMORY;
3470                 else {
3471                         /*
3472                          * w/o shadow queues (this is the else branch of
3473                          * the unbraced if hidden in the SYNC_MEMORY
3474                          * macro :-/ ), we try to fast-requeue buffers
3475                          */
3476                         if (irq->is_qebsm) {
3477                                 cnt = 1;
3478                                 start_buf = ((qidx+QDIO_MAX_BUFFERS_PER_Q-1) &
3479                                              (QDIO_MAX_BUFFERS_PER_Q-1));
3480                                 qdio_do_eqbs(q, &state, &start_buf, &cnt);
3481                         } else
3482                                 state = q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
3483                                         &(QDIO_MAX_BUFFERS_PER_Q-1) ];
3484                         if (state != SLSB_CU_OUTPUT_PRIMED) {
3485                                 qdio_kick_outbound_q(q);
3486                         } else {
3487                                 QDIO_DBF_TEXT3(0,trace, "fast-req");
3488                                 qdio_perf_stat_inc(&perf_stats.fast_reqs);
3489                         }
3490                 }
3491                 /*
3492                  * only marking the q could take too long, as the
3493                  * upper layer module could do a lot of traffic in
3494                  * that time
3495                  */
3496                 __qdio_outbound_processing(q);
3497         }
3498
3499         qdio_perf_stat_inc(&perf_stats.outbound_cnt);
3500 }
3501
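/*
 * do_QDIO - main entry point for passing buffers between the upper-layer
 * driver and the adapter.  callflags selects the direction
 * (QDIO_FLAG_SYNC_INPUT or QDIO_FLAG_SYNC_OUTPUT, optionally or'ed with
 * QDIO_FLAG_DONT_SIGA); qidx/count describe the buffers within queue
 * queue_number.  The subchannel must be in QDIO_IRQ_STATE_ACTIVE, i.e.
 * qdio_activate() must have succeeded before.
 */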
3502 /* count must be 1 in iqdio */
3503 int
3504 do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3505         unsigned int queue_number, unsigned int qidx,
3506         unsigned int count,struct qdio_buffer *buffers)
3507 {
3508         struct qdio_irq *irq_ptr;
3509 #ifdef CONFIG_QDIO_DEBUG
3510         char dbf_text[20];
3511
3512         sprintf(dbf_text,"doQD%04x",cdev->private->schid.sch_no);
3513         QDIO_DBF_TEXT3(0,trace,dbf_text);
3514 #endif /* CONFIG_QDIO_DEBUG */
3515
3516         if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) ||
3517              (count>QDIO_MAX_BUFFERS_PER_Q) ||
3518              (queue_number>QDIO_MAX_QUEUES_PER_IRQ) )
3519                 return -EINVAL;
3520
3521         if (count==0)
3522                 return 0;
3523
3524         irq_ptr = cdev->private->qdio_data;
3525         if (!irq_ptr)
3526                 return -ENODEV;
3527
3528 #ifdef CONFIG_QDIO_DEBUG
3529         if (callflags&QDIO_FLAG_SYNC_INPUT)
3530                 QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number],
3531                               sizeof(void*));
3532         else
3533                 QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number],
3534                               sizeof(void*));
3535         sprintf(dbf_text,"flag%04x",callflags);
3536         QDIO_DBF_TEXT3(0,trace,dbf_text);
3537         sprintf(dbf_text,"qi%02xct%02x",qidx,count);
3538         QDIO_DBF_TEXT3(0,trace,dbf_text);
3539 #endif /* CONFIG_QDIO_DEBUG */
3540
3541         if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE)
3542                 return -EBUSY;
3543
3544         if (callflags&QDIO_FLAG_SYNC_INPUT)
3545                 do_qdio_handle_inbound(irq_ptr->input_qs[queue_number],
3546                                        callflags, qidx, count, buffers);
3547         else if (callflags&QDIO_FLAG_SYNC_OUTPUT)
3548                 do_qdio_handle_outbound(irq_ptr->output_qs[queue_number],
3549                                         callflags, qidx, count, buffers);
3550         else {
3551                 QDIO_DBF_TEXT3(1,trace,"doQD:inv");
3552                 return -EINVAL;
3553         }
3554         return 0;
3555 }
3556
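/*
 * read_proc handler for the QDIO_PERF /proc entry: the whole statistics
 * output fits into the single 4k buffer the proc layer hands us, so
 * everything is emitted on the first read and non-zero offsets return 0.
 */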
3557 static int
3558 qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3559                         int buffer_length, int *eof, void *data)
3560 {
3561         int c=0;
3562
3563         /* we are always called with buffer_length=4k, so we can
3564            deliver it all on the first read */
3565         if (offset>0)
3566                 return 0;
3567
3568 #define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
3569 #ifdef CONFIG_64BIT
3570         _OUTP_IT("Number of tasklet runs (total)                  : %li\n",
3571                  (long)atomic64_read(&perf_stats.tl_runs));
3572         _OUTP_IT("Inbound tasklet runs      tried/retried         : %li/%li\n",
3573                  (long)atomic64_read(&perf_stats.inbound_tl_runs),
3574                  (long)atomic64_read(&perf_stats.inbound_tl_runs_resched));
3575         _OUTP_IT("Inbound-thin tasklet runs tried/retried         : %li/%li\n",
3576                  (long)atomic64_read(&perf_stats.inbound_thin_tl_runs),
3577                  (long)atomic64_read(&perf_stats.inbound_thin_tl_runs_resched));
3578         _OUTP_IT("Outbound tasklet runs     tried/retried         : %li/%li\n",
3579                  (long)atomic64_read(&perf_stats.outbound_tl_runs),
3580                  (long)atomic64_read(&perf_stats.outbound_tl_runs_resched));
3581         _OUTP_IT("\n");
3582         _OUTP_IT("Number of SIGA sync's issued                    : %li\n",
3583                  (long)atomic64_read(&perf_stats.siga_syncs));
3584         _OUTP_IT("Number of SIGA in's issued                      : %li\n",
3585                  (long)atomic64_read(&perf_stats.siga_ins));
3586         _OUTP_IT("Number of SIGA out's issued                     : %li\n",
3587                  (long)atomic64_read(&perf_stats.siga_outs));
3588         _OUTP_IT("Number of PCIs caught                           : %li\n",
3589                  (long)atomic64_read(&perf_stats.pcis));
3590         _OUTP_IT("Number of adapter interrupts caught             : %li\n",
3591                  (long)atomic64_read(&perf_stats.thinints));
3592         _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %li\n",
3593                  (long)atomic64_read(&perf_stats.fast_reqs));
3594         _OUTP_IT("\n");
3595         _OUTP_IT("Number of inbound transfers                     : %li\n",
3596                  (long)atomic64_read(&perf_stats.inbound_cnt));
3597         _OUTP_IT("Number of do_QDIOs outbound                     : %li\n",
3598                  (long)atomic64_read(&perf_stats.outbound_cnt));
3599 #else /* CONFIG_64BIT */
3600         _OUTP_IT("Number of tasklet runs (total)                  : %i\n",
3601                  atomic_read(&perf_stats.tl_runs));
3602         _OUTP_IT("Inbound tasklet runs      tried/retried         : %i/%i\n",
3603                  atomic_read(&perf_stats.inbound_tl_runs),
3604                  atomic_read(&perf_stats.inbound_tl_runs_resched));
3605         _OUTP_IT("Inbound-thin tasklet runs tried/retried         : %i/%i\n",
3606                  atomic_read(&perf_stats.inbound_thin_tl_runs),
3607                  atomic_read(&perf_stats.inbound_thin_tl_runs_resched));
3608         _OUTP_IT("Outbound tasklet runs     tried/retried         : %i/%i\n",
3609                  atomic_read(&perf_stats.outbound_tl_runs),
3610                  atomic_read(&perf_stats.outbound_tl_runs_resched));
3611         _OUTP_IT("\n");
3612         _OUTP_IT("Number of SIGA sync's issued                    : %i\n",
3613                  atomic_read(&perf_stats.siga_syncs));
3614         _OUTP_IT("Number of SIGA in's issued                      : %i\n",
3615                  atomic_read(&perf_stats.siga_ins));
3616         _OUTP_IT("Number of SIGA out's issued                     : %i\n",
3617                  atomic_read(&perf_stats.siga_outs));
3618         _OUTP_IT("Number of PCIs caught                           : %i\n",
3619                  atomic_read(&perf_stats.pcis));
3620         _OUTP_IT("Number of adapter interrupts caught             : %i\n",
3621                  atomic_read(&perf_stats.thinints));
3622         _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %i\n",
3623                  atomic_read(&perf_stats.fast_reqs));
3624         _OUTP_IT("\n");
3625         _OUTP_IT("Number of inbound transfers                     : %i\n",
3626                  atomic_read(&perf_stats.inbound_cnt));
3627         _OUTP_IT("Number of do_QDIOs outbound                     : %i\n",
3628                  atomic_read(&perf_stats.outbound_cnt));
3629 #endif /* CONFIG_64BIT */
3630         _OUTP_IT("\n");
3631
3632         return c;
3633 }
3634
3635 static struct proc_dir_entry *qdio_perf_proc_file;
3636
3637 static void
3638 qdio_add_procfs_entry(void)
3639 {
3640         proc_perf_file_registration=0;
3641         qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3642                                               S_IFREG|0444,&proc_root);
3643         if (qdio_perf_proc_file) {
3644                 qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read;
3645         } else proc_perf_file_registration=-1;
3646
3647         if (proc_perf_file_registration)
3648                 QDIO_PRINT_WARN("was not able to register perf. " \
3649                                 "proc-file (%i).\n",
3650                                 proc_perf_file_registration);
3651 }
3652
3653 static void
3654 qdio_remove_procfs_entry(void)
3655 {
3656         if (!proc_perf_file_registration) /* means if it went ok earlier */
3657                 remove_proc_entry(QDIO_PERF,&proc_root);
3658 }
3659
3660 /**
3661  * attributes in sysfs
3662  *****************************************************************************/
3663
3664 static ssize_t
3665 qdio_performance_stats_show(struct bus_type *bus, char *buf)
3666 {
3667         return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
3668 }
3669
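/*
 * sysfs store handler: the value is parsed with simple_strtoul() (base 16)
 * and only 0 or 1 is accepted; switching the statistics off also resets
 * all performance counters.
 */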
3670 static ssize_t
3671 qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
3672 {
3673         char *tmp;
3674         int i;
3675
3676         i = simple_strtoul(buf, &tmp, 16);
3677         if ((i == 0) || (i == 1)) {
3678                 if (i == qdio_performance_stats)
3679                         return count;
3680                 qdio_performance_stats = i;
3681                 if (i==0) {
3682                         /* reset perf. stat. info */
3683 #ifdef CONFIG_64BIT
3684                         atomic64_set(&perf_stats.tl_runs, 0);
3685                         atomic64_set(&perf_stats.outbound_tl_runs, 0);
3686                         atomic64_set(&perf_stats.inbound_tl_runs, 0);
3687                         atomic64_set(&perf_stats.inbound_tl_runs_resched, 0);
3688                         atomic64_set(&perf_stats.inbound_thin_tl_runs, 0);
3689                         atomic64_set(&perf_stats.inbound_thin_tl_runs_resched,
3690                                      0);
3691                         atomic64_set(&perf_stats.siga_outs, 0);
3692                         atomic64_set(&perf_stats.siga_ins, 0);
3693                         atomic64_set(&perf_stats.siga_syncs, 0);
3694                         atomic64_set(&perf_stats.pcis, 0);
3695                         atomic64_set(&perf_stats.thinints, 0);
3696                         atomic64_set(&perf_stats.fast_reqs, 0);
3697                         atomic64_set(&perf_stats.outbound_cnt, 0);
3698                         atomic64_set(&perf_stats.inbound_cnt, 0);
3699 #else /* CONFIG_64BIT */
3700                         atomic_set(&perf_stats.tl_runs, 0);
3701                         atomic_set(&perf_stats.outbound_tl_runs, 0);
3702                         atomic_set(&perf_stats.inbound_tl_runs, 0);
3703                         atomic_set(&perf_stats.inbound_tl_runs_resched, 0);
3704                         atomic_set(&perf_stats.inbound_thin_tl_runs, 0);
3705                         atomic_set(&perf_stats.inbound_thin_tl_runs_resched, 0);
3706                         atomic_set(&perf_stats.siga_outs, 0);
3707                         atomic_set(&perf_stats.siga_ins, 0);
3708                         atomic_set(&perf_stats.siga_syncs, 0);
3709                         atomic_set(&perf_stats.pcis, 0);
3710                         atomic_set(&perf_stats.thinints, 0);
3711                         atomic_set(&perf_stats.fast_reqs, 0);
3712                         atomic_set(&perf_stats.outbound_cnt, 0);
3713                         atomic_set(&perf_stats.inbound_cnt, 0);
3714 #endif /* CONFIG_64BIT */
3715                 }
3716         } else {
3717                 QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
3718                 return -EINVAL;
3719         }
3720         return count;
3721 }
3722
3723 static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
3724                         qdio_performance_stats_store);
3725
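/*
 * Register the adapter (thin) interrupt handler with the common I/O
 * layer.  A failure is only reported; the module keeps working, but
 * thinint subchannels may then not receive adapter interrupts.
 */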
3726 static void
3727 tiqdio_register_thinints(void)
3728 {
3729         char dbf_text[20];
3730         register_thinint_result=
3731                 s390_register_adapter_interrupt(&tiqdio_thinint_handler);
3732         if (register_thinint_result) {
3733                 sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff));
3734                 QDIO_DBF_TEXT0(0,setup,dbf_text);
3735                 QDIO_PRINT_ERR("failed to register adapter handler " \
3736                                "(rc=%i).\nAdapter interrupts might " \
3737                                "not work. Continuing.\n",
3738                                register_thinint_result);
3739         }
3740 }
3741
3742 static void
3743 tiqdio_unregister_thinints(void)
3744 {
3745         if (!register_thinint_result)
3746                 s390_unregister_adapter_interrupt(&tiqdio_thinint_handler);
3747 }
3748
3749 static int
3750 qdio_get_qdio_memory(void)
3751 {
3752         int i;
3753         indicator_used[0]=1;
3754
3755         for (i=1;i<INDICATORS_PER_CACHELINE;i++)
3756                 indicator_used[i]=0;
3757         indicators = kzalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE),
3758                                    GFP_KERNEL);
3759         if (!indicators)
3760                 return -ENOMEM;
3761         return 0;
3762 }
3763
3764 static void
3765 qdio_release_qdio_memory(void)
3766 {
3767         kfree(indicators);
3768 }
3769
3770
3771 static void
3772 qdio_unregister_dbf_views(void)
3773 {
3774         if (qdio_dbf_setup)
3775                 debug_unregister(qdio_dbf_setup);
3776         if (qdio_dbf_sbal)
3777                 debug_unregister(qdio_dbf_sbal);
3778         if (qdio_dbf_sense)
3779                 debug_unregister(qdio_dbf_sense);
3780         if (qdio_dbf_trace)
3781                 debug_unregister(qdio_dbf_trace);
3782 #ifdef CONFIG_QDIO_DEBUG
3783         if (qdio_dbf_slsb_out)
3784                 debug_unregister(qdio_dbf_slsb_out);
3785         if (qdio_dbf_slsb_in)
3786                 debug_unregister(qdio_dbf_slsb_in);
3787 #endif /* CONFIG_QDIO_DEBUG */
3788 }
3789
3790 static int
3791 qdio_register_dbf_views(void)
3792 {
3793         qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
3794                                       QDIO_DBF_SETUP_PAGES,
3795                                       QDIO_DBF_SETUP_NR_AREAS,
3796                                       QDIO_DBF_SETUP_LEN);
3797         if (!qdio_dbf_setup)
3798                 goto oom;
3799         debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
3800         debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);
3801
3802         qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
3803                                      QDIO_DBF_SBAL_PAGES,
3804                                      QDIO_DBF_SBAL_NR_AREAS,
3805                                      QDIO_DBF_SBAL_LEN);
3806         if (!qdio_dbf_sbal)
3807                 goto oom;
3808
3809         debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
3810         debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);
3811
3812         qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
3813                                       QDIO_DBF_SENSE_PAGES,
3814                                       QDIO_DBF_SENSE_NR_AREAS,
3815                                       QDIO_DBF_SENSE_LEN);
3816         if (!qdio_dbf_sense)
3817                 goto oom;
3818
3819         debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
3820         debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);
3821
3822         qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
3823                                       QDIO_DBF_TRACE_PAGES,
3824                                       QDIO_DBF_TRACE_NR_AREAS,
3825                                       QDIO_DBF_TRACE_LEN);
3826         if (!qdio_dbf_trace)
3827                 goto oom;
3828
3829         debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
3830         debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);
3831
3832 #ifdef CONFIG_QDIO_DEBUG
3833         qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
3834                                          QDIO_DBF_SLSB_OUT_PAGES,
3835                                          QDIO_DBF_SLSB_OUT_NR_AREAS,
3836                                          QDIO_DBF_SLSB_OUT_LEN);
3837         if (!qdio_dbf_slsb_out)
3838                 goto oom;
3839         debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
3840         debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);
3841
3842         qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
3843                                         QDIO_DBF_SLSB_IN_PAGES,
3844                                         QDIO_DBF_SLSB_IN_NR_AREAS,
3845                                         QDIO_DBF_SLSB_IN_LEN);
3846         if (!qdio_dbf_slsb_in)
3847                 goto oom;
3848         debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
3849         debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
3850 #endif /* CONFIG_QDIO_DEBUG */
3851         return 0;
3852 oom:
3853         QDIO_PRINT_ERR("not enough memory for dbf.\n");
3854         qdio_unregister_dbf_views();
3855         return -ENOMEM;
3856 }
3857
3858 static void *qdio_mempool_alloc(gfp_t gfp_mask, void *size)
3859 {
3860         return (void *) get_zeroed_page(gfp_mask|GFP_DMA);
3861 }
3862
3863 static void qdio_mempool_free(void *element, void *size)
3864 {
3865         free_page((unsigned long) element);
3866 }
3867
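/*
 * Module init: allocate the indicator cacheline, register the debug
 * feature views, create the qdio_performance_stats bus attribute and the
 * /proc entry, set up the SCSSC mempool, check CHSC availability and
 * register the thinint handler.
 */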
3868 static int __init
3869 init_QDIO(void)
3870 {
3871         int res;
3872         void *ptr;
3873
3874         printk("qdio: loading %s\n",version);
3875
3876         res=qdio_get_qdio_memory();
3877         if (res)
3878                 return res;
3879
3880         res = qdio_register_dbf_views();
3881         if (res)
3882                 return res;
3883
3884         QDIO_DBF_TEXT0(0,setup,"initQDIO");
3885         res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3886
3887         memset((void*)&perf_stats,0,sizeof(perf_stats));
3888         QDIO_DBF_TEXT0(0,setup,"perfstat");
3889         ptr=&perf_stats;
3890         QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3891
3892         qdio_add_procfs_entry();
3893
3894         qdio_mempool_scssc = mempool_create(QDIO_MEMPOOL_SCSSC_ELEMENTS,
3895                                             qdio_mempool_alloc,
3896                                             qdio_mempool_free, NULL);
3897
3898         if (tiqdio_check_chsc_availability())
3899                 QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n");
3900
3901         tiqdio_register_thinints();
3902
3903         return 0;
3904 }
3905
3906 static void __exit
3907 cleanup_QDIO(void)
3908 {
3909         tiqdio_unregister_thinints();
3910         qdio_remove_procfs_entry();
3911         qdio_release_qdio_memory();
3912         qdio_unregister_dbf_views();
3913         mempool_destroy(qdio_mempool_scssc);
3914         bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3915         printk("qdio: %s: module removed\n",version);
3916 }
3917
3918 module_init(init_QDIO);
3919 module_exit(cleanup_QDIO);
3920
3921 EXPORT_SYMBOL(qdio_allocate);
3922 EXPORT_SYMBOL(qdio_establish);
3923 EXPORT_SYMBOL(qdio_initialize);
3924 EXPORT_SYMBOL(qdio_activate);
3925 EXPORT_SYMBOL(do_QDIO);
3926 EXPORT_SYMBOL(qdio_shutdown);
3927 EXPORT_SYMBOL(qdio_free);
3928 EXPORT_SYMBOL(qdio_cleanup);
3929 EXPORT_SYMBOL(qdio_synchronize);