Commit | Line | Data |
---|---|---|
779e6e1c | 1 | /* |
a53c8fab | 2 | * Copyright IBM Corp. 2000, 2009 |
779e6e1c JG |
3 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com> |
4 | * Cornelia Huck <cornelia.huck@de.ibm.com> | |
5 | * Jan Glauber <jang@linux.vnet.ibm.com> | |
6 | */ | |
7 | #include <linux/io.h> | |
5a0e3ad6 | 8 | #include <linux/slab.h> |
30d77c3e | 9 | #include <linux/kernel_stat.h> |
60063497 | 10 | #include <linux/atomic.h> |
779e6e1c JG |
11 | #include <asm/debug.h> |
12 | #include <asm/qdio.h> | |
13 | #include <asm/airq.h> | |
14 | #include <asm/isc.h> | |
15 | ||
16 | #include "cio.h" | |
17 | #include "ioasm.h" | |
18 | #include "qdio.h" | |
19 | #include "qdio_debug.h" | |
779e6e1c JG |
20 | |
/*
 * Restriction: only 63 iqdio subchannels would have its own indicator,
 * after that, subsequent subchannels share one indicator
 */
#define TIQDIO_NR_NONSHARED_IND		63
#define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
/* index of the single shared indicator within q_indicators */
#define TIQDIO_SHARED_IND		63

/* device state change indicators */
struct indicator_t {
	u32 ind;	/* u32 because of compare-and-swap performance */
	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
};
779e6e1c JG |
34 | |
/* list of thin interrupt input queues */
static LIST_HEAD(tiq_list);
/* serializes additions to and removals from tiq_list; readers use RCU */
static DEFINE_MUTEX(tiq_list_lock);

/* adapter local summary indicator */
static u8 *tiqdio_alsi;

/* array of TIQDIO_NR_INDICATORS device state change indicators */
static struct indicator_t *q_indicators;

/* clock value recorded at the most recent adapter interrupt */
u64 last_ai_time;
d36deae7 | 45 | |
779e6e1c JG |
46 | /* returns addr for the device state change indicator */ |
47 | static u32 *get_indicator(void) | |
48 | { | |
49 | int i; | |
50 | ||
51 | for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++) | |
52 | if (!atomic_read(&q_indicators[i].count)) { | |
53 | atomic_set(&q_indicators[i].count, 1); | |
54 | return &q_indicators[i].ind; | |
55 | } | |
56 | ||
57 | /* use the shared indicator */ | |
58 | atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count); | |
59 | return &q_indicators[TIQDIO_SHARED_IND].ind; | |
60 | } | |
61 | ||
62 | static void put_indicator(u32 *addr) | |
63 | { | |
64 | int i; | |
65 | ||
66 | if (!addr) | |
67 | return; | |
68 | i = ((unsigned long)addr - (unsigned long)q_indicators) / | |
69 | sizeof(struct indicator_t); | |
70 | atomic_dec(&q_indicators[i].count); | |
71 | } | |
72 | ||
/* publish the irq's first input queue on tiq_list for thinint processing */
void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
{
	mutex_lock(&tiq_list_lock);
	list_add_rcu(&irq_ptr->input_qs[0]->entry, &tiq_list);
	mutex_unlock(&tiq_list_lock);
	/* set the top bit of the dsci, presumably to force an initial scan
	 * of the queue — confirm against the interrupt handler */
	xchg(irq_ptr->dsci, 1 << 7);
}
80 | ||
779e6e1c JG |
81 | void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) |
82 | { | |
83 | struct qdio_q *q; | |
779e6e1c | 84 | |
104ea556 | 85 | q = irq_ptr->input_qs[0]; |
86 | /* if establish triggered an error */ | |
87 | if (!q || !q->entry.prev || !q->entry.next) | |
88 | return; | |
b4547402 | 89 | |
104ea556 | 90 | mutex_lock(&tiq_list_lock); |
91 | list_del_rcu(&q->entry); | |
92 | mutex_unlock(&tiq_list_lock); | |
93 | synchronize_rcu(); | |
779e6e1c JG |
94 | } |
95 | ||
5f4026f8 JG |
/* true if more than one input queue hangs off this irq's dsci */
static inline int has_multiple_inq_on_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->nr_input_qs > 1;
}
100 | ||
/* true if this irq was assigned the shared indicator by get_indicator() */
static inline int references_shared_dsci(struct qdio_irq *irq_ptr)
{
	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}
105 | ||
/* true if the dsci cannot be attributed to exactly one input queue */
static inline int shared_ind(struct qdio_irq *irq_ptr)
{
	return references_shared_dsci(irq_ptr) ||
		has_multiple_inq_on_dsci(irq_ptr);
}
111 | ||
112 | void clear_nonshared_ind(struct qdio_irq *irq_ptr) | |
113 | { | |
114 | if (!is_thinint_irq(irq_ptr)) | |
115 | return; | |
116 | if (shared_ind(irq_ptr)) | |
117 | return; | |
118 | xchg(irq_ptr->dsci, 0); | |
119 | } | |
120 | ||
121 | int test_nonshared_ind(struct qdio_irq *irq_ptr) | |
122 | { | |
123 | if (!is_thinint_irq(irq_ptr)) | |
124 | return 0; | |
125 | if (shared_ind(irq_ptr)) | |
126 | return 0; | |
127 | if (*irq_ptr->dsci) | |
128 | return 1; | |
129 | else | |
130 | return 0; | |
131 | } | |
132 | ||
b02f0c2e | 133 | static inline u32 clear_shared_ind(void) |
779e6e1c | 134 | { |
b02f0c2e JG |
135 | if (!atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) |
136 | return 0; | |
137 | return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); | |
779e6e1c JG |
138 | } |
139 | ||
104ea556 | 140 | static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq) |
141 | { | |
142 | struct qdio_q *q; | |
143 | int i; | |
144 | ||
8e68a4d1 JW |
145 | if (!references_shared_dsci(irq) && |
146 | has_multiple_inq_on_dsci(irq)) | |
147 | xchg(irq->dsci, 0); | |
104ea556 | 148 | |
8e68a4d1 | 149 | for_each_input_queue(irq, q, i) { |
104ea556 | 150 | if (q->u.in.queue_start_poll) { |
151 | /* skip if polling is enabled or already in work */ | |
152 | if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED, | |
153 | &q->u.in.queue_irq_state)) { | |
154 | qperf_inc(q, int_discarded); | |
155 | continue; | |
156 | } | |
157 | ||
158 | /* avoid dsci clear here, done after processing */ | |
159 | q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr, | |
160 | q->irq_ptr->int_parm); | |
161 | } else { | |
5f4026f8 | 162 | if (!shared_ind(q->irq_ptr)) |
104ea556 | 163 | xchg(q->irq_ptr->dsci, 0); |
164 | ||
165 | /* | |
166 | * Call inbound processing but not directly | |
167 | * since that could starve other thinint queues. | |
168 | */ | |
169 | tasklet_schedule(&q->tasklet); | |
170 | } | |
171 | } | |
172 | } | |
173 | ||
cf9a031c JG |
/**
 * tiqdio_thinint_handler - thin interrupt handler for qdio
 * @alsi: pointer to adapter local summary indicator
 * @data: NULL
 */
static void tiqdio_thinint_handler(void *alsi, void *data)
{
	/* fetch-and-clear the shared indicator once, before the list walk */
	u32 si_used = clear_shared_ind();
	struct qdio_q *q;

	last_ai_time = S390_lowcore.int_clock;
	inc_irq_stat(IRQIO_QAI);

	/* protect tiq_list entries, only changed in activate or shutdown */
	rcu_read_lock();

	/* check for work on all inbound thinint queues */
	list_for_each_entry_rcu(q, &tiq_list, entry) {
		struct qdio_irq *irq;

		/* only process queues from changed sets */
		irq = q->irq_ptr;
		if (unlikely(references_shared_dsci(irq))) {
			/* shared dsci was consumed above; skip if it was 0 */
			if (!si_used)
				continue;
		} else if (!*irq->dsci)
			continue;

		tiqdio_call_inq_handlers(irq);

		qperf_inc(q, adapter_int);
	}
	rcu_read_unlock();
}
208 | ||
/*
 * Issue the CHSC "set subchannel indicator" command (code 0x0021) to
 * install the adapter summary and subchannel indicators for @irq_ptr,
 * or to reset both addresses to 0 when @reset is non-zero.
 *
 * Returns 0 on success, -EIO if the chsc instruction itself fails, or
 * a negative error code derived from the CHSC response code.
 */
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
	struct scssc_area *scssc_area;
	int rc;

	/* the request is built in the irq's preallocated chsc page */
	scssc_area = (struct scssc_area *)irq_ptr->chsc_page;
	memset(scssc_area, 0, PAGE_SIZE);

	if (reset) {
		scssc_area->summary_indicator_addr = 0;
		scssc_area->subchannel_indicator_addr = 0;
	} else {
		scssc_area->summary_indicator_addr = virt_to_phys(tiqdio_alsi);
		scssc_area->subchannel_indicator_addr =
			virt_to_phys(irq_ptr->dsci);
	}

	scssc_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code	= 0x0021,
	};
	scssc_area->operation_code = 0;
	scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
	scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
	scssc_area->isc = QDIO_AIRQ_ISC;
	scssc_area->schid = irq_ptr->schid;

	/* enable the time delay disablement facility */
	if (css_general_characteristics.aif_tdd)
		scssc_area->word_with_d_bit = 0x10000000;

	rc = chsc(scssc_area);
	if (rc)
		return -EIO;

	rc = chsc_error_from_response(scssc_area->response.code);
	if (rc) {
		DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no,
			  scssc_area->response.code);
		DBF_ERROR_HEX(&scssc_area->response, sizeof(void *));
		return rc;
	}

	DBF_EVENT("setscind");
	DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long));
	DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long));
	return 0;
}
257 | ||
258 | /* allocate non-shared indicators and shared indicator */ | |
259 | int __init tiqdio_allocate_memory(void) | |
260 | { | |
261 | q_indicators = kzalloc(sizeof(struct indicator_t) * TIQDIO_NR_INDICATORS, | |
262 | GFP_KERNEL); | |
263 | if (!q_indicators) | |
264 | return -ENOMEM; | |
265 | return 0; | |
266 | } | |
267 | ||
268 | void tiqdio_free_memory(void) | |
269 | { | |
270 | kfree(q_indicators); | |
271 | } | |
272 | ||
/*
 * Register the adapter interrupt handler on the qdio ISC and obtain the
 * adapter local summary indicator.  Returns 0 on success, -ENOMEM if
 * registration fails.
 */
int __init tiqdio_register_thinints(void)
{
	isc_register(QDIO_AIRQ_ISC);
	tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler,
						      NULL, QDIO_AIRQ_ISC);
	if (IS_ERR(tiqdio_alsi)) {
		DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi));
		/* roll back the isc registration on failure */
		tiqdio_alsi = NULL;
		isc_unregister(QDIO_AIRQ_ISC);
		return -ENOMEM;
	}
	return 0;
}
286 | ||
/* install the subchannel indicator for a thinint irq; no-op otherwise */
int qdio_establish_thinint(struct qdio_irq *irq_ptr)
{
	return is_thinint_irq(irq_ptr) ? set_subchannel_ind(irq_ptr, 0) : 0;
}
293 | ||
/* assign a device state change indicator to a thinint irq */
void qdio_setup_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;
	irq_ptr->dsci = get_indicator();
	DBF_HEX(&irq_ptr->dsci, sizeof(void *));
}
301 | ||
/* undo qdio_setup_thinint()/qdio_establish_thinint() for a thinint irq */
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
{
	if (!is_thinint_irq(irq_ptr))
		return;

	/* reset adapter interrupt indicators */
	set_subchannel_ind(irq_ptr, 1);
	put_indicator(irq_ptr->dsci);
}
311 | ||
312 | void __exit tiqdio_unregister_thinints(void) | |
313 | { | |
9e890ad8 | 314 | WARN_ON(!list_empty(&tiq_list)); |
779e6e1c JG |
315 | |
316 | if (tiqdio_alsi) { | |
317 | s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); | |
318 | isc_unregister(QDIO_AIRQ_ISC); | |
319 | } | |
320 | } |