/*
 * SN Platform GRU Driver
 *
 * KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * At boot time, the kernel permanently reserves a fixed number of
 * CBRs/DSRs for each cpu to use. The resources are all taken from
 * GRU chiplet 1 on the blade. This leaves the full set of resources
 * of chiplet 0 available to be allocated to a single user.
 */

/* Blade percpu resources PERMANENTLY reserved for kernel use */
#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define KERNEL_CTXNUM		15
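
/*
 * For illustration (simple arithmetic on the constants above, not a
 * statement about any specific system): a blade with 16 possible cpus
 * permanently reserves 16 CBRs (GRU_NUM_KERNEL_CBR * 16) and 4096 bytes
 * of DSR space (GRU_NUM_KERNEL_DSR_BYTES * 16) in gru_kservices_init()
 * below.
 */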

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3

/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};
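
/*
 * Layout sketch, derived from the CL/DW annotations above (assumes an
 * 8-byte gru_mesqhead and 64-byte GRU cache lines):
 *
 *	CL 0:  head, qlines, hstatus[0], hstatus[1]  - shared with the GRU
 *	CL 1:  next, limit, start, start2            - software-only fields
 *	CL 2+: data                                  - the messages themselves
 */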

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define QLINES(mq)	((mq) + offsetof(struct message_queue, qlines))
#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))
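
/*
 * Example (illustrative; offsets assume the layout sketched above): for a
 * queue at global address "mq", QLINES(mq) evaluates to mq + 8 and
 * HSTATUS(mq, 1) to mq + 24 - GRU-side addresses of header fields.
 */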

/*
 * Reserve this cpu's kernel CB and DSR space. Disables preemption;
 * gru_free_cpu_resources() reenables it.
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_base[uv_numa_blade_id()];
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}

static void gru_free_cpu_resources(void *cb, void *dsr)
{
	preempt_enable();
}
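
/*
 * Usage sketch: the pair above brackets a preempt-disabled region, so
 * callers must not sleep between get and free:
 *
 *	void *cb, *dsr;
 *
 *	if (gru_get_cpu_resources(bytes, &cb, &dsr))
 *		return MQE_BUG_NO_RESOURCES;
 *	... issue GRU instructions using cb and dsr ...
 *	gru_free_cpu_resources(cb, dsr);
 */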

int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;

	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	return 0;
}

char *gru_get_cb_exception_detail_str(int ret, void *cb,
		char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			 "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
			 "excdet0 0x%lx, excdet1 0x%x",
			 gen, excdet.opc, excdet.exopc, excdet.ecause,
			 excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_get_cb_message_queue_substatus(cb))
			break;
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;

		gru_get_cb_exception_detail(cb, &excdet);
		if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
			break;
		if (retry-- == 0)
			break;
		/* Restart the exceptioned instruction */
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}

int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret != CBS_EXCEPTION)
		return ret;
	return gru_retry_exception(cb);
}

int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);

	return ret;
}

void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}


/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */


/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}
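
/*
 * Note: in a 2 cache line message the first byte of the second line
 * doubles as that line's "present" flag while the message is queued.
 * The original payload byte is stashed in present2 of the first-line
 * header on send and put back by the receiver (see gru_send_message_gpa()
 * and gru_get_next_message() below).
 */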

/*
 * Create a message queue.
 *	bytes - size of the queue in bytes, including the 2-cacheline header.
 */
int gru_create_message_queue(void *p, unsigned int bytes)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
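
/*
 * Usage sketch (illustrative; "bytes" and "order" are local to the
 * example): the queue must be physically contiguous and cache line
 * aligned, e.g. whole pages:
 *
 *	int order = get_order(bytes);
 *	void *p = (void *)__get_free_pages(GFP_KERNEL, order);
 *
 *	if (p)
 *		gru_create_message_queue(p, PAGE_SIZE << order);
 */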

/*
 * Send a NOOP message to a message queue
 *	Returns:
 *		 0 - if queue is full after the send. This is the normal case
 *		     but various races can change this.
 *		-1 - if mesq sent successfully but queue not full
 *		>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb,
				unsigned long mq, void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			/* head << 6 converts a cacheline index to a byte offset */
			m = mq + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}

/*
 * Handle a gru_mesq full condition
 */
static int send_message_queue_full(void *cb,
			   unsigned long mq, void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines, save;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	/*
	 * Fetch "qlines" from the queue header. Since the queue may be
	 * in memory that can't be accessed using socket addresses, use
	 * the GRU to access the data. Use DSR space from the message.
	 */
	save = *(int *)mesg;
	gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	qlines = *(int *)mesg;
	*(int *)mesg = save;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOOP if queue not full */
	if (head != limit) {
		if (send_noop_message(cb, mq, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue */
	gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If the swap of the queue head failed, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}
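
/*
 * Note on the scheme above: the queue runs as two halves.
 * gru_free_message() sets hstatus[half] to 1 when the receiver finishes
 * draining a half; the sender's gru_gamir(EOP_IR_CLR) atomically fetches
 * and clears that word, so a zero result means the other half is still
 * in use and the queue is genuinely full. On success, the queue head is
 * CSWAPed over to the other half and the sender retries.
 */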

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb,
			unsigned long mq,
			void *mesg,
			int lines)
{
	int substatus, ret = 0;
	unsigned long m;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mq, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		m = mq + (gru_get_amo_value_head(cb) << 6);
		gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) == CBS_IDLE)
			ret = MQE_OK;
		else
			ret = MQE_UNEXPECTED_CB_ERR;
		break;
	default:
		BUG();
	}
	return ret;
}

/*
 * Send a message to a message queue
 *	mq	message queue
 *	mesg	message. Must be vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = (bytes + GRU_CACHE_LINE_BYTES - 1) / GRU_CACHE_LINE_BYTES;
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mq, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
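
/*
 * Usage sketch (illustrative; "mq_gpa" and the payload size are local to
 * the example). The message starts with a message_header that this layer
 * fills in:
 *
 *	struct {
 *		struct message_header hdr;
 *		char payload[24];
 *	} msg;
 *	int ret;
 *
 *	ret = gru_send_message_gpa(mq_gpa, &msg, sizeof(msg));
 *	if (ret == MQE_QUEUE_FULL)
 *		... back off and retry later ...
 */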

/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(void *rmq, void *mesg)
{
	struct message_queue *mq = rmq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to next message.
 *	rmq	message queue
 */
void *gru_get_next_message(void *rmq)
{
	struct message_queue *mq = rmq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	STAT(mesq_receive);
	while (present == MQS_NOOP) {
		gru_free_message(rmq, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
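
/*
 * Usage sketch (illustrative): the receiver polls with the cpu and
 * releases each message slot before advancing:
 *
 *	void *msg;
 *
 *	while ((msg = gru_get_next_message(rmq)) != NULL) {
 *		... consume the message ...
 *		gru_free_message(rmq, msg);
 *	}
 */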

/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
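
/*
 * Usage sketch (illustrative): both arguments are global physical
 * addresses, e.g. from uv_gpa(); a return of CBS_IDLE means success:
 *
 *	ret = gru_copy_gpa(uv_gpa(dst), uv_gpa(src), nbytes);
 *	if (ret != CBS_IDLE)
 *		... handle the copy error ...
 */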

/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */
static __cacheline_aligned unsigned long word0;
static __cacheline_aligned unsigned long word1;

static int quicktest(struct gru_state *gru)
{
	void *cb;
	void *ds;
	unsigned long *p;

	cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
	ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
	p = ds;
	word0 = MAGIC;

	gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();

	if (*p != MAGIC)
		BUG();
	gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();

	if (word0 != word1 || word0 != MAGIC) {
		printk(KERN_ERR
		       "GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n",
		       gru->gs_gid, word1, MAGIC);
		BUG();	/* ZZZ should not be fatal */
	}

	return 0;
}


int gru_kservices_init(struct gru_state *gru)
{
	struct gru_blade_state *bs;
	struct gru_context_configuration_handle *cch;
	unsigned long cbr_map, dsr_map;
	int err, num, cpus_possible;

	/*
	 * Currently, resources are reserved ONLY on the second chiplet
	 * on each blade. This leaves ALL resources on chiplet 0 available
	 * for user code.
	 */
	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[1])
		return 0;

	cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);

	num = GRU_NUM_KERNEL_CBR * cpus_possible;
	cbr_map = reserve_gru_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
	gru->gs_reserved_cbrs += num;

	num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
	dsr_map = reserve_gru_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
	gru->gs_reserved_dsr_bytes += num;

	gru->gs_active_contexts++;
	__set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
	cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);

	bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
					KERNEL_CTXNUM, 0);
	bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
					KERNEL_CTXNUM, 0);

	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable = 0;
	cch->tlb_int_enable = 0;
	cch->tfm_done_bit_enable = 0;
	cch->unmap_enable = 1;
	err = cch_allocate(cch, 0, cbr_map, dsr_map);
	if (err) {
		gru_dbg(grudev,
			"Unable to allocate kernel CCH: gru %d, err %d\n",
			gru->gs_gid, err);
		BUG();
	}
	err = cch_start(cch);
	if (err) {
		gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n",
			gru->gs_gid, err);
		BUG();
	}
	unlock_cch_handle(cch);

	if (gru_options & GRU_QUICKLOOK)
		quicktest(gru);
	return 0;
}