gru: support for asynchronous gru instructions
drivers/misc/sgi-gru/grukservices.c

/*
 * SN Platform GRU Driver
 *
 * KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * Blade percpu resources reserved for kernel use. These resources are
 * reserved whenever the kernel context for the blade is loaded. Note
 * that the kernel context is not guaranteed to be always available. It is
 * loaded on demand & can be stolen by a user if the user demand exceeds the
 * kernel demand. The kernel can always reload the kernel context but
 * a SLEEP may be required!!!
 *
 * Async Overview:
 *
 * 	Each blade has one "kernel context" that owns GRU kernel resources
 * 	located on the blade. Kernel drivers use GRU resources in this context
 * 	for sending messages, zeroing memory, etc.
 *
 * 	The kernel context is dynamically loaded on demand. If it is not in
 * 	use by the kernel, the kernel context can be unloaded & given to a user.
 * 	The kernel context will be reloaded when needed. This may require that
 * 	a context be stolen from a user.
 * 		NOTE: frequent unloading/reloading of the kernel context is
 * 		expensive. We are depending on batch schedulers, cpusets, sane
 * 		drivers or some other mechanism to prevent the need for frequent
 * 		stealing/reloading.
 *
 * 	The kernel context consists of two parts:
 * 		- 1 CB & a few DSRs that are reserved for each cpu on the blade.
 * 		  Each cpu has its own private resources & does not share them
 * 		  with other cpus. These resources are used serially, ie,
 * 		  locked, used & unlocked on each call to a function in
 * 		  grukservices.
 * 			(Now that we have dynamic loading of kernel contexts, I
 * 			may rethink this & allow sharing between cpus....)
 *
 * 		- Additional resources can be reserved long term & used directly
 * 		  by UV drivers located in the kernel. Drivers using these GRU
 * 		  resources can use asynchronous GRU instructions that send
 * 		  interrupts on completion.
 * 			- these resources must be explicitly locked/unlocked
 * 			- locked resources prevent (obviously) the kernel
 * 			  context from being unloaded.
 * 			- drivers using these resources directly issue their own
 * 			  GRU instructions and must wait/check completion.
 *
 * 		  When these resources are reserved, the caller can optionally
 * 		  associate a wait_queue with the resources and use asynchronous
 * 		  GRU instructions. When an async GRU instruction completes, the
 * 		  driver will do a wakeup on the event.
 */


#define ASYNC_HAN_TO_BID(h)	((h) - 1)
#define ASYNC_BID_TO_HAN(b)	((b) + 1)
#define ASYNC_HAN_TO_BS(h)	gru_base[ASYNC_HAN_TO_BID(h)]

#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL	(GRU_NUM_KERNEL_DSR_BYTES /	\
					GRU_CACHE_LINE_BYTES)

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__				\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3

/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/
/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void 			*next __gru_cacheline_aligned__;/* CL 1 */
	void 			*limit;
	void 			*start;
	void 			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char 	lines;
	char	fill;
};

#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))

/*
 * Reload the blade's kernel context into a GRU chiplet. Called holding
 * the bs_kgts_sema for READ. Will steal user contexts if necessary.
 */
static void gru_load_kernel_context(struct gru_blade_state *bs, int blade_id)
{
	struct gru_state *gru;
	struct gru_thread_state *kgts;
	void *vaddr;
	int ctxnum, ncpus;

	up_read(&bs->bs_kgts_sema);
	down_write(&bs->bs_kgts_sema);

	if (!bs->bs_kgts)
		bs->bs_kgts = gru_alloc_gts(NULL, 0, 0, 0, 0);
	kgts = bs->bs_kgts;

	if (!kgts->ts_gru) {
		STAT(load_kernel_context);
		ncpus = uv_blade_nr_possible_cpus(blade_id);
		kgts->ts_cbr_au_count = GRU_CB_COUNT_TO_AU(
			GRU_NUM_KERNEL_CBR * ncpus + bs->bs_async_cbrs);
		kgts->ts_dsr_au_count = GRU_DS_BYTES_TO_AU(
			GRU_NUM_KERNEL_DSR_BYTES * ncpus +
				bs->bs_async_dsr_bytes);
		while (!gru_assign_gru_context(kgts, blade_id)) {
			msleep(1);
			gru_steal_context(kgts, blade_id);
		}
		gru_load_context(kgts);
		gru = bs->bs_kgts->ts_gru;
		vaddr = gru->gs_gru_base_vaddr;
		ctxnum = kgts->ts_ctxnum;
		bs->kernel_cb = get_gseg_base_address_cb(vaddr, ctxnum, 0);
		bs->kernel_dsr = get_gseg_base_address_ds(vaddr, ctxnum, 0);
	}
	downgrade_write(&bs->bs_kgts_sema);
}

/*
 * Lock & load the kernel context for the specified blade.
 */
static struct gru_blade_state *gru_lock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	STAT(lock_kernel_context);
	bs = gru_base[blade_id];

	down_read(&bs->bs_kgts_sema);
	if (!bs->bs_kgts || !bs->bs_kgts->ts_gru)
		gru_load_kernel_context(bs, blade_id);
	return bs;
}

/*
 * Unlock the kernel context for the specified blade. Context is not
 * unloaded but may be stolen before next use.
 */
static void gru_unlock_kernel_context(int blade_id)
{
	struct gru_blade_state *bs;

	bs = gru_base[blade_id];
	up_read(&bs->bs_kgts_sema);
	STAT(unlock_kernel_context);
}

/*
 * Reserve & get pointers to the DSR/CBRs reserved for the current cpu.
 * 	- returns with preemption disabled
 */
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_lock_kernel_context(uv_numa_blade_id());
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}

/*
 * Free the current cpu's reserved DSR/CBR resources.
 */
static void gru_free_cpu_resources(void *cb, void *dsr)
{
	gru_unlock_kernel_context(uv_numa_blade_id());
	preempt_enable();
}

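/*
 * Illustrative sketch, not part of the driver: the serial
 * lock/use/unlock pattern that every grukservices call follows with the
 * per-cpu CBR/DSR pair above. The function name is hypothetical and the
 * block is compiled out with #if 0; it only documents the flow.
 */
#if 0
static int example_percpu_gru_op(unsigned long gpa)
{
	void *cb, *dsr;
	int ret;

	/* lock kernel context & get this cpu's private CBR/DSR */
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;

	/* issue a GRU instruction using cb/dsr, then spin until done */
	gru_vload(cb, gpa, gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	ret = gru_wait(cb);

	/* unlock the kernel context & reenable preemption */
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
#endif
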
/*
 * Reserve GRU resources to be used asynchronously.
 *   Note: currently supports only 1 reservation per blade.
 *
 * 	input:
 * 		blade_id  - blade on which resources should be reserved
 * 		cbrs	  - number of CBRs
 * 		dsr_bytes - number of DSR bytes needed
 *	output:
 *		handle to identify resource
 *		(0 = async resources already reserved)
 */
unsigned long gru_reserve_async_resources(int blade_id, int cbrs, int dsr_bytes,
			struct completion *cmp)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;
	int ret = 0;

	bs = gru_base[blade_id];

	down_write(&bs->bs_kgts_sema);

	/* Verify no resources already reserved */
	if (bs->bs_async_dsr_bytes + bs->bs_async_cbrs)
		goto done;
	bs->bs_async_dsr_bytes = dsr_bytes;
	bs->bs_async_cbrs = cbrs;
	bs->bs_async_wq = cmp;
	kgts = bs->bs_kgts;

	/* Resources changed. Unload context if already loaded */
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	ret = ASYNC_BID_TO_HAN(blade_id);

done:
	up_write(&bs->bs_kgts_sema);
	return ret;
}

/*
 * Release async resources previously reserved.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_release_async_resources(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	down_write(&bs->bs_kgts_sema);
	bs->bs_async_dsr_bytes = 0;
	bs->bs_async_cbrs = 0;
	bs->bs_async_wq = NULL;
	up_write(&bs->bs_kgts_sema);
}

/*
 * Wait for async GRU instructions to complete.
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_wait_async_cbr(unsigned long han)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);

	wait_for_completion(bs->bs_async_wq);
	mb();
}

/*
 * Lock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 *	output:
 *		cb  - pointer to first CBR
 *		dsr - pointer to first DSR
 */
void gru_lock_async_resource(unsigned long han,  void **cb, void **dsr)
{
	struct gru_blade_state *bs = ASYNC_HAN_TO_BS(han);
	int blade_id = ASYNC_HAN_TO_BID(han);
	int ncpus;

	gru_lock_kernel_context(blade_id);
	ncpus = uv_blade_nr_possible_cpus(blade_id);
	if (cb)
		*cb = bs->kernel_cb + ncpus * GRU_HANDLE_STRIDE;
	if (dsr)
		*dsr = bs->kernel_dsr + ncpus * GRU_NUM_KERNEL_DSR_BYTES;
}

/*
 * Unlock previously reserved async GRU resources
 *
 *	input:
 *		han - handle to identify resources
 */
void gru_unlock_async_resource(unsigned long han)
{
	int blade_id = ASYNC_HAN_TO_BID(han);

	gru_unlock_kernel_context(blade_id);
}

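/*
 * Illustrative sketch, not part of the driver: the
 * reserve/lock/wait/unlock/release lifecycle of the async interfaces
 * above, as a hypothetical UV driver might use them. Names prefixed
 * "example_" are assumptions; the block is compiled out with #if 0.
 */
#if 0
static struct completion example_async_done;

static void example_async_lifecycle(int blade_id)
{
	unsigned long han;
	void *cb, *dsr;

	/* reserve 1 CBR & 32 DSR bytes; 0 means already reserved */
	han = gru_reserve_async_resources(blade_id, 1, 32,
					  &example_async_done);
	if (!han)
		return;

	gru_lock_async_resource(han, &cb, &dsr);
	/*
	 * ... issue asynchronous GRU instructions on cb here; the
	 * driver's interrupt handler completes example_async_done ...
	 */
	gru_wait_async_cbr(han);	/* sleep until completion */
	gru_unlock_async_resource(han);

	gru_release_async_resources(han);
}
#endif
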
/*----------------------------------------------------------------------*/
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;

	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
	prefetchw(cbe);		/* Harmless on hardware, required for emulator */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	return 0;
}

char *gru_get_cb_exception_detail_str(int ret, void *cb,
				      char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			"GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
			"excdet0 0x%lx, excdet1 0x%x",
			gen, excdet.opc, excdet.exopc, excdet.ecause,
			excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1)  {
		if (gru_get_cb_message_queue_substatus(cb))
			break;
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;

		gru_get_cb_exception_detail(cb, &excdet);
		if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}

int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret != CBS_EXCEPTION)
		return ret;
	return gru_retry_exception(cb);
}

int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);

	return ret;
}

void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}


/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */


/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}

/*
 * Create a message queue.
 * 	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
		void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	mqd->mq = mq;
	mqd->mq_gpa = uv_gpa(mq);
	mqd->qlines = qlines;
	mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
	mqd->interrupt_vector = vector;
	mqd->interrupt_apicid = apicid;
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);

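/*
 * Illustrative sketch, not part of the driver: creating a message queue
 * in caller-allocated, cacheline-aligned memory. Names and sizes are
 * assumptions; a zero interrupt vector means no delivery IPI is sent
 * (see send_message_queue_interrupt() below). Compiled out with #if 0.
 */
#if 0
static struct gru_message_queue_desc example_mqd;

static int example_create_mq(void)
{
	/* two pages -> 8192 bytes -> 128 cache lines incl. 2-line header */
	void *p = (void *)__get_free_pages(GFP_KERNEL, 1);

	if (!p)
		return -ENOMEM;
	return gru_create_message_queue(&example_mqd, p, 8192, 0, 0, 0);
}
#endif
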
/*
 * Send a NOOP message to a message queue
 * Returns:
 * 	 0 - if queue is full after the send. This is the normal case
 * 	     but various races can change this.
 *	-1 - if mesq sent successfully but queue not full
 *	>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}

/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	qlines = mqd->qlines;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send optional NOOP if queue not full. */
	if (head != limit) {
		if (send_noop_message(cb, mqd, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
							IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If swapping the queue head failed, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
				IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}

/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by
 * hardware but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
	if (mqd->interrupt_vector)
		uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
				mqd->interrupt_vector);
}

/*
 * Handle a PUT failure. Note: if message was a 2-line message, one of the
 * lines might have been successfully written. Before sending the
 * message, "present" must be cleared in BOTH lines to prevent the receiver
 * from prematurely seeing the full message.
 */
static int send_message_put_nacked(void *cb, struct gru_message_queue_desc *mqd,
			void *mesg, int lines)
{
	unsigned long m;

	m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
	if (lines == 2) {
		gru_vset(cb, m, 0, XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			return MQE_UNEXPECTED_CB_ERR;
	}
	gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		return MQE_UNEXPECTED_CB_ERR;
	send_message_queue_interrupt(mqd);
	return MQE_OK;
}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
				void *mesg, int lines)
{
	int substatus, ret = 0;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mqd, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		ret = send_message_put_nacked(cb, mqd, mesg, lines);
		break;
	default:
		BUG();
	}
	return ret;
}

/*
 * Send a message to a message queue
 * 	mqd	message queue descriptor
 * 	mesg	message. Must be a kernel vaddr; it is copied into
 * 		a kernel DSR (GSEG resource) before sending.
 * 	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
				unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mqd, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);

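/*
 * Illustrative sketch, not part of the driver: sending a one-cacheline
 * message to a queue described by a gru_message_queue_desc. The copied
 * message's first bytes are overwritten with the struct message_header;
 * the rest of the payload layout is up to the caller. Compiled out
 * with #if 0.
 */
#if 0
static int example_send(struct gru_message_queue_desc *mqd)
{
	char mesg[GRU_CACHE_LINE_BYTES] = { 0 };	/* 1 CL message */

	/* returns MQE_OK (0) or an MQE_xxx failure code */
	return gru_send_message_gpa(mqd, mesg, sizeof(mesg));
}
#endif
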
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to next message.
 * 	mqd	message queue descriptor
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
	struct message_queue *mq = mqd->mq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	STAT(mesq_receive);
	while (present == MQS_NOOP) {
		gru_free_message(mqd, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);

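/*
 * Illustrative sketch, not part of the driver: a polling receive loop.
 * Each message returned by gru_get_next_message() must be released with
 * gru_free_message() to advance the queue. Compiled out with #if 0.
 */
#if 0
static void example_receive_all(struct gru_message_queue_desc *mqd)
{
	void *mesg;

	while ((mesg = gru_get_next_message(mqd)) != NULL) {
		/* ... consume the message at mesg ... */
		gru_free_message(mqd, mesg);
	}
}
#endif
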
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);

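/*
 * Illustrative sketch, not part of the driver: copying between two
 * kernel buffers via their GRU global physical addresses. uv_gpa() is
 * the same vaddr-to-gpa conversion used elsewhere in this file.
 * Compiled out with #if 0.
 */
#if 0
static int example_copy(void *dest, void *src, unsigned int bytes)
{
	return gru_copy_gpa(uv_gpa(dest), uv_gpa(src), bytes);
}
#endif
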
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */

int quicktest(void)
{
	unsigned long word0;
	unsigned long word1;
	void *cb;
	void *dsr;
	unsigned long *p;

	if (gru_get_cpu_resources(GRU_CACHE_LINE_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	p = dsr;
	word0 = MAGIC;
	word1 = 0;

	gru_vload(cb, uv_gpa(&word0), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();

	if (*p != MAGIC)
		BUG();
	gru_vstore(cb, uv_gpa(&word1), gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();
	gru_free_cpu_resources(cb, dsr);

	if (word0 != word1 || word1 != MAGIC) {
		printk("GRU quicktest err: found 0x%lx, expected 0x%lx\n",
		       word1, MAGIC);
		BUG();		/* ZZZ should not be fatal */
	}

	return 0;
}


int gru_kservices_init(struct gru_state *gru)
{
	struct gru_blade_state *bs;

	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[0])
		return 0;

	init_rwsem(&bs->bs_kgts_sema);

	if (gru_options & GRU_QUICKLOOK)
		quicktest();
	return 0;
}

void gru_kservices_exit(struct gru_state *gru)
{
	struct gru_blade_state *bs;
	struct gru_thread_state *kgts;

	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[0])
		return;

	kgts = bs->bs_kgts;
	if (kgts && kgts->ts_gru)
		gru_unload_context(kgts, 0);
	kfree(kgts);
}