/*
 * SN Platform GRU Driver
 *
 *              FILE OPERATIONS & DRIVER INITIALIZATION
 *
 * This file supports the user system call for file open, close, mmap, etc.
 * This also includes the driver initialization code.
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <asm/uv/uv.h>
#include "grutables.h"

#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_mmrs.h>

struct gru_blade_state *gru_base[GRU_MAX_BLADES] __read_mostly;
unsigned long gru_start_paddr __read_mostly;
void *gru_start_vaddr __read_mostly;
unsigned long gru_end_paddr __read_mostly;
unsigned int gru_max_gids __read_mostly;
struct gru_stats_s gru_stats;

/* Guaranteed user available resources on each node */
static int max_user_cbrs, max_user_dsr_bytes;

static struct file_operations gru_fops;
static struct miscdevice gru_miscdev;

/*
 * Called when unmapping a device mapping. Frees all gru resources
 * and tables belonging to the vma.
 */
static void gru_vma_close(struct vm_area_struct *vma)
{
	struct gru_vma_data *vdata;
	struct gru_thread_state *gts;
	struct list_head *entry, *next;

	if (!vma->vm_private_data)
		return;

	vdata = vma->vm_private_data;
	vma->vm_private_data = NULL;
	gru_dbg(grudev, "vma %p, file %p, vdata %p\n", vma, vma->vm_file,
		vdata);
	list_for_each_safe(entry, next, &vdata->vd_head) {
		gts = list_entry(entry, struct gru_thread_state, ts_next);
		list_del(&gts->ts_next);
		mutex_lock(&gts->ts_ctxlock);
		if (gts->ts_gru)
			gru_unload_context(gts, 0);
		mutex_unlock(&gts->ts_ctxlock);
		gts_drop(gts);
	}
	kfree(vdata);
}

/*
 * Called when mmaping the device.  Initializes the vma with a fault handler
 * and private data structure necessary to allocate, track, and free the
 * underlying pages.
 */
static int gru_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & (VM_SHARED | VM_WRITE)) != (VM_SHARED | VM_WRITE))
		return -EPERM;

	if (vma->vm_start & (GRU_GSEG_PAGESIZE - 1) ||
				vma->vm_end & (GRU_GSEG_PAGESIZE - 1))
		return -EINVAL;

	vma->vm_flags |=
	    (VM_IO | VM_DONTCOPY | VM_LOCKED | VM_DONTEXPAND | VM_PFNMAP |
			VM_RESERVED);
	vma->vm_page_prot = PAGE_SHARED;
	vma->vm_ops = &gru_vm_ops;

	vma->vm_private_data = gru_alloc_vma_data(vma, 0);
	if (!vma->vm_private_data)
		return -ENOMEM;

	gru_dbg(grudev, "file %p, vaddr 0x%lx, vma %p, vdata %p\n",
		file, vma->vm_start, vma, vma->vm_private_data);
	return 0;
}

/*
 * Create a new GRU context
 */
static int gru_create_new_context(unsigned long arg)
{
	struct gru_create_context_req req;
	struct vm_area_struct *vma;
	struct gru_vma_data *vdata;
	int ret = -EINVAL;

	if (copy_from_user(&req, (void __user *)arg, sizeof(req)))
		return -EFAULT;

	if (req.data_segment_bytes > max_user_dsr_bytes)
		return -EINVAL;
	if (req.control_blocks > max_user_cbrs || !req.maximum_thread_count)
		return -EINVAL;

	if (!(req.options & GRU_OPT_MISS_MASK))
		req.options |= GRU_OPT_MISS_FMM_INTR;

	down_write(&current->mm->mmap_sem);
	vma = gru_find_vma(req.gseg);
	if (vma) {
		vdata = vma->vm_private_data;
		vdata->vd_user_options = req.options;
		vdata->vd_dsr_au_count =
			GRU_DS_BYTES_TO_AU(req.data_segment_bytes);
		vdata->vd_cbr_au_count = GRU_CB_COUNT_TO_AU(req.control_blocks);
		ret = 0;
	}
	up_write(&current->mm->mmap_sem);

	return ret;
}
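
/*
 * Note: user programs reach gru_create_new_context() by first mmap()ing a
 * shared, writable, GRU_GSEG_PAGESIZE-aligned region of the GRU device
 * (see gru_file_mmap() above) and then issuing the GRU_CREATE_CONTEXT
 * ioctl with a gru_create_context_req whose gseg field refers to that
 * mapping.
 */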

/*
 * Get GRU configuration info (temp - for emulator testing)
 */
static long gru_get_config_info(unsigned long arg)
{
	struct gru_config_info info;
	int nodesperblade;

	if (num_online_nodes() > 1 &&
			(uv_node_to_blade_id(1) == uv_node_to_blade_id(0)))
		nodesperblade = 2;
	else
		nodesperblade = 1;
	info.cpus = num_online_cpus();
	info.nodes = num_online_nodes();
	info.blades = info.nodes / nodesperblade;
	info.chiplets = GRU_CHIPLETS_PER_BLADE * info.blades;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * Get GRU chiplet status
 */
static long gru_get_chiplet_status(unsigned long arg)
{
	struct gru_state *gru;
	struct gru_chiplet_info info;

	if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
		return -EFAULT;

	if (info.node == -1)
		info.node = numa_node_id();
	if (info.node >= num_possible_nodes() ||
			info.chiplet >= GRU_CHIPLETS_PER_HUB ||
			info.node < 0 || info.chiplet < 0)
		return -EINVAL;

	info.blade = uv_node_to_blade_id(info.node);
	gru = get_gru(info.blade, info.chiplet);

	info.total_dsr_bytes = GRU_NUM_DSR_BYTES;
	info.total_cbr = GRU_NUM_CB;
	info.total_user_dsr_bytes = GRU_NUM_DSR_BYTES -
		gru->gs_reserved_dsr_bytes;
	info.total_user_cbr = GRU_NUM_CB - gru->gs_reserved_cbrs;
	info.free_user_dsr_bytes = hweight64(gru->gs_dsr_map) *
			GRU_DSR_AU_BYTES;
	info.free_user_cbr = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;

	if (copy_to_user((void __user *)arg, &info, sizeof(info)))
		return -EFAULT;
	return 0;
}

/*
 * gru_file_unlocked_ioctl
 *
 * Called to update file attributes via IOCTL calls.
 */
static long gru_file_unlocked_ioctl(struct file *file, unsigned int req,
				    unsigned long arg)
{
	int err = -EBADRQC;

	gru_dbg(grudev, "file %p\n", file);

	switch (req) {
	case GRU_CREATE_CONTEXT:
		err = gru_create_new_context(arg);
		break;
	case GRU_SET_TASK_SLICE:
		err = gru_set_task_slice(arg);
		break;
	case GRU_USER_GET_EXCEPTION_DETAIL:
		err = gru_get_exception_detail(arg);
		break;
	case GRU_USER_UNLOAD_CONTEXT:
		err = gru_user_unload_context(arg);
		break;
	case GRU_GET_CHIPLET_STATUS:
		err = gru_get_chiplet_status(arg);
		break;
	case GRU_USER_FLUSH_TLB:
		err = gru_user_flush_tlb(arg);
		break;
	case GRU_USER_CALL_OS:
		err = gru_handle_user_call_os(arg);
		break;
	case GRU_KTEST:
		err = gru_ktest(arg);
		break;
	case GRU_GET_CONFIG_INFO:
		err = gru_get_config_info(arg);
		break;
	case GRU_DUMP_CHIPLET_STATE:
		err = gru_dump_chiplet_request(arg);
		break;
	}
	return err;
}

/*
 * Called at init time to build tables for all GRUs that are present in the
 * system.
 */
static void gru_init_chiplet(struct gru_state *gru, unsigned long paddr,
			     void *vaddr, int nid, int bid, int grunum)
{
	spin_lock_init(&gru->gs_lock);
	spin_lock_init(&gru->gs_asid_lock);
	gru->gs_gru_base_paddr = paddr;
	gru->gs_gru_base_vaddr = vaddr;
	gru->gs_gid = bid * GRU_CHIPLETS_PER_BLADE + grunum;
	gru->gs_blade = gru_base[bid];
	gru->gs_blade_id = bid;
	gru->gs_cbr_map = (GRU_CBR_AU == 64) ? ~0 : (1UL << GRU_CBR_AU) - 1;
	gru->gs_dsr_map = (1UL << GRU_DSR_AU) - 1;
	gru->gs_asid_limit = MAX_ASID;
	gru_tgh_flush_init(gru);
	if (gru->gs_gid >= gru_max_gids)
		gru_max_gids = gru->gs_gid + 1;
	gru_dbg(grudev, "bid %d, nid %d, gid %d, vaddr %p (0x%lx)\n",
		bid, nid, gru->gs_gid, gru->gs_gru_base_vaddr,
		gru->gs_gru_base_paddr);
}
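
/*
 * Allocate and initialize the per-blade gru_blade_state tables and the
 * gru_state structure for each chiplet on every online node.  Also computes
 * max_user_cbrs/max_user_dsr_bytes, the minimum CBR and DSR resources
 * guaranteed to be available to user contexts on any node.
 */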
static int gru_init_tables(unsigned long gru_base_paddr, void *gru_base_vaddr)
{
	int pnode, nid, bid, chip;
	int cbrs, dsrbytes, n;
	int order = get_order(sizeof(struct gru_blade_state));
	struct page *page;
	struct gru_state *gru;
	unsigned long paddr;
	void *vaddr;

	max_user_cbrs = GRU_NUM_CB;
	max_user_dsr_bytes = GRU_NUM_DSR_BYTES;
	for_each_online_node(nid) {
		bid = uv_node_to_blade_id(nid);
		pnode = uv_node_to_pnode(nid);
		if (bid < 0 || gru_base[bid])
			continue;
		page = alloc_pages_exact_node(nid, GFP_KERNEL, order);
		if (!page)
			goto fail;
		gru_base[bid] = page_address(page);
		memset(gru_base[bid], 0, sizeof(struct gru_blade_state));
		gru_base[bid]->bs_lru_gru = &gru_base[bid]->bs_grus[0];
		spin_lock_init(&gru_base[bid]->bs_lock);
		init_rwsem(&gru_base[bid]->bs_kgts_sema);

		dsrbytes = 0;
		cbrs = 0;
		for (gru = gru_base[bid]->bs_grus, chip = 0;
				chip < GRU_CHIPLETS_PER_BLADE;
				chip++, gru++) {
			paddr = gru_chiplet_paddr(gru_base_paddr, pnode, chip);
			vaddr = gru_chiplet_vaddr(gru_base_vaddr, pnode, chip);
			gru_init_chiplet(gru, paddr, vaddr, nid, bid, chip);
			n = hweight64(gru->gs_cbr_map) * GRU_CBR_AU_SIZE;
			cbrs = max(cbrs, n);
			n = hweight64(gru->gs_dsr_map) * GRU_DSR_AU_BYTES;
			dsrbytes = max(dsrbytes, n);
		}
		max_user_cbrs = min(max_user_cbrs, cbrs);
		max_user_dsr_bytes = min(max_user_dsr_bytes, dsrbytes);
	}

	return 0;

fail:
	for (nid--; nid >= 0; nid--)
		free_pages((unsigned long)gru_base[nid], order);
	return -ENOMEM;
}
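
/*
 * Architecture-specific setup for the GRU base IRQ.  On x86_64 a no-op
 * irq_chip is installed for the GRU vectors since interrupt handling is
 * not yet fully supported there (see the TODO in gru_init() below).
 */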
#ifdef CONFIG_IA64
static int get_base_irq(void)
{
	return IRQ_GRU;
}

#elif defined CONFIG_X86_64
static void noop(unsigned int irq)
{
}

static struct irq_chip gru_chip = {
	.name		= "gru",
	.mask		= noop,
	.unmask		= noop,
	.ack		= noop,
};

static int get_base_irq(void)
{
	set_irq_chip(IRQ_GRU, &gru_chip);
	set_irq_chip(IRQ_GRU + 1, &gru_chip);
	return IRQ_GRU;
}
#endif

/*
 * Called at boot or module load time to initialize the GRUs.
 */
static int __init gru_init(void)
{
	int ret, irq, chip;
	char id[10];

	if (!is_uv_system())
		return 0;

#if defined CONFIG_IA64
	gru_start_paddr = 0xd000000000UL; /* ZZZZZZZZZZZZZZZZZZZ fixme */
#else
	gru_start_paddr = uv_read_local_mmr(UVH_RH_GAM_GRU_OVERLAY_CONFIG_MMR) &
				0x7fffffffffffUL;
#endif
	gru_start_vaddr = __va(gru_start_paddr);
	gru_end_paddr = gru_start_paddr + GRU_MAX_BLADES * GRU_SIZE;
	printk(KERN_INFO "GRU space: 0x%lx - 0x%lx\n",
	       gru_start_paddr, gru_end_paddr);
	irq = get_base_irq();
	for (chip = 0; chip < GRU_CHIPLETS_PER_BLADE; chip++) {
		ret = request_irq(irq + chip, gru_intr, 0, id, NULL);
		/* TODO: fix irq handling on x86. For now ignore failure because
		 * interrupts are not required & not yet fully supported */
		if (ret) {
			printk(KERN_WARNING
			       "!!!WARNING: GRU ignoring request failure!!!\n");
			ret = 0;
		}
		if (ret) {
			printk(KERN_ERR "%s: request_irq failed\n",
			       GRU_DRIVER_ID_STR);
			goto exit1;
		}
	}

	ret = misc_register(&gru_miscdev);
	if (ret) {
		printk(KERN_ERR "%s: misc_register failed\n",
		       GRU_DRIVER_ID_STR);
		goto exit1;
	}

	ret = gru_proc_init();
	if (ret) {
		printk(KERN_ERR "%s: proc init failed\n", GRU_DRIVER_ID_STR);
		goto exit2;
	}

	ret = gru_init_tables(gru_start_paddr, gru_start_vaddr);
	if (ret) {
		printk(KERN_ERR "%s: init tables failed\n", GRU_DRIVER_ID_STR);
		goto exit3;
	}
	gru_kservices_init();

	printk(KERN_INFO "%s: v%s\n", GRU_DRIVER_ID_STR,
	       GRU_DRIVER_VERSION_STR);
	return 0;

exit3:
	gru_proc_exit();
exit2:
	misc_deregister(&gru_miscdev);
exit1:
	for (--chip; chip >= 0; chip--)
		free_irq(irq + chip, NULL);
	return ret;
}
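
/*
 * Called at module unload time.  Releases the GRU IRQs, kernel services,
 * per-blade tables and the misc device.
 */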
static void __exit gru_exit(void)
{
	int i, bid;
	int order = get_order(sizeof(struct gru_state) *
			      GRU_CHIPLETS_PER_BLADE);

	if (!is_uv_system())
		return;

	for (i = 0; i < GRU_CHIPLETS_PER_BLADE; i++)
		free_irq(IRQ_GRU + i, NULL);
	gru_kservices_exit();
	for (bid = 0; bid < GRU_MAX_BLADES; bid++)
		free_pages((unsigned long)gru_base[bid], order);

	misc_deregister(&gru_miscdev);
	gru_proc_exit();
}
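
/*
 * The GRU is exposed to user space as a misc character device; all user
 * access goes through these file operations and the vm operations installed
 * by gru_file_mmap().
 */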
static struct file_operations gru_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= gru_file_unlocked_ioctl,
	.mmap		= gru_file_mmap,
};

static struct miscdevice gru_miscdev = {
	.minor		= MISC_DYNAMIC_MINOR,
	.name		= "gru",
	.fops		= &gru_fops,
};

struct vm_operations_struct gru_vm_ops = {
	.close		= gru_vma_close,
	.fault		= gru_fault,
};

#ifndef MODULE
fs_initcall(gru_init);
#else
module_init(gru_init);
#endif
module_exit(gru_exit);

module_param(gru_options, ulong, 0644);
MODULE_PARM_DESC(gru_options, "Various debug options");

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(GRU_DRIVER_ID_STR GRU_DRIVER_VERSION_STR);
MODULE_VERSION(GRU_DRIVER_VERSION_STR);