/*
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ips_common.h"
#include "ipath_layer.h"

static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
                           loff_t *);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);

static struct file_operations ipath_file_ops = {
        .owner = THIS_MODULE,
        .write = ipath_write,
        .open = ipath_open,
        .release = ipath_close,
        .poll = ipath_poll,
        .mmap = ipath_mmap
};

static int ipath_get_base_info(struct ipath_portdata *pd,
                               void __user *ubase, size_t ubase_size)
{
        int ret = 0;
        struct ipath_base_info *kinfo = NULL;
        struct ipath_devdata *dd = pd->port_dd;

        if (ubase_size < sizeof(*kinfo)) {
                ipath_cdbg(PROC,
                           "Base size %lu, need %lu (version mismatch?)\n",
                           (unsigned long) ubase_size,
                           (unsigned long) sizeof(*kinfo));
                ret = -EINVAL;
                goto bail;
        }

        kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
        if (kinfo == NULL) {
                ret = -ENOMEM;
                goto bail;
        }

        ret = dd->ipath_f_get_base_info(pd, kinfo);
        if (ret < 0)
                goto bail;

        kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
        kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
        kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
        kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
        /*
         * have to mmap whole thing
         */
        kinfo->spi_rcv_egrbuftotlen =
                pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
        kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
        kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
                pd->port_rcvegrbuf_chunks;
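        /*
         * Worked example (illustrative numbers only): with 16 chunks of
         * 32KB each, spi_rcv_egrbuftotlen comes out as 512KB and
         * spi_rcv_egrchunksize as 32KB; user code mmaps the whole thing
         * and slices each chunk into spi_rcv_egrperchunk buffers.
         */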
        kinfo->spi_tidcnt = dd->ipath_rcvtidcnt;
        /*
         * for this use, may be ipath_cfgports summed over all chips that
         * are configured and present
         */
        kinfo->spi_nports = dd->ipath_cfgports;
        /* unit (chip/board) our port is on */
        kinfo->spi_unit = dd->ipath_unit;
        /* for now, only a single page */
        kinfo->spi_tid_maxsize = PAGE_SIZE;

        /*
         * Doing this per port, and based on the skip value, etc.  This has
         * to be the actual buffer size, since the protocol code treats it
         * as an array.
         *
         * These have to be set to user addresses in the user code via mmap.
         * These values are used on return to user code for the mmap target
         * addresses only.  For 32 bit, same 44 bit address problem, so use
         * the physical address, not virtual.  Before 2.6.11, using the
         * page_address() macro worked, but in 2.6.11, even that returns the
         * full 64 bit address (upper bits all 1's).  So far, using the
         * physical addresses (or chip offsets, for chip mapping) works, but
         * no doubt some future kernel release will change that, and we'll
         * be on to yet another method of dealing with this.
         */
        kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
        kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
        kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
        kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
                (void *) dd->ipath_statusp -
                (void *) dd->ipath_pioavailregs_dma;
        kinfo->spi_piobufbase = (u64) pd->port_piobufs;
        kinfo->__spi_uregbase =
                dd->ipath_uregbase + dd->ipath_palign * pd->port_port;

        kinfo->spi_pioindex = dd->ipath_pbufsport * (pd->port_port - 1);
        kinfo->spi_piocnt = dd->ipath_pbufsport;
        kinfo->spi_pioalign = dd->ipath_palign;

        kinfo->spi_qpair = IPATH_KD_QP;
        kinfo->spi_piosize = dd->ipath_ibmaxlen;
        kinfo->spi_mtu = dd->ipath_ibmaxlen;    /* maxlen, not ibmtu */
        kinfo->spi_port = pd->port_port;
        kinfo->spi_sw_version = IPATH_USER_SWVERSION;
        kinfo->spi_hw_version = dd->ipath_revision;

        if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
                ret = -EFAULT;

bail:
        kfree(kinfo);
        return ret;
}

/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd,
                            const struct ipath_tid_info *ti)
{
        int ret = 0, ntids;
        u32 tid, porttid, cnt, i, tidcnt;
        u16 *tidlist;
        struct ipath_devdata *dd = pd->port_dd;
        u64 physaddr;
        unsigned long vaddr;
        u64 __iomem *tidbase;
        unsigned long tidmap[8];
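        /*
         * tidmap tracks which TIDs this call touches: 8 unsigned longs
         * give 512 bits on 64-bit kernels (256 on 32-bit), and the
         * cleanup path below clamps its scan to tidcnt in case the map
         * can describe more TIDs than the chip actually has.
         */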
        struct page **pagep = NULL;

        if (!dd->ipath_pageshadow) {
                ret = -ENOMEM;
                goto done;
        }

        cnt = ti->tidcnt;
        if (!cnt) {
                ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
                          (unsigned long long) ti->tidlist);
                /*
                 * Should we treat this as success?  It's likely a bug
                 * in the caller.
                 */
                ret = -EFAULT;
                goto done;
        }
        tidcnt = dd->ipath_rcvtidcnt;
        if (cnt >= tidcnt) {
                /* make sure it all fits in port_tid_pg_list */
                dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
                         "TIDs, only trying max (%u)\n", cnt, tidcnt);
                cnt = tidcnt;
        }
        pagep = (struct page **)pd->port_tid_pg_list;
        tidlist = (u16 *) (&pagep[cnt]);

        memset(tidmap, 0, sizeof(tidmap));
        tid = pd->port_tidcursor;
        /* chip-relative index of this port's first TID entry */
        porttid = pd->port_port * tidcnt;
        ntids = tidcnt;
        tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
                                   dd->ipath_rcvtidbase +
                                   porttid * sizeof(*tidbase));
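        /*
         * tidbase now points at this port's slice of the chip's expected
         * TID array: kregbase + ipath_rcvtidbase is entry 0 for port 0,
         * and each port owns tidcnt consecutive 64-bit entries starting
         * at index porttid.
         */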

        ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
                   pd->port_port, cnt, tid, tidbase);

        /* virtual address of first page in transfer */
        vaddr = ti->tidvaddr;
        if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
                       cnt * PAGE_SIZE)) {
                ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
                          (void *)vaddr, cnt);
                ret = -EFAULT;
                goto done;
        }
        ret = ipath_get_user_pages(vaddr, cnt, pagep);
        if (ret) {
                if (ret == -EBUSY) {
                        ipath_dbg("Failed to lock addr %p, %u pages "
                                  "(already locked)\n",
                                  (void *) vaddr, cnt);
                        /*
                         * for now, continue, and see what happens; with
                         * the new implementation, this should never happen,
                         * unless perhaps the user has mpin'ed the pages
                         * themselves (something we need to test)
                         */
                        ret = 0;
                } else {
                        dev_info(&dd->pcidev->dev,
                                 "Failed to lock addr %p, %u pages: "
                                 "errno %d\n", (void *) vaddr, cnt, -ret);
                        goto done;
                }
        }
        for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
                for (; ntids--; tid++) {
                        if (tid == tidcnt)
                                tid = 0;
                        if (!dd->ipath_pageshadow[porttid + tid])
                                break;
                }
                if (ntids < 0) {
                        /*
                         * oops, wrapped all the way through their TIDs,
                         * and didn't have enough free; see comments at
                         * start of routine
                         */
                        ipath_dbg("Not enough free TIDs for %u pages "
                                  "(index %d), failing\n", cnt, i);
                        i--;    /* last tidlist[i] not filled in */
                        ret = -ENOMEM;
                        break;
                }
                tidlist[i] = tid;
                ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
                           "vaddr %lx\n", i, tid, vaddr);
                /* we "know" system pages and TID pages are same size */
                dd->ipath_pageshadow[porttid + tid] = pagep[i];
                /*
                 * don't need atomic or its overhead
                 */
                __set_bit(tid, tidmap);
                physaddr = page_to_phys(pagep[i]);
                ipath_stats.sps_pagelocks++;
                ipath_cdbg(VERBOSE,
                           "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
                           tid, vaddr, (unsigned long long) physaddr,
                           pagep[i]);
                dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
                /*
                 * don't check this tid in ipath_pageshadow, since we
                 * just filled it in; start with the next one.
                 */
                tid++;
        }

        if (ret) {
                u32 limit;
        cleanup:
                /* jump here if copy out of updated info failed... */
                ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
                          -ret, i, cnt);
                /* same code that's in ipath_tid_free() */
                limit = sizeof(tidmap) * BITS_PER_BYTE;
                if (limit > tidcnt)
                        /* just in case size changes in future */
                        limit = tidcnt;
                tid = find_first_bit((const unsigned long *)tidmap, limit);
                for (; tid < limit; tid++) {
                        if (!test_bit(tid, tidmap))
                                continue;
                        if (dd->ipath_pageshadow[porttid + tid]) {
                                ipath_cdbg(VERBOSE, "Freeing TID %u\n",
                                           tid);
                                dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
                                                    dd->ipath_tidinvalid);
                                dd->ipath_pageshadow[porttid + tid] = NULL;
                                ipath_stats.sps_pageunlocks++;
                        }
                }
                ipath_release_user_pages(pagep, cnt);
        } else {
                /*
                 * Copy the updated array, with ipath_tid's filled in, back
                 * to user.  Since we did the copy in already, this "should
                 * never fail".  If it does, we have to clean up...
                 */
                if (copy_to_user((void __user *)
                                 (unsigned long) ti->tidlist,
                                 tidlist, cnt * sizeof(*tidlist))) {
                        ret = -EFAULT;
                        goto cleanup;
                }
                if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
                                 tidmap, sizeof tidmap)) {
                        ret = -EFAULT;
                        goto cleanup;
                }
                if (tid == tidcnt)
                        tid = 0;
                pd->port_tidcursor = tid;
        }

done:
        if (ret)
                ipath_dbg("Failed to map %u TID pages, failing with %d\n",
                          ti->tidcnt, -ret);
        return ret;
}

/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @ti: the TID info
 *
 * Right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if the user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */

static int ipath_tid_free(struct ipath_portdata *pd,
                          const struct ipath_tid_info *ti)
{
        int ret = 0;
        u32 tid, porttid, cnt, limit, tidcnt;
        struct ipath_devdata *dd = pd->port_dd;
        u64 __iomem *tidbase;
        unsigned long tidmap[8];

        if (!dd->ipath_pageshadow) {
                ret = -ENOMEM;
                goto done;
        }

        if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
                           sizeof tidmap)) {
                ret = -EFAULT;
                goto done;
        }

        porttid = pd->port_port * dd->ipath_rcvtidcnt;
        tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
                                   dd->ipath_rcvtidbase +
                                   porttid * sizeof(*tidbase));

        tidcnt = dd->ipath_rcvtidcnt;
        limit = sizeof(tidmap) * BITS_PER_BYTE;
        if (limit > tidcnt)
                /* just in case size changes in future */
                limit = tidcnt;
        tid = find_first_bit(tidmap, limit);
        ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
                   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
                   limit, tid, porttid);
        for (cnt = 0; tid < limit; tid++) {
                /*
                 * small optimization; if we detect a run of 3 or so without
                 * any set, use find_first_bit again.  That's mainly to
                 * accelerate the case where we wrapped, so we have some at
                 * the beginning, and some at the end, and a big gap
                 * in the middle.
                 */
                if (!test_bit(tid, tidmap))
                        continue;
                cnt++;
                if (dd->ipath_pageshadow[porttid + tid]) {
                        ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
                                   pd->port_pid, tid);
                        dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
                                            dd->ipath_tidinvalid);
                        ipath_release_user_pages(
                                &dd->ipath_pageshadow[porttid + tid], 1);
                        dd->ipath_pageshadow[porttid + tid] = NULL;
                        ipath_stats.sps_pageunlocks++;
                } else
                        ipath_dbg("Unused tid %u, ignoring\n", tid);
        }
        if (cnt != ti->tidcnt)
                ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
                          ti->tidcnt, cnt);
done:
        if (ret)
                ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
                          ti->tidcnt, -ret);
        return ret;
}

/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time; we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and
 * when it becomes necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
        struct ipath_devdata *dd = pd->port_dd;
        int i, any = 0, pidx = -1;
        u16 lkey = key & 0x7FFF;
        int ret;

        if (lkey == (IPS_DEFAULT_P_KEY & 0x7FFF)) {
                /* nothing to do; this key always valid */
                ret = 0;
                goto bail;
        }

        ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
                   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
                   pd->port_port, key, dd->ipath_pkeys[0],
                   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
                   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
                   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
                   atomic_read(&dd->ipath_pkeyrefs[3]));

        if (!lkey) {
                ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
                           pd->port_port);
                ret = -EINVAL;
                goto bail;
        }

        /*
         * Set the full membership bit, because it has to be
         * set in the register or the packet, and it seems
         * cleaner to set in the register than to force all
         * callers to set it.  (see bug 4331)
         */
        key |= 0x8000;
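        /*
         * IB pkey layout, for reference: bits 14:0 carry the key proper
         * and bit 15 is the membership bit (1 = full member, 0 = limited
         * member), so e.g. a requested key of 0x1234 is stored and
         * written to the chip as 0x9234.
         */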

        for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
                if (!pd->port_pkeys[i] && pidx == -1)
                        pidx = i;
                if (pd->port_pkeys[i] == key) {
                        ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
                                   "(%x) more than once\n",
                                   pd->port_port, key);
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (pidx == -1) {
                ipath_dbg("All pkeys for port %u already in use, "
                          "can't set %x\n", pd->port_port, key);
                ret = -EBUSY;
                goto bail;
        }
        for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
                if (!dd->ipath_pkeys[i]) {
                        any++;
                        continue;
                }
                if (dd->ipath_pkeys[i] == key) {
                        atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

                        if (atomic_inc_return(pkrefs) > 1) {
                                pd->port_pkeys[pidx] = key;
                                ipath_cdbg(VERBOSE, "p%u set key %x "
                                           "matches #%d, count now %d\n",
                                           pd->port_port, key, i,
                                           atomic_read(pkrefs));
                                ret = 0;
                                goto bail;
                        } else {
                                /*
                                 * lost race, decrement count, catch below
                                 */
                                atomic_dec(pkrefs);
                                ipath_cdbg(VERBOSE, "Lost race, count was "
                                           "0, after dec, it's %d\n",
                                           atomic_read(pkrefs));
                                any++;
                        }
                }
                if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
                        /*
                         * It makes no sense to have both the limited and
                         * full membership PKEY set at the same time since
                         * the unlimited one will disable the limited one.
                         */
                        ret = -EEXIST;
                        goto bail;
                }
        }
        if (!any) {
                ipath_dbg("port %u, all pkeys already in use, "
                          "can't set %x\n", pd->port_port, key);
                ret = -EBUSY;
                goto bail;
        }
        for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
                if (!dd->ipath_pkeys[i] &&
                    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
                        u64 pkey;

                        /* for ipathstats, etc. */
                        ipath_stats.sps_pkeys[i] = lkey;
                        pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
                        pkey =
                                (u64) dd->ipath_pkeys[0] |
                                ((u64) dd->ipath_pkeys[1] << 16) |
                                ((u64) dd->ipath_pkeys[2] << 32) |
                                ((u64) dd->ipath_pkeys[3] << 48);
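                        /*
                         * The four 16-bit pkeys pack into the 64-bit
                         * partition key register lowest-index-first:
                         * bits 15:0 = pkeys[0], 31:16 = pkeys[1],
                         * 47:32 = pkeys[2], 63:48 = pkeys[3].
                         */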
                        ipath_cdbg(PROC, "p%u set key %x in #%d, "
                                   "portidx %d, new pkey reg %llx\n",
                                   pd->port_port, key, i, pidx,
                                   (unsigned long long) pkey);
                        ipath_write_kreg(
                                dd, dd->ipath_kregs->kr_partitionkey, pkey);

                        ret = 0;
                        goto bail;
                }
        }
        ipath_dbg("port %u, all pkeys already in use 2nd pass, "
                  "can't set %x\n", pd->port_port, key);
        ret = -EBUSY;

bail:
        return ret;
}

/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop)
{
        struct ipath_devdata *dd = pd->port_dd;
        u64 tval;

        ipath_cdbg(PROC, "%sabling rcv for unit %u port %u\n",
                   start_stop ? "en" : "dis", dd->ipath_unit,
                   pd->port_port);
        /* atomically set or clear receive enable for this port */
        if (start_stop) {
                /*
                 * On enable, force in-memory copy of the tail register to
                 * 0, so that protocol code doesn't have to worry about
                 * whether or not the chip has yet updated the in-memory
                 * copy or not on return from the system call.  The chip
                 * always resets its tail register back to 0 on a
                 * transition from disabled to enabled.  This could cause a
                 * problem if software was broken, and did the enable w/o
                 * the disable, but eventually the in-memory copy will be
                 * updated and correct itself, even in the face of software
                 * bugs.
                 */
                *pd->port_rcvhdrtail_kvaddr = 0;
                set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
                        &dd->ipath_rcvctrl);
        } else
                clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
                          &dd->ipath_rcvctrl);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);
        /* now be sure chip saw it before we return */
        tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
        if (start_stop) {
                /*
                 * And try to be sure that tail reg update has happened too.
                 * This should in theory interlock with the RXE changes to
                 * the tail register.  Don't assign it to the tail register
                 * in memory copy, since we could overwrite an update by the
                 * chip if we did.
                 */
                tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
        }
        /* always; new head should be equal to new tail; see above */
        return 0;
}

static void ipath_clean_part_key(struct ipath_portdata *pd,
                                 struct ipath_devdata *dd)
{
        int i, j, pchanged = 0;
        u64 oldpkey;

        /* for debugging only */
        oldpkey = (u64) dd->ipath_pkeys[0] |
                ((u64) dd->ipath_pkeys[1] << 16) |
                ((u64) dd->ipath_pkeys[2] << 32) |
                ((u64) dd->ipath_pkeys[3] << 48);

        for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
                if (!pd->port_pkeys[i])
                        continue;
                ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
                           pd->port_pkeys[i]);
                for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
                        /* check for match independent of the global bit */
                        if ((dd->ipath_pkeys[j] & 0x7fff) !=
                            (pd->port_pkeys[i] & 0x7fff))
                                continue;
                        if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
                                ipath_cdbg(VERBOSE, "p%u clear key "
                                           "%x matches #%d\n",
                                           pd->port_port,
                                           pd->port_pkeys[i], j);
                                ipath_stats.sps_pkeys[j] =
                                        dd->ipath_pkeys[j] = 0;
                                pchanged++;
                        } else
                                ipath_cdbg(VERBOSE, "p%u key %x matches "
                                           "#%d, but ref still %d\n",
                                           pd->port_port,
                                           pd->port_pkeys[i], j,
                                           atomic_read(&dd->ipath_pkeyrefs[j]));
                        break;
                }
                pd->port_pkeys[i] = 0;
        }
        if (pchanged) {
                u64 pkey = (u64) dd->ipath_pkeys[0] |
                        ((u64) dd->ipath_pkeys[1] << 16) |
                        ((u64) dd->ipath_pkeys[2] << 32) |
                        ((u64) dd->ipath_pkeys[3] << 48);
                ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
                           "new pkey reg %llx\n", pd->port_port,
                           (unsigned long long) oldpkey,
                           (unsigned long long) pkey);
                ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
                                 pkey);
        }
}

/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous; we do multiple
 * allocation calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
        struct ipath_devdata *dd = pd->port_dd;
        unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
        size_t size;
        int ret;

        egrcnt = dd->ipath_rcvegrcnt;
        /* TID number offset for this port */
        egroff = pd->port_port * egrcnt;
        egrsize = dd->ipath_rcvegrbufsize;
        ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
                   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

        /*
         * To avoid wasting a lot of memory, we allocate 32KB chunks of
         * physically contiguous memory, advance through it until used up
         * and then allocate more.  Of course, we need memory to store those
         * extra pointers, now.  Started out with 256KB, but under heavy
         * memory pressure (creating large files and then copying them over
         * NFS while doing lots of MPI jobs), we hit some allocation
         * failures, even though we can sleep...  (2.6.10) Still get
         * failures at 64K.  32K is the lowest we can go without wasting
         * additional memory.  It seems likely that the coalescing in
         * free_pages, etc. still has issues (as it has had previously
         * during 2.6.x development).
         */
        size = 0x8000;
        alloced = ALIGN(egrsize * egrcnt, size);
        egrperchunk = size / egrsize;
        chunk = (egrcnt + egrperchunk - 1) / egrperchunk;
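        /*
         * Example with illustrative numbers: an egrsize of 4KB and an
         * egrcnt of 512 give egrperchunk = 32KB / 4KB = 8 buffers per
         * chunk, and chunk = (512 + 7) / 8 = 64 chunks; the round-up
         * covers an egrcnt that isn't a multiple of egrperchunk.
         */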
        pd->port_rcvegrbuf_chunks = chunk;
        pd->port_rcvegrbufs_perchunk = egrperchunk;
        pd->port_rcvegrbuf_size = size;
        pd->port_rcvegrbuf = vmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]));
        if (!pd->port_rcvegrbuf) {
                ret = -ENOMEM;
                goto bail;
        }
        pd->port_rcvegrbuf_phys =
                vmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]));
        if (!pd->port_rcvegrbuf_phys) {
                ret = -ENOMEM;
                goto bail_rcvegrbuf;
        }
        for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
                /*
                 * GFP_USER, but without GFP_FS, so buffer cache can be
                 * coalesced (we hope); otherwise, even at order 4,
                 * heavy filesystem activity makes these fail
                 */
                gfp_t gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

                pd->port_rcvegrbuf[e] = dma_alloc_coherent(
                        &dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
                        gfp_flags);

                if (!pd->port_rcvegrbuf[e]) {
                        ret = -ENOMEM;
                        goto bail_rcvegrbuf_phys;
                }
        }

        pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];

        for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
                dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
                unsigned i;

                for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
                        dd->ipath_f_put_tid(dd, e + egroff +
                                            (u64 __iomem *)
                                            ((char __iomem *)
                                             dd->ipath_kregbase +
                                             dd->ipath_rcvegrbase), 0, pa);
                        pa += egrsize;
                }
                cond_resched(); /* don't hog the cpu */
        }

        ret = 0;
        goto bail;

bail_rcvegrbuf_phys:
        for (e = 0; e < pd->port_rcvegrbuf_chunks &&
                     pd->port_rcvegrbuf[e]; e++)
                dma_free_coherent(&dd->pcidev->dev, size,
                                  pd->port_rcvegrbuf[e],
                                  pd->port_rcvegrbuf_phys[e]);

        vfree(pd->port_rcvegrbuf_phys);
        pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
        vfree(pd->port_rcvegrbuf);
        pd->port_rcvegrbuf = NULL;
bail:
        return ret;
}

static int ipath_do_user_init(struct ipath_portdata *pd,
                              const struct ipath_user_info *uinfo)
{
        int ret = 0;
        struct ipath_devdata *dd = pd->port_dd;
        u64 physaddr, uaddr, off, atmp;
        struct page *pagep;
        u32 head32;
        u64 head;

        /* for now, if major version is different, bail */
        if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
                dev_info(&dd->pcidev->dev,
                         "User major version %d not same as driver "
                         "major %d\n", uinfo->spu_userversion >> 16,
                         IPATH_USER_SWMAJOR);
                ret = -ENODEV;
                goto done;
        }

        if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
                ipath_dbg("User minor version %d not same as driver "
                          "minor %d\n", uinfo->spu_userversion & 0xffff,
                          IPATH_USER_SWMINOR);

        if (uinfo->spu_rcvhdrsize) {
                ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
                if (ret)
                        goto done;
        }

        /* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */

        /* set up for the rcvhdr Q tail register writeback to user memory */
        if (!uinfo->spu_rcvhdraddr ||
            !access_ok(VERIFY_WRITE, (u64 __user *) (unsigned long)
                       uinfo->spu_rcvhdraddr, sizeof(u64))) {
                ipath_dbg("Port %d rcvhdrtail addr %llx not valid\n",
                          pd->port_port,
                          (unsigned long long) uinfo->spu_rcvhdraddr);
                ret = -EINVAL;
                goto done;
        }

        off = offset_in_page(uinfo->spu_rcvhdraddr);
        uaddr = PAGE_MASK & (unsigned long) uinfo->spu_rcvhdraddr;
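        /*
         * uaddr is now the page to pin and off the offset within that
         * page; the offset is re-applied to the kernel mapping of the
         * page (and to the physical address) below.
         */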
        ret = ipath_get_user_pages_nocopy(uaddr, &pagep);
        if (ret) {
                dev_info(&dd->pcidev->dev, "Failed to lookup and lock "
                         "address %llx for rcvhdrtail: errno %d\n",
                         (unsigned long long) uinfo->spu_rcvhdraddr, -ret);
                goto done;
        }
        ipath_stats.sps_pagelocks++;
        pd->port_rcvhdrtail_uaddr = uaddr;
        pd->port_rcvhdrtail_pagep = pagep;
        pd->port_rcvhdrtail_kvaddr =
                page_address(pagep);
        pd->port_rcvhdrtail_kvaddr += off;
        physaddr = page_to_phys(pagep) + off;
        ipath_cdbg(VERBOSE, "port %d user addr %llx hdrtailaddr, %llx "
                   "physical (off=%llx)\n",
                   pd->port_port,
                   (unsigned long long) uinfo->spu_rcvhdraddr,
                   (unsigned long long) physaddr, (unsigned long long) off);
        ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
                              pd->port_port, physaddr);
        atmp = ipath_read_kreg64_port(dd,
                                      dd->ipath_kregs->kr_rcvhdrtailaddr,
                                      pd->port_port);
        if (physaddr != atmp) {
                ipath_dev_err(dd,
                              "Catastrophic software error, "
                              "RcvHdrTailAddr%u written as %llx, "
                              "read back as %llx\n", pd->port_port,
                              (unsigned long long) physaddr,
                              (unsigned long long) atmp);
                ret = -EINVAL;
                goto done;
        }

        /* for right now, kernel piobufs are at end, so port 1 is at 0 */
        pd->port_piobufs = dd->ipath_piobufbase +
                dd->ipath_pbufsport * (pd->port_port -
                                       1) * dd->ipath_palign;
        ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
                   pd->port_port, pd->port_piobufs);

        /*
         * Now allocate the rcvhdr Q and eager TIDs; skip the TID
         * array for the time being.  If pd->port_port > chip-supported,
         * we will need extra logic here to handle overflow through
         * port 0, someday.
         */
        ret = ipath_create_rcvhdrq(dd, pd);
        if (!ret)
                ret = ipath_create_user_egr(pd);
        if (ret)
                goto done;
        /* enable receives now */
        /* atomically set enable bit for this port */
        set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
                &dd->ipath_rcvctrl);

        /*
         * set the head registers for this port to the current values
         * of the tail pointers, since we don't know if they were
         * updated on last use of the port.
         */
        head32 = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
        head = (u64) head32;
        ipath_write_ureg(dd, ur_rcvhdrhead, head, pd->port_port);
        head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
        ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
        dd->ipath_lastegrheads[pd->port_port] = -1;
        dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
        ipath_cdbg(VERBOSE, "Wrote port%d head %llx, egrhead %x from "
                   "tail regs\n", pd->port_port,
                   (unsigned long long) head, head32);
        pd->port_tidcursor = 0; /* start at beginning after open */
        /*
         * now enable the port; the tail registers will be written to memory
         * by the chip as soon as it sees the write to
         * dd->ipath_kregs->kr_rcvctrl.  The update only happens on
         * transition from 0 to 1, so clear it first, then set it as part of
         * enabling the port.  This will (very briefly) affect any other
         * open ports, but it shouldn't be long enough to be an issue.
         */
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);

done:
        return ret;
}

static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
                     u64 ureg)
{
        unsigned long phys;
        int ret;

        /* it's the real hardware, so io_remap works */

        if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
                dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
                         "%lx > PAGE\n", vma->vm_end - vma->vm_start);
                ret = -EFAULT;
        } else {
                phys = dd->ipath_physaddr + ureg;
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
                ret = io_remap_pfn_range(vma, vma->vm_start,
                                         phys >> PAGE_SHIFT,
                                         vma->vm_end - vma->vm_start,
                                         vma->vm_page_prot);
        }
        return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
                        struct ipath_devdata *dd,
                        struct ipath_portdata *pd)
{
        unsigned long phys;
        int ret;

        /*
         * When we map the PIO buffers, we want to map them as write-only,
         * no read possible.
         */

        if ((vma->vm_end - vma->vm_start) >
            (dd->ipath_pbufsport * dd->ipath_palign)) {
                dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
                         "reqlen %lx > port PIO size\n",
                         vma->vm_end - vma->vm_start);
                ret = -EFAULT;
                goto bail;
        }

        phys = dd->ipath_physaddr + pd->port_piobufs;
        /*
         * Do *NOT* mark this as non-cached (PWT bit), or we don't get the
         * write combining behavior we want on the PIO buffers!
         * vma->vm_page_prot =
         *        pgprot_noncached(vma->vm_page_prot);
         */

        if (vma->vm_flags & VM_READ) {
                dev_info(&dd->pcidev->dev,
                         "Can't map piobufs as readable (flags=%lx)\n",
                         vma->vm_flags);
                ret = -EPERM;
                goto bail;
        }

        /* don't allow them to later change to readable with mprotect */

        vma->vm_flags &= ~VM_MAYREAD;
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

        ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
                                 vma->vm_end - vma->vm_start,
                                 vma->vm_page_prot);
bail:
        return ret;
}

static int mmap_rcvegrbufs(struct vm_area_struct *vma,
                           struct ipath_portdata *pd)
{
        struct ipath_devdata *dd = pd->port_dd;
        unsigned long start, size;
        size_t total_size, i;
        dma_addr_t *phys;
        int ret;

        if (!pd->port_rcvegrbuf) {
                ret = -EFAULT;
                goto bail;
        }

        size = pd->port_rcvegrbuf_size;
        total_size = pd->port_rcvegrbuf_chunks * size;
        if ((vma->vm_end - vma->vm_start) > total_size) {
                dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
                         "reqlen %lx > actual %lx\n",
                         vma->vm_end - vma->vm_start,
                         (unsigned long) total_size);
                ret = -EFAULT;
                goto bail;
        }

        if (vma->vm_flags & VM_WRITE) {
                dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
                         "writable (flags=%lx)\n", vma->vm_flags);
                ret = -EPERM;
                goto bail;
        }

        start = vma->vm_start;
        phys = pd->port_rcvegrbuf_phys;

        /* don't allow them to later change to writable with mprotect */
        vma->vm_flags &= ~VM_MAYWRITE;

        for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
                ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
                                      size, vma->vm_page_prot);
                if (ret < 0)
                        goto bail;
        }
        ret = 0;

bail:
        return ret;
}

static int mmap_rcvhdrq(struct vm_area_struct *vma,
                        struct ipath_portdata *pd)
{
        struct ipath_devdata *dd = pd->port_dd;
        size_t total_size;
        int ret;

        /*
         * kmalloc'ed memory, physically contiguous; this is from
         * spi_rcvhdr_base; we allow user to map read-write so they can
         * write hdrq entries to allow protocol code to directly poll
         * whether a hdrq entry has been written.
         */
        total_size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
                           sizeof(u32), PAGE_SIZE);
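        /*
         * The queue is rcvhdrcnt entries of rcvhdrentsize 32-bit words
         * each, rounded up to a whole page, since remap_pfn_range() can
         * only map at page granularity.
         */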
        if ((vma->vm_end - vma->vm_start) > total_size) {
                dev_info(&dd->pcidev->dev,
                         "FAIL on rcvhdrq: reqlen %lx > actual %lx\n",
                         vma->vm_end - vma->vm_start,
                         (unsigned long) total_size);
                ret = -EFAULT;
                goto bail;
        }

        ret = remap_pfn_range(vma, vma->vm_start,
                              pd->port_rcvhdrq_phys >> PAGE_SHIFT,
                              vma->vm_end - vma->vm_start,
                              vma->vm_page_prot);
bail:
        return ret;
}

static int mmap_pioavailregs(struct vm_area_struct *vma,
                             struct ipath_portdata *pd)
{
        struct ipath_devdata *dd = pd->port_dd;
        int ret;

        /*
         * when we map the PIO bufferavail registers, we want to map them
         * as read-only, no write possible.
         *
         * kmalloc'ed memory, physically contiguous, one page only,
         * read-only
         */

        if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
                dev_info(&dd->pcidev->dev, "FAIL on pioavailregs_dma: "
                         "reqlen %lx > actual %lx\n",
                         vma->vm_end - vma->vm_start,
                         (unsigned long) PAGE_SIZE);
                ret = -EFAULT;
                goto bail;
        }

        if (vma->vm_flags & VM_WRITE) {
                dev_info(&dd->pcidev->dev,
                         "Can't map pioavailregs as writable (flags=%lx)\n",
                         vma->vm_flags);
                ret = -EPERM;
                goto bail;
        }

        /* don't allow them to later change with mprotect */
        vma->vm_flags &= ~VM_MAYWRITE;

        ret = remap_pfn_range(vma, vma->vm_start,
                              dd->ipath_pioavailregs_phys >> PAGE_SHIFT,
                              PAGE_SIZE, vma->vm_page_prot);
bail:
        return ret;
}

/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
        struct ipath_portdata *pd;
        struct ipath_devdata *dd;
        u64 pgaddr, ureg;
        int ret;

        pd = port_fp(fp);
        dd = pd->port_dd;
        /*
         * This is the ipath_do_user_init() code, mapping the shared buffers
         * into the user process.  The address referred to by vm_pgoff is the
         * virtual, not physical, address; we only do one mmap for each
         * space mapped.
         */
        pgaddr = vma->vm_pgoff << PAGE_SHIFT;

        /*
         * note that ureg does *NOT* have the kregvirt as part of it, to be
         * sure that for 32 bit programs, we don't end up trying to map a
         * > 44-bit address.  Has to match ipath_get_base_info() code that
         * sets __spi_uregbase
         */

        ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;

        ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
                   (unsigned long long) pgaddr, vma->vm_start,
                   vma->vm_end - vma->vm_start);

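        /*
         * The mmap offset doubles as a dispatch cookie: user code passes
         * back one of the physical addresses (or chip offsets) that
         * ipath_get_base_info() handed out, and we match it exactly
         * against the known spaces below.
         */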
        if (pgaddr == ureg)
                ret = mmap_ureg(vma, dd, ureg);
        else if (pgaddr == pd->port_piobufs)
                ret = mmap_piobufs(vma, dd, pd);
        else if (pgaddr == (u64) pd->port_rcvegr_phys)
                ret = mmap_rcvegrbufs(vma, pd);
        else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
                ret = mmap_rcvhdrq(vma, pd);
        else if (pgaddr == dd->ipath_pioavailregs_phys)
                ret = mmap_pioavailregs(vma, pd);
        else
                ret = -EINVAL;

        vma->vm_private_data = NULL;

        if (ret < 0)
                dev_info(&dd->pcidev->dev,
                         "Failure %d on addr %lx, off %lx\n",
                         -ret, vma->vm_start, vma->vm_pgoff);

        return ret;
}

static unsigned int ipath_poll(struct file *fp,
                               struct poll_table_struct *pt)
{
        struct ipath_portdata *pd;
        u32 head, tail;
        int bit;
        struct ipath_devdata *dd;

        pd = port_fp(fp);
        dd = pd->port_dd;

        bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
        set_bit(bit, &dd->ipath_rcvctrl);

        /*
         * Before blocking, make sure that head is still == tail,
         * reading from the chip, so we can be sure the interrupt
         * enable has made it to the chip.  If not equal, disable
         * interrupt again and return immediately.  This avoids races,
         * and the overhead of the chip read doesn't matter much at
         * this point, since we are waiting for something anyway.
         */

        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);

        head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
        tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

        if (tail == head) {
                set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
                poll_wait(fp, &pd->port_wait, pt);

                if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
                        /* timed out, no packets received */
                        clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
                        pd->port_rcvwait_to++;
                }
        } else {
                /* it's already happened; don't do wait_event overhead */
                pd->port_rcvnowait++;
        }

        clear_bit(bit, &dd->ipath_rcvctrl);
        ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
                         dd->ipath_rcvctrl);

        return 0;
}

static int try_alloc_port(struct ipath_devdata *dd, int port,
                          struct file *fp)
{
        int ret;

        if (!dd->ipath_pd[port]) {
                void *p, *ptmp;

                p = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

                /*
                 * Allocate memory for use in ipath_tid_update() just once
                 * at open, not per call.  Reduces cost of expected send
                 * setup.
                 */
                ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
                               dd->ipath_rcvtidcnt * sizeof(struct page **),
                               GFP_KERNEL);
                if (!p || !ptmp) {
                        ipath_dev_err(dd, "Unable to allocate portdata "
                                      "memory, failing open\n");
                        ret = -ENOMEM;
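                        /* kfree(NULL) is a no-op, so whichever of the two
                         * allocations failed, freeing both is safe */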
                        kfree(p);
                        kfree(ptmp);
                        goto bail;
                }
                dd->ipath_pd[port] = p;
                dd->ipath_pd[port]->port_port = port;
                dd->ipath_pd[port]->port_dd = dd;
                dd->ipath_pd[port]->port_tid_pg_list = ptmp;
                init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
        }
        if (!dd->ipath_pd[port]->port_cnt) {
                dd->ipath_pd[port]->port_cnt = 1;
                fp->private_data = (void *) dd->ipath_pd[port];
                ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
                           current->comm, current->pid, dd->ipath_unit,
                           port);
                dd->ipath_pd[port]->port_pid = current->pid;
                strncpy(dd->ipath_pd[port]->port_comm, current->comm,
                        sizeof(dd->ipath_pd[port]->port_comm));
                ipath_stats.sps_ports++;
                ret = 0;
                goto bail;
        }
        ret = -EBUSY;

bail:
        return ret;
}

static inline int usable(struct ipath_devdata *dd)
{
        return dd &&
                (dd->ipath_flags & IPATH_PRESENT) &&
                dd->ipath_kregbase &&
                dd->ipath_lid &&
                !(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
                                     | IPATH_LINKUNK));
}

static int find_free_port(int unit, struct file *fp)
{
        struct ipath_devdata *dd = ipath_lookup(unit);
        int ret, i;

        if (!dd) {
                ret = -ENODEV;
                goto bail;
        }

        if (!usable(dd)) {
                ret = -ENETDOWN;
                goto bail;
        }

        for (i = 0; i < dd->ipath_cfgports; i++) {
                ret = try_alloc_port(dd, i, fp);
                if (ret != -EBUSY)
                        goto bail;
        }
        ret = -EBUSY;

bail:
        return ret;
}

static int find_best_unit(struct file *fp)
{
        int ret = 0, i, prefunit = -1, devmax;
        int maxofallports, npresent, nup;
        int ndev;

        (void) ipath_count_units(&npresent, &nup, &maxofallports);

        /*
         * This code is present to allow a knowledgeable person to
         * specify the layout of processes to processors before opening
         * this driver, and then we'll assign the process to the "closest"
         * HT-400 to that processor (we assume reasonable connectivity,
         * for now).  This code assumes that if affinity has been set
         * before this point, at most one cpu is set; for now this
         * is reasonable.  I check for both cpus_empty() and cpus_full(),
         * in case some kernel variant sets none of the bits when no
         * affinity is set.  2.6.11 and 12 kernels have all present
         * cpus set.  Some day we'll have to fix it up further to handle
         * a cpu subset.  This algorithm fails for two HT-400's connected
         * in tunnel fashion.  Eventually this needs real topology
         * information.  There may be some issues with dual core numbering
         * as well.  This needs more work prior to release.
         */
        if (!cpus_empty(current->cpus_allowed) &&
            !cpus_full(current->cpus_allowed)) {
                int ncpus = num_online_cpus(), curcpu = -1;
                for (i = 0; i < ncpus; i++)
                        if (cpu_isset(i, current->cpus_allowed)) {
                                ipath_cdbg(PROC, "%s[%u] affinity set for "
                                           "cpu %d\n", current->comm,
                                           current->pid, i);
                                curcpu = i;
                        }
                if (curcpu != -1) {
                        if (npresent) {
                                prefunit = curcpu / (ncpus / npresent);
                                ipath_dbg("%s[%u] %d chips, %d cpus, "
                                          "%d cpus/chip, select unit %d\n",
                                          current->comm, current->pid,
                                          npresent, ncpus, ncpus / npresent,
                                          prefunit);
                        }
                }
        }
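        /*
         * Illustrative example of the mapping above: with 8 online cpus
         * and 2 chips present, prefunit = curcpu / 4, so a process pinned
         * to cpus 0-3 prefers unit 0 and one pinned to cpus 4-7 prefers
         * unit 1.
         */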

        /*
         * user ports start at 1, kernel port is 0
         * For now, we do round-robin access across all chips
         */

        if (prefunit != -1)
                devmax = prefunit + 1;
        else
                devmax = ipath_count_units(NULL, NULL, NULL);
recheck:
        for (i = 1; i < maxofallports; i++) {
                for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
                     ndev++) {
                        struct ipath_devdata *dd = ipath_lookup(ndev);

                        if (!usable(dd))
                                continue; /* can't use this unit */
                        if (i >= dd->ipath_cfgports)
                                /*
                                 * Maxed out on users of this unit.  Try
                                 * next.
                                 */
                                continue;
                        ret = try_alloc_port(dd, i, fp);
                        if (!ret)
                                goto done;
                }
        }

        if (npresent) {
                if (nup == 0) {
                        ret = -ENETDOWN;
                        ipath_dbg("No ports available (none initialized "
                                  "and ready)\n");
                } else {
                        if (prefunit > 0) {
                                /* if started above 0, retry from 0 */
                                ipath_cdbg(PROC,
                                           "%s[%u] no ports on prefunit "
                                           "%d, clear and re-check\n",
                                           current->comm, current->pid,
                                           prefunit);
                                devmax = ipath_count_units(NULL, NULL,
                                                           NULL);
                                prefunit = -1;
                                goto recheck;
                        }
                        ret = -EBUSY;
                        ipath_dbg("No ports available\n");
                }
        } else {
                ret = -ENXIO;
                ipath_dbg("No boards found\n");
        }

done:
        return ret;
}

static int ipath_open(struct inode *in, struct file *fp)
{
        int ret, minor;

        mutex_lock(&ipath_mutex);

        minor = iminor(in);
        ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
                   (long)in->i_rdev, minor);

        if (minor)
                ret = find_free_port(minor - 1, fp);
        else
                ret = find_best_unit(fp);

        mutex_unlock(&ipath_mutex);
        return ret;
}

/**
 * unlock_expected_tids - unlock any expected TID entries port still had in use
 * @pd: port
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
        struct ipath_devdata *dd = pd->port_dd;
        int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
        int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

        ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
                   pd->port_port);
        for (i = port_tidbase; i < maxtid; i++) {
                if (!dd->ipath_pageshadow[i])
                        continue;

                ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
                                                  1);
                dd->ipath_pageshadow[i] = NULL;
                cnt++;
                ipath_stats.sps_pageunlocks++;
        }
        if (cnt)
                ipath_cdbg(VERBOSE, "Port %u had %u locked expTID entries\n",
                           pd->port_port, cnt);

        if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
                ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
                           (unsigned long long) ipath_stats.sps_pagelocks,
                           (unsigned long long)
                           ipath_stats.sps_pageunlocks);
}
1493 | ||
1494 | static int ipath_close(struct inode *in, struct file *fp) | |
1495 | { | |
1496 | int ret = 0; | |
1497 | struct ipath_portdata *pd; | |
1498 | struct ipath_devdata *dd; | |
1499 | unsigned port; | |
1500 | ||
1501 | ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n", | |
1502 | (long)in->i_rdev, fp->private_data); | |
1503 | ||
1504 | mutex_lock(&ipath_mutex); | |
1505 | ||
1506 | pd = port_fp(fp); | |
1507 | port = pd->port_port; | |
1508 | fp->private_data = NULL; | |
1509 | dd = pd->port_dd; | |
1510 | ||
1511 | if (pd->port_hdrqfull) { | |
1512 | ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors " | |
1513 | "during run\n", pd->port_comm, pd->port_pid, | |
1514 | pd->port_hdrqfull); | |
1515 | pd->port_hdrqfull = 0; | |
1516 | } | |
1517 | ||
1518 | if (pd->port_rcvwait_to || pd->port_piowait_to | |
1519 | || pd->port_rcvnowait || pd->port_pionowait) { | |
		ipath_cdbg(VERBOSE, "port%u: %u rcv, %u pio wait timeouts; "
			   "%u rcv, %u pio satisfied without waiting\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
1525 | pd->port_rcvwait_to = pd->port_piowait_to = | |
1526 | pd->port_rcvnowait = pd->port_pionowait = 0; | |
1527 | } | |
1528 | if (pd->port_flag) { | |
1529 | ipath_dbg("port %u port_flag still set to 0x%lx\n", | |
1530 | pd->port_port, pd->port_flag); | |
1531 | pd->port_flag = 0; | |
1532 | } | |
1533 | ||
1534 | if (dd->ipath_kregbase) { | |
1535 | if (pd->port_rcvhdrtail_uaddr) { | |
1536 | pd->port_rcvhdrtail_uaddr = 0; | |
1537 | pd->port_rcvhdrtail_kvaddr = NULL; | |
1538 | ipath_release_user_pages_on_close( | |
1539 | &pd->port_rcvhdrtail_pagep, 1); | |
1540 | pd->port_rcvhdrtail_pagep = NULL; | |
1541 | ipath_stats.sps_pageunlocks++; | |
1542 | } | |
		ipath_write_kreg_port(
			dd, dd->ipath_kregs->kr_rcvhdrtailaddr,
			port, 0ULL);
		ipath_write_kreg_port(
			dd, dd->ipath_kregs->kr_rcvhdraddr,
			port, 0ULL);
1549 | ||
1550 | /* clean up the pkeys for this port user */ | |
1551 | ipath_clean_part_key(pd, dd); | |
1552 | ||
1553 | if (port < dd->ipath_cfgports) { | |
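			/* user ports are numbered from 1; each port's
			 * PIO buffers form a contiguous pbufsport-sized
			 * group */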
1554 | int i = dd->ipath_pbufsport * (port - 1); | |
1555 | ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport); | |
1556 | ||
1557 | /* atomically clear receive enable port. */ | |
1558 | clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port, | |
1559 | &dd->ipath_rcvctrl); | |
1560 | ipath_write_kreg( | |
1561 | dd, | |
1562 | dd->ipath_kregs->kr_rcvctrl, | |
1563 | dd->ipath_rcvctrl); | |
1564 | ||
1565 | if (dd->ipath_pageshadow) | |
1566 | unlock_expected_tids(pd); | |
1567 | ipath_stats.sps_ports--; | |
1568 | ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n", | |
1569 | pd->port_comm, pd->port_pid, | |
1570 | dd->ipath_unit, port); | |
1571 | } | |
1572 | } | |
1573 | ||
1574 | pd->port_cnt = 0; | |
1575 | pd->port_pid = 0; | |
1576 | ||
1577 | dd->ipath_f_clear_tids(dd, pd->port_port); | |
1578 | ||
1579 | ipath_free_pddata(dd, pd->port_port, 0); | |
1580 | ||
1581 | mutex_unlock(&ipath_mutex); | |
1582 | ||
1583 | return ret; | |
1584 | } | |
1585 | ||
1586 | static int ipath_port_info(struct ipath_portdata *pd, | |
1587 | struct ipath_port_info __user *uinfo) | |
1588 | { | |
1589 | struct ipath_port_info info; | |
1590 | int nup; | |
1591 | int ret; | |
1592 | ||
	/* zero the whole struct so padding can't leak kernel stack */
	memset(&info, 0, sizeof(info));
	(void) ipath_count_units(NULL, &nup, NULL);
1594 | info.num_active = nup; | |
1595 | info.unit = pd->port_dd->ipath_unit; | |
1596 | info.port = pd->port_port; | |
1597 | ||
1598 | if (copy_to_user(uinfo, &info, sizeof(info))) { | |
1599 | ret = -EFAULT; | |
1600 | goto bail; | |
1601 | } | |
1602 | ret = 0; | |
1603 | ||
1604 | bail: | |
1605 | return ret; | |
1606 | } | |
1607 | ||
1608 | static ssize_t ipath_write(struct file *fp, const char __user *data, | |
1609 | size_t count, loff_t *off) | |
1610 | { | |
1611 | const struct ipath_cmd __user *ucmd; | |
1612 | struct ipath_portdata *pd; | |
1613 | const void __user *src; | |
1614 | size_t consumed, copy; | |
1615 | struct ipath_cmd cmd; | |
1616 | ssize_t ret = 0; | |
1617 | void *dest; | |
1618 | ||
1619 | if (count < sizeof(cmd.type)) { | |
1620 | ret = -EINVAL; | |
1621 | goto bail; | |
1622 | } | |
1623 | ||
1624 | ucmd = (const struct ipath_cmd __user *) data; | |
1625 | ||
1626 | if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) { | |
1627 | ret = -EFAULT; | |
1628 | goto bail; | |
1629 | } | |
1630 | ||
1631 | consumed = sizeof(cmd.type); | |
1632 | ||
1633 | switch (cmd.type) { | |
1634 | case IPATH_CMD_USER_INIT: | |
1635 | copy = sizeof(cmd.cmd.user_info); | |
1636 | dest = &cmd.cmd.user_info; | |
1637 | src = &ucmd->cmd.user_info; | |
1638 | break; | |
1639 | case IPATH_CMD_RECV_CTRL: | |
1640 | copy = sizeof(cmd.cmd.recv_ctrl); | |
1641 | dest = &cmd.cmd.recv_ctrl; | |
1642 | src = &ucmd->cmd.recv_ctrl; | |
1643 | break; | |
1644 | case IPATH_CMD_PORT_INFO: | |
1645 | copy = sizeof(cmd.cmd.port_info); | |
1646 | dest = &cmd.cmd.port_info; | |
1647 | src = &ucmd->cmd.port_info; | |
1648 | break; | |
1649 | case IPATH_CMD_TID_UPDATE: | |
1650 | case IPATH_CMD_TID_FREE: | |
1651 | copy = sizeof(cmd.cmd.tid_info); | |
1652 | dest = &cmd.cmd.tid_info; | |
1653 | src = &ucmd->cmd.tid_info; | |
1654 | break; | |
1655 | case IPATH_CMD_SET_PART_KEY: | |
1656 | copy = sizeof(cmd.cmd.part_key); | |
1657 | dest = &cmd.cmd.part_key; | |
1658 | src = &ucmd->cmd.part_key; | |
1659 | break; | |
1660 | default: | |
1661 | ret = -EINVAL; | |
1662 | goto bail; | |
1663 | } | |
1664 | ||
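	/*
	 * Commands are variable-length: the caller need only supply the
	 * type field plus the bytes of the selected union member.
	 */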
1665 | if ((count - consumed) < copy) { | |
1666 | ret = -EINVAL; | |
1667 | goto bail; | |
1668 | } | |
1669 | ||
1670 | if (copy_from_user(dest, src, copy)) { | |
1671 | ret = -EFAULT; | |
1672 | goto bail; | |
1673 | } | |
1674 | ||
1675 | consumed += copy; | |
1676 | pd = port_fp(fp); | |
1677 | ||
1678 | switch (cmd.type) { | |
1679 | case IPATH_CMD_USER_INIT: | |
1680 | ret = ipath_do_user_init(pd, &cmd.cmd.user_info); | |
1681 | if (ret < 0) | |
1682 | goto bail; | |
1683 | ret = ipath_get_base_info( | |
1684 | pd, (void __user *) (unsigned long) | |
1685 | cmd.cmd.user_info.spu_base_info, | |
1686 | cmd.cmd.user_info.spu_base_info_size); | |
1687 | break; | |
1688 | case IPATH_CMD_RECV_CTRL: | |
1689 | ret = ipath_manage_rcvq(pd, cmd.cmd.recv_ctrl); | |
1690 | break; | |
1691 | case IPATH_CMD_PORT_INFO: | |
1692 | ret = ipath_port_info(pd, | |
1693 | (struct ipath_port_info __user *) | |
1694 | (unsigned long) cmd.cmd.port_info); | |
1695 | break; | |
1696 | case IPATH_CMD_TID_UPDATE: | |
1697 | ret = ipath_tid_update(pd, &cmd.cmd.tid_info); | |
1698 | break; | |
1699 | case IPATH_CMD_TID_FREE: | |
1700 | ret = ipath_tid_free(pd, &cmd.cmd.tid_info); | |
1701 | break; | |
1702 | case IPATH_CMD_SET_PART_KEY: | |
1703 | ret = ipath_set_part_key(pd, cmd.cmd.part_key); | |
1704 | break; | |
1705 | } | |
1706 | ||
1707 | if (ret >= 0) | |
1708 | ret = consumed; | |
1709 | ||
1710 | bail: | |
1711 | return ret; | |
1712 | } | |
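
/*
 * A sketch of how userspace drives the write()-based command ABI above
 * (illustrative only; names such as `fd' and `info' are assumptions,
 * not code from this driver).  A command is a struct ipath_cmd: the
 * type field first, then the matching member of the payload union;
 * write() returns the number of bytes consumed on success, or a
 * negative errno.
 *
 *	struct ipath_port_info info;
 *	struct ipath_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.type = IPATH_CMD_PORT_INFO;
 *	cmd.cmd.port_info = (__u64) (unsigned long) &info;
 *	if (write(fd, &cmd, sizeof(cmd)) < 0)
 *		err(1, "IPATH_CMD_PORT_INFO");
 *
 * User pointers travel as 64-bit integers (cmd.cmd.port_info here),
 * which is why ipath_write() casts them back through (unsigned long).
 * Supplying fewer bytes than the type plus the selected payload gets
 * -EINVAL.
 */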
1713 | ||
1714 | static struct class *ipath_class; | |
1715 | ||
1716 | static int init_cdev(int minor, char *name, struct file_operations *fops, | |
1717 | struct cdev **cdevp, struct class_device **class_devp) | |
1718 | { | |
1719 | const dev_t dev = MKDEV(IPATH_MAJOR, minor); | |
1720 | struct cdev *cdev = NULL; | |
1721 | struct class_device *class_dev = NULL; | |
1722 | int ret; | |
1723 | ||
1724 | cdev = cdev_alloc(); | |
1725 | if (!cdev) { | |
1726 | printk(KERN_ERR IPATH_DRV_NAME | |
1727 | ": Could not allocate cdev for minor %d, %s\n", | |
1728 | minor, name); | |
1729 | ret = -ENOMEM; | |
1730 | goto done; | |
1731 | } | |
1732 | ||
1733 | cdev->owner = THIS_MODULE; | |
1734 | cdev->ops = fops; | |
1735 | kobject_set_name(&cdev->kobj, name); | |
1736 | ||
1737 | ret = cdev_add(cdev, dev, 1); | |
1738 | if (ret < 0) { | |
1739 | printk(KERN_ERR IPATH_DRV_NAME | |
1740 | ": Could not add cdev for minor %d, %s (err %d)\n", | |
1741 | minor, name, -ret); | |
1742 | goto err_cdev; | |
1743 | } | |
1744 | ||
1745 | class_dev = class_device_create(ipath_class, NULL, dev, NULL, name); | |
1746 | ||
1747 | if (IS_ERR(class_dev)) { | |
1748 | ret = PTR_ERR(class_dev); | |
1749 | printk(KERN_ERR IPATH_DRV_NAME ": Could not create " | |
1750 | "class_dev for minor %d, %s (err %d)\n", | |
1751 | minor, name, -ret); | |
1752 | goto err_cdev; | |
1753 | } | |
1754 | ||
1755 | goto done; | |
1756 | ||
1757 | err_cdev: | |
1758 | cdev_del(cdev); | |
1759 | cdev = NULL; | |
1760 | ||
1761 | done: | |
1762 | if (ret >= 0) { | |
1763 | *cdevp = cdev; | |
1764 | *class_devp = class_dev; | |
1765 | } else { | |
1766 | *cdevp = NULL; | |
1767 | *class_devp = NULL; | |
1768 | } | |
1769 | ||
1770 | return ret; | |
1771 | } | |
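
/*
 * init_cdev() is the usual three-step dynamic char-device bring-up:
 * cdev_alloc()/cdev_add() hook the fops into one (major, minor) slot of
 * the region reserved by user_init() below, and class_device_create()
 * publishes the node so udev (if running) can create /dev/<name>.  For
 * example, the wildcard node is created from ipath_user_add() as:
 *
 *	ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
 *			&wildcard_class_dev);
 *
 * i.e. "ipath" at MKDEV(IPATH_MAJOR, 0).
 */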
1772 | ||
1773 | int ipath_cdev_init(int minor, char *name, struct file_operations *fops, | |
1774 | struct cdev **cdevp, struct class_device **class_devp) | |
1775 | { | |
1776 | return init_cdev(minor, name, fops, cdevp, class_devp); | |
1777 | } | |
1778 | ||
1779 | static void cleanup_cdev(struct cdev **cdevp, | |
1780 | struct class_device **class_devp) | |
1781 | { | |
1782 | struct class_device *class_dev = *class_devp; | |
1783 | ||
1784 | if (class_dev) { | |
1785 | class_device_unregister(class_dev); | |
1786 | *class_devp = NULL; | |
1787 | } | |
1788 | ||
1789 | if (*cdevp) { | |
1790 | cdev_del(*cdevp); | |
1791 | *cdevp = NULL; | |
1792 | } | |
1793 | } | |
1794 | ||
1795 | void ipath_cdev_cleanup(struct cdev **cdevp, | |
1796 | struct class_device **class_devp) | |
1797 | { | |
1798 | cleanup_cdev(cdevp, class_devp); | |
1799 | } | |
1800 | ||
1801 | static struct cdev *wildcard_cdev; | |
1802 | static struct class_device *wildcard_class_dev; | |
1803 | ||
1804 | static const dev_t dev = MKDEV(IPATH_MAJOR, 0); | |
1805 | ||
1806 | static int user_init(void) | |
1807 | { | |
1808 | int ret; | |
1809 | ||
1810 | ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME); | |
1811 | if (ret < 0) { | |
1812 | printk(KERN_ERR IPATH_DRV_NAME ": Could not register " | |
1813 | "chrdev region (err %d)\n", -ret); | |
1814 | goto done; | |
1815 | } | |
1816 | ||
1817 | ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME); | |
1818 | ||
1819 | if (IS_ERR(ipath_class)) { | |
1820 | ret = PTR_ERR(ipath_class); | |
1821 | printk(KERN_ERR IPATH_DRV_NAME ": Could not create " | |
1822 | "device class (err %d)\n", -ret); | |
1823 | goto bail; | |
1824 | } | |
1825 | ||
1826 | goto done; | |
1827 | bail: | |
1828 | unregister_chrdev_region(dev, IPATH_NMINORS); | |
1829 | done: | |
1830 | return ret; | |
1831 | } | |
1832 | ||
1833 | static void user_cleanup(void) | |
1834 | { | |
1835 | if (ipath_class) { | |
1836 | class_destroy(ipath_class); | |
1837 | ipath_class = NULL; | |
1838 | } | |
1839 | ||
1840 | unregister_chrdev_region(dev, IPATH_NMINORS); | |
1841 | } | |
1842 | ||
1843 | static atomic_t user_count = ATOMIC_INIT(0); | |
1844 | static atomic_t user_setup = ATOMIC_INIT(0); | |
1845 | ||
1846 | int ipath_user_add(struct ipath_devdata *dd) | |
1847 | { | |
1848 | char name[10]; | |
1849 | int ret; | |
1850 | ||
1851 | if (atomic_inc_return(&user_count) == 1) { | |
1852 | ret = user_init(); | |
1853 | if (ret < 0) { | |
1854 | ipath_dev_err(dd, "Unable to set up user support: " | |
1855 | "error %d\n", -ret); | |
1856 | goto bail; | |
1857 | } | |
1858 | ret = ipath_diag_init(); | |
1859 | if (ret < 0) { | |
1860 | ipath_dev_err(dd, "Unable to set up diag support: " | |
1861 | "error %d\n", -ret); | |
			goto bail_user;
1863 | } | |
1864 | ||
1865 | ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev, | |
1866 | &wildcard_class_dev); | |
1867 | if (ret < 0) { | |
1868 | ipath_dev_err(dd, "Could not create wildcard " | |
1869 | "minor: error %d\n", -ret); | |
1870 | goto bail_diag; | |
1871 | } | |
1872 | ||
1873 | atomic_set(&user_setup, 1); | |
1874 | } | |
1875 | ||
1876 | snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit); | |
1877 | ||
1878 | ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops, | |
1879 | &dd->cdev, &dd->class_dev); | |
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s "
			      "(err %d)\n", dd->ipath_unit + 1, name, -ret);
1883 | ||
1884 | goto bail; | |
1885 | ||
1886 | bail_diag: | |
1887 | ipath_diag_cleanup(); | |
bail_user:
	user_cleanup();
1890 | bail: | |
1891 | return ret; | |
1892 | } | |
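
/*
 * ipath_user_add()/ipath_user_del() refcount the shared userspace
 * support: the first device to arrive performs the one-time setup
 * (chrdev region, class, diag support, wildcard minor) and sets
 * user_setup; the last device to leave tears it all down again.  The
 * user_setup flag keeps a final del from tearing down state that a
 * failed first add never finished creating.
 */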
1893 | ||
1894 | void ipath_user_del(struct ipath_devdata *dd) | |
1895 | { | |
1896 | cleanup_cdev(&dd->cdev, &dd->class_dev); | |
1897 | ||
1898 | if (atomic_dec_return(&user_count) == 0) { | |
1899 | if (atomic_read(&user_setup) == 0) | |
1900 | goto bail; | |
1901 | ||
1902 | cleanup_cdev(&wildcard_cdev, &wildcard_class_dev); | |
1903 | ipath_diag_cleanup(); | |
1904 | user_cleanup(); | |
1905 | ||
1906 | atomic_set(&user_setup, 0); | |
1907 | } | |
1908 | bail: | |
1909 | return; | |
1910 | } |