IB/ipath: fix handling of kpiobufs
drivers/infiniband/hw/ipath/ipath_file_ops.c
/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ipath_layer.h"
#include "ipath_common.h"

static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
			   loff_t *);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);

static struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap
};

static int ipath_get_base_info(struct ipath_portdata *pd,
			       void __user *ubase, size_t ubase_size)
{
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;

	if (ubase_size < sizeof(*kinfo)) {
		ipath_cdbg(PROC,
			   "Base size %lu, need %lu (version mismatch?)\n",
			   (unsigned long) ubase_size,
			   (unsigned long) sizeof(*kinfo));
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
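	/*
	 * Note: ipath_statusp points into the same coherent page as the
	 * pioavail shadow copy, so the pointer subtraction above is its
	 * byte offset within that page; adding it to the physical address
	 * gives the status word's address as user space will see it.
	 */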
	kinfo->spi_piobufbase = (u64) pd->port_piobufs;
	kinfo->__spi_uregbase =
		dd->ipath_uregbase + dd->ipath_palign * pd->port_port;

	kinfo->spi_pioindex = dd->ipath_pbufsport * (pd->port_port - 1);
	kinfo->spi_piocnt = dd->ipath_pbufsport;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	kinfo->spi_piosize = dd->ipath_ibmaxlen;
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}

/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.  To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat this as success?  It is likely a bug
		 * in the caller.
		 */
		ret = -EFAULT;
		goto done;
	}
	tidcnt = dd->ipath_rcvtidcnt;
	if (cnt >= tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = (struct page **)pd->port_tid_pg_list;
	tidlist = (u16 *) (&pagep[cnt]);

	memset(tidmap, 0, sizeof(tidmap));
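	/*
	 * Note: tidmap has one bit per TID this request may touch; eight
	 * unsigned longs is 512 bits on 64-bit kernels (256 on 32-bit),
	 * and the "limit" checks below cap the scan at tidcnt in case
	 * the chip's TID count ever exceeds the map size.
	 */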
	tid = pd->port_tidcursor;
	/* before decrement; chip actual # */
	porttid = pd->port_port * tidcnt;
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens, but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			break;
		}
		tidlist[i] = tid;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid, vaddr);
		/* we "know" system pages and TID pages are the same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		/*
		 * don't need atomic or its overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = page_to_phys(pagep[i]);
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid], 1, physaddr);
		/*
		 * don't check this tid in ipath_pageshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
						    dd->ipath_tidinvalid);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail".  If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		pd->port_tidcursor = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}

/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @ti: the TID info
 *
 * Right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if the user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof tidmap)) {
		ret = -EFAULT;
		goto done;
	}

	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	tidcnt = dd->ipath_rcvtidcnt;
	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pd->port_pid, tid);
			dd->ipath_f_put_tid(dd, &tidbase[tid], 1,
					    dd->ipath_tidinvalid);
			ipath_release_user_pages(
				&dd->ipath_pageshadow[porttid + tid], 1);
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}

/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time; we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
	struct ipath_devdata *dd = pd->port_dd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;
	int ret;

	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
		   pd->port_port, key, dd->ipath_pkeys[0],
		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
		   atomic_read(&dd->ipath_pkeyrefs[3]));

	if (!lkey) {
		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
			   pd->port_port);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it. (see bug 4331)
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i] && pidx == -1)
			pidx = i;
		if (pd->port_pkeys[i] == key) {
			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
				   "(%x) more than once\n",
				   pd->port_port, key);
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ipath_dbg("All pkeys for port %u already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		if (dd->ipath_pkeys[i] == key) {
			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				pd->port_pkeys[pidx] = key;
				ipath_cdbg(VERBOSE, "p%u set key %x "
					   "matches #%d, count now %d\n",
					   pd->port_port, key, i,
					   atomic_read(pkrefs));
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				ipath_cdbg(VERBOSE, "Lost race, count was "
					   "0, after dec, it's %d\n",
					   atomic_read(pkrefs));
				any++;
			}
		}
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ipath_dbg("port %u, all pkeys already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			u64 pkey;

			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
			pkey =
				(u64) dd->ipath_pkeys[0] |
				((u64) dd->ipath_pkeys[1] << 16) |
				((u64) dd->ipath_pkeys[2] << 32) |
				((u64) dd->ipath_pkeys[3] << 48);
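			/*
			 * Example of the packing above: the four 16-bit
			 * pkeys occupy bits 15:0, 31:16, 47:32 and 63:48
			 * of the partition key register, so pkeys of
			 * {0xffff, 0x8001, 0, 0} produce the register
			 * value 0x8001ffff.
			 */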
			ipath_cdbg(PROC, "p%u set key %x in #%d, "
				   "portidx %d, new pkey reg %llx\n",
				   pd->port_port, key, i, pidx,
				   (unsigned long long) pkey);
			ipath_write_kreg(
				dd, dd->ipath_kregs->kr_partitionkey, pkey);

			ret = 0;
			goto bail;
		}
	}
	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
		  "can't set %x\n", pd->port_port, key);
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop == 1 re-enables, to be used to
 * re-init the software copy of the head register.
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, int start_stop)
{
	struct ipath_devdata *dd = pd->port_dd;
	u64 tval;

	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u\n",
		   start_stop ? "en" : "dis", dd->ipath_unit,
		   pd->port_port);
	/* atomically set or clear receive enable for the port. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.  This could cause a
		 * problem if software was broken, and did the enable w/o
		 * the disable, but eventually the in-memory copy will be
		 * updated and correct itself, even in the face of software
		 * bugs.
		 */
		*pd->port_rcvhdrtail_kvaddr = 0;
		set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
			&dd->ipath_rcvctrl);
	} else
		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
			  &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* now be sure chip saw it before we return */
	tval = ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	if (start_stop) {
		/*
		 * And try to be sure that tail reg update has happened too.
		 * This should in theory interlock with the RXE changes to
		 * the tail register.  Don't assign it to the tail register
		 * in memory copy, since we could overwrite an update by the
		 * chip if we did.
		 */
		tval = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	}
	/* always; new head should be equal to new tail; see above */
	return 0;
}

static void ipath_clean_part_key(struct ipath_portdata *pd,
				 struct ipath_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;

	/* for debugging only */
	oldpkey = (u64) dd->ipath_pkeys[0] |
		((u64) dd->ipath_pkeys[1] << 16) |
		((u64) dd->ipath_pkeys[2] << 32) |
		((u64) dd->ipath_pkeys[3] << 48);

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i])
			continue;
		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
			   pd->port_pkeys[i]);
		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
			/* check for match independent of the global bit */
			if ((dd->ipath_pkeys[j] & 0x7fff) !=
			    (pd->port_pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
				ipath_cdbg(VERBOSE, "p%u clear key "
					   "%x matches #%d\n",
					   pd->port_port,
					   pd->port_pkeys[i], j);
				ipath_stats.sps_pkeys[j] =
					dd->ipath_pkeys[j] = 0;
				pchanged++;
			} else
				ipath_cdbg(VERBOSE, "p%u key %x matches #%d, "
					   "but ref still %d\n", pd->port_port,
					   pd->port_pkeys[i], j,
					   atomic_read(&dd->ipath_pkeyrefs[j]));
			break;
		}
		pd->port_pkeys[i] = 0;
	}
	if (pchanged) {
		u64 pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
			   "new pkey reg %llx\n", pd->port_port,
			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
}

/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous; we do multiple allocation
 * calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned e, egrcnt, alloced, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	int ret;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	egrcnt = dd->ipath_rcvegrcnt;
	/* TID number offset for this port */
	egroff = pd->port_port * egrcnt;
	egrsize = dd->ipath_rcvegrbufsize;
	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

	/*
	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
	 * physically contiguous memory, advance through it until used up
	 * and then allocate more.  Of course, we need memory to store those
	 * extra pointers, now.  Started out with 256KB, but under heavy
	 * memory pressure (creating large files and then copying them over
	 * NFS while doing lots of MPI jobs), we hit some allocation
	 * failures, even though we can sleep...  (2.6.10) Still get
	 * failures at 64K.  32K is the lowest we can go without wasting
	 * additional memory.
	 */
	size = 0x8000;
	alloced = ALIGN(egrsize * egrcnt, size);
	egrperchunk = size / egrsize;
	chunk = (egrcnt + egrperchunk - 1) / egrperchunk;
	pd->port_rcvegrbuf_chunks = chunk;
	pd->port_rcvegrbufs_perchunk = egrperchunk;
	pd->port_rcvegrbuf_size = size;
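	/*
	 * Example of the chunk math (assuming an egrsize of 2048 for
	 * illustration): with 32KB chunks, egrperchunk is 16, so an
	 * egrcnt of 512 needs 32 chunks; the rounding-up divide above
	 * gives a final partial chunk its own allocation when egrcnt
	 * isn't a multiple of egrperchunk.
	 */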
	pd->port_rcvegrbuf = vmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]));
	if (!pd->port_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail;
	}
	pd->port_rcvegrbuf_phys =
		vmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]));
	if (!pd->port_rcvegrbuf_phys) {
		ret = -ENOMEM;
		goto bail_rcvegrbuf;
	}
	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {

		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
			&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
			gfp_flags);

		if (!pd->port_rcvegrbuf[e]) {
			ret = -ENOMEM;
			goto bail_rcvegrbuf_phys;
		}
	}

	pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
		unsigned i;

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->ipath_f_put_tid(dd, e + egroff +
					    (u64 __iomem *)
					    ((char __iomem *)
					     dd->ipath_kregbase +
					     dd->ipath_rcvegrbase), 0, pa);
			pa += egrsize;
		}
		cond_resched();	/* don't hog the cpu */
	}

	ret = 0;
	goto bail;

bail_rcvegrbuf_phys:
	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
		     pd->port_rcvegrbuf[e]; e++) {
		dma_free_coherent(&dd->pcidev->dev, size,
				  pd->port_rcvegrbuf[e],
				  pd->port_rcvegrbuf_phys[e]);
	}
	vfree(pd->port_rcvegrbuf_phys);
	pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	vfree(pd->port_rcvegrbuf);
	pd->port_rcvegrbuf = NULL;
bail:
	return ret;
}

static int ipath_do_user_init(struct ipath_portdata *pd,
			      const struct ipath_user_info *uinfo)
{
	int ret = 0;
	struct ipath_devdata *dd = pd->port_dd;
	u32 head32;

	/* for now, if major version is different, bail */
	if ((uinfo->spu_userversion >> 16) != IPATH_USER_SWMAJOR) {
		dev_info(&dd->pcidev->dev,
			 "User major version %d not same as driver "
			 "major %d\n", uinfo->spu_userversion >> 16,
			 IPATH_USER_SWMAJOR);
		ret = -ENODEV;
		goto done;
	}

	if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
		ipath_dbg("User minor version %d not same as driver "
			  "minor %d\n", uinfo->spu_userversion & 0xffff,
			  IPATH_USER_SWMINOR);

	if (uinfo->spu_rcvhdrsize) {
		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
		if (ret)
			goto done;
	}

	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */

	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
	pd->port_piobufs = dd->ipath_piobufbase +
		dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign;
	ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
		   pd->port_port, pd->port_piobufs);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If pd->port_port > chip-supported,
	 * we need to do extra stuff here, handling overflow
	 * through port 0, someday.
	 */
	ret = ipath_create_rcvhdrq(dd, pd);
	if (!ret)
		ret = ipath_create_user_egr(pd);
	if (ret)
		goto done;

	/*
	 * set the eager head register for this port to the current values
	 * of the tail pointers, since we don't know if they were
	 * updated on last use of the port.
	 */
	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
	dd->ipath_lastegrheads[pd->port_port] = -1;
	dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
		   pd->port_port, head32);
	pd->port_tidcursor = 0;	/* start at beginning after open */
	/*
	 * now enable the port; the tail registers will be written to memory
	 * by the chip as soon as it sees the write to
	 * dd->ipath_kregs->kr_rcvctrl.  The update only happens on
	 * transition from 0 to 1, so clear it first, then set it as part of
	 * enabling the port.  This will (very briefly) affect any other
	 * open ports, but it shouldn't be long enough to be an issue.
	 * We explicitly set the in-memory copy to 0 beforehand, so we don't
	 * have to wait to be sure the DMA update has happened.
	 */
	*pd->port_rcvhdrtail_kvaddr = 0ULL;
	set_bit(INFINIPATH_R_PORTENABLE_SHIFT + pd->port_port,
		&dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
done:
	return ret;
}

/* common code for the mappings on dma_alloc_coherent mem */
static int ipath_mmap_mem(struct vm_area_struct *vma,
			  struct ipath_portdata *pd, unsigned len,
			  int write_ok, dma_addr_t addr, char *what)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned pfn = (unsigned long)addr >> PAGE_SHIFT;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		dev_info(&dd->pcidev->dev,
			 "FAIL on %s: len %lx > %x\n", what,
			 vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		dev_info(&dd->pcidev->dev,
			 "%s port%u mmap of %lx, %x bytes r%c failed: %d\n",
			 what, pd->port_port, (unsigned long)addr, len,
			 write_ok?'w':'o', ret);
	else
		ipath_cdbg(VERBOSE, "%s port%u mmapped %lx, %x bytes r%c\n",
			   what, pd->port_port, (unsigned long)addr, len,
			   write_ok?'w':'o');
bail:
	return ret;
}

static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their port
	 * in the chip.
	 */
	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->ipath_physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
			struct ipath_devdata *dd,
			struct ipath_portdata *pd)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible.  This prevents access to previous
	 * process data, and catches users who might try to read the i/o
	 * space due to a bug.
	 */
	if ((vma->vm_end - vma->vm_start) >
	    (dd->ipath_pbufsport * dd->ipath_palign)) {
		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
			 "reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EFAULT;
		goto bail;
	}

	phys = dd->ipath_physaddr + pd->port_piobufs;

	/*
	 * Don't mark this as non-cached, or we don't get the
	 * write combining behavior we want on the PIO buffers!
	 */

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

	if (vma->vm_flags & VM_READ) {
		dev_info(&dd->pcidev->dev,
			 "Can't map piobufs as readable (flags=%lx)\n",
			 vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	/* don't allow them to later change to readable with mprotect */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}

static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long start, size;
	size_t total_size, i;
	dma_addr_t *phys;
	int ret;

	size = pd->port_rcvegrbuf_size;
	total_size = pd->port_rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EFAULT;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}
	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;
	phys = pd->port_rcvegrbuf_phys;

	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
		ret = remap_pfn_range(vma, start, phys[i] >> PAGE_SHIFT,
				      size, vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}

/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u64 pgaddr, ureg;
	int ret;

	pd = port_fp(fp);
	dd = pd->port_dd;

	/*
	 * This is the ipath_do_user_init() code, mapping the shared buffers
	 * into the user process.  The address referred to by vm_pgoff is the
	 * virtual, not physical, address; we only do one mmap for each
	 * space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;
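	/*
	 * Note: pgaddr is matched below against the per-port addresses
	 * that ipath_get_base_info() handed to user space (piobufs,
	 * eager buffers, rcvhdrq, user registers, etc.); the first
	 * match decides which object this call maps.
	 */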

	/*
	 * Must fit in 40 bits for our hardware; some checked elsewhere,
	 * but we'll be paranoid.  Check for 0 is mostly in case one of the
	 * allocations failed, but user called mmap anyway.  We want to catch
	 * that before it can match.
	 */
	if (!pgaddr || pgaddr >= (1ULL<<40)) {
		ipath_dev_err(dd, "Bad phys addr %llx, start %lx, end %lx\n",
			      (unsigned long long)pgaddr, vma->vm_start,
			      vma->vm_end);
		return -EINVAL;
	}

	/* just the offset of the port user registers, not physical addr */
	ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;

	ipath_cdbg(MM, "ushare: pgaddr %llx vm_start=%lx, vmlen %lx\n",
		   (unsigned long long) pgaddr, vma->vm_start,
		   vma->vm_end - vma->vm_start);

	if (vma->vm_start & (PAGE_SIZE-1)) {
		ipath_dev_err(dd,
			      "vm_start not aligned: %lx, end=%lx phys %lx\n",
			      vma->vm_start, vma->vm_end,
			      (unsigned long)pgaddr);
		ret = -EINVAL;
	} else if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == pd->port_piobufs)
		ret = mmap_piobufs(vma, dd, pd);
	else if (pgaddr == (u64) pd->port_rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, pd);
	else if (pgaddr == (u64) pd->port_rcvhdrq_phys) {
		/*
		 * The rcvhdrq itself; readonly except on HT-400 (so have
		 * to allow writable mapping), multiple pages, contiguous
		 * from an i/o perspective.
		 */
		unsigned total_size =
			ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize
			      * sizeof(u32), PAGE_SIZE);
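		/*
		 * Note: each rcvhdrq entry is rcvhdrentsize 32-bit words;
		 * the total is rounded up to a whole number of pages since
		 * mmap can only map page-sized units.
		 */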
		ret = ipath_mmap_mem(vma, pd, total_size, 1,
				     pd->port_rcvhdrq_phys,
				     "rcvhdrq");
	} else if (pgaddr == (u64)pd->port_rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     pd->port_rcvhdrqtailaddr_phys,
				     "rcvhdrq tail");
	else if (pgaddr == dd->ipath_pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     dd->ipath_pioavailregs_phys,
				     "pioavail registers");
	else
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		dev_info(&dd->pcidev->dev,
			 "Failure %d on addr %lx, off %lx\n",
			 -ret, vma->vm_start, vma->vm_pgoff);

	return ret;
}

static unsigned int ipath_poll(struct file *fp,
			       struct poll_table_struct *pt)
{
	struct ipath_portdata *pd;
	u32 head, tail;
	int bit;
	struct ipath_devdata *dd;

	pd = port_fp(fp);
	dd = pd->port_dd;

	bit = pd->port_port + INFINIPATH_R_INTRAVAIL_SHIFT;
	set_bit(bit, &dd->ipath_rcvctrl);

	/*
	 * Before blocking, make sure that head is still == tail,
	 * reading from the chip, so we can be sure the interrupt
	 * enable has made it to the chip.  If not equal, disable
	 * interrupt again and return immediately.  This avoids races,
	 * and the overhead of the chip read doesn't matter much at
	 * this point, since we are waiting for something anyway.
	 */

	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
	tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

	if (tail == head) {
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		if (dd->ipath_rhdrhead_intr_off)	/* arm rcv interrupt */
			(void)ipath_write_ureg(dd, ur_rcvhdrhead,
					       dd->ipath_rhdrhead_intr_off
					       | head, pd->port_port);
		poll_wait(fp, &pd->port_wait, pt);

		if (test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
			/* timed out, no packets received */
			clear_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
			pd->port_rcvwait_to++;
		}
	} else {
		/* it's already happened; don't do wait_event overhead */
		pd->port_rcvnowait++;
	}

	clear_bit(bit, &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);

	return 0;
}

static int try_alloc_port(struct ipath_devdata *dd, int port,
			  struct file *fp)
{
	int ret;

	if (!dd->ipath_pd[port]) {
		void *p, *ptmp;

		p = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

		/*
		 * Allocate memory for use in ipath_tid_update() just once
		 * at open, not per call.  Reduces cost of expected send
		 * setup.
		 */
		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
			       dd->ipath_rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);
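		/*
		 * Note: ptmp is carved up by ipath_tid_update() into an
		 * array of rcvtidcnt struct page pointers followed by
		 * rcvtidcnt u16 TID entries, matching the pagep/tidlist
		 * pointers it derives from port_tid_pg_list.
		 */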
		if (!p || !ptmp) {
			ipath_dev_err(dd, "Unable to allocate portdata "
				      "memory, failing open\n");
			ret = -ENOMEM;
			kfree(p);
			kfree(ptmp);
			goto bail;
		}
		dd->ipath_pd[port] = p;
		dd->ipath_pd[port]->port_port = port;
		dd->ipath_pd[port]->port_dd = dd;
		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
	}
	if (!dd->ipath_pd[port]->port_cnt) {
		dd->ipath_pd[port]->port_cnt = 1;
		fp->private_data = (void *) dd->ipath_pd[port];
		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
			   current->comm, current->pid, dd->ipath_unit,
			   port);
		dd->ipath_pd[port]->port_pid = current->pid;
		strncpy(dd->ipath_pd[port]->port_comm, current->comm,
			sizeof(dd->ipath_pd[port]->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
		goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}

static inline int usable(struct ipath_devdata *dd)
{
	return dd &&
		(dd->ipath_flags & IPATH_PRESENT) &&
		dd->ipath_kregbase &&
		dd->ipath_lid &&
		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
				     | IPATH_LINKUNK));
}

static int find_free_port(int unit, struct file *fp)
{
	struct ipath_devdata *dd = ipath_lookup(unit);
	int ret, i;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (!usable(dd)) {
		ret = -ENETDOWN;
		goto bail;
	}

	for (i = 0; i < dd->ipath_cfgports; i++) {
		ret = try_alloc_port(dd, i, fp);
		if (ret != -EBUSY)
			goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}

static int find_best_unit(struct file *fp)
{
	int ret = 0, i, prefunit = -1, devmax;
	int maxofallports, npresent, nup;
	int ndev;

	(void) ipath_count_units(&npresent, &nup, &maxofallports);

	/*
	 * This code is present to allow a knowledgeable person to
	 * specify the layout of processes to processors before opening
	 * this driver, and then we'll assign the process to the "closest"
	 * HT-400 to that processor (we assume reasonable connectivity,
	 * for now).  This code assumes that if affinity has been set
	 * before this point, that at most one cpu is set; for now this
	 * is reasonable.  I check for both cpus_empty() and cpus_full(),
	 * in case some kernel variant sets none of the bits when no
	 * affinity is set.  2.6.11 and 12 kernels have all present
	 * cpus set.  Some day we'll have to fix it up further to handle
	 * a cpu subset.  This algorithm fails for two HT-400's connected
	 * in tunnel fashion.  Eventually this needs real topology
	 * information.  There may be some issues with dual core numbering
	 * as well.  This needs more work prior to release.
	 */
	if (!cpus_empty(current->cpus_allowed) &&
	    !cpus_full(current->cpus_allowed)) {
		int ncpus = num_online_cpus(), curcpu = -1;
		for (i = 0; i < ncpus; i++)
			if (cpu_isset(i, current->cpus_allowed)) {
				ipath_cdbg(PROC, "%s[%u] affinity set for "
					   "cpu %d\n", current->comm,
					   current->pid, i);
				curcpu = i;
			}
		if (curcpu != -1) {
			if (npresent) {
				prefunit = curcpu / (ncpus / npresent);
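				/*
				 * e.g. with 8 online cpus and 2 chips
				 * present, cpus 0-3 prefer unit 0 and
				 * cpus 4-7 prefer unit 1.
				 */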
				ipath_dbg("%s[%u] %d chips, %d cpus, "
					  "%d cpus/chip, select unit %d\n",
					  current->comm, current->pid,
					  npresent, ncpus, ncpus / npresent,
					  prefunit);
			}
		}
	}

	/*
	 * user ports start at 1, kernel port is 0
	 * For now, we do round-robin access across all chips
	 */

	if (prefunit != -1)
		devmax = prefunit + 1;
	else
		devmax = ipath_count_units(NULL, NULL, NULL);
recheck:
	for (i = 1; i < maxofallports; i++) {
		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
		     ndev++) {
			struct ipath_devdata *dd = ipath_lookup(ndev);

			if (!usable(dd))
				continue; /* can't use this unit */
			if (i >= dd->ipath_cfgports)
				/*
				 * Maxed out on users of this unit.  Try
				 * next.
				 */
				continue;
			ret = try_alloc_port(dd, i, fp);
			if (!ret)
				goto done;
		}
	}

	if (npresent) {
		if (nup == 0) {
			ret = -ENETDOWN;
			ipath_dbg("No ports available (none initialized "
				  "and ready)\n");
		} else {
			if (prefunit > 0) {
				/* if started above 0, retry from 0 */
				ipath_cdbg(PROC,
					   "%s[%u] no ports on prefunit "
					   "%d, clear and re-check\n",
					   current->comm, current->pid,
					   prefunit);
				devmax = ipath_count_units(NULL, NULL,
							   NULL);
				prefunit = -1;
				goto recheck;
			}
			ret = -EBUSY;
			ipath_dbg("No ports available\n");
		}
	} else {
		ret = -ENXIO;
		ipath_dbg("No boards found\n");
	}

done:
	return ret;
}

static int ipath_open(struct inode *in, struct file *fp)
{
	int ret, user_minor;

	mutex_lock(&ipath_mutex);

	user_minor = iminor(in) - IPATH_USER_MINOR_BASE;
	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
		   (long)in->i_rdev, user_minor);

	if (user_minor)
		ret = find_free_port(user_minor - 1, fp);
	else
		ret = find_best_unit(fp);

	mutex_unlock(&ipath_mutex);
	return ret;
}

/**
 * unlock_expected_tids - unlock any expected TID entries port still had in use
 * @pd: port
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
		   pd->port_port);
	for (i = port_tidbase; i < maxtid; i++) {
		if (!dd->ipath_pageshadow[i])
			continue;

		ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
						  1);
		dd->ipath_pageshadow[i] = NULL;
		cnt++;
		ipath_stats.sps_pageunlocks++;
	}
	if (cnt)
		ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
			   pd->port_port, cnt);

	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
			   (unsigned long long) ipath_stats.sps_pagelocks,
			   (unsigned long long)
			   ipath_stats.sps_pageunlocks);
}

static int ipath_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	unsigned port;

	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
		   (long)in->i_rdev, fp->private_data);

	mutex_lock(&ipath_mutex);

	pd = port_fp(fp);
	port = pd->port_port;
	fp->private_data = NULL;
	dd = pd->port_dd;

	if (pd->port_hdrqfull) {
		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
			   "during run\n", pd->port_comm, pd->port_pid,
			   pd->port_hdrqfull);
		pd->port_hdrqfull = 0;
	}

	if (pd->port_rcvwait_to || pd->port_piowait_to
	    || pd->port_rcvnowait || pd->port_pionowait) {
		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
			   "%u rcv %u, pio already\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
		pd->port_rcvwait_to = pd->port_piowait_to =
			pd->port_rcvnowait = pd->port_pionowait = 0;
	}
	if (pd->port_flag) {
		ipath_dbg("port %u port_flag still set to 0x%lx\n",
			  pd->port_port, pd->port_flag);
		pd->port_flag = 0;
	}

	if (dd->ipath_kregbase) {
		int i;
		/* atomically clear receive enable port. */
		clear_bit(INFINIPATH_R_PORTENABLE_SHIFT + port,
			  &dd->ipath_rcvctrl);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);
		/* and read back from chip to be sure that nothing
		 * else is in flight when we do the rest */
		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

		/* clean up the pkeys for this port user */
		ipath_clean_part_key(pd, dd);

		/*
		 * be paranoid, and never write 0's to these, just use an
		 * unused part of the port 0 tail page.  Of course,
		 * rcvhdraddr points to a large chunk of memory, so this
		 * could still trash things, but at least it won't trash
		 * page 0, and by disabling the port, it should stop "soon",
		 * even if a packet or two is already in flight after we
		 * disabled the port.
		 */
		ipath_write_kreg_port(dd,
				      dd->ipath_kregs->kr_rcvhdrtailaddr, port,
				      dd->ipath_dummy_hdrq_phys);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
				      pd->port_port,
				      dd->ipath_dummy_hdrq_phys);

		i = dd->ipath_pbufsport * (port - 1);
		ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);

		if (dd->ipath_pageshadow)
			unlock_expected_tids(pd);
		ipath_stats.sps_ports--;
		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
			   pd->port_comm, pd->port_pid,
			   dd->ipath_unit, port);

		dd->ipath_f_clear_tids(dd, pd->port_port);
	}

	pd->port_cnt = 0;
	pd->port_pid = 0;

	dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
	mutex_unlock(&ipath_mutex);
	ipath_free_pddata(dd, pd); /* after releasing the mutex */

	return ret;
}

static int ipath_port_info(struct ipath_portdata *pd,
			   struct ipath_port_info __user *uinfo)
{
	struct ipath_port_info info;
	int nup;
	int ret;

	(void) ipath_count_units(NULL, &nup, NULL);
	info.num_active = nup;
	info.unit = pd->port_dd->ipath_unit;
	info.port = pd->port_port;

	if (copy_to_user(uinfo, &info, sizeof(info))) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}

static ssize_t ipath_write(struct file *fp, const char __user *data,
			   size_t count, loff_t *off)
{
	const struct ipath_cmd __user *ucmd;
	struct ipath_portdata *pd;
	const void __user *src;
	size_t consumed, copy;
	struct ipath_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct ipath_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);
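	/*
	 * Only cmd.type has been copied in so far; the size and layout of
	 * the rest of the command depend on it, so the payload is copied
	 * in a second step once the union member is known.
	 */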

	switch (cmd.type) {
	case IPATH_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;
	case IPATH_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;
	case IPATH_CMD_PORT_INFO:
		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;
		src = &ucmd->cmd.port_info;
		break;
	case IPATH_CMD_TID_UPDATE:
	case IPATH_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;
	case IPATH_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	if ((count - consumed) < copy) {
		ret = -EINVAL;
		goto bail;
	}

	if (copy_from_user(dest, src, copy)) {
		ret = -EFAULT;
		goto bail;
	}

	consumed += copy;
	pd = port_fp(fp);

	switch (cmd.type) {
	case IPATH_CMD_USER_INIT:
		ret = ipath_do_user_init(pd, &cmd.cmd.user_info);
		if (ret < 0)
			goto bail;
		ret = ipath_get_base_info(
			pd, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		break;
	case IPATH_CMD_RECV_CTRL:
		ret = ipath_manage_rcvq(pd, cmd.cmd.recv_ctrl);
		break;
	case IPATH_CMD_PORT_INFO:
		ret = ipath_port_info(pd,
				      (struct ipath_port_info __user *)
				      (unsigned long) cmd.cmd.port_info);
		break;
	case IPATH_CMD_TID_UPDATE:
		ret = ipath_tid_update(pd, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_TID_FREE:
		ret = ipath_tid_free(pd, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_SET_PART_KEY:
		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}

static struct class *ipath_class;

static int init_cdev(int minor, char *name, struct file_operations *fops,
		     struct cdev **cdevp, struct class_device **class_devp)
{
	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
	struct cdev *cdev = NULL;
	struct class_device *class_dev = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	class_dev = class_device_create(ipath_class, NULL, dev, NULL, name);

	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "class_dev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	goto done;

err_cdev:
	cdev_del(cdev);
	cdev = NULL;

done:
	if (ret >= 0) {
		*cdevp = cdev;
		*class_devp = class_dev;
	} else {
		*cdevp = NULL;
		*class_devp = NULL;
	}

	return ret;
}

int ipath_cdev_init(int minor, char *name, struct file_operations *fops,
		    struct cdev **cdevp, struct class_device **class_devp)
{
	return init_cdev(minor, name, fops, cdevp, class_devp);
}

static void cleanup_cdev(struct cdev **cdevp,
			 struct class_device **class_devp)
{
	struct class_device *class_dev = *class_devp;

	if (class_dev) {
		class_device_unregister(class_dev);
		*class_devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

void ipath_cdev_cleanup(struct cdev **cdevp,
			struct class_device **class_devp)
{
	cleanup_cdev(cdevp, class_devp);
}

static struct cdev *wildcard_cdev;
static struct class_device *wildcard_class_dev;

static const dev_t dev = MKDEV(IPATH_MAJOR, 0);

static int user_init(void)
{
	int ret;

	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		goto bail;
	}

	goto done;
bail:
	unregister_chrdev_region(dev, IPATH_NMINORS);
done:
	return ret;
}

static void user_cleanup(void)
{
	if (ipath_class) {
		class_destroy(ipath_class);
		ipath_class = NULL;
	}

	unregister_chrdev_region(dev, IPATH_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);
static atomic_t user_setup = ATOMIC_INIT(0);

int ipath_user_add(struct ipath_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = user_init();
		if (ret < 0) {
			ipath_dev_err(dd, "Unable to set up user support: "
				      "error %d\n", -ret);
			goto bail;
		}
		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
				&wildcard_class_dev);
		if (ret < 0) {
			ipath_dev_err(dd, "Could not create wildcard "
				      "minor: error %d\n", -ret);
			goto bail_sma;
		}

		atomic_set(&user_setup, 1);
	}

	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);

	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
			&dd->user_cdev, &dd->user_class_dev);
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
			      dd->ipath_unit + 1, name);

	goto bail;

bail_sma:
	user_cleanup();
bail:
	return ret;
}

void ipath_user_remove(struct ipath_devdata *dd)
{
	cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);

	if (atomic_dec_return(&user_count) == 0) {
		if (atomic_read(&user_setup) == 0)
			goto bail;

		cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
		user_cleanup();

		atomic_set(&user_setup, 0);
	}
bail:
	return;
}