1 /*
2 * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content
3 *
4 * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of Volkswagen nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * Alternatively, provided that this notice is retained in full, this
20 * software may be distributed under the terms of the GNU General
21 * Public License ("GPL") version 2, in which case the provisions of the
22 * GPL apply INSTEAD OF those given above.
23 *
24 * The provided data structures and external interfaces from this code
25 * are not restricted to be used by modules with a GPL compatible license.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
28 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
29 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
30 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
31 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
32 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
33 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
37 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
38 * DAMAGE.
39 *
40 * Send feedback to <socketcan-users@lists.berlios.de>
41 *
42 */
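
/*
 * Editor's note - a minimal userspace sketch of how this protocol is
 * driven (not compiled as part of this file; "can0" and the CAN id are
 * placeholders; needed headers are <sys/socket.h>, <net/if.h>,
 * <unistd.h>, <linux/can.h> and <linux/can/bcm.h>). A BCM socket is
 * created with socket(PF_CAN, SOCK_DGRAM, CAN_BCM) and connect()ed to
 * one CAN interface; every command written to it is a struct
 * bcm_msg_head, optionally followed by msg_head.nframes can_frames, and
 * every notification read from it uses the same layout.
 *
 *      struct sockaddr_can addr = { .can_family = AF_CAN };
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frame;
 *      } msg = { 0 };
 *      int s = socket(PF_CAN, SOCK_DGRAM, CAN_BCM);
 *
 *      addr.can_ifindex = if_nametoindex("can0");
 *      connect(s, (struct sockaddr *)&addr, sizeof(addr));
 *
 *      msg.head.opcode   = TX_SEND;
 *      msg.head.can_id   = 0x123;
 *      msg.head.nframes  = 1;
 *      msg.frame.can_id  = 0x123;
 *      msg.frame.can_dlc = 2;
 *      msg.frame.data[0] = 0x11;
 *      msg.frame.data[1] = 0x22;
 *      write(s, &msg, sizeof(msg));
 */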
43
44 #include <linux/module.h>
45 #include <linux/init.h>
46 #include <linux/hrtimer.h>
47 #include <linux/list.h>
48 #include <linux/proc_fs.h>
49 #include <linux/uio.h>
50 #include <linux/net.h>
51 #include <linux/netdevice.h>
52 #include <linux/socket.h>
53 #include <linux/if_arp.h>
54 #include <linux/skbuff.h>
55 #include <linux/can.h>
56 #include <linux/can/core.h>
57 #include <linux/can/bcm.h>
58 #include <net/sock.h>
59 #include <net/net_namespace.h>
60
61 /* use of last_frames[index].can_dlc */
62 #define RX_RECV 0x40 /* received data for this element */
63 #define RX_THR 0x80 /* element has not been sent due to throttle feature */
64 #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */
65
66 /* get best masking value for can_rx_register() for a given single can_id */
67 #define REGMASK(id) ((id & CAN_RTR_FLAG) | ((id & CAN_EFF_FLAG) ? \
68 (CAN_EFF_MASK | CAN_EFF_FLAG) : CAN_SFF_MASK))
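
/*
 * Editor's note for illustration: REGMASK(0x123) evaluates to CAN_SFF_MASK,
 * REGMASK(0x123 | CAN_EFF_FLAG) to (CAN_EFF_MASK | CAN_EFF_FLAG), and a set
 * CAN_RTR_FLAG in the can_id is carried over into the mask so that such a
 * subscription matches RTR frames only.
 */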
69
70 #define CAN_BCM_VERSION "20080415"
71 static __initdata const char banner[] = KERN_INFO
72 "can: broadcast manager protocol (rev " CAN_BCM_VERSION ")\n";
73
74 MODULE_DESCRIPTION("PF_CAN broadcast manager protocol");
75 MODULE_LICENSE("Dual BSD/GPL");
76 MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
77
78 /* easy access to can_frame payload */
79 static inline u64 GET_U64(const struct can_frame *cp)
80 {
81 return *(u64 *)cp->data;
82 }
83
84 struct bcm_op {
85 struct list_head list;
86 int ifindex;
87 canid_t can_id;
88 int flags;
89 unsigned long frames_abs, frames_filtered;
90 struct timeval ival1, ival2;
91 struct hrtimer timer, thrtimer;
92 ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg;
93 int rx_ifindex;
94 int count;
95 int nframes;
96 int currframe;
97 struct can_frame *frames;
98 struct can_frame *last_frames;
99 struct can_frame sframe;
100 struct can_frame last_sframe;
101 struct sock *sk;
102 struct net_device *rx_reg_dev;
103 };
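
/*
 * Editor's note on the two frame arrays above: for tx ops, frames[] holds
 * the can_frame(s) sent round-robin by bcm_can_tx() and last_frames stays
 * NULL; for rx ops, frames[] holds the filter/mask patterns given at
 * RX_SETUP time while last_frames[] caches the last content seen per
 * index, with the private RX_RECV/RX_THR flags kept in the unused upper
 * bits of can_dlc.
 */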
104
105 static struct proc_dir_entry *proc_dir;
106
107 struct bcm_sock {
108 struct sock sk;
109 int bound;
110 int ifindex;
111 struct notifier_block notifier;
112 struct list_head rx_ops;
113 struct list_head tx_ops;
114 unsigned long dropped_usr_msgs;
115 struct proc_dir_entry *bcm_proc_read;
116 char procname [9]; /* pointer printed in ASCII with \0 */
117 };
118
119 static inline struct bcm_sock *bcm_sk(const struct sock *sk)
120 {
121 return (struct bcm_sock *)sk;
122 }
123
124 #define CFSIZ sizeof(struct can_frame)
125 #define OPSIZ sizeof(struct bcm_op)
126 #define MHSIZ sizeof(struct bcm_msg_head)
127
128 /*
129 * procfs functions
130 */
131 static char *bcm_proc_getifname(int ifindex)
132 {
133 struct net_device *dev;
134
135 if (!ifindex)
136 return "any";
137
138 /* no usage counting */
139 dev = __dev_get_by_index(&init_net, ifindex);
140 if (dev)
141 return dev->name;
142
143 return "???";
144 }
145
146 static int bcm_read_proc(char *page, char **start, off_t off,
147 int count, int *eof, void *data)
148 {
149 int len = 0;
150 struct sock *sk = (struct sock *)data;
151 struct bcm_sock *bo = bcm_sk(sk);
152 struct bcm_op *op;
153
154 len += snprintf(page + len, PAGE_SIZE - len, ">>> socket %p",
155 sk->sk_socket);
156 len += snprintf(page + len, PAGE_SIZE - len, " / sk %p", sk);
157 len += snprintf(page + len, PAGE_SIZE - len, " / bo %p", bo);
158 len += snprintf(page + len, PAGE_SIZE - len, " / dropped %lu",
159 bo->dropped_usr_msgs);
160 len += snprintf(page + len, PAGE_SIZE - len, " / bound %s",
161 bcm_proc_getifname(bo->ifindex));
162 len += snprintf(page + len, PAGE_SIZE - len, " <<<\n");
163
164 list_for_each_entry(op, &bo->rx_ops, list) {
165
166 unsigned long reduction;
167
168 /* print only active entries & prevent division by zero */
169 if (!op->frames_abs)
170 continue;
171
172 len += snprintf(page + len, PAGE_SIZE - len,
173 "rx_op: %03X %-5s ",
174 op->can_id, bcm_proc_getifname(op->ifindex));
175 len += snprintf(page + len, PAGE_SIZE - len, "[%d]%c ",
176 op->nframes,
177 (op->flags & RX_CHECK_DLC)?'d':' ');
178 if (op->kt_ival1.tv64)
179 len += snprintf(page + len, PAGE_SIZE - len,
180 "timeo=%lld ",
181 (long long)
182 ktime_to_us(op->kt_ival1));
183
184 if (op->kt_ival2.tv64)
185 len += snprintf(page + len, PAGE_SIZE - len,
186 "thr=%lld ",
187 (long long)
188 ktime_to_us(op->kt_ival2));
189
190 len += snprintf(page + len, PAGE_SIZE - len,
191 "# recv %ld (%ld) => reduction: ",
192 op->frames_filtered, op->frames_abs);
193
194 reduction = 100 - (op->frames_filtered * 100) / op->frames_abs;
195
196 len += snprintf(page + len, PAGE_SIZE - len, "%s%ld%%\n",
197 (reduction == 100)?"near ":"", reduction);
198
199 if (len > PAGE_SIZE - 200) {
200 /* mark output cut off */
201 len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
202 break;
203 }
204 }
205
206 list_for_each_entry(op, &bo->tx_ops, list) {
207
208 len += snprintf(page + len, PAGE_SIZE - len,
209 "tx_op: %03X %s [%d] ",
210 op->can_id, bcm_proc_getifname(op->ifindex),
211 op->nframes);
212
213 if (op->kt_ival1.tv64)
214 len += snprintf(page + len, PAGE_SIZE - len, "t1=%lld ",
215 (long long) ktime_to_us(op->kt_ival1));
216
217 if (op->kt_ival2.tv64)
218 len += snprintf(page + len, PAGE_SIZE - len, "t2=%lld ",
219 (long long) ktime_to_us(op->kt_ival2));
220
221 len += snprintf(page + len, PAGE_SIZE - len, "# sent %ld\n",
222 op->frames_abs);
223
224 if (len > PAGE_SIZE - 100) {
225 /* mark output cut off */
226 len += snprintf(page + len, PAGE_SIZE - len, "(..)\n");
227 break;
228 }
229 }
230
231 len += snprintf(page + len, PAGE_SIZE - len, "\n");
232
233 *eof = 1;
234 return len;
235 }
236
237 /*
238 * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface
239 * of the given bcm tx op
240 */
241 static void bcm_can_tx(struct bcm_op *op)
242 {
243 struct sk_buff *skb;
244 struct net_device *dev;
245 struct can_frame *cf = &op->frames[op->currframe];
246
247 /* no target device? => exit */
248 if (!op->ifindex)
249 return;
250
251 dev = dev_get_by_index(&init_net, op->ifindex);
252 if (!dev) {
253 /* RFC: should this bcm_op remove itself here? */
254 return;
255 }
256
257 skb = alloc_skb(CFSIZ, gfp_any());
258 if (!skb)
259 goto out;
260
261 memcpy(skb_put(skb, CFSIZ), cf, CFSIZ);
262
263 /* send with loopback */
264 skb->dev = dev;
265 skb->sk = op->sk;
266 can_send(skb, 1);
267
268 /* update statistics */
269 op->currframe++;
270 op->frames_abs++;
271
272 /* reached last frame? */
273 if (op->currframe >= op->nframes)
274 op->currframe = 0;
275 out:
276 dev_put(dev);
277 }
278
279 /*
280 * bcm_send_to_user - send a BCM message to userspace
281 * (consisting of bcm_msg_head + x CAN frames)
282 */
283 static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head,
284 struct can_frame *frames, int has_timestamp)
285 {
286 struct sk_buff *skb;
287 struct can_frame *firstframe;
288 struct sockaddr_can *addr;
289 struct sock *sk = op->sk;
290 int datalen = head->nframes * CFSIZ;
291 int err;
292
293 skb = alloc_skb(sizeof(*head) + datalen, gfp_any());
294 if (!skb)
295 return;
296
297 memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head));
298
299 if (head->nframes) {
300 /* can_frames starting here */
301 firstframe = (struct can_frame *) skb_tail_pointer(skb);
302
303 memcpy(skb_put(skb, datalen), frames, datalen);
304
305 /*
306 * the BCM uses the can_dlc-element of the can_frame
307 * structure for internal purposes. This is only
308 * relevant for updates that are generated by the
309 * BCM, where nframes is 1
310 */
311 if (head->nframes == 1)
312 firstframe->can_dlc &= BCM_CAN_DLC_MASK;
313 }
314
315 if (has_timestamp) {
316 /* restore rx timestamp */
317 skb->tstamp = op->rx_stamp;
318 }
319
320 /*
321 * Put the datagram to the queue so that bcm_recvmsg() can
322 * get it from there. We need to pass the interface index to
323 * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb
324 * containing the interface index.
325 */
326
327 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can));
328 addr = (struct sockaddr_can *)skb->cb;
329 memset(addr, 0, sizeof(*addr));
330 addr->can_family = AF_CAN;
331 addr->can_ifindex = op->rx_ifindex;
332
333 err = sock_queue_rcv_skb(sk, skb);
334 if (err < 0) {
335 struct bcm_sock *bo = bcm_sk(sk);
336
337 kfree_skb(skb);
338 /* don't care about overflows in this statistic */
339 bo->dropped_usr_msgs++;
340 }
341 }
342
343 /*
344 * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions
345 */
346 static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer)
347 {
348 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
349 enum hrtimer_restart ret = HRTIMER_NORESTART;
350
351 if (op->kt_ival1.tv64 && (op->count > 0)) {
352
353 op->count--;
354 if (!op->count && (op->flags & TX_COUNTEVT)) {
355 struct bcm_msg_head msg_head;
356
357 /* create notification to user */
358 msg_head.opcode = TX_EXPIRED;
359 msg_head.flags = op->flags;
360 msg_head.count = op->count;
361 msg_head.ival1 = op->ival1;
362 msg_head.ival2 = op->ival2;
363 msg_head.can_id = op->can_id;
364 msg_head.nframes = 0;
365
366 bcm_send_to_user(op, &msg_head, NULL, 0);
367 }
368 }
369
370 if (op->kt_ival1.tv64 && (op->count > 0)) {
371
372 /* send (next) frame */
373 bcm_can_tx(op);
374 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival1);
375 ret = HRTIMER_RESTART;
376
377 } else {
378 if (op->kt_ival2.tv64) {
379
380 /* send (next) frame */
381 bcm_can_tx(op);
382 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
383 ret = HRTIMER_RESTART;
384 }
385 }
386
387 return ret;
388 }
389
390 /*
391 * bcm_rx_changed - create a RX_CHANGED notification due to changed content
392 */
393 static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data)
394 {
395 struct bcm_msg_head head;
396
397 /* update statistics */
398 op->frames_filtered++;
399
400 /* prevent statistics overflow */
401 if (op->frames_filtered > ULONG_MAX/100)
402 op->frames_filtered = op->frames_abs = 0;
403
404 head.opcode = RX_CHANGED;
405 head.flags = op->flags;
406 head.count = op->count;
407 head.ival1 = op->ival1;
408 head.ival2 = op->ival2;
409 head.can_id = op->can_id;
410 head.nframes = 1;
411
412 bcm_send_to_user(op, &head, data, 1);
413 }
414
415 /* TODO: move to linux/hrtimer.h */
416 static inline int hrtimer_callback_running(struct hrtimer *timer)
417 {
418 return timer->state & HRTIMER_STATE_CALLBACK;
419 }
420
421 /*
422 * bcm_rx_update_and_send - process a detected relevant receive content change
423 * 1. update the last received data
424 * 2. send a notification to the user (if possible)
425 */
426 static void bcm_rx_update_and_send(struct bcm_op *op,
427 struct can_frame *lastdata,
428 struct can_frame *rxdata)
429 {
430 memcpy(lastdata, rxdata, CFSIZ);
431
432 /* mark as used */
433 lastdata->can_dlc |= RX_RECV;
434
435 /* throttling mode inactive OR data update already running? */
436 if (!op->kt_ival2.tv64 || hrtimer_callback_running(&op->thrtimer)) {
437 /* send RX_CHANGED to the user immediately */
438 bcm_rx_changed(op, rxdata);
439 return;
440 }
441
442 if (hrtimer_active(&op->thrtimer)) {
443 /* mark as 'throttled' */
444 lastdata->can_dlc |= RX_THR;
445 return;
446 }
447
448 if (!op->kt_lastmsg.tv64) {
449 /* send first RX_CHANGED to the user immediately */
450 bcm_rx_changed(op, rxdata);
451 op->kt_lastmsg = ktime_get();
452 return;
453 }
454
455 if (ktime_us_delta(ktime_get(), op->kt_lastmsg) <
456 ktime_to_us(op->kt_ival2)) {
457 /* mark as 'throttled' and start timer */
458 lastdata->can_dlc |= RX_THR;
459 hrtimer_start(&op->thrtimer,
460 ktime_add(op->kt_lastmsg, op->kt_ival2),
461 HRTIMER_MODE_ABS);
462 return;
463 }
464
465 /* the gap was big enough, so throttling was not needed here */
466 bcm_rx_changed(op, rxdata);
467 op->kt_lastmsg = ktime_get();
468 }
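
/*
 * Editor's sketch of the userspace side of the throttling above (values
 * are placeholders; "msg" and "s" as in the sketch at the top of this
 * file): an RX_SETUP command with SETTIMER uses ival1 as the receive
 * timeout and ival2 as the minimum gap between two RX_CHANGED updates.
 *
 *      memset(&msg, 0, sizeof(msg));
 *      msg.head.opcode        = RX_SETUP;
 *      msg.head.can_id        = 0x123;
 *      msg.head.flags         = SETTIMER | RX_FILTER_ID;
 *      msg.head.ival1.tv_sec  = 5;
 *      msg.head.ival2.tv_usec = 100000;
 *      msg.head.nframes       = 0;
 *      write(s, &msg.head, sizeof(msg.head));
 *
 * With these values an RX_TIMEOUT notification is generated after 5 s of
 * silence and RX_CHANGED updates are limited to one per 100 ms.
 */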
469
470 /*
471 * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly
472 * received data stored in op->last_frames[]
473 */
474 static void bcm_rx_cmp_to_index(struct bcm_op *op, int index,
475 struct can_frame *rxdata)
476 {
477 /*
478 * no one uses the MSBs of can_dlc for comparison,
479 * so we use them here to detect the first time of reception
480 */
481
482 if (!(op->last_frames[index].can_dlc & RX_RECV)) {
483 /* received data for the first time => send update to user */
484 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
485 return;
486 }
487
488 /* do a real check in can_frame data section */
489
490 if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) !=
491 (GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) {
492 bcm_rx_update_and_send(op, &op->last_frames[index], rxdata);
493 return;
494 }
495
496 if (op->flags & RX_CHECK_DLC) {
497 /* do a real check in can_frame dlc */
498 if (rxdata->can_dlc != (op->last_frames[index].can_dlc &
499 BCM_CAN_DLC_MASK)) {
500 bcm_rx_update_and_send(op, &op->last_frames[index],
501 rxdata);
502 return;
503 }
504 }
505 }
506
507 /*
508 * bcm_rx_starttimer - enable timeout monitoring for CAN frame reception
509 */
510 static void bcm_rx_starttimer(struct bcm_op *op)
511 {
512 if (op->flags & RX_NO_AUTOTIMER)
513 return;
514
515 if (op->kt_ival1.tv64)
516 hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
517 }
518
519 /*
520 * bcm_rx_timeout_handler - when the (cyclic) CAN frame reception timed out
521 */
522 static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer)
523 {
524 struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer);
525 struct bcm_msg_head msg_head;
526
527 msg_head.opcode = RX_TIMEOUT;
528 msg_head.flags = op->flags;
529 msg_head.count = op->count;
530 msg_head.ival1 = op->ival1;
531 msg_head.ival2 = op->ival2;
532 msg_head.can_id = op->can_id;
533 msg_head.nframes = 0;
534
535 bcm_send_to_user(op, &msg_head, NULL, 0);
536
537 /* no restart of the timer is done here! */
538
539 /* if the user wants to be informed when cyclic CAN messages come back */
540 if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) {
541 /* clear received can_frames to indicate 'nothing received' */
542 memset(op->last_frames, 0, op->nframes * CFSIZ);
543 }
544
545 return HRTIMER_NORESTART;
546 }
547
548 /*
549 * bcm_rx_thr_flush - Check for throttled data and send it to userspace
550 */
551 static int bcm_rx_thr_flush(struct bcm_op *op)
552 {
553 int updated = 0;
554
555 if (op->nframes > 1) {
556 int i;
557
558 /* for MUX filter we start at index 1 */
559 for (i = 1; i < op->nframes; i++) {
560 if ((op->last_frames) &&
561 (op->last_frames[i].can_dlc & RX_THR)) {
562 op->last_frames[i].can_dlc &= ~RX_THR;
563 bcm_rx_changed(op, &op->last_frames[i]);
564 updated++;
565 }
566 }
567
568 } else {
569 /* for RX_FILTER_ID and simple filter */
570 if (op->last_frames && (op->last_frames[0].can_dlc & RX_THR)) {
571 op->last_frames[0].can_dlc &= ~RX_THR;
572 bcm_rx_changed(op, &op->last_frames[0]);
573 updated++;
574 }
575 }
576
577 return updated;
578 }
579
580 /*
581 * bcm_rx_thr_handler - the time for blocked content updates is over now:
582 * Check for throttled data and send it to userspace
583 */
584 static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer)
585 {
586 struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer);
587
588 if (bcm_rx_thr_flush(op)) {
589 hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2);
590 return HRTIMER_RESTART;
591 } else {
592 /* rearm throttle handling */
593 op->kt_lastmsg = ktime_set(0, 0);
594 return HRTIMER_NORESTART;
595 }
596 }
597
598 /*
599 * bcm_rx_handler - handle a CAN frame reception
600 */
601 static void bcm_rx_handler(struct sk_buff *skb, void *data)
602 {
603 struct bcm_op *op = (struct bcm_op *)data;
604 struct can_frame rxframe;
605 int i;
606
607 /* disable timeout */
608 hrtimer_cancel(&op->timer);
609
610 if (skb->len == sizeof(rxframe)) {
611 memcpy(&rxframe, skb->data, sizeof(rxframe));
612 /* save rx timestamp */
613 op->rx_stamp = skb->tstamp;
614 /* save originator for recvfrom() */
615 op->rx_ifindex = skb->dev->ifindex;
616 /* update statistics */
617 op->frames_abs++;
618 kfree_skb(skb);
619
620 } else {
621 kfree_skb(skb);
622 return;
623 }
624
625 if (op->can_id != rxframe.can_id)
626 return;
627
628 if (op->flags & RX_RTR_FRAME) {
629 /* send reply for RTR-request (placed in op->frames[0]) */
630 bcm_can_tx(op);
631 return;
632 }
633
634 if (op->flags & RX_FILTER_ID) {
635 /* the easiest case */
636 bcm_rx_update_and_send(op, &op->last_frames[0], &rxframe);
637 bcm_rx_starttimer(op);
638 return;
639 }
640
641 if (op->nframes == 1) {
642 /* simple compare with index 0 */
643 bcm_rx_cmp_to_index(op, 0, &rxframe);
644 bcm_rx_starttimer(op);
645 return;
646 }
647
648 if (op->nframes > 1) {
649 /*
650 * multiplex compare
651 *
652 * find the first multiplex mask that fits.
653 * Remark: The MUX-mask is stored in index 0
654 */
655
656 for (i = 1; i < op->nframes; i++) {
657 if ((GET_U64(&op->frames[0]) & GET_U64(&rxframe)) ==
658 (GET_U64(&op->frames[0]) &
659 GET_U64(&op->frames[i]))) {
660 bcm_rx_cmp_to_index(op, i, &rxframe);
661 break;
662 }
663 }
664 bcm_rx_starttimer(op);
665 }
666 }
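
/*
 * Editor's sketch of a multiplex RX_SETUP matching the code above (ids and
 * data are placeholders; "s" as in the sketch at the top of this file):
 * frames[0] carries the MUX mask, frames[1..n-1] the per-page patterns,
 * and each pattern also serves as the relevance mask that
 * bcm_rx_cmp_to_index() applies for its page.
 *
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frame[3];
 *      } mux = { 0 };
 *
 *      mux.head.opcode      = RX_SETUP;
 *      mux.head.can_id      = 0x321;
 *      mux.head.nframes     = 3;
 *      mux.frame[0].data[0] = 0xFF;
 *      mux.frame[1].data[0] = 0x01;
 *      mux.frame[1].data[1] = 0xFF;
 *      mux.frame[2].data[0] = 0x02;
 *      mux.frame[2].data[2] = 0xFF;
 *      write(s, &mux, sizeof(mux));
 *
 * Byte 0 selects the page (0x01 or 0x02); RX_CHANGED is generated on the
 * first reception of a page and whenever byte 1 of page 0x01 or byte 2 of
 * page 0x02 changes.
 */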
667
668 /*
669 * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements
670 */
671 static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id,
672 int ifindex)
673 {
674 struct bcm_op *op;
675
676 list_for_each_entry(op, ops, list) {
677 if ((op->can_id == can_id) && (op->ifindex == ifindex))
678 return op;
679 }
680
681 return NULL;
682 }
683
684 static void bcm_remove_op(struct bcm_op *op)
685 {
686 hrtimer_cancel(&op->timer);
687 hrtimer_cancel(&op->thrtimer);
688
689 if ((op->frames) && (op->frames != &op->sframe))
690 kfree(op->frames);
691
692 if ((op->last_frames) && (op->last_frames != &op->last_sframe))
693 kfree(op->last_frames);
694
695 kfree(op);
696
697 return;
698 }
699
700 static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op)
701 {
702 if (op->rx_reg_dev == dev) {
703 can_rx_unregister(dev, op->can_id, REGMASK(op->can_id),
704 bcm_rx_handler, op);
705
706 /* mark subscription as removed */
707 op->rx_reg_dev = NULL;
708 } else
709 printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device "
710 "mismatch %p %p\n", op->rx_reg_dev, dev);
711 }
712
713 /*
714 * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops)
715 */
716 static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex)
717 {
718 struct bcm_op *op, *n;
719
720 list_for_each_entry_safe(op, n, ops, list) {
721 if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
722
723 /*
724 * Don't care if we're bound or not (due to netdev
725 * problems); can_rx_unregister() is always a safe
726 * thing to do here.
727 */
728 if (op->ifindex) {
729 /*
730 * Only remove subscriptions that had not
731 * been removed due to NETDEV_UNREGISTER
732 * in bcm_notifier()
733 */
734 if (op->rx_reg_dev) {
735 struct net_device *dev;
736
737 dev = dev_get_by_index(&init_net,
738 op->ifindex);
739 if (dev) {
740 bcm_rx_unreg(dev, op);
741 dev_put(dev);
742 }
743 }
744 } else
745 can_rx_unregister(NULL, op->can_id,
746 REGMASK(op->can_id),
747 bcm_rx_handler, op);
748
749 list_del(&op->list);
750 bcm_remove_op(op);
751 return 1; /* done */
752 }
753 }
754
755 return 0; /* not found */
756 }
757
758 /*
759 * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops)
760 */
761 static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex)
762 {
763 struct bcm_op *op, *n;
764
765 list_for_each_entry_safe(op, n, ops, list) {
766 if ((op->can_id == can_id) && (op->ifindex == ifindex)) {
767 list_del(&op->list);
768 bcm_remove_op(op);
769 return 1; /* done */
770 }
771 }
772
773 return 0; /* not found */
774 }
775
776 /*
777 * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg)
778 */
779 static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head,
780 int ifindex)
781 {
782 struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex);
783
784 if (!op)
785 return -EINVAL;
786
787 /* put current values into msg_head */
788 msg_head->flags = op->flags;
789 msg_head->count = op->count;
790 msg_head->ival1 = op->ival1;
791 msg_head->ival2 = op->ival2;
792 msg_head->nframes = op->nframes;
793
794 bcm_send_to_user(op, msg_head, op->frames, 0);
795
796 return MHSIZ;
797 }
798
799 /*
800 * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg)
801 */
802 static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
803 int ifindex, struct sock *sk)
804 {
805 struct bcm_sock *bo = bcm_sk(sk);
806 struct bcm_op *op;
807 int i, err;
808
809 /* we need a real device to send frames */
810 if (!ifindex)
811 return -ENODEV;
812
813 /* we need at least one can_frame */
814 if (msg_head->nframes < 1)
815 return -EINVAL;
816
817 /* check the given can_id */
818 op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex);
819
820 if (op) {
821 /* update existing BCM operation */
822
823 /*
824 * Do we need more space for the can_frames than currently
825 * allocated? -> This is a _really_ unusual use-case and
826 * therefore (for complexity / locking reasons) it is not supported.
827 */
828 if (msg_head->nframes > op->nframes)
829 return -E2BIG;
830
831 /* update can_frames content */
832 for (i = 0; i < msg_head->nframes; i++) {
833 err = memcpy_fromiovec((u8 *)&op->frames[i],
834 msg->msg_iov, CFSIZ);
835 if (err < 0)
836 return err;
837
838 if (msg_head->flags & TX_CP_CAN_ID) {
839 /* copy can_id into frame */
840 op->frames[i].can_id = msg_head->can_id;
841 }
842 }
843
844 } else {
845 /* insert new BCM operation for the given can_id */
846
847 op = kzalloc(OPSIZ, GFP_KERNEL);
848 if (!op)
849 return -ENOMEM;
850
851 op->can_id = msg_head->can_id;
852
853 /* create array for can_frames and copy the data */
854 if (msg_head->nframes > 1) {
855 op->frames = kmalloc(msg_head->nframes * CFSIZ,
856 GFP_KERNEL);
857 if (!op->frames) {
858 kfree(op);
859 return -ENOMEM;
860 }
861 } else
862 op->frames = &op->sframe;
863
864 for (i = 0; i < msg_head->nframes; i++) {
865 err = memcpy_fromiovec((u8 *)&op->frames[i],
866 msg->msg_iov, CFSIZ);
867 if (err < 0) {
868 if (op->frames != &op->sframe)
869 kfree(op->frames);
870 kfree(op);
871 return err;
872 }
873
874 if (msg_head->flags & TX_CP_CAN_ID) {
875 /* copy can_id into frame */
876 op->frames[i].can_id = msg_head->can_id;
877 }
878 }
879
880 /* tx_ops never compare with previous received messages */
881 op->last_frames = NULL;
882
883 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
884 op->sk = sk;
885 op->ifindex = ifindex;
886
887 /* initialize uninitialized (kzalloc) structure */
888 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
889 op->timer.function = bcm_tx_timeout_handler;
890
891 /* currently unused in tx_ops */
892 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
893
894 /* add this bcm_op to the list of the tx_ops */
895 list_add(&op->list, &bo->tx_ops);
896
897 } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */
898
899 if (op->nframes != msg_head->nframes) {
900 op->nframes = msg_head->nframes;
901 /* start multiple frame transmission with index 0 */
902 op->currframe = 0;
903 }
904
905 /* check flags */
906
907 op->flags = msg_head->flags;
908
909 if (op->flags & TX_RESET_MULTI_IDX) {
910 /* start multiple frame transmission with index 0 */
911 op->currframe = 0;
912 }
913
914 if (op->flags & SETTIMER) {
915 /* set timer values */
916 op->count = msg_head->count;
917 op->ival1 = msg_head->ival1;
918 op->ival2 = msg_head->ival2;
919 op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
920 op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
921
922 /* disable an active timer due to zero values? */
923 if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64)
924 hrtimer_cancel(&op->timer);
925 }
926
927 if ((op->flags & STARTTIMER) &&
928 ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) {
929
930 /* spec: send can_frame when starting timer */
931 op->flags |= TX_ANNOUNCE;
932
933 if (op->kt_ival1.tv64 && (op->count > 0)) {
934 /* op->count-- is done in bcm_tx_timeout_handler */
935 hrtimer_start(&op->timer, op->kt_ival1,
936 HRTIMER_MODE_REL);
937 } else
938 hrtimer_start(&op->timer, op->kt_ival2,
939 HRTIMER_MODE_REL);
940 }
941
942 if (op->flags & TX_ANNOUNCE)
943 bcm_can_tx(op);
944
945 return msg_head->nframes * CFSIZ + MHSIZ;
946 }
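
/*
 * Editor's sketch of a cyclic transmission setup (values are placeholders;
 * "msg" and "s" as in the sketch at the top of this file): with SETTIMER
 * and STARTTIMER the frame is sent immediately, then count times at ival1
 * intervals and afterwards at ival2 intervals until TX_DELETE; TX_CP_CAN_ID
 * copies the can_id of the header into the frame.
 *
 *      memset(&msg, 0, sizeof(msg));
 *      msg.head.opcode        = TX_SETUP;
 *      msg.head.can_id        = 0x123;
 *      msg.head.flags         = SETTIMER | STARTTIMER | TX_CP_CAN_ID;
 *      msg.head.count         = 10;
 *      msg.head.ival1.tv_usec = 10000;
 *      msg.head.ival2.tv_usec = 100000;
 *      msg.head.nframes       = 1;
 *      msg.frame.can_dlc      = 1;
 *      msg.frame.data[0]      = 0x42;
 *      write(s, &msg, sizeof(msg));
 */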
947
948 /*
949 * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg)
950 */
951 static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
952 int ifindex, struct sock *sk)
953 {
954 struct bcm_sock *bo = bcm_sk(sk);
955 struct bcm_op *op;
956 int do_rx_register;
957 int err = 0;
958
959 if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) {
960 /* be robust against wrong usage ... */
961 msg_head->flags |= RX_FILTER_ID;
962 /* ignore trailing garbage */
963 msg_head->nframes = 0;
964 }
965
966 if ((msg_head->flags & RX_RTR_FRAME) &&
967 ((msg_head->nframes != 1) ||
968 (!(msg_head->can_id & CAN_RTR_FLAG))))
969 return -EINVAL;
970
971 /* check the given can_id */
972 op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex);
973 if (op) {
974 /* update existing BCM operation */
975
976 /*
977 * Do we need more space for the can_frames than currently
978 * allocated? -> This is a _really_ unusual use-case and
979 * therefore (for complexity / locking reasons) it is not supported.
980 */
981 if (msg_head->nframes > op->nframes)
982 return -E2BIG;
983
984 if (msg_head->nframes) {
985 /* update can_frames content */
986 err = memcpy_fromiovec((u8 *)op->frames,
987 msg->msg_iov,
988 msg_head->nframes * CFSIZ);
989 if (err < 0)
990 return err;
991
992 /* clear last_frames to indicate 'nothing received' */
993 memset(op->last_frames, 0, msg_head->nframes * CFSIZ);
994 }
995
996 op->nframes = msg_head->nframes;
997
998 /* Only an update -> do not call can_rx_register() */
999 do_rx_register = 0;
1000
1001 } else {
1002 /* insert new BCM operation for the given can_id */
1003 op = kzalloc(OPSIZ, GFP_KERNEL);
1004 if (!op)
1005 return -ENOMEM;
1006
1007 op->can_id = msg_head->can_id;
1008 op->nframes = msg_head->nframes;
1009
1010 if (msg_head->nframes > 1) {
1011 /* create array for can_frames and copy the data */
1012 op->frames = kmalloc(msg_head->nframes * CFSIZ,
1013 GFP_KERNEL);
1014 if (!op->frames) {
1015 kfree(op);
1016 return -ENOMEM;
1017 }
1018
1019 /* create and init array for received can_frames */
1020 op->last_frames = kzalloc(msg_head->nframes * CFSIZ,
1021 GFP_KERNEL);
1022 if (!op->last_frames) {
1023 kfree(op->frames);
1024 kfree(op);
1025 return -ENOMEM;
1026 }
1027
1028 } else {
1029 op->frames = &op->sframe;
1030 op->last_frames = &op->last_sframe;
1031 }
1032
1033 if (msg_head->nframes) {
1034 err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov,
1035 msg_head->nframes * CFSIZ);
1036 if (err < 0) {
1037 if (op->frames != &op->sframe)
1038 kfree(op->frames);
1039 if (op->last_frames != &op->last_sframe)
1040 kfree(op->last_frames);
1041 kfree(op);
1042 return err;
1043 }
1044 }
1045
1046 /* bcm_can_tx / bcm_tx_timeout_handler needs this */
1047 op->sk = sk;
1048 op->ifindex = ifindex;
1049
1050 /* initialize uninitialized (kzalloc) structure */
1051 hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1052 op->timer.function = bcm_rx_timeout_handler;
1053
1054 hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1055 op->thrtimer.function = bcm_rx_thr_handler;
1056
1057 /* add this bcm_op to the list of the rx_ops */
1058 list_add(&op->list, &bo->rx_ops);
1059
1060 /* call can_rx_register() */
1061 do_rx_register = 1;
1062
1063 } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */
1064
1065 /* check flags */
1066 op->flags = msg_head->flags;
1067
1068 if (op->flags & RX_RTR_FRAME) {
1069
1070 /* no timers in RTR-mode */
1071 hrtimer_cancel(&op->thrtimer);
1072 hrtimer_cancel(&op->timer);
1073
1074 /*
1075 * funny feature in RX(!)_SETUP only for RTR-mode:
1076 * copy can_id into frame BUT without RTR-flag to
1077 * prevent a full-load-loopback-test ... ;-]
1078 */
1079 if ((op->flags & TX_CP_CAN_ID) ||
1080 (op->frames[0].can_id == op->can_id))
1081 op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG;
1082
1083 } else {
1084 if (op->flags & SETTIMER) {
1085
1086 /* set timer value */
1087 op->ival1 = msg_head->ival1;
1088 op->ival2 = msg_head->ival2;
1089 op->kt_ival1 = timeval_to_ktime(msg_head->ival1);
1090 op->kt_ival2 = timeval_to_ktime(msg_head->ival2);
1091
1092 /* disable an active timer due to zero value? */
1093 if (!op->kt_ival1.tv64)
1094 hrtimer_cancel(&op->timer);
1095
1096 /*
1097 * In any case cancel the throttle timer, flush
1098 * potentially blocked msgs and reset throttle handling
1099 */
1100 op->kt_lastmsg = ktime_set(0, 0);
1101 hrtimer_cancel(&op->thrtimer);
1102 bcm_rx_thr_flush(op);
1103 }
1104
1105 if ((op->flags & STARTTIMER) && op->kt_ival1.tv64)
1106 hrtimer_start(&op->timer, op->kt_ival1,
1107 HRTIMER_MODE_REL);
1108 }
1109
1110 /* now we can register for can_ids, if we added a new bcm_op */
1111 if (do_rx_register) {
1112 if (ifindex) {
1113 struct net_device *dev;
1114
1115 dev = dev_get_by_index(&init_net, ifindex);
1116 if (dev) {
1117 err = can_rx_register(dev, op->can_id,
1118 REGMASK(op->can_id),
1119 bcm_rx_handler, op,
1120 "bcm");
1121
1122 op->rx_reg_dev = dev;
1123 dev_put(dev);
1124 }
1125
1126 } else
1127 err = can_rx_register(NULL, op->can_id,
1128 REGMASK(op->can_id),
1129 bcm_rx_handler, op, "bcm");
1130 if (err) {
1131 /* this bcm rx op is broken -> remove it */
1132 list_del(&op->list);
1133 bcm_remove_op(op);
1134 return err;
1135 }
1136 }
1137
1138 return msg_head->nframes * CFSIZ + MHSIZ;
1139 }
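
/*
 * Editor's sketch of a plain content filter (placeholders again; "msg" and
 * "s" as above): with nframes == 1 and no RX_FILTER_ID the single can_frame
 * acts as a bit mask selecting the payload bits that are relevant for
 * change detection (see bcm_rx_cmp_to_index()); RX_CHECK_DLC additionally
 * reports changes of the received dlc.
 *
 *      memset(&msg, 0, sizeof(msg));
 *      msg.head.opcode   = RX_SETUP;
 *      msg.head.can_id   = 0x123;
 *      msg.head.flags    = RX_CHECK_DLC;
 *      msg.head.nframes  = 1;
 *      msg.frame.data[0] = 0xFF;
 *      msg.frame.data[1] = 0xFF;
 *      write(s, &msg, sizeof(msg));
 *
 * Besides the first reception, only changes in the first two payload bytes
 * (or in the dlc) of can_id 0x123 generate RX_CHANGED notifications.
 */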
1140
1141 /*
1142 * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg)
1143 */
1144 static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
1145 {
1146 struct sk_buff *skb;
1147 struct net_device *dev;
1148 int err;
1149
1150 /* we need a real device to send frames */
1151 if (!ifindex)
1152 return -ENODEV;
1153
1154 skb = alloc_skb(CFSIZ, GFP_KERNEL);
1155
1156 if (!skb)
1157 return -ENOMEM;
1158
1159 err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ);
1160 if (err < 0) {
1161 kfree_skb(skb);
1162 return err;
1163 }
1164
1165 dev = dev_get_by_index(&init_net, ifindex);
1166 if (!dev) {
1167 kfree_skb(skb);
1168 return -ENODEV;
1169 }
1170
1171 skb->dev = dev;
1172 skb->sk = sk;
1173 can_send(skb, 1); /* send with loopback */
1174 dev_put(dev);
1175
1176 return CFSIZ + MHSIZ;
1177 }
1178
1179 /*
1180 * bcm_sendmsg - process BCM commands (opcodes) from userspace
1181 */
1182 static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock,
1183 struct msghdr *msg, size_t size)
1184 {
1185 struct sock *sk = sock->sk;
1186 struct bcm_sock *bo = bcm_sk(sk);
1187 int ifindex = bo->ifindex; /* default ifindex for this bcm_op */
1188 struct bcm_msg_head msg_head;
1189 int ret; /* read bytes or error codes as return value */
1190
1191 if (!bo->bound)
1192 return -ENOTCONN;
1193
1194 /* check for alternative ifindex for this bcm_op */
1195
1196 if (!ifindex && msg->msg_name) {
1197 /* no bound device as default => check msg_name */
1198 struct sockaddr_can *addr =
1199 (struct sockaddr_can *)msg->msg_name;
1200
1201 if (addr->can_family != AF_CAN)
1202 return -EINVAL;
1203
1204 /* ifindex from sendto() */
1205 ifindex = addr->can_ifindex;
1206
1207 if (ifindex) {
1208 struct net_device *dev;
1209
1210 dev = dev_get_by_index(&init_net, ifindex);
1211 if (!dev)
1212 return -ENODEV;
1213
1214 if (dev->type != ARPHRD_CAN) {
1215 dev_put(dev);
1216 return -ENODEV;
1217 }
1218
1219 dev_put(dev);
1220 }
1221 }
1222
1223 /* read message head information */
1224
1225 ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ);
1226 if (ret < 0)
1227 return ret;
1228
1229 lock_sock(sk);
1230
1231 switch (msg_head.opcode) {
1232
1233 case TX_SETUP:
1234 ret = bcm_tx_setup(&msg_head, msg, ifindex, sk);
1235 break;
1236
1237 case RX_SETUP:
1238 ret = bcm_rx_setup(&msg_head, msg, ifindex, sk);
1239 break;
1240
1241 case TX_DELETE:
1242 if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex))
1243 ret = MHSIZ;
1244 else
1245 ret = -EINVAL;
1246 break;
1247
1248 case RX_DELETE:
1249 if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex))
1250 ret = MHSIZ;
1251 else
1252 ret = -EINVAL;
1253 break;
1254
1255 case TX_READ:
1256 /* reuse msg_head for the reply to TX_READ */
1257 msg_head.opcode = TX_STATUS;
1258 ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex);
1259 break;
1260
1261 case RX_READ:
1262 /* reuse msg_head for the reply to RX_READ */
1263 msg_head.opcode = RX_STATUS;
1264 ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex);
1265 break;
1266
1267 case TX_SEND:
1268 /* we need at least one can_frame */
1269 if (msg_head.nframes < 1)
1270 ret = -EINVAL;
1271 else
1272 ret = bcm_tx_send(msg, ifindex, sk);
1273 break;
1274
1275 default:
1276 ret = -EINVAL;
1277 break;
1278 }
1279
1280 release_sock(sk);
1281
1282 return ret;
1283 }
1284
1285 /*
1286 * notification handler for netdevice status changes
1287 */
1288 static int bcm_notifier(struct notifier_block *nb, unsigned long msg,
1289 void *data)
1290 {
1291 struct net_device *dev = (struct net_device *)data;
1292 struct bcm_sock *bo = container_of(nb, struct bcm_sock, notifier);
1293 struct sock *sk = &bo->sk;
1294 struct bcm_op *op;
1295 int notify_enodev = 0;
1296
1297 if (dev_net(dev) != &init_net)
1298 return NOTIFY_DONE;
1299
1300 if (dev->type != ARPHRD_CAN)
1301 return NOTIFY_DONE;
1302
1303 switch (msg) {
1304
1305 case NETDEV_UNREGISTER:
1306 lock_sock(sk);
1307
1308 /* remove device specific receive entries */
1309 list_for_each_entry(op, &bo->rx_ops, list)
1310 if (op->rx_reg_dev == dev)
1311 bcm_rx_unreg(dev, op);
1312
1313 /* remove device reference, if this is our bound device */
1314 if (bo->bound && bo->ifindex == dev->ifindex) {
1315 bo->bound = 0;
1316 bo->ifindex = 0;
1317 notify_enodev = 1;
1318 }
1319
1320 release_sock(sk);
1321
1322 if (notify_enodev) {
1323 sk->sk_err = ENODEV;
1324 if (!sock_flag(sk, SOCK_DEAD))
1325 sk->sk_error_report(sk);
1326 }
1327 break;
1328
1329 case NETDEV_DOWN:
1330 if (bo->bound && bo->ifindex == dev->ifindex) {
1331 sk->sk_err = ENETDOWN;
1332 if (!sock_flag(sk, SOCK_DEAD))
1333 sk->sk_error_report(sk);
1334 }
1335 }
1336
1337 return NOTIFY_DONE;
1338 }
1339
1340 /*
1341 * initial settings for all BCM sockets to be set at socket creation time
1342 */
1343 static int bcm_init(struct sock *sk)
1344 {
1345 struct bcm_sock *bo = bcm_sk(sk);
1346
1347 bo->bound = 0;
1348 bo->ifindex = 0;
1349 bo->dropped_usr_msgs = 0;
1350 bo->bcm_proc_read = NULL;
1351
1352 INIT_LIST_HEAD(&bo->tx_ops);
1353 INIT_LIST_HEAD(&bo->rx_ops);
1354
1355 /* set notifier */
1356 bo->notifier.notifier_call = bcm_notifier;
1357
1358 register_netdevice_notifier(&bo->notifier);
1359
1360 return 0;
1361 }
1362
1363 /*
1364 * standard socket functions
1365 */
1366 static int bcm_release(struct socket *sock)
1367 {
1368 struct sock *sk = sock->sk;
1369 struct bcm_sock *bo = bcm_sk(sk);
1370 struct bcm_op *op, *next;
1371
1372 /* remove bcm_ops, timer, rx_unregister(), etc. */
1373
1374 unregister_netdevice_notifier(&bo->notifier);
1375
1376 lock_sock(sk);
1377
1378 list_for_each_entry_safe(op, next, &bo->tx_ops, list)
1379 bcm_remove_op(op);
1380
1381 list_for_each_entry_safe(op, next, &bo->rx_ops, list) {
1382 /*
1383 * Don't care if we're bound or not (due to netdev problems);
1384 * can_rx_unregister() is always a safe thing to do here.
1385 */
1386 if (op->ifindex) {
1387 /*
1388 * Only remove subscriptions that had not
1389 * been removed due to NETDEV_UNREGISTER
1390 * in bcm_notifier()
1391 */
1392 if (op->rx_reg_dev) {
1393 struct net_device *dev;
1394
1395 dev = dev_get_by_index(&init_net, op->ifindex);
1396 if (dev) {
1397 bcm_rx_unreg(dev, op);
1398 dev_put(dev);
1399 }
1400 }
1401 } else
1402 can_rx_unregister(NULL, op->can_id,
1403 REGMASK(op->can_id),
1404 bcm_rx_handler, op);
1405
1406 bcm_remove_op(op);
1407 }
1408
1409 /* remove procfs entry */
1410 if (proc_dir && bo->bcm_proc_read)
1411 remove_proc_entry(bo->procname, proc_dir);
1412
1413 /* remove device reference */
1414 if (bo->bound) {
1415 bo->bound = 0;
1416 bo->ifindex = 0;
1417 }
1418
1419 release_sock(sk);
1420 sock_put(sk);
1421
1422 return 0;
1423 }
1424
1425 static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len,
1426 int flags)
1427 {
1428 struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
1429 struct sock *sk = sock->sk;
1430 struct bcm_sock *bo = bcm_sk(sk);
1431
1432 if (bo->bound)
1433 return -EISCONN;
1434
1435 /* bind a device to this socket */
1436 if (addr->can_ifindex) {
1437 struct net_device *dev;
1438
1439 dev = dev_get_by_index(&init_net, addr->can_ifindex);
1440 if (!dev)
1441 return -ENODEV;
1442
1443 if (dev->type != ARPHRD_CAN) {
1444 dev_put(dev);
1445 return -ENODEV;
1446 }
1447
1448 bo->ifindex = dev->ifindex;
1449 dev_put(dev);
1450
1451 } else {
1452 /* no interface reference for ifindex = 0 ('any' CAN device) */
1453 bo->ifindex = 0;
1454 }
1455
1456 bo->bound = 1;
1457
1458 if (proc_dir) {
1459 /* unique socket address as filename */
1460 sprintf(bo->procname, "%p", sock);
1461 bo->bcm_proc_read = create_proc_read_entry(bo->procname, 0644,
1462 proc_dir,
1463 bcm_read_proc, sk);
1464 }
1465
1466 return 0;
1467 }
1468
1469 static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock,
1470 struct msghdr *msg, size_t size, int flags)
1471 {
1472 struct sock *sk = sock->sk;
1473 struct sk_buff *skb;
1474 int error = 0;
1475 int noblock;
1476 int err;
1477
1478 noblock = flags & MSG_DONTWAIT;
1479 flags &= ~MSG_DONTWAIT;
1480 skb = skb_recv_datagram(sk, flags, noblock, &error);
1481 if (!skb)
1482 return error;
1483
1484 if (skb->len < size)
1485 size = skb->len;
1486
1487 err = memcpy_toiovec(msg->msg_iov, skb->data, size);
1488 if (err < 0) {
1489 skb_free_datagram(sk, skb);
1490 return err;
1491 }
1492
1493 sock_recv_timestamp(msg, sk, skb);
1494
1495 if (msg->msg_name) {
1496 msg->msg_namelen = sizeof(struct sockaddr_can);
1497 memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
1498 }
1499
1500 skb_free_datagram(sk, skb);
1501
1502 return size;
1503 }
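
/*
 * Editor's sketch of the receive side (handle_update() is a hypothetical
 * helper; "s" as above; the layout matches bcm_send_to_user()): every
 * notification starts with a struct bcm_msg_head; RX_CHANGED carries
 * exactly one can_frame, RX_TIMEOUT and TX_EXPIRED carry nframes == 0.
 *
 *      struct {
 *              struct bcm_msg_head head;
 *              struct can_frame frame;
 *      } nmsg;
 *      ssize_t n = read(s, &nmsg, sizeof(nmsg));
 *
 *      if (n >= (ssize_t)sizeof(nmsg.head) &&
 *          nmsg.head.opcode == RX_CHANGED && nmsg.head.nframes == 1)
 *              handle_update(nmsg.head.can_id, &nmsg.frame);
 */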
1504
1505 static struct proto_ops bcm_ops __read_mostly = {
1506 .family = PF_CAN,
1507 .release = bcm_release,
1508 .bind = sock_no_bind,
1509 .connect = bcm_connect,
1510 .socketpair = sock_no_socketpair,
1511 .accept = sock_no_accept,
1512 .getname = sock_no_getname,
1513 .poll = datagram_poll,
1514 .ioctl = NULL, /* use can_ioctl() from af_can.c */
1515 .listen = sock_no_listen,
1516 .shutdown = sock_no_shutdown,
1517 .setsockopt = sock_no_setsockopt,
1518 .getsockopt = sock_no_getsockopt,
1519 .sendmsg = bcm_sendmsg,
1520 .recvmsg = bcm_recvmsg,
1521 .mmap = sock_no_mmap,
1522 .sendpage = sock_no_sendpage,
1523 };
1524
1525 static struct proto bcm_proto __read_mostly = {
1526 .name = "CAN_BCM",
1527 .owner = THIS_MODULE,
1528 .obj_size = sizeof(struct bcm_sock),
1529 .init = bcm_init,
1530 };
1531
1532 static struct can_proto bcm_can_proto __read_mostly = {
1533 .type = SOCK_DGRAM,
1534 .protocol = CAN_BCM,
1535 .capability = -1,
1536 .ops = &bcm_ops,
1537 .prot = &bcm_proto,
1538 };
1539
1540 static int __init bcm_module_init(void)
1541 {
1542 int err;
1543
1544 printk(banner);
1545
1546 err = can_proto_register(&bcm_can_proto);
1547 if (err < 0) {
1548 printk(KERN_ERR "can: registration of bcm protocol failed\n");
1549 return err;
1550 }
1551
1552 /* create /proc/net/can-bcm directory */
1553 proc_dir = proc_mkdir("can-bcm", init_net.proc_net);
1554
1555 if (proc_dir)
1556 proc_dir->owner = THIS_MODULE;
1557
1558 return 0;
1559 }
1560
1561 static void __exit bcm_module_exit(void)
1562 {
1563 can_proto_unregister(&bcm_can_proto);
1564
1565 if (proc_dir)
1566 proc_net_remove(&init_net, "can-bcm");
1567 }
1568
1569 module_init(bcm_module_init);
1570 module_exit(bcm_module_exit);