1 /*
2 * drivers/net/ethernet/mellanox/mlxsw/pci.c
3 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 * 3. Neither the names of the copyright holders nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * Alternatively, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") version 2 as published by the Free
20 * Software Foundation.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
23 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
32 * POSSIBILITY OF SUCH DAMAGE.
33 */
34
35 #include <linux/kernel.h>
36 #include <linux/module.h>
37 #include <linux/export.h>
38 #include <linux/err.h>
39 #include <linux/device.h>
40 #include <linux/pci.h>
41 #include <linux/interrupt.h>
42 #include <linux/wait.h>
43 #include <linux/types.h>
44 #include <linux/skbuff.h>
45 #include <linux/if_vlan.h>
46 #include <linux/log2.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49
50 #include "pci.h"
51 #include "core.h"
52 #include "cmd.h"
53 #include "port.h"
54
55 static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
56
57 static const struct pci_device_id mlxsw_pci_id_table[] = {
58 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SWITCHX2), 0},
59 {0, }
60 };
61
62 static struct dentry *mlxsw_pci_dbg_root;
63
64 static const char *mlxsw_pci_device_kind_get(const struct pci_device_id *id)
65 {
66 switch (id->device) {
67 case PCI_DEVICE_ID_MELLANOX_SWITCHX2:
68 return MLXSW_DEVICE_KIND_SWITCHX2;
69 default:
70 BUG();
71 }
72 }
73
74 #define mlxsw_pci_write32(mlxsw_pci, reg, val) \
75 iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
76 #define mlxsw_pci_read32(mlxsw_pci, reg) \
77 ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
78
79 enum mlxsw_pci_queue_type {
80 MLXSW_PCI_QUEUE_TYPE_SDQ,
81 MLXSW_PCI_QUEUE_TYPE_RDQ,
82 MLXSW_PCI_QUEUE_TYPE_CQ,
83 MLXSW_PCI_QUEUE_TYPE_EQ,
84 };
85
86 static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type)
87 {
88 switch (q_type) {
89 case MLXSW_PCI_QUEUE_TYPE_SDQ:
90 return "sdq";
91 case MLXSW_PCI_QUEUE_TYPE_RDQ:
92 return "rdq";
93 case MLXSW_PCI_QUEUE_TYPE_CQ:
94 return "cq";
95 case MLXSW_PCI_QUEUE_TYPE_EQ:
96 return "eq";
97 }
98 BUG();
99 }
100
101 #define MLXSW_PCI_QUEUE_TYPE_COUNT 4
102
103 static const u16 mlxsw_pci_doorbell_type_offset[] = {
104 MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
105 MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
106 MLXSW_PCI_DOORBELL_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
107 MLXSW_PCI_DOORBELL_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
108 };
109
110 static const u16 mlxsw_pci_doorbell_arm_type_offset[] = {
111 0, /* unused */
112 0, /* unused */
113 MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
114 MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
115 };
116
117 struct mlxsw_pci_mem_item {
118 char *buf;
119 dma_addr_t mapaddr;
120 size_t size;
121 };
122
123 struct mlxsw_pci_queue_elem_info {
124 char *elem; /* pointer to actual dma mapped element mem chunk */
125 union {
126 struct {
127 struct sk_buff *skb;
128 } sdq;
129 struct {
130 struct sk_buff *skb;
131 } rdq;
132 } u;
133 };
134
135 struct mlxsw_pci_queue {
136 spinlock_t lock; /* for queue accesses */
137 struct mlxsw_pci_mem_item mem_item;
138 struct mlxsw_pci_queue_elem_info *elem_info;
139 u16 producer_counter;
140 u16 consumer_counter;
141 u16 count; /* number of elements in queue */
142 u8 num; /* queue number */
143 u8 elem_size; /* size of one element */
144 enum mlxsw_pci_queue_type type;
145 struct tasklet_struct tasklet; /* queue processing tasklet */
146 struct mlxsw_pci *pci;
147 union {
148 struct {
149 u32 comp_sdq_count;
150 u32 comp_rdq_count;
151 } cq;
152 struct {
153 u32 ev_cmd_count;
154 u32 ev_comp_count;
155 u32 ev_other_count;
156 } eq;
157 } u;
158 };
159
160 struct mlxsw_pci_queue_type_group {
161 struct mlxsw_pci_queue *q;
162 u8 count; /* number of queues in group */
163 };
164
165 struct mlxsw_pci {
166 struct pci_dev *pdev;
167 u8 __iomem *hw_addr;
168 struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT];
169 u32 doorbell_offset;
170 struct msix_entry msix_entry;
171 struct mlxsw_core *core;
172 struct {
173 u16 num_pages;
174 struct mlxsw_pci_mem_item *items;
175 } fw_area;
176 struct {
177 struct mutex lock; /* Lock access to command registers */
178 bool nopoll;
179 wait_queue_head_t wait;
180 bool wait_done;
181 struct {
182 u8 status;
183 u64 out_param;
184 } comp;
185 } cmd;
186 struct mlxsw_bus_info bus_info;
187 struct dentry *dbg_dir;
188 };
189
190 static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q)
191 {
192 tasklet_schedule(&q->tasklet);
193 }
194
195 static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q,
196 size_t elem_size, int elem_index)
197 {
198 return q->mem_item.buf + (elem_size * elem_index);
199 }
200
201 static struct mlxsw_pci_queue_elem_info *
202 mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue *q, int elem_index)
203 {
204 return &q->elem_info[elem_index];
205 }
206
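/* Return the element info at the producer position, or NULL when the queue
 * is full (the producer counter has run a full ring length ahead of the
 * consumer). Both counters are free-running u16s; masking with (count - 1)
 * maps them onto the ring, whose size is always a power of two.
 */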
207 static struct mlxsw_pci_queue_elem_info *
208 mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue *q)
209 {
210 int index = q->producer_counter & (q->count - 1);
211
212 if ((q->producer_counter - q->consumer_counter) == q->count)
213 return NULL;
214 return mlxsw_pci_queue_elem_info_get(q, index);
215 }
216
217 static struct mlxsw_pci_queue_elem_info *
218 mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue *q)
219 {
220 int index = q->consumer_counter & (q->count - 1);
221
222 return mlxsw_pci_queue_elem_info_get(q, index);
223 }
224
225 static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, int elem_index)
226 {
227 return mlxsw_pci_queue_elem_info_get(q, elem_index)->elem;
228 }
229
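/* Ownership test for the alternating owner-bit scheme: the expected
 * software-owned value flips each time the consumer counter wraps the ring,
 * which (consumer_counter & q->count) tracks since q->count is a power of
 * two. An element whose owner bit differs from that parity is taken to be
 * still held by the hardware.
 */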
230 static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue *q, bool owner_bit)
231 {
232 return owner_bit != !!(q->consumer_counter & q->count);
233 }
234
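/* Pop the next element at the consumer position: return NULL if it is still
 * owned by hardware, otherwise advance the consumer counter and return it.
 */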
235 static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue *q,
236 u32 (*get_elem_owner_func)(char *))
237 {
238 struct mlxsw_pci_queue_elem_info *elem_info;
239 char *elem;
240 bool owner_bit;
241
242 elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
243 elem = elem_info->elem;
244 owner_bit = get_elem_owner_func(elem);
245 if (mlxsw_pci_elem_hw_owned(q, owner_bit))
246 return NULL;
247 q->consumer_counter++;
248 rmb(); /* make sure we read owned bit before the rest of elem */
249 return elem;
250 }
251
252 static struct mlxsw_pci_queue_type_group *
253 mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci,
254 enum mlxsw_pci_queue_type q_type)
255 {
256 return &mlxsw_pci->queues[q_type];
257 }
258
259 static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci,
260 enum mlxsw_pci_queue_type q_type)
261 {
262 struct mlxsw_pci_queue_type_group *queue_group;
263
264 queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type);
265 return queue_group->count;
266 }
267
268 static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci)
269 {
270 return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ);
271 }
272
273 static u8 mlxsw_pci_rdq_count(struct mlxsw_pci *mlxsw_pci)
274 {
275 return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_RDQ);
276 }
277
278 static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci)
279 {
280 return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ);
281 }
282
283 static u8 mlxsw_pci_eq_count(struct mlxsw_pci *mlxsw_pci)
284 {
285 return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ);
286 }
287
288 static struct mlxsw_pci_queue *
289 __mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci,
290 enum mlxsw_pci_queue_type q_type, u8 q_num)
291 {
292 return &mlxsw_pci->queues[q_type].q[q_num];
293 }
294
295 static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci,
296 u8 q_num)
297 {
298 return __mlxsw_pci_queue_get(mlxsw_pci,
299 MLXSW_PCI_QUEUE_TYPE_SDQ, q_num);
300 }
301
302 static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci,
303 u8 q_num)
304 {
305 return __mlxsw_pci_queue_get(mlxsw_pci,
306 MLXSW_PCI_QUEUE_TYPE_RDQ, q_num);
307 }
308
309 static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci,
310 u8 q_num)
311 {
312 return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num);
313 }
314
315 static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci,
316 u8 q_num)
317 {
318 return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num);
319 }
320
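/* Doorbell helpers: each doorbell register lives at the queried
 * doorbell_offset plus a per-queue-type offset and a per-queue slot.
 * The plain variants post producer/consumer counters; the "arm" variants
 * write to the separate ARM_CQ/ARM_EQ offsets, presumably to re-arm event
 * generation for the queue.
 */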
321 static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci,
322 struct mlxsw_pci_queue *q,
323 u16 val)
324 {
325 mlxsw_pci_write32(mlxsw_pci,
326 DOORBELL(mlxsw_pci->doorbell_offset,
327 mlxsw_pci_doorbell_type_offset[q->type],
328 q->num), val);
329 }
330
331 static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci *mlxsw_pci,
332 struct mlxsw_pci_queue *q,
333 u16 val)
334 {
335 mlxsw_pci_write32(mlxsw_pci,
336 DOORBELL(mlxsw_pci->doorbell_offset,
337 mlxsw_pci_doorbell_arm_type_offset[q->type],
338 q->num), val);
339 }
340
341 static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci *mlxsw_pci,
342 struct mlxsw_pci_queue *q)
343 {
344 wmb(); /* ensure all writes are done before we ring a bell */
345 __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q, q->producer_counter);
346 }
347
348 static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci *mlxsw_pci,
349 struct mlxsw_pci_queue *q)
350 {
351 wmb(); /* ensure all writes are done before we ring a bell */
352 __mlxsw_pci_queue_doorbell_set(mlxsw_pci, q,
353 q->consumer_counter + q->count);
354 }
355
356 static void
357 mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci *mlxsw_pci,
358 struct mlxsw_pci_queue *q)
359 {
360 wmb(); /* ensure all writes are done before we ring a bell */
361 __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci, q, q->consumer_counter);
362 }
363
364 static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q,
365 int page_index)
366 {
367 return q->mem_item.mapaddr + MLXSW_PCI_PAGE_SIZE * page_index;
368 }
369
370 static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
371 struct mlxsw_pci_queue *q)
372 {
373 int i;
374 int err;
375
376 q->producer_counter = 0;
377 q->consumer_counter = 0;
378
379 	/* Set the CQ to the one with the same number as this SDQ. */
380 mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num);
381 mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, 7);
382 mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
383 for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
384 dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
385
386 mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
387 }
388
389 err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num);
390 if (err)
391 return err;
392 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
393 return 0;
394 }
395
396 static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci,
397 struct mlxsw_pci_queue *q)
398 {
399 mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num);
400 }
401
402 static int mlxsw_pci_sdq_dbg_read(struct seq_file *file, void *data)
403 {
404 struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
405 struct mlxsw_pci_queue *q;
406 int i;
407 static const char hdr[] =
408 "NUM PROD_COUNT CONS_COUNT COUNT\n";
409
410 	seq_puts(file, hdr);
411 for (i = 0; i < mlxsw_pci_sdq_count(mlxsw_pci); i++) {
412 q = mlxsw_pci_sdq_get(mlxsw_pci, i);
413 spin_lock_bh(&q->lock);
414 seq_printf(file, "%3d %10d %10d %5d\n",
415 i, q->producer_counter, q->consumer_counter,
416 q->count);
417 spin_unlock_bh(&q->lock);
418 }
419 return 0;
420 }
421
422 static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe,
423 int index, char *frag_data, size_t frag_len,
424 int direction)
425 {
426 struct pci_dev *pdev = mlxsw_pci->pdev;
427 dma_addr_t mapaddr;
428
429 mapaddr = pci_map_single(pdev, frag_data, frag_len, direction);
430 if (unlikely(pci_dma_mapping_error(pdev, mapaddr))) {
431 if (net_ratelimit())
432 dev_err(&pdev->dev, "failed to dma map tx frag\n");
433 return -EIO;
434 }
435 mlxsw_pci_wqe_address_set(wqe, index, mapaddr);
436 mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len);
437 return 0;
438 }
439
440 static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe,
441 int index, int direction)
442 {
443 struct pci_dev *pdev = mlxsw_pci->pdev;
444 size_t frag_len = mlxsw_pci_wqe_byte_count_get(wqe, index);
445 dma_addr_t mapaddr = mlxsw_pci_wqe_address_get(wqe, index);
446
447 if (!frag_len)
448 return;
449 pci_unmap_single(pdev, mapaddr, frag_len, direction);
450 }
451
452 static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci,
453 struct mlxsw_pci_queue_elem_info *elem_info)
454 {
455 size_t buf_len = MLXSW_PORT_MAX_MTU;
456 char *wqe = elem_info->elem;
457 struct sk_buff *skb;
458 int err;
459
460 elem_info->u.rdq.skb = NULL;
461 skb = netdev_alloc_skb_ip_align(NULL, buf_len);
462 if (!skb)
463 return -ENOMEM;
464
465 /* Assume that wqe was previously zeroed. */
466
467 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
468 buf_len, DMA_FROM_DEVICE);
469 if (err)
470 goto err_frag_map;
471
472 elem_info->u.rdq.skb = skb;
473 return 0;
474
475 err_frag_map:
476 dev_kfree_skb_any(skb);
477 return err;
478 }
479
480 static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci,
481 struct mlxsw_pci_queue_elem_info *elem_info)
482 {
483 struct sk_buff *skb;
484 char *wqe;
485
486 skb = elem_info->u.rdq.skb;
487 wqe = elem_info->elem;
488
489 mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
490 dev_kfree_skb_any(skb);
491 }
492
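/* RDQ init: besides creating the queue in hardware, every element is
 * pre-filled with a freshly allocated, DMA-mapped skb and handed to the
 * device by ringing the producer doorbell, so receive buffers are available
 * from the start.
 */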
493 static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
494 struct mlxsw_pci_queue *q)
495 {
496 struct mlxsw_pci_queue_elem_info *elem_info;
497 int i;
498 int err;
499
500 q->producer_counter = 0;
501 q->consumer_counter = 0;
502
503 	/* Set the CQ to the one with the same number as this RDQ, offset by
504 	 * MLXSW_PCI_SDQS_COUNT since the lower CQ numbers are assigned to SDQs.
505 	 */
506 mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num + MLXSW_PCI_SDQS_COUNT);
507 mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */
508 for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
509 dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
510
511 mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox, i, mapaddr);
512 }
513
514 err = mlxsw_cmd_sw2hw_rdq(mlxsw_pci->core, mbox, q->num);
515 if (err)
516 return err;
517
518 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
519
520 for (i = 0; i < q->count; i++) {
521 elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
522 BUG_ON(!elem_info);
523 err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
524 if (err)
525 goto rollback;
526 /* Everything is set up, ring doorbell to pass elem to HW */
527 q->producer_counter++;
528 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
529 }
530
531 return 0;
532
533 rollback:
534 for (i--; i >= 0; i--) {
535 elem_info = mlxsw_pci_queue_elem_info_get(q, i);
536 mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
537 }
538 mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
539
540 return err;
541 }
542
543 static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci,
544 struct mlxsw_pci_queue *q)
545 {
546 struct mlxsw_pci_queue_elem_info *elem_info;
547 int i;
548
549 mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num);
550 for (i = 0; i < q->count; i++) {
551 elem_info = mlxsw_pci_queue_elem_info_get(q, i);
552 mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info);
553 }
554 }
555
556 static int mlxsw_pci_rdq_dbg_read(struct seq_file *file, void *data)
557 {
558 struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
559 struct mlxsw_pci_queue *q;
560 int i;
561 static const char hdr[] =
562 "NUM PROD_COUNT CONS_COUNT COUNT\n";
563
564 	seq_puts(file, hdr);
565 for (i = 0; i < mlxsw_pci_rdq_count(mlxsw_pci); i++) {
566 q = mlxsw_pci_rdq_get(mlxsw_pci, i);
567 spin_lock_bh(&q->lock);
568 seq_printf(file, "%3d %10d %10d %5d\n",
569 i, q->producer_counter, q->consumer_counter,
570 q->count);
571 spin_unlock_bh(&q->lock);
572 }
573 return 0;
574 }
575
576 static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
577 struct mlxsw_pci_queue *q)
578 {
579 int i;
580 int err;
581
582 q->consumer_counter = 0;
583
584 for (i = 0; i < q->count; i++) {
585 char *elem = mlxsw_pci_queue_elem_get(q, i);
586
587 mlxsw_pci_cqe_owner_set(elem, 1);
588 }
589
590 mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox, 0); /* CQE ver 0 */
591 mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM);
592 mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox, 0);
593 mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0);
594 mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count));
595 for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
596 dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
597
598 mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr);
599 }
600 err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num);
601 if (err)
602 return err;
603 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
604 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
605 return 0;
606 }
607
608 static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci,
609 struct mlxsw_pci_queue *q)
610 {
611 mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num);
612 }
613
614 static int mlxsw_pci_cq_dbg_read(struct seq_file *file, void *data)
615 {
616 struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
617
618 struct mlxsw_pci_queue *q;
619 int i;
620 static const char hdr[] =
621 "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";
622
623 	seq_puts(file, hdr);
624 for (i = 0; i < mlxsw_pci_cq_count(mlxsw_pci); i++) {
625 q = mlxsw_pci_cq_get(mlxsw_pci, i);
626 spin_lock_bh(&q->lock);
627 seq_printf(file, "%3d %10d %10d %10d %5d\n",
628 i, q->consumer_counter, q->u.cq.comp_sdq_count,
629 q->u.cq.comp_rdq_count, q->count);
630 spin_unlock_bh(&q->lock);
631 }
632 return 0;
633 }
634
635 static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci,
636 struct mlxsw_pci_queue *q,
637 u16 consumer_counter_limit,
638 char *cqe)
639 {
640 struct pci_dev *pdev = mlxsw_pci->pdev;
641 struct mlxsw_pci_queue_elem_info *elem_info;
642 char *wqe;
643 struct sk_buff *skb;
644 int i;
645
646 spin_lock(&q->lock);
647 elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
648 skb = elem_info->u.sdq.skb;
649 wqe = elem_info->elem;
650 for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
651 mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
652 dev_kfree_skb_any(skb);
653 elem_info->u.sdq.skb = NULL;
654
655 if (q->consumer_counter++ != consumer_counter_limit)
656 dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n");
657 spin_unlock(&q->lock);
658 }
659
660 static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci,
661 struct mlxsw_pci_queue *q,
662 u16 consumer_counter_limit,
663 char *cqe)
664 {
665 struct pci_dev *pdev = mlxsw_pci->pdev;
666 struct mlxsw_pci_queue_elem_info *elem_info;
667 char *wqe;
668 struct sk_buff *skb;
669 struct mlxsw_rx_info rx_info;
670 u16 byte_count;
671 int err;
672
673 elem_info = mlxsw_pci_queue_elem_info_consumer_get(q);
674 	skb = elem_info->u.rdq.skb;
675 if (!skb)
676 return;
677 wqe = elem_info->elem;
678 mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE);
679
680 if (q->consumer_counter++ != consumer_counter_limit)
681 dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n");
682
683 	/* We do not support LAG yet */
684 if (mlxsw_pci_cqe_lag_get(cqe))
685 goto drop;
686
687 rx_info.sys_port = mlxsw_pci_cqe_system_port_get(cqe);
688 rx_info.trap_id = mlxsw_pci_cqe_trap_id_get(cqe);
689
690 byte_count = mlxsw_pci_cqe_byte_count_get(cqe);
691 if (mlxsw_pci_cqe_crc_get(cqe))
692 byte_count -= ETH_FCS_LEN;
693 skb_put(skb, byte_count);
694 mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info);
695
696 put_new_skb:
697 memset(wqe, 0, q->elem_size);
698 err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info);
699 if (err && net_ratelimit())
700 dev_dbg(&pdev->dev, "Failed to alloc skb for RDQ\n");
701 /* Everything is set up, ring doorbell to pass elem to HW */
702 q->producer_counter++;
703 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
704 return;
705
706 drop:
707 dev_kfree_skb_any(skb);
708 goto put_new_skb;
709 }
710
711 static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q)
712 {
713 return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_cqe_owner_get);
714 }
715
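/* CQ tasklet: drain up to half a ring's worth of completions, dispatching
 * each CQE to the SDQ or RDQ handler based on the send/receive (sr) bit and
 * the reported DQ number, then ring the consumer doorbell and re-arm the CQ
 * if anything was handled.
 */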
716 static void mlxsw_pci_cq_tasklet(unsigned long data)
717 {
718 struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
719 struct mlxsw_pci *mlxsw_pci = q->pci;
720 char *cqe;
721 int items = 0;
722 int credits = q->count >> 1;
723
724 while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) {
725 u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe);
726 u8 sendq = mlxsw_pci_cqe_sr_get(cqe);
727 u8 dqn = mlxsw_pci_cqe_dqn_get(cqe);
728
729 if (sendq) {
730 struct mlxsw_pci_queue *sdq;
731
732 sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn);
733 mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq,
734 wqe_counter, cqe);
735 q->u.cq.comp_sdq_count++;
736 } else {
737 struct mlxsw_pci_queue *rdq;
738
739 rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn);
740 mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq,
741 wqe_counter, cqe);
742 q->u.cq.comp_rdq_count++;
743 }
744 if (++items == credits)
745 break;
746 }
747 if (items) {
748 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
749 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
750 }
751 }
752
753 static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
754 struct mlxsw_pci_queue *q)
755 {
756 int i;
757 int err;
758
759 q->consumer_counter = 0;
760
761 for (i = 0; i < q->count; i++) {
762 char *elem = mlxsw_pci_queue_elem_get(q, i);
763
764 mlxsw_pci_eqe_owner_set(elem, 1);
765 }
766
767 mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */
768 mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox, 0);
769 mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */
770 mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count));
771 for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) {
772 dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i);
773
774 mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr);
775 }
776 err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num);
777 if (err)
778 return err;
779 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
780 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
781 return 0;
782 }
783
784 static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci,
785 struct mlxsw_pci_queue *q)
786 {
787 mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num);
788 }
789
790 static int mlxsw_pci_eq_dbg_read(struct seq_file *file, void *data)
791 {
792 struct mlxsw_pci *mlxsw_pci = dev_get_drvdata(file->private);
793 struct mlxsw_pci_queue *q;
794 int i;
795 static const char hdr[] =
796 "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";
797
798 	seq_puts(file, hdr);
799 for (i = 0; i < mlxsw_pci_eq_count(mlxsw_pci); i++) {
800 q = mlxsw_pci_eq_get(mlxsw_pci, i);
801 spin_lock_bh(&q->lock);
802 seq_printf(file, "%3d %10d %10d %10d %10d %5d\n",
803 i, q->consumer_counter, q->u.eq.ev_cmd_count,
804 q->u.eq.ev_comp_count, q->u.eq.ev_other_count,
805 q->count);
806 spin_unlock_bh(&q->lock);
807 }
808 return 0;
809 }
810
811 static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe)
812 {
813 mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe);
814 mlxsw_pci->cmd.comp.out_param =
815 ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 |
816 mlxsw_pci_eqe_cmd_out_param_l_get(eqe);
817 mlxsw_pci->cmd.wait_done = true;
818 wake_up(&mlxsw_pci->cmd.wait);
819 }
820
821 static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q)
822 {
823 return mlxsw_pci_queue_sw_elem_get(q, mlxsw_pci_eqe_owner_get);
824 }
825
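/* EQ tasklet: consume up to half a ring of EQEs, completing a pending
 * command (waking the cmd waiter) and collecting the set of CQs that
 * reported completions; the corresponding CQ tasklets are scheduled after
 * the EQ has been acknowledged and re-armed.
 */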
826 static void mlxsw_pci_eq_tasklet(unsigned long data)
827 {
828 struct mlxsw_pci_queue *q = (struct mlxsw_pci_queue *) data;
829 struct mlxsw_pci *mlxsw_pci = q->pci;
830 unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT)];
831 char *eqe;
832 u8 cqn;
833 bool cq_handle = false;
834 int items = 0;
835 int credits = q->count >> 1;
836
837 memset(&active_cqns, 0, sizeof(active_cqns));
838
839 while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) {
840 u8 event_type = mlxsw_pci_eqe_event_type_get(eqe);
841
842 switch (event_type) {
843 case MLXSW_PCI_EQE_EVENT_TYPE_CMD:
844 mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe);
845 q->u.eq.ev_cmd_count++;
846 break;
847 case MLXSW_PCI_EQE_EVENT_TYPE_COMP:
848 cqn = mlxsw_pci_eqe_cqn_get(eqe);
849 set_bit(cqn, active_cqns);
850 cq_handle = true;
851 q->u.eq.ev_comp_count++;
852 break;
853 default:
854 q->u.eq.ev_other_count++;
855 }
856 if (++items == credits)
857 break;
858 }
859 if (items) {
860 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q);
861 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q);
862 }
863
864 if (!cq_handle)
865 return;
866 for_each_set_bit(cqn, active_cqns, MLXSW_PCI_CQS_COUNT) {
867 q = mlxsw_pci_cq_get(mlxsw_pci, cqn);
868 mlxsw_pci_queue_tasklet_schedule(q);
869 }
870 }
871
872 struct mlxsw_pci_queue_ops {
873 const char *name;
874 enum mlxsw_pci_queue_type type;
875 int (*init)(struct mlxsw_pci *mlxsw_pci, char *mbox,
876 struct mlxsw_pci_queue *q);
877 void (*fini)(struct mlxsw_pci *mlxsw_pci,
878 struct mlxsw_pci_queue *q);
879 void (*tasklet)(unsigned long data);
880 int (*dbg_read)(struct seq_file *s, void *data);
881 u16 elem_count;
882 u8 elem_size;
883 };
884
885 static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops = {
886 .type = MLXSW_PCI_QUEUE_TYPE_SDQ,
887 .init = mlxsw_pci_sdq_init,
888 .fini = mlxsw_pci_sdq_fini,
889 .dbg_read = mlxsw_pci_sdq_dbg_read,
890 .elem_count = MLXSW_PCI_WQE_COUNT,
891 .elem_size = MLXSW_PCI_WQE_SIZE,
892 };
893
894 static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops = {
895 .type = MLXSW_PCI_QUEUE_TYPE_RDQ,
896 .init = mlxsw_pci_rdq_init,
897 .fini = mlxsw_pci_rdq_fini,
898 .dbg_read = mlxsw_pci_rdq_dbg_read,
899 .elem_count = MLXSW_PCI_WQE_COUNT,
900 .elem_size = MLXSW_PCI_WQE_SIZE
901 };
902
903 static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = {
904 .type = MLXSW_PCI_QUEUE_TYPE_CQ,
905 .init = mlxsw_pci_cq_init,
906 .fini = mlxsw_pci_cq_fini,
907 .tasklet = mlxsw_pci_cq_tasklet,
908 .dbg_read = mlxsw_pci_cq_dbg_read,
909 .elem_count = MLXSW_PCI_CQE_COUNT,
910 .elem_size = MLXSW_PCI_CQE_SIZE
911 };
912
913 static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = {
914 .type = MLXSW_PCI_QUEUE_TYPE_EQ,
915 .init = mlxsw_pci_eq_init,
916 .fini = mlxsw_pci_eq_fini,
917 .tasklet = mlxsw_pci_eq_tasklet,
918 .dbg_read = mlxsw_pci_eq_dbg_read,
919 .elem_count = MLXSW_PCI_EQE_COUNT,
920 .elem_size = MLXSW_PCI_EQE_SIZE
921 };
922
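/* Common queue setup: allocate one DMA-coherent area of MLXSW_PCI_AQ_SIZE
 * bytes for the descriptors, build the elem_info array pointing into it,
 * and let the type-specific init op program the queue into hardware via the
 * supplied mailbox.
 */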
923 static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
924 const struct mlxsw_pci_queue_ops *q_ops,
925 struct mlxsw_pci_queue *q, u8 q_num)
926 {
927 struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
928 int i;
929 int err;
930
931 spin_lock_init(&q->lock);
932 q->num = q_num;
933 q->count = q_ops->elem_count;
934 q->elem_size = q_ops->elem_size;
935 q->type = q_ops->type;
936 q->pci = mlxsw_pci;
937
938 if (q_ops->tasklet)
939 tasklet_init(&q->tasklet, q_ops->tasklet, (unsigned long) q);
940
941 mem_item->size = MLXSW_PCI_AQ_SIZE;
942 mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
943 mem_item->size,
944 &mem_item->mapaddr);
945 if (!mem_item->buf)
946 return -ENOMEM;
947 memset(mem_item->buf, 0, mem_item->size);
948
949 q->elem_info = kcalloc(q->count, sizeof(*q->elem_info), GFP_KERNEL);
950 if (!q->elem_info) {
951 err = -ENOMEM;
952 goto err_elem_info_alloc;
953 }
954
955 	/* Initialize the DMA-mapped element info array (elem_info) for
956 	 * easy access later.
957 	 */
958 for (i = 0; i < q->count; i++) {
959 struct mlxsw_pci_queue_elem_info *elem_info;
960
961 elem_info = mlxsw_pci_queue_elem_info_get(q, i);
962 elem_info->elem =
963 __mlxsw_pci_queue_elem_get(q, q_ops->elem_size, i);
964 }
965
966 mlxsw_cmd_mbox_zero(mbox);
967 err = q_ops->init(mlxsw_pci, mbox, q);
968 if (err)
969 goto err_q_ops_init;
970 return 0;
971
972 err_q_ops_init:
973 kfree(q->elem_info);
974 err_elem_info_alloc:
975 pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
976 mem_item->buf, mem_item->mapaddr);
977 return err;
978 }
979
980 static void mlxsw_pci_queue_fini(struct mlxsw_pci *mlxsw_pci,
981 const struct mlxsw_pci_queue_ops *q_ops,
982 struct mlxsw_pci_queue *q)
983 {
984 struct mlxsw_pci_mem_item *mem_item = &q->mem_item;
985
986 q_ops->fini(mlxsw_pci, q);
987 kfree(q->elem_info);
988 pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
989 mem_item->buf, mem_item->mapaddr);
990 }
991
992 static int mlxsw_pci_queue_group_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
993 const struct mlxsw_pci_queue_ops *q_ops,
994 u8 num_qs)
995 {
996 struct pci_dev *pdev = mlxsw_pci->pdev;
997 struct mlxsw_pci_queue_type_group *queue_group;
998 char tmp[16];
999 int i;
1000 int err;
1001
1002 queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
1003 queue_group->q = kcalloc(num_qs, sizeof(*queue_group->q), GFP_KERNEL);
1004 if (!queue_group->q)
1005 return -ENOMEM;
1006
1007 for (i = 0; i < num_qs; i++) {
1008 err = mlxsw_pci_queue_init(mlxsw_pci, mbox, q_ops,
1009 &queue_group->q[i], i);
1010 if (err)
1011 goto err_queue_init;
1012 }
1013 queue_group->count = num_qs;
1014
1015 sprintf(tmp, "%s_stats", mlxsw_pci_queue_type_str(q_ops->type));
1016 debugfs_create_devm_seqfile(&pdev->dev, tmp, mlxsw_pci->dbg_dir,
1017 q_ops->dbg_read);
1018
1019 return 0;
1020
1021 err_queue_init:
1022 for (i--; i >= 0; i--)
1023 mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
1024 kfree(queue_group->q);
1025 return err;
1026 }
1027
1028 static void mlxsw_pci_queue_group_fini(struct mlxsw_pci *mlxsw_pci,
1029 const struct mlxsw_pci_queue_ops *q_ops)
1030 {
1031 struct mlxsw_pci_queue_type_group *queue_group;
1032 int i;
1033
1034 queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_ops->type);
1035 for (i = 0; i < queue_group->count; i++)
1036 mlxsw_pci_queue_fini(mlxsw_pci, q_ops, &queue_group->q[i]);
1037 kfree(queue_group->q);
1038 }
1039
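/* Bring up all async queues. The firmware-reported queue counts and sizes
 * must match the driver's compile-time constants; the queues are then
 * created in dependency order: EQs first, then CQs, SDQs and RDQs.
 */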
1040 static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox)
1041 {
1042 struct pci_dev *pdev = mlxsw_pci->pdev;
1043 u8 num_sdqs;
1044 u8 sdq_log2sz;
1045 u8 num_rdqs;
1046 u8 rdq_log2sz;
1047 u8 num_cqs;
1048 u8 cq_log2sz;
1049 u8 num_eqs;
1050 u8 eq_log2sz;
1051 int err;
1052
1053 mlxsw_cmd_mbox_zero(mbox);
1054 err = mlxsw_cmd_query_aq_cap(mlxsw_pci->core, mbox);
1055 if (err)
1056 return err;
1057
1058 num_sdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox);
1059 sdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox);
1060 num_rdqs = mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox);
1061 rdq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox);
1062 num_cqs = mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox);
1063 cq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox);
1064 num_eqs = mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox);
1065 eq_log2sz = mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox);
1066
1067 if ((num_sdqs != MLXSW_PCI_SDQS_COUNT) ||
1068 (num_rdqs != MLXSW_PCI_RDQS_COUNT) ||
1069 (num_cqs != MLXSW_PCI_CQS_COUNT) ||
1070 (num_eqs != MLXSW_PCI_EQS_COUNT)) {
1071 dev_err(&pdev->dev, "Unsupported number of queues\n");
1072 return -EINVAL;
1073 }
1074
1075 if ((1 << sdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
1076 (1 << rdq_log2sz != MLXSW_PCI_WQE_COUNT) ||
1077 (1 << cq_log2sz != MLXSW_PCI_CQE_COUNT) ||
1078 (1 << eq_log2sz != MLXSW_PCI_EQE_COUNT)) {
1079 dev_err(&pdev->dev, "Unsupported number of async queue descriptors\n");
1080 return -EINVAL;
1081 }
1082
1083 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops,
1084 num_eqs);
1085 if (err) {
1086 dev_err(&pdev->dev, "Failed to initialize event queues\n");
1087 return err;
1088 }
1089
1090 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_cq_ops,
1091 num_cqs);
1092 if (err) {
1093 dev_err(&pdev->dev, "Failed to initialize completion queues\n");
1094 goto err_cqs_init;
1095 }
1096
1097 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_sdq_ops,
1098 num_sdqs);
1099 if (err) {
1100 dev_err(&pdev->dev, "Failed to initialize send descriptor queues\n");
1101 goto err_sdqs_init;
1102 }
1103
1104 err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_rdq_ops,
1105 num_rdqs);
1106 if (err) {
1107 dev_err(&pdev->dev, "Failed to initialize receive descriptor queues\n");
1108 goto err_rdqs_init;
1109 }
1110
1111 	/* The command interface had to poll until now; the EQs are up, so switch to events */
1112 mlxsw_pci->cmd.nopoll = true;
1113 return 0;
1114
1115 err_rdqs_init:
1116 mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
1117 err_sdqs_init:
1118 mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
1119 err_cqs_init:
1120 mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
1121 return err;
1122 }
1123
1124 static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci)
1125 {
1126 mlxsw_pci->cmd.nopoll = false;
1127 mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops);
1128 mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops);
1129 mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops);
1130 mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_eq_ops);
1131 }
1132
1133 static void
1134 mlxsw_pci_config_profile_swid_config(struct mlxsw_pci *mlxsw_pci,
1135 char *mbox, int index,
1136 const struct mlxsw_swid_config *swid)
1137 {
1138 u8 mask = 0;
1139
1140 if (swid->used_type) {
1141 mlxsw_cmd_mbox_config_profile_swid_config_type_set(
1142 mbox, index, swid->type);
1143 mask |= 1;
1144 }
1145 if (swid->used_properties) {
1146 mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
1147 mbox, index, swid->properties);
1148 mask |= 2;
1149 }
1150 mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox, index, mask);
1151 }
1152
1153 static int mlxsw_pci_config_profile(struct mlxsw_pci *mlxsw_pci, char *mbox,
1154 const struct mlxsw_config_profile *profile)
1155 {
1156 int i;
1157
1158 mlxsw_cmd_mbox_zero(mbox);
1159
1160 if (profile->used_max_vepa_channels) {
1161 mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
1162 mbox, 1);
1163 mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
1164 mbox, profile->max_vepa_channels);
1165 }
1166 if (profile->used_max_lag) {
1167 mlxsw_cmd_mbox_config_profile_set_max_lag_set(
1168 mbox, 1);
1169 mlxsw_cmd_mbox_config_profile_max_lag_set(
1170 mbox, profile->max_lag);
1171 }
1172 if (profile->used_max_port_per_lag) {
1173 mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
1174 mbox, 1);
1175 mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
1176 mbox, profile->max_port_per_lag);
1177 }
1178 if (profile->used_max_mid) {
1179 mlxsw_cmd_mbox_config_profile_set_max_mid_set(
1180 mbox, 1);
1181 mlxsw_cmd_mbox_config_profile_max_mid_set(
1182 mbox, profile->max_mid);
1183 }
1184 if (profile->used_max_pgt) {
1185 mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
1186 mbox, 1);
1187 mlxsw_cmd_mbox_config_profile_max_pgt_set(
1188 mbox, profile->max_pgt);
1189 }
1190 if (profile->used_max_system_port) {
1191 mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
1192 mbox, 1);
1193 mlxsw_cmd_mbox_config_profile_max_system_port_set(
1194 mbox, profile->max_system_port);
1195 }
1196 if (profile->used_max_vlan_groups) {
1197 mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
1198 mbox, 1);
1199 mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
1200 mbox, profile->max_vlan_groups);
1201 }
1202 if (profile->used_max_regions) {
1203 mlxsw_cmd_mbox_config_profile_set_max_regions_set(
1204 mbox, 1);
1205 mlxsw_cmd_mbox_config_profile_max_regions_set(
1206 mbox, profile->max_regions);
1207 }
1208 if (profile->used_flood_tables) {
1209 mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
1210 mbox, 1);
1211 mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
1212 mbox, profile->max_flood_tables);
1213 mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
1214 mbox, profile->max_vid_flood_tables);
1215 }
1216 if (profile->used_flood_mode) {
1217 mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
1218 mbox, 1);
1219 mlxsw_cmd_mbox_config_profile_flood_mode_set(
1220 mbox, profile->flood_mode);
1221 }
1222 if (profile->used_max_ib_mc) {
1223 mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
1224 mbox, 1);
1225 mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
1226 mbox, profile->max_ib_mc);
1227 }
1228 if (profile->used_max_pkey) {
1229 mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
1230 mbox, 1);
1231 mlxsw_cmd_mbox_config_profile_max_pkey_set(
1232 mbox, profile->max_pkey);
1233 }
1234 if (profile->used_ar_sec) {
1235 mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
1236 mbox, 1);
1237 mlxsw_cmd_mbox_config_profile_ar_sec_set(
1238 mbox, profile->ar_sec);
1239 }
1240 if (profile->used_adaptive_routing_group_cap) {
1241 mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
1242 mbox, 1);
1243 mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
1244 mbox, profile->adaptive_routing_group_cap);
1245 }
1246
1247 for (i = 0; i < MLXSW_CONFIG_PROFILE_SWID_COUNT; i++)
1248 mlxsw_pci_config_profile_swid_config(mlxsw_pci, mbox, i,
1249 &profile->swid_config[i]);
1250
1251 return mlxsw_cmd_config_profile_set(mlxsw_pci->core, mbox);
1252 }
1253
1254 static int mlxsw_pci_boardinfo(struct mlxsw_pci *mlxsw_pci, char *mbox)
1255 {
1256 struct mlxsw_bus_info *bus_info = &mlxsw_pci->bus_info;
1257 int err;
1258
1259 mlxsw_cmd_mbox_zero(mbox);
1260 err = mlxsw_cmd_boardinfo(mlxsw_pci->core, mbox);
1261 if (err)
1262 return err;
1263 mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox, bus_info->vsd);
1264 mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox, bus_info->psid);
1265 return 0;
1266 }
1267
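/* Allocate the number of firmware pages reported by QUERY_FW as individual
 * DMA-coherent pages and hand them to the device with the MAP_FA command.
 */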
1268 static int mlxsw_pci_fw_area_init(struct mlxsw_pci *mlxsw_pci, char *mbox,
1269 u16 num_pages)
1270 {
1271 struct mlxsw_pci_mem_item *mem_item;
1272 int i;
1273 int err;
1274
1275 mlxsw_pci->fw_area.items = kcalloc(num_pages, sizeof(*mem_item),
1276 GFP_KERNEL);
1277 if (!mlxsw_pci->fw_area.items)
1278 return -ENOMEM;
1279 mlxsw_pci->fw_area.num_pages = num_pages;
1280
1281 mlxsw_cmd_mbox_zero(mbox);
1282 for (i = 0; i < num_pages; i++) {
1283 mem_item = &mlxsw_pci->fw_area.items[i];
1284
1285 mem_item->size = MLXSW_PCI_PAGE_SIZE;
1286 mem_item->buf = pci_alloc_consistent(mlxsw_pci->pdev,
1287 mem_item->size,
1288 &mem_item->mapaddr);
1289 if (!mem_item->buf) {
1290 err = -ENOMEM;
1291 goto err_alloc;
1292 }
1293 mlxsw_cmd_mbox_map_fa_pa_set(mbox, i, mem_item->mapaddr);
1294 mlxsw_cmd_mbox_map_fa_log2size_set(mbox, i, 0); /* 1 page */
1295 }
1296
1297 err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, num_pages);
1298 if (err)
1299 goto err_cmd_map_fa;
1300
1301 return 0;
1302
1303 err_cmd_map_fa:
1304 err_alloc:
1305 for (i--; i >= 0; i--) {
1306 mem_item = &mlxsw_pci->fw_area.items[i];
1307
1308 pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
1309 mem_item->buf, mem_item->mapaddr);
1310 }
1311 kfree(mlxsw_pci->fw_area.items);
1312 return err;
1313 }
1314
1315 static void mlxsw_pci_fw_area_fini(struct mlxsw_pci *mlxsw_pci)
1316 {
1317 struct mlxsw_pci_mem_item *mem_item;
1318 int i;
1319
1320 mlxsw_cmd_unmap_fa(mlxsw_pci->core);
1321
1322 for (i = 0; i < mlxsw_pci->fw_area.num_pages; i++) {
1323 mem_item = &mlxsw_pci->fw_area.items[i];
1324
1325 pci_free_consistent(mlxsw_pci->pdev, mem_item->size,
1326 mem_item->buf, mem_item->mapaddr);
1327 }
1328 kfree(mlxsw_pci->fw_area.items);
1329 }
1330
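/* A single MSI-X vector covers all event queues, so the handler simply
 * schedules every EQ tasklet.
 */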
1331 static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id)
1332 {
1333 struct mlxsw_pci *mlxsw_pci = dev_id;
1334 struct mlxsw_pci_queue *q;
1335 int i;
1336
1337 for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) {
1338 q = mlxsw_pci_eq_get(mlxsw_pci, i);
1339 mlxsw_pci_queue_tasklet_schedule(q);
1340 }
1341 return IRQ_HANDLED;
1342 }
1343
1344 static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core,
1345 const struct mlxsw_config_profile *profile)
1346 {
1347 struct mlxsw_pci *mlxsw_pci = bus_priv;
1348 struct pci_dev *pdev = mlxsw_pci->pdev;
1349 char *mbox;
1350 u16 num_pages;
1351 int err;
1352
1353 mutex_init(&mlxsw_pci->cmd.lock);
1354 init_waitqueue_head(&mlxsw_pci->cmd.wait);
1355
1356 mlxsw_pci->core = mlxsw_core;
1357
1358 mbox = mlxsw_cmd_mbox_alloc();
1359 if (!mbox)
1360 return -ENOMEM;
1361 err = mlxsw_cmd_query_fw(mlxsw_core, mbox);
1362 if (err)
1363 goto err_query_fw;
1364
1365 mlxsw_pci->bus_info.fw_rev.major =
1366 mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox);
1367 mlxsw_pci->bus_info.fw_rev.minor =
1368 mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox);
1369 mlxsw_pci->bus_info.fw_rev.subminor =
1370 mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox);
1371
1372 if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox) != 1) {
1373 dev_err(&pdev->dev, "Unsupported cmd interface revision ID queried from hw\n");
1374 err = -EINVAL;
1375 goto err_iface_rev;
1376 }
1377 if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox) != 0) {
1378 dev_err(&pdev->dev, "Unsupported doorbell page bar queried from hw\n");
1379 err = -EINVAL;
1380 goto err_doorbell_page_bar;
1381 }
1382
1383 mlxsw_pci->doorbell_offset =
1384 mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox);
1385
1386 num_pages = mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox);
1387 err = mlxsw_pci_fw_area_init(mlxsw_pci, mbox, num_pages);
1388 if (err)
1389 goto err_fw_area_init;
1390
1391 err = mlxsw_pci_boardinfo(mlxsw_pci, mbox);
1392 if (err)
1393 goto err_boardinfo;
1394
1395 err = mlxsw_pci_config_profile(mlxsw_pci, mbox, profile);
1396 if (err)
1397 goto err_config_profile;
1398
1399 err = mlxsw_pci_aqs_init(mlxsw_pci, mbox);
1400 if (err)
1401 goto err_aqs_init;
1402
1403 err = request_irq(mlxsw_pci->msix_entry.vector,
1404 mlxsw_pci_eq_irq_handler, 0,
1405 mlxsw_pci_driver_name, mlxsw_pci);
1406 if (err) {
1407 dev_err(&pdev->dev, "IRQ request failed\n");
1408 goto err_request_eq_irq;
1409 }
1410
1411 goto mbox_put;
1412
1413 err_request_eq_irq:
1414 mlxsw_pci_aqs_fini(mlxsw_pci);
1415 err_aqs_init:
1416 err_config_profile:
1417 err_boardinfo:
1418 mlxsw_pci_fw_area_fini(mlxsw_pci);
1419 err_fw_area_init:
1420 err_doorbell_page_bar:
1421 err_iface_rev:
1422 err_query_fw:
1423 mbox_put:
1424 mlxsw_cmd_mbox_free(mbox);
1425 return err;
1426 }
1427
1428 static void mlxsw_pci_fini(void *bus_priv)
1429 {
1430 struct mlxsw_pci *mlxsw_pci = bus_priv;
1431
1432 free_irq(mlxsw_pci->msix_entry.vector, mlxsw_pci);
1433 mlxsw_pci_aqs_fini(mlxsw_pci);
1434 mlxsw_pci_fw_area_fini(mlxsw_pci);
1435 }
1436
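/* Spread transmissions across the SDQs by taking the local port number
 * modulo the SDQ count.
 */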
1437 static struct mlxsw_pci_queue *
1438 mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci,
1439 const struct mlxsw_tx_info *tx_info)
1440 {
1441 u8 sdqn = tx_info->local_port % mlxsw_pci_sdq_count(mlxsw_pci);
1442
1443 return mlxsw_pci_sdq_get(mlxsw_pci, sdqn);
1444 }
1445
1446 static bool mlxsw_pci_skb_transmit_busy(void *bus_priv,
1447 const struct mlxsw_tx_info *tx_info)
1448 {
1449 struct mlxsw_pci *mlxsw_pci = bus_priv;
1450 struct mlxsw_pci_queue *q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
1451
1452 return !mlxsw_pci_queue_elem_info_producer_get(q);
1453 }
1454
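/* Transmit path: skbs with more fragments than the WQE has scatter/gather
 * entries are linearized first; the linear data and each page fragment are
 * then DMA-mapped into the WQE and the producer doorbell is rung. Returns
 * -EAGAIN when the chosen SDQ is full.
 */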
1455 static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb,
1456 const struct mlxsw_tx_info *tx_info)
1457 {
1458 struct mlxsw_pci *mlxsw_pci = bus_priv;
1459 struct mlxsw_pci_queue *q;
1460 struct mlxsw_pci_queue_elem_info *elem_info;
1461 char *wqe;
1462 int i;
1463 int err;
1464
1465 if (skb_shinfo(skb)->nr_frags > MLXSW_PCI_WQE_SG_ENTRIES - 1) {
1466 err = skb_linearize(skb);
1467 if (err)
1468 return err;
1469 }
1470
1471 q = mlxsw_pci_sdq_pick(mlxsw_pci, tx_info);
1472 spin_lock_bh(&q->lock);
1473 elem_info = mlxsw_pci_queue_elem_info_producer_get(q);
1474 if (!elem_info) {
1475 /* queue is full */
1476 err = -EAGAIN;
1477 goto unlock;
1478 }
1479 elem_info->u.sdq.skb = skb;
1480
1481 wqe = elem_info->elem;
1482 mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */
1483 mlxsw_pci_wqe_lp_set(wqe, !!tx_info->is_emad);
1484 mlxsw_pci_wqe_type_set(wqe, MLXSW_PCI_WQE_TYPE_ETHERNET);
1485
1486 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data,
1487 skb_headlen(skb), DMA_TO_DEVICE);
1488 if (err)
1489 goto unlock;
1490
1491 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1492 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1493
1494 err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, i + 1,
1495 skb_frag_address(frag),
1496 skb_frag_size(frag),
1497 DMA_TO_DEVICE);
1498 if (err)
1499 goto unmap_frags;
1500 }
1501
1502 	/* Set the byte count of unused WQE scatter/gather entries to zero. */
1503 for (i++; i < MLXSW_PCI_WQE_SG_ENTRIES; i++)
1504 mlxsw_pci_wqe_byte_count_set(wqe, i, 0);
1505
1506 /* Everything is set up, ring producer doorbell to get HW going */
1507 q->producer_counter++;
1508 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q);
1509
1510 goto unlock;
1511
1512 unmap_frags:
1513 for (; i >= 0; i--)
1514 mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE);
1515 unlock:
1516 spin_unlock_bh(&q->lock);
1517 return err;
1518 }
1519
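/* Execute a command through the Command Interface Registers (CIR): the
 * input/output mailboxes are DMA-mapped and their bus addresses written to
 * the CIR parameter registers. Before the event queues are up
 * (cmd.nopoll is false) completion is detected by polling the GO bit in
 * CIR_CTRL; afterwards the command-completion EQE wakes us via cmd.wait.
 */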
1520 static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod,
1521 u32 in_mod, bool out_mbox_direct,
1522 char *in_mbox, size_t in_mbox_size,
1523 char *out_mbox, size_t out_mbox_size,
1524 u8 *p_status)
1525 {
1526 struct mlxsw_pci *mlxsw_pci = bus_priv;
1527 dma_addr_t in_mapaddr = 0;
1528 dma_addr_t out_mapaddr = 0;
1529 bool evreq = mlxsw_pci->cmd.nopoll;
1530 unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS);
1531 bool *p_wait_done = &mlxsw_pci->cmd.wait_done;
1532 int err;
1533
1534 *p_status = MLXSW_CMD_STATUS_OK;
1535
1536 err = mutex_lock_interruptible(&mlxsw_pci->cmd.lock);
1537 if (err)
1538 return err;
1539
1540 if (in_mbox) {
1541 in_mapaddr = pci_map_single(mlxsw_pci->pdev, in_mbox,
1542 in_mbox_size, PCI_DMA_TODEVICE);
1543 if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
1544 in_mapaddr))) {
1545 err = -EIO;
1546 goto err_in_mbox_map;
1547 }
1548 }
1549 mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_HI, in_mapaddr >> 32);
1550 mlxsw_pci_write32(mlxsw_pci, CIR_IN_PARAM_LO, in_mapaddr);
1551
1552 if (out_mbox) {
1553 out_mapaddr = pci_map_single(mlxsw_pci->pdev, out_mbox,
1554 out_mbox_size, PCI_DMA_FROMDEVICE);
1555 if (unlikely(pci_dma_mapping_error(mlxsw_pci->pdev,
1556 out_mapaddr))) {
1557 err = -EIO;
1558 goto err_out_mbox_map;
1559 }
1560 }
1561 mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_HI, out_mapaddr >> 32);
1562 mlxsw_pci_write32(mlxsw_pci, CIR_OUT_PARAM_LO, out_mapaddr);
1563
1564 mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod);
1565 mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0);
1566
1567 *p_wait_done = false;
1568
1569 wmb(); /* all needs to be written before we write control register */
1570 mlxsw_pci_write32(mlxsw_pci, CIR_CTRL,
1571 MLXSW_PCI_CIR_CTRL_GO_BIT |
1572 (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) |
1573 (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) |
1574 opcode);
1575
1576 if (!evreq) {
1577 unsigned long end;
1578
1579 end = jiffies + timeout;
1580 do {
1581 u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL);
1582
1583 if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) {
1584 *p_wait_done = true;
1585 *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT;
1586 break;
1587 }
1588 cond_resched();
1589 } while (time_before(jiffies, end));
1590 } else {
1591 wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout);
1592 *p_status = mlxsw_pci->cmd.comp.status;
1593 }
1594
1595 err = 0;
1596 if (*p_wait_done) {
1597 if (*p_status)
1598 err = -EIO;
1599 } else {
1600 err = -ETIMEDOUT;
1601 }
1602
1603 if (!err && out_mbox && out_mbox_direct) {
1604 		/* Some commands do not use the output param as an address to a
1605 		 * mailbox but store their output directly in registers. In that
1606 		 * case, copy the registers into the mbox buffer.
1607 		 */
1608 __be32 tmp;
1609
1610 if (!evreq) {
1611 tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
1612 CIR_OUT_PARAM_HI));
1613 memcpy(out_mbox, &tmp, sizeof(tmp));
1614 tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci,
1615 CIR_OUT_PARAM_LO));
1616 memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp));
1617 }
1618 }
1619
1620 if (out_mapaddr)
1621 pci_unmap_single(mlxsw_pci->pdev, out_mapaddr, out_mbox_size,
1622 PCI_DMA_FROMDEVICE);
1623
1624 /* fall through */
1625
1626 err_out_mbox_map:
1627 if (in_mapaddr)
1628 pci_unmap_single(mlxsw_pci->pdev, in_mapaddr, in_mbox_size,
1629 PCI_DMA_TODEVICE);
1630 err_in_mbox_map:
1631 mutex_unlock(&mlxsw_pci->cmd.lock);
1632
1633 return err;
1634 }
1635
1636 static const struct mlxsw_bus mlxsw_pci_bus = {
1637 .kind = "pci",
1638 .init = mlxsw_pci_init,
1639 .fini = mlxsw_pci_fini,
1640 .skb_transmit_busy = mlxsw_pci_skb_transmit_busy,
1641 .skb_transmit = mlxsw_pci_skb_transmit,
1642 .cmd_exec = mlxsw_pci_cmd_exec,
1643 };
1644
1645 static int mlxsw_pci_sw_reset(struct mlxsw_pci *mlxsw_pci)
1646 {
1647 mlxsw_pci_write32(mlxsw_pci, SW_RESET, MLXSW_PCI_SW_RESET_RST_BIT);
1648 	/* Current firmware does not let us know when the reset is done,
1649 	 * so we just wait here for a fixed amount of time and hope for the best.
1650 */
1651 msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS);
1652 return 0;
1653 }
1654
1655 static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1656 {
1657 struct mlxsw_pci *mlxsw_pci;
1658 int err;
1659
1660 mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL);
1661 if (!mlxsw_pci)
1662 return -ENOMEM;
1663
1664 err = pci_enable_device(pdev);
1665 if (err) {
1666 dev_err(&pdev->dev, "pci_enable_device failed\n");
1667 goto err_pci_enable_device;
1668 }
1669
1670 err = pci_request_regions(pdev, mlxsw_pci_driver_name);
1671 if (err) {
1672 dev_err(&pdev->dev, "pci_request_regions failed\n");
1673 goto err_pci_request_regions;
1674 }
1675
1676 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1677 if (!err) {
1678 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
1679 if (err) {
1680 dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed\n");
1681 goto err_pci_set_dma_mask;
1682 }
1683 } else {
1684 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1685 if (err) {
1686 dev_err(&pdev->dev, "pci_set_dma_mask failed\n");
1687 goto err_pci_set_dma_mask;
1688 }
1689 }
1690
1691 if (pci_resource_len(pdev, 0) < MLXSW_PCI_BAR0_SIZE) {
1692 dev_err(&pdev->dev, "invalid PCI region size\n");
1693 err = -EINVAL;
1694 goto err_pci_resource_len_check;
1695 }
1696
1697 mlxsw_pci->hw_addr = ioremap(pci_resource_start(pdev, 0),
1698 pci_resource_len(pdev, 0));
1699 if (!mlxsw_pci->hw_addr) {
1700 dev_err(&pdev->dev, "ioremap failed\n");
1701 err = -EIO;
1702 goto err_ioremap;
1703 }
1704 pci_set_master(pdev);
1705
1706 mlxsw_pci->pdev = pdev;
1707 pci_set_drvdata(pdev, mlxsw_pci);
1708
1709 err = mlxsw_pci_sw_reset(mlxsw_pci);
1710 if (err) {
1711 dev_err(&pdev->dev, "Software reset failed\n");
1712 goto err_sw_reset;
1713 }
1714
1715 err = pci_enable_msix_exact(pdev, &mlxsw_pci->msix_entry, 1);
1716 if (err) {
1717 dev_err(&pdev->dev, "MSI-X init failed\n");
1718 goto err_msix_init;
1719 }
1720
1721 mlxsw_pci->bus_info.device_kind = mlxsw_pci_device_kind_get(id);
1722 mlxsw_pci->bus_info.device_name = pci_name(mlxsw_pci->pdev);
1723 mlxsw_pci->bus_info.dev = &pdev->dev;
1724
1725 mlxsw_pci->dbg_dir = debugfs_create_dir(mlxsw_pci->bus_info.device_name,
1726 mlxsw_pci_dbg_root);
1727 if (!mlxsw_pci->dbg_dir) {
1728 dev_err(&pdev->dev, "Failed to create debugfs dir\n");
1729 err = -ENOMEM;
1730 goto err_dbg_create_dir;
1731 }
1732
1733 err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info,
1734 &mlxsw_pci_bus, mlxsw_pci);
1735 if (err) {
1736 dev_err(&pdev->dev, "cannot register bus device\n");
1737 goto err_bus_device_register;
1738 }
1739
1740 return 0;
1741
1742 err_bus_device_register:
1743 debugfs_remove_recursive(mlxsw_pci->dbg_dir);
1744 err_dbg_create_dir:
1745 pci_disable_msix(mlxsw_pci->pdev);
1746 err_msix_init:
1747 err_sw_reset:
1748 iounmap(mlxsw_pci->hw_addr);
1749 err_ioremap:
1750 err_pci_resource_len_check:
1751 err_pci_set_dma_mask:
1752 pci_release_regions(pdev);
1753 err_pci_request_regions:
1754 pci_disable_device(pdev);
1755 err_pci_enable_device:
1756 kfree(mlxsw_pci);
1757 return err;
1758 }
1759
1760 static void mlxsw_pci_remove(struct pci_dev *pdev)
1761 {
1762 struct mlxsw_pci *mlxsw_pci = pci_get_drvdata(pdev);
1763
1764 mlxsw_core_bus_device_unregister(mlxsw_pci->core);
1765 debugfs_remove_recursive(mlxsw_pci->dbg_dir);
1766 pci_disable_msix(mlxsw_pci->pdev);
1767 iounmap(mlxsw_pci->hw_addr);
1768 pci_release_regions(mlxsw_pci->pdev);
1769 pci_disable_device(mlxsw_pci->pdev);
1770 kfree(mlxsw_pci);
1771 }
1772
1773 static struct pci_driver mlxsw_pci_driver = {
1774 .name = mlxsw_pci_driver_name,
1775 .id_table = mlxsw_pci_id_table,
1776 .probe = mlxsw_pci_probe,
1777 .remove = mlxsw_pci_remove,
1778 };
1779
1780 static int __init mlxsw_pci_module_init(void)
1781 {
1782 int err;
1783
1784 mlxsw_pci_dbg_root = debugfs_create_dir(mlxsw_pci_driver_name, NULL);
1785 if (!mlxsw_pci_dbg_root)
1786 return -ENOMEM;
1787 err = pci_register_driver(&mlxsw_pci_driver);
1788 if (err)
1789 goto err_register_driver;
1790 return 0;
1791
1792 err_register_driver:
1793 debugfs_remove_recursive(mlxsw_pci_dbg_root);
1794 return err;
1795 }
1796
1797 static void __exit mlxsw_pci_module_exit(void)
1798 {
1799 pci_unregister_driver(&mlxsw_pci_driver);
1800 debugfs_remove_recursive(mlxsw_pci_dbg_root);
1801 }
1802
1803 module_init(mlxsw_pci_module_init);
1804 module_exit(mlxsw_pci_module_exit);
1805
1806 MODULE_LICENSE("Dual BSD/GPL");
1807 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
1808 MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
1809 MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);