/*
 * drivers/net/ethernet/mellanox/mlxsw/pci.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/device.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/slab.h>

/* NOTE(review): local headers restored — the file references MLXSW_PCI_*,
 * mlxsw_cmd_* and mlxsw_core_* symbols that these headers declare.
 */
#include "pci.h"
#include "core.h"
#include "cmd.h"
#include "port.h"
/* Driver name used for PCI registration and logging. */
static const char mlxsw_pci_driver_name[] = "mlxsw_pci";
57 static const struct pci_device_id mlxsw_pci_id_table
[] = {
58 {PCI_VDEVICE(MELLANOX
, PCI_DEVICE_ID_MELLANOX_SWITCHX2
), 0},
/* Root debugfs directory for all mlxsw_pci devices. */
static struct dentry *mlxsw_pci_dbg_root;
64 static const char *mlxsw_pci_device_kind_get(const struct pci_device_id
*id
)
67 case PCI_DEVICE_ID_MELLANOX_SWITCHX2
:
68 return MLXSW_DEVICE_KIND_SWITCHX2
;
/* Big-endian 32-bit accessors into the device BAR, addressed by the
 * MLXSW_PCI_<reg> offset constants.
 */
#define mlxsw_pci_write32(mlxsw_pci, reg, val) \
	iowrite32be(val, (mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
#define mlxsw_pci_read32(mlxsw_pci, reg) \
	ioread32be((mlxsw_pci)->hw_addr + (MLXSW_PCI_ ## reg))
/* Queue kinds; the numeric order indexes the doorbell offset tables below. */
enum mlxsw_pci_queue_type {
	MLXSW_PCI_QUEUE_TYPE_SDQ,
	MLXSW_PCI_QUEUE_TYPE_RDQ,
	MLXSW_PCI_QUEUE_TYPE_CQ,
	MLXSW_PCI_QUEUE_TYPE_EQ,
};
86 static const char *mlxsw_pci_queue_type_str(enum mlxsw_pci_queue_type q_type
)
89 case MLXSW_PCI_QUEUE_TYPE_SDQ
:
91 case MLXSW_PCI_QUEUE_TYPE_RDQ
:
93 case MLXSW_PCI_QUEUE_TYPE_CQ
:
95 case MLXSW_PCI_QUEUE_TYPE_EQ
:
#define MLXSW_PCI_QUEUE_TYPE_COUNT	4
103 static const u16 mlxsw_pci_doorbell_type_offset
[] = {
104 MLXSW_PCI_DOORBELL_SDQ_OFFSET
, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */
105 MLXSW_PCI_DOORBELL_RDQ_OFFSET
, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */
106 MLXSW_PCI_DOORBELL_CQ_OFFSET
, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
107 MLXSW_PCI_DOORBELL_EQ_OFFSET
, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
110 static const u16 mlxsw_pci_doorbell_arm_type_offset
[] = {
113 MLXSW_PCI_DOORBELL_ARM_CQ_OFFSET
, /* for type MLXSW_PCI_QUEUE_TYPE_CQ */
114 MLXSW_PCI_DOORBELL_ARM_EQ_OFFSET
, /* for type MLXSW_PCI_QUEUE_TYPE_EQ */
117 struct mlxsw_pci_mem_item
{
/* Per-element bookkeeping; the union members are grounded in the
 * elem_info->u.sdq.skb / elem_info->u.rdq.skb accesses later in this file.
 */
struct mlxsw_pci_queue_elem_info {
	char *elem; /* pointer to actual dma mapped element mem chunk */
	union {
		struct {
			struct sk_buff *skb;
		} sdq;
		struct {
			struct sk_buff *skb;
		} rdq;
	} u;
};
135 struct mlxsw_pci_queue
{
136 spinlock_t lock
; /* for queue accesses */
137 struct mlxsw_pci_mem_item mem_item
;
138 struct mlxsw_pci_queue_elem_info
*elem_info
;
139 u16 producer_counter
;
140 u16 consumer_counter
;
141 u16 count
; /* number of elements in queue */
142 u8 num
; /* queue number */
143 u8 elem_size
; /* size of one element */
144 enum mlxsw_pci_queue_type type
;
145 struct tasklet_struct tasklet
; /* queue processing tasklet */
146 struct mlxsw_pci
*pci
;
160 struct mlxsw_pci_queue_type_group
{
161 struct mlxsw_pci_queue
*q
;
162 u8 count
; /* number of queues in group */
166 struct pci_dev
*pdev
;
168 struct mlxsw_pci_queue_type_group queues
[MLXSW_PCI_QUEUE_TYPE_COUNT
];
170 struct msix_entry msix_entry
;
171 struct mlxsw_core
*core
;
174 struct mlxsw_pci_mem_item
*items
;
177 struct mutex lock
; /* Lock access to command registers */
179 wait_queue_head_t wait
;
186 struct mlxsw_bus_info bus_info
;
187 struct dentry
*dbg_dir
;
190 static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue
*q
)
192 tasklet_schedule(&q
->tasklet
);
195 static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue
*q
,
196 size_t elem_size
, int elem_index
)
198 return q
->mem_item
.buf
+ (elem_size
* elem_index
);
201 static struct mlxsw_pci_queue_elem_info
*
202 mlxsw_pci_queue_elem_info_get(struct mlxsw_pci_queue
*q
, int elem_index
)
204 return &q
->elem_info
[elem_index
];
207 static struct mlxsw_pci_queue_elem_info
*
208 mlxsw_pci_queue_elem_info_producer_get(struct mlxsw_pci_queue
*q
)
210 int index
= q
->producer_counter
& (q
->count
- 1);
212 if ((q
->producer_counter
- q
->consumer_counter
) == q
->count
)
214 return mlxsw_pci_queue_elem_info_get(q
, index
);
217 static struct mlxsw_pci_queue_elem_info
*
218 mlxsw_pci_queue_elem_info_consumer_get(struct mlxsw_pci_queue
*q
)
220 int index
= q
->consumer_counter
& (q
->count
- 1);
222 return mlxsw_pci_queue_elem_info_get(q
, index
);
225 static char *mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue
*q
, int elem_index
)
227 return mlxsw_pci_queue_elem_info_get(q
, elem_index
)->elem
;
230 static bool mlxsw_pci_elem_hw_owned(struct mlxsw_pci_queue
*q
, bool owner_bit
)
232 return owner_bit
!= !!(q
->consumer_counter
& q
->count
);
235 static char *mlxsw_pci_queue_sw_elem_get(struct mlxsw_pci_queue
*q
,
236 u32 (*get_elem_owner_func
)(char *))
238 struct mlxsw_pci_queue_elem_info
*elem_info
;
242 elem_info
= mlxsw_pci_queue_elem_info_consumer_get(q
);
243 elem
= elem_info
->elem
;
244 owner_bit
= get_elem_owner_func(elem
);
245 if (mlxsw_pci_elem_hw_owned(q
, owner_bit
))
247 q
->consumer_counter
++;
248 rmb(); /* make sure we read owned bit before the rest of elem */
252 static struct mlxsw_pci_queue_type_group
*
253 mlxsw_pci_queue_type_group_get(struct mlxsw_pci
*mlxsw_pci
,
254 enum mlxsw_pci_queue_type q_type
)
256 return &mlxsw_pci
->queues
[q_type
];
259 static u8
__mlxsw_pci_queue_count(struct mlxsw_pci
*mlxsw_pci
,
260 enum mlxsw_pci_queue_type q_type
)
262 struct mlxsw_pci_queue_type_group
*queue_group
;
264 queue_group
= mlxsw_pci_queue_type_group_get(mlxsw_pci
, q_type
);
265 return queue_group
->count
;
268 static u8
mlxsw_pci_sdq_count(struct mlxsw_pci
*mlxsw_pci
)
270 return __mlxsw_pci_queue_count(mlxsw_pci
, MLXSW_PCI_QUEUE_TYPE_SDQ
);
273 static u8
mlxsw_pci_rdq_count(struct mlxsw_pci
*mlxsw_pci
)
275 return __mlxsw_pci_queue_count(mlxsw_pci
, MLXSW_PCI_QUEUE_TYPE_RDQ
);
278 static u8
mlxsw_pci_cq_count(struct mlxsw_pci
*mlxsw_pci
)
280 return __mlxsw_pci_queue_count(mlxsw_pci
, MLXSW_PCI_QUEUE_TYPE_CQ
);
283 static u8
mlxsw_pci_eq_count(struct mlxsw_pci
*mlxsw_pci
)
285 return __mlxsw_pci_queue_count(mlxsw_pci
, MLXSW_PCI_QUEUE_TYPE_EQ
);
288 static struct mlxsw_pci_queue
*
289 __mlxsw_pci_queue_get(struct mlxsw_pci
*mlxsw_pci
,
290 enum mlxsw_pci_queue_type q_type
, u8 q_num
)
292 return &mlxsw_pci
->queues
[q_type
].q
[q_num
];
295 static struct mlxsw_pci_queue
*mlxsw_pci_sdq_get(struct mlxsw_pci
*mlxsw_pci
,
298 return __mlxsw_pci_queue_get(mlxsw_pci
,
299 MLXSW_PCI_QUEUE_TYPE_SDQ
, q_num
);
302 static struct mlxsw_pci_queue
*mlxsw_pci_rdq_get(struct mlxsw_pci
*mlxsw_pci
,
305 return __mlxsw_pci_queue_get(mlxsw_pci
,
306 MLXSW_PCI_QUEUE_TYPE_RDQ
, q_num
);
309 static struct mlxsw_pci_queue
*mlxsw_pci_cq_get(struct mlxsw_pci
*mlxsw_pci
,
312 return __mlxsw_pci_queue_get(mlxsw_pci
, MLXSW_PCI_QUEUE_TYPE_CQ
, q_num
);
315 static struct mlxsw_pci_queue
*mlxsw_pci_eq_get(struct mlxsw_pci
*mlxsw_pci
,
318 return __mlxsw_pci_queue_get(mlxsw_pci
, MLXSW_PCI_QUEUE_TYPE_EQ
, q_num
);
321 static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci
*mlxsw_pci
,
322 struct mlxsw_pci_queue
*q
,
325 mlxsw_pci_write32(mlxsw_pci
,
326 DOORBELL(mlxsw_pci
->doorbell_offset
,
327 mlxsw_pci_doorbell_type_offset
[q
->type
],
331 static void __mlxsw_pci_queue_doorbell_arm_set(struct mlxsw_pci
*mlxsw_pci
,
332 struct mlxsw_pci_queue
*q
,
335 mlxsw_pci_write32(mlxsw_pci
,
336 DOORBELL(mlxsw_pci
->doorbell_offset
,
337 mlxsw_pci_doorbell_arm_type_offset
[q
->type
],
341 static void mlxsw_pci_queue_doorbell_producer_ring(struct mlxsw_pci
*mlxsw_pci
,
342 struct mlxsw_pci_queue
*q
)
344 wmb(); /* ensure all writes are done before we ring a bell */
345 __mlxsw_pci_queue_doorbell_set(mlxsw_pci
, q
, q
->producer_counter
);
348 static void mlxsw_pci_queue_doorbell_consumer_ring(struct mlxsw_pci
*mlxsw_pci
,
349 struct mlxsw_pci_queue
*q
)
351 wmb(); /* ensure all writes are done before we ring a bell */
352 __mlxsw_pci_queue_doorbell_set(mlxsw_pci
, q
,
353 q
->consumer_counter
+ q
->count
);
357 mlxsw_pci_queue_doorbell_arm_consumer_ring(struct mlxsw_pci
*mlxsw_pci
,
358 struct mlxsw_pci_queue
*q
)
360 wmb(); /* ensure all writes are done before we ring a bell */
361 __mlxsw_pci_queue_doorbell_arm_set(mlxsw_pci
, q
, q
->consumer_counter
);
364 static dma_addr_t
__mlxsw_pci_queue_page_get(struct mlxsw_pci_queue
*q
,
367 return q
->mem_item
.mapaddr
+ MLXSW_PCI_PAGE_SIZE
* page_index
;
370 static int mlxsw_pci_sdq_init(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
371 struct mlxsw_pci_queue
*q
)
376 q
->producer_counter
= 0;
377 q
->consumer_counter
= 0;
379 /* Set CQ of same number of this SDQ. */
380 mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox
, q
->num
);
381 mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox
, 7);
382 mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox
, 3); /* 8 pages */
383 for (i
= 0; i
< MLXSW_PCI_AQ_PAGES
; i
++) {
384 dma_addr_t mapaddr
= __mlxsw_pci_queue_page_get(q
, i
);
386 mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox
, i
, mapaddr
);
389 err
= mlxsw_cmd_sw2hw_sdq(mlxsw_pci
->core
, mbox
, q
->num
);
392 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci
, q
);
396 static void mlxsw_pci_sdq_fini(struct mlxsw_pci
*mlxsw_pci
,
397 struct mlxsw_pci_queue
*q
)
399 mlxsw_cmd_hw2sw_sdq(mlxsw_pci
->core
, q
->num
);
402 static int mlxsw_pci_sdq_dbg_read(struct seq_file
*file
, void *data
)
404 struct mlxsw_pci
*mlxsw_pci
= dev_get_drvdata(file
->private);
405 struct mlxsw_pci_queue
*q
;
407 static const char hdr
[] =
408 "NUM PROD_COUNT CONS_COUNT COUNT\n";
410 seq_printf(file
, hdr
);
411 for (i
= 0; i
< mlxsw_pci_sdq_count(mlxsw_pci
); i
++) {
412 q
= mlxsw_pci_sdq_get(mlxsw_pci
, i
);
413 spin_lock_bh(&q
->lock
);
414 seq_printf(file
, "%3d %10d %10d %5d\n",
415 i
, q
->producer_counter
, q
->consumer_counter
,
417 spin_unlock_bh(&q
->lock
);
422 static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci
*mlxsw_pci
, char *wqe
,
423 int index
, char *frag_data
, size_t frag_len
,
426 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
429 mapaddr
= pci_map_single(pdev
, frag_data
, frag_len
, direction
);
430 if (unlikely(pci_dma_mapping_error(pdev
, mapaddr
))) {
432 dev_err(&pdev
->dev
, "failed to dma map tx frag\n");
435 mlxsw_pci_wqe_address_set(wqe
, index
, mapaddr
);
436 mlxsw_pci_wqe_byte_count_set(wqe
, index
, frag_len
);
440 static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci
*mlxsw_pci
, char *wqe
,
441 int index
, int direction
)
443 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
444 size_t frag_len
= mlxsw_pci_wqe_byte_count_get(wqe
, index
);
445 dma_addr_t mapaddr
= mlxsw_pci_wqe_address_get(wqe
, index
);
449 pci_unmap_single(pdev
, mapaddr
, frag_len
, direction
);
452 static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci
*mlxsw_pci
,
453 struct mlxsw_pci_queue_elem_info
*elem_info
)
455 size_t buf_len
= MLXSW_PORT_MAX_MTU
;
456 char *wqe
= elem_info
->elem
;
460 elem_info
->u
.rdq
.skb
= NULL
;
461 skb
= netdev_alloc_skb_ip_align(NULL
, buf_len
);
465 /* Assume that wqe was previously zeroed. */
467 err
= mlxsw_pci_wqe_frag_map(mlxsw_pci
, wqe
, 0, skb
->data
,
468 buf_len
, DMA_FROM_DEVICE
);
472 elem_info
->u
.rdq
.skb
= skb
;
476 dev_kfree_skb_any(skb
);
480 static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci
*mlxsw_pci
,
481 struct mlxsw_pci_queue_elem_info
*elem_info
)
486 skb
= elem_info
->u
.rdq
.skb
;
487 wqe
= elem_info
->elem
;
489 mlxsw_pci_wqe_frag_unmap(mlxsw_pci
, wqe
, 0, DMA_FROM_DEVICE
);
490 dev_kfree_skb_any(skb
);
493 static int mlxsw_pci_rdq_init(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
494 struct mlxsw_pci_queue
*q
)
496 struct mlxsw_pci_queue_elem_info
*elem_info
;
500 q
->producer_counter
= 0;
501 q
->consumer_counter
= 0;
503 /* Set CQ of same number of this RDQ with base
504 * above MLXSW_PCI_SDQS_MAX as the lower ones are assigned to SDQs.
506 mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox
, q
->num
+ MLXSW_PCI_SDQS_COUNT
);
507 mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox
, 3); /* 8 pages */
508 for (i
= 0; i
< MLXSW_PCI_AQ_PAGES
; i
++) {
509 dma_addr_t mapaddr
= __mlxsw_pci_queue_page_get(q
, i
);
511 mlxsw_cmd_mbox_sw2hw_dq_pa_set(mbox
, i
, mapaddr
);
514 err
= mlxsw_cmd_sw2hw_rdq(mlxsw_pci
->core
, mbox
, q
->num
);
518 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci
, q
);
520 for (i
= 0; i
< q
->count
; i
++) {
521 elem_info
= mlxsw_pci_queue_elem_info_producer_get(q
);
523 err
= mlxsw_pci_rdq_skb_alloc(mlxsw_pci
, elem_info
);
526 /* Everything is set up, ring doorbell to pass elem to HW */
527 q
->producer_counter
++;
528 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci
, q
);
534 for (i
--; i
>= 0; i
--) {
535 elem_info
= mlxsw_pci_queue_elem_info_get(q
, i
);
536 mlxsw_pci_rdq_skb_free(mlxsw_pci
, elem_info
);
538 mlxsw_cmd_hw2sw_rdq(mlxsw_pci
->core
, q
->num
);
543 static void mlxsw_pci_rdq_fini(struct mlxsw_pci
*mlxsw_pci
,
544 struct mlxsw_pci_queue
*q
)
546 struct mlxsw_pci_queue_elem_info
*elem_info
;
549 mlxsw_cmd_hw2sw_rdq(mlxsw_pci
->core
, q
->num
);
550 for (i
= 0; i
< q
->count
; i
++) {
551 elem_info
= mlxsw_pci_queue_elem_info_get(q
, i
);
552 mlxsw_pci_rdq_skb_free(mlxsw_pci
, elem_info
);
556 static int mlxsw_pci_rdq_dbg_read(struct seq_file
*file
, void *data
)
558 struct mlxsw_pci
*mlxsw_pci
= dev_get_drvdata(file
->private);
559 struct mlxsw_pci_queue
*q
;
561 static const char hdr
[] =
562 "NUM PROD_COUNT CONS_COUNT COUNT\n";
564 seq_printf(file
, hdr
);
565 for (i
= 0; i
< mlxsw_pci_rdq_count(mlxsw_pci
); i
++) {
566 q
= mlxsw_pci_rdq_get(mlxsw_pci
, i
);
567 spin_lock_bh(&q
->lock
);
568 seq_printf(file
, "%3d %10d %10d %5d\n",
569 i
, q
->producer_counter
, q
->consumer_counter
,
571 spin_unlock_bh(&q
->lock
);
576 static int mlxsw_pci_cq_init(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
577 struct mlxsw_pci_queue
*q
)
582 q
->consumer_counter
= 0;
584 for (i
= 0; i
< q
->count
; i
++) {
585 char *elem
= mlxsw_pci_queue_elem_get(q
, i
);
587 mlxsw_pci_cqe_owner_set(elem
, 1);
590 mlxsw_cmd_mbox_sw2hw_cq_cv_set(mbox
, 0); /* CQE ver 0 */
591 mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox
, MLXSW_PCI_EQ_COMP_NUM
);
592 mlxsw_cmd_mbox_sw2hw_cq_oi_set(mbox
, 0);
593 mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox
, 0);
594 mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox
, ilog2(q
->count
));
595 for (i
= 0; i
< MLXSW_PCI_AQ_PAGES
; i
++) {
596 dma_addr_t mapaddr
= __mlxsw_pci_queue_page_get(q
, i
);
598 mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox
, i
, mapaddr
);
600 err
= mlxsw_cmd_sw2hw_cq(mlxsw_pci
->core
, mbox
, q
->num
);
603 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci
, q
);
604 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci
, q
);
608 static void mlxsw_pci_cq_fini(struct mlxsw_pci
*mlxsw_pci
,
609 struct mlxsw_pci_queue
*q
)
611 mlxsw_cmd_hw2sw_cq(mlxsw_pci
->core
, q
->num
);
614 static int mlxsw_pci_cq_dbg_read(struct seq_file
*file
, void *data
)
616 struct mlxsw_pci
*mlxsw_pci
= dev_get_drvdata(file
->private);
618 struct mlxsw_pci_queue
*q
;
620 static const char hdr
[] =
621 "NUM CONS_INDEX SDQ_COUNT RDQ_COUNT COUNT\n";
623 seq_printf(file
, hdr
);
624 for (i
= 0; i
< mlxsw_pci_cq_count(mlxsw_pci
); i
++) {
625 q
= mlxsw_pci_cq_get(mlxsw_pci
, i
);
626 spin_lock_bh(&q
->lock
);
627 seq_printf(file
, "%3d %10d %10d %10d %5d\n",
628 i
, q
->consumer_counter
, q
->u
.cq
.comp_sdq_count
,
629 q
->u
.cq
.comp_rdq_count
, q
->count
);
630 spin_unlock_bh(&q
->lock
);
635 static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci
*mlxsw_pci
,
636 struct mlxsw_pci_queue
*q
,
637 u16 consumer_counter_limit
,
640 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
641 struct mlxsw_pci_queue_elem_info
*elem_info
;
647 elem_info
= mlxsw_pci_queue_elem_info_consumer_get(q
);
648 skb
= elem_info
->u
.sdq
.skb
;
649 wqe
= elem_info
->elem
;
650 for (i
= 0; i
< MLXSW_PCI_WQE_SG_ENTRIES
; i
++)
651 mlxsw_pci_wqe_frag_unmap(mlxsw_pci
, wqe
, i
, DMA_TO_DEVICE
);
652 dev_kfree_skb_any(skb
);
653 elem_info
->u
.sdq
.skb
= NULL
;
655 if (q
->consumer_counter
++ != consumer_counter_limit
)
656 dev_dbg_ratelimited(&pdev
->dev
, "Consumer counter does not match limit in SDQ\n");
657 spin_unlock(&q
->lock
);
660 static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci
*mlxsw_pci
,
661 struct mlxsw_pci_queue
*q
,
662 u16 consumer_counter_limit
,
665 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
666 struct mlxsw_pci_queue_elem_info
*elem_info
;
669 struct mlxsw_rx_info rx_info
;
673 elem_info
= mlxsw_pci_queue_elem_info_consumer_get(q
);
674 skb
= elem_info
->u
.sdq
.skb
;
677 wqe
= elem_info
->elem
;
678 mlxsw_pci_wqe_frag_unmap(mlxsw_pci
, wqe
, 0, DMA_FROM_DEVICE
);
680 if (q
->consumer_counter
++ != consumer_counter_limit
)
681 dev_dbg_ratelimited(&pdev
->dev
, "Consumer counter does not match limit in RDQ\n");
683 /* We do not support lag now */
684 if (mlxsw_pci_cqe_lag_get(cqe
))
687 rx_info
.sys_port
= mlxsw_pci_cqe_system_port_get(cqe
);
688 rx_info
.trap_id
= mlxsw_pci_cqe_trap_id_get(cqe
);
690 byte_count
= mlxsw_pci_cqe_byte_count_get(cqe
);
691 if (mlxsw_pci_cqe_crc_get(cqe
))
692 byte_count
-= ETH_FCS_LEN
;
693 skb_put(skb
, byte_count
);
694 mlxsw_core_skb_receive(mlxsw_pci
->core
, skb
, &rx_info
);
697 memset(wqe
, 0, q
->elem_size
);
698 err
= mlxsw_pci_rdq_skb_alloc(mlxsw_pci
, elem_info
);
699 if (err
&& net_ratelimit())
700 dev_dbg(&pdev
->dev
, "Failed to alloc skb for RDQ\n");
701 /* Everything is set up, ring doorbell to pass elem to HW */
702 q
->producer_counter
++;
703 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci
, q
);
707 dev_kfree_skb_any(skb
);
711 static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue
*q
)
713 return mlxsw_pci_queue_sw_elem_get(q
, mlxsw_pci_cqe_owner_get
);
716 static void mlxsw_pci_cq_tasklet(unsigned long data
)
718 struct mlxsw_pci_queue
*q
= (struct mlxsw_pci_queue
*) data
;
719 struct mlxsw_pci
*mlxsw_pci
= q
->pci
;
722 int credits
= q
->count
>> 1;
724 while ((cqe
= mlxsw_pci_cq_sw_cqe_get(q
))) {
725 u16 wqe_counter
= mlxsw_pci_cqe_wqe_counter_get(cqe
);
726 u8 sendq
= mlxsw_pci_cqe_sr_get(cqe
);
727 u8 dqn
= mlxsw_pci_cqe_dqn_get(cqe
);
730 struct mlxsw_pci_queue
*sdq
;
732 sdq
= mlxsw_pci_sdq_get(mlxsw_pci
, dqn
);
733 mlxsw_pci_cqe_sdq_handle(mlxsw_pci
, sdq
,
735 q
->u
.cq
.comp_sdq_count
++;
737 struct mlxsw_pci_queue
*rdq
;
739 rdq
= mlxsw_pci_rdq_get(mlxsw_pci
, dqn
);
740 mlxsw_pci_cqe_rdq_handle(mlxsw_pci
, rdq
,
742 q
->u
.cq
.comp_rdq_count
++;
744 if (++items
== credits
)
748 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci
, q
);
749 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci
, q
);
753 static int mlxsw_pci_eq_init(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
754 struct mlxsw_pci_queue
*q
)
759 q
->consumer_counter
= 0;
761 for (i
= 0; i
< q
->count
; i
++) {
762 char *elem
= mlxsw_pci_queue_elem_get(q
, i
);
764 mlxsw_pci_eqe_owner_set(elem
, 1);
767 mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox
, 1); /* MSI-X used */
768 mlxsw_cmd_mbox_sw2hw_eq_oi_set(mbox
, 0);
769 mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox
, 1); /* armed */
770 mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox
, ilog2(q
->count
));
771 for (i
= 0; i
< MLXSW_PCI_AQ_PAGES
; i
++) {
772 dma_addr_t mapaddr
= __mlxsw_pci_queue_page_get(q
, i
);
774 mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox
, i
, mapaddr
);
776 err
= mlxsw_cmd_sw2hw_eq(mlxsw_pci
->core
, mbox
, q
->num
);
779 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci
, q
);
780 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci
, q
);
784 static void mlxsw_pci_eq_fini(struct mlxsw_pci
*mlxsw_pci
,
785 struct mlxsw_pci_queue
*q
)
787 mlxsw_cmd_hw2sw_eq(mlxsw_pci
->core
, q
->num
);
790 static int mlxsw_pci_eq_dbg_read(struct seq_file
*file
, void *data
)
792 struct mlxsw_pci
*mlxsw_pci
= dev_get_drvdata(file
->private);
793 struct mlxsw_pci_queue
*q
;
795 static const char hdr
[] =
796 "NUM CONS_COUNT EV_CMD EV_COMP EV_OTHER COUNT\n";
798 seq_printf(file
, hdr
);
799 for (i
= 0; i
< mlxsw_pci_eq_count(mlxsw_pci
); i
++) {
800 q
= mlxsw_pci_eq_get(mlxsw_pci
, i
);
801 spin_lock_bh(&q
->lock
);
802 seq_printf(file
, "%3d %10d %10d %10d %10d %5d\n",
803 i
, q
->consumer_counter
, q
->u
.eq
.ev_cmd_count
,
804 q
->u
.eq
.ev_comp_count
, q
->u
.eq
.ev_other_count
,
806 spin_unlock_bh(&q
->lock
);
811 static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci
*mlxsw_pci
, char *eqe
)
813 mlxsw_pci
->cmd
.comp
.status
= mlxsw_pci_eqe_cmd_status_get(eqe
);
814 mlxsw_pci
->cmd
.comp
.out_param
=
815 ((u64
) mlxsw_pci_eqe_cmd_out_param_h_get(eqe
)) << 32 |
816 mlxsw_pci_eqe_cmd_out_param_l_get(eqe
);
817 mlxsw_pci
->cmd
.wait_done
= true;
818 wake_up(&mlxsw_pci
->cmd
.wait
);
821 static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue
*q
)
823 return mlxsw_pci_queue_sw_elem_get(q
, mlxsw_pci_eqe_owner_get
);
826 static void mlxsw_pci_eq_tasklet(unsigned long data
)
828 struct mlxsw_pci_queue
*q
= (struct mlxsw_pci_queue
*) data
;
829 struct mlxsw_pci
*mlxsw_pci
= q
->pci
;
830 unsigned long active_cqns
[BITS_TO_LONGS(MLXSW_PCI_CQS_COUNT
)];
833 bool cq_handle
= false;
835 int credits
= q
->count
>> 1;
837 memset(&active_cqns
, 0, sizeof(active_cqns
));
839 while ((eqe
= mlxsw_pci_eq_sw_eqe_get(q
))) {
840 u8 event_type
= mlxsw_pci_eqe_event_type_get(eqe
);
842 switch (event_type
) {
843 case MLXSW_PCI_EQE_EVENT_TYPE_CMD
:
844 mlxsw_pci_eq_cmd_event(mlxsw_pci
, eqe
);
845 q
->u
.eq
.ev_cmd_count
++;
847 case MLXSW_PCI_EQE_EVENT_TYPE_COMP
:
848 cqn
= mlxsw_pci_eqe_cqn_get(eqe
);
849 set_bit(cqn
, active_cqns
);
851 q
->u
.eq
.ev_comp_count
++;
854 q
->u
.eq
.ev_other_count
++;
856 if (++items
== credits
)
860 mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci
, q
);
861 mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci
, q
);
866 for_each_set_bit(cqn
, active_cqns
, MLXSW_PCI_CQS_COUNT
) {
867 q
= mlxsw_pci_cq_get(mlxsw_pci
, cqn
);
868 mlxsw_pci_queue_tasklet_schedule(q
);
872 struct mlxsw_pci_queue_ops
{
874 enum mlxsw_pci_queue_type type
;
875 int (*init
)(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
876 struct mlxsw_pci_queue
*q
);
877 void (*fini
)(struct mlxsw_pci
*mlxsw_pci
,
878 struct mlxsw_pci_queue
*q
);
879 void (*tasklet
)(unsigned long data
);
880 int (*dbg_read
)(struct seq_file
*s
, void *data
);
885 static const struct mlxsw_pci_queue_ops mlxsw_pci_sdq_ops
= {
886 .type
= MLXSW_PCI_QUEUE_TYPE_SDQ
,
887 .init
= mlxsw_pci_sdq_init
,
888 .fini
= mlxsw_pci_sdq_fini
,
889 .dbg_read
= mlxsw_pci_sdq_dbg_read
,
890 .elem_count
= MLXSW_PCI_WQE_COUNT
,
891 .elem_size
= MLXSW_PCI_WQE_SIZE
,
894 static const struct mlxsw_pci_queue_ops mlxsw_pci_rdq_ops
= {
895 .type
= MLXSW_PCI_QUEUE_TYPE_RDQ
,
896 .init
= mlxsw_pci_rdq_init
,
897 .fini
= mlxsw_pci_rdq_fini
,
898 .dbg_read
= mlxsw_pci_rdq_dbg_read
,
899 .elem_count
= MLXSW_PCI_WQE_COUNT
,
900 .elem_size
= MLXSW_PCI_WQE_SIZE
903 static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops
= {
904 .type
= MLXSW_PCI_QUEUE_TYPE_CQ
,
905 .init
= mlxsw_pci_cq_init
,
906 .fini
= mlxsw_pci_cq_fini
,
907 .tasklet
= mlxsw_pci_cq_tasklet
,
908 .dbg_read
= mlxsw_pci_cq_dbg_read
,
909 .elem_count
= MLXSW_PCI_CQE_COUNT
,
910 .elem_size
= MLXSW_PCI_CQE_SIZE
913 static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops
= {
914 .type
= MLXSW_PCI_QUEUE_TYPE_EQ
,
915 .init
= mlxsw_pci_eq_init
,
916 .fini
= mlxsw_pci_eq_fini
,
917 .tasklet
= mlxsw_pci_eq_tasklet
,
918 .dbg_read
= mlxsw_pci_eq_dbg_read
,
919 .elem_count
= MLXSW_PCI_EQE_COUNT
,
920 .elem_size
= MLXSW_PCI_EQE_SIZE
923 static int mlxsw_pci_queue_init(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
924 const struct mlxsw_pci_queue_ops
*q_ops
,
925 struct mlxsw_pci_queue
*q
, u8 q_num
)
927 struct mlxsw_pci_mem_item
*mem_item
= &q
->mem_item
;
931 spin_lock_init(&q
->lock
);
933 q
->count
= q_ops
->elem_count
;
934 q
->elem_size
= q_ops
->elem_size
;
935 q
->type
= q_ops
->type
;
939 tasklet_init(&q
->tasklet
, q_ops
->tasklet
, (unsigned long) q
);
941 mem_item
->size
= MLXSW_PCI_AQ_SIZE
;
942 mem_item
->buf
= pci_alloc_consistent(mlxsw_pci
->pdev
,
947 memset(mem_item
->buf
, 0, mem_item
->size
);
949 q
->elem_info
= kcalloc(q
->count
, sizeof(*q
->elem_info
), GFP_KERNEL
);
952 goto err_elem_info_alloc
;
955 /* Initialize dma mapped elements info elem_info for
956 * future easy access.
958 for (i
= 0; i
< q
->count
; i
++) {
959 struct mlxsw_pci_queue_elem_info
*elem_info
;
961 elem_info
= mlxsw_pci_queue_elem_info_get(q
, i
);
963 __mlxsw_pci_queue_elem_get(q
, q_ops
->elem_size
, i
);
966 mlxsw_cmd_mbox_zero(mbox
);
967 err
= q_ops
->init(mlxsw_pci
, mbox
, q
);
975 pci_free_consistent(mlxsw_pci
->pdev
, mem_item
->size
,
976 mem_item
->buf
, mem_item
->mapaddr
);
980 static void mlxsw_pci_queue_fini(struct mlxsw_pci
*mlxsw_pci
,
981 const struct mlxsw_pci_queue_ops
*q_ops
,
982 struct mlxsw_pci_queue
*q
)
984 struct mlxsw_pci_mem_item
*mem_item
= &q
->mem_item
;
986 q_ops
->fini(mlxsw_pci
, q
);
988 pci_free_consistent(mlxsw_pci
->pdev
, mem_item
->size
,
989 mem_item
->buf
, mem_item
->mapaddr
);
992 static int mlxsw_pci_queue_group_init(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
993 const struct mlxsw_pci_queue_ops
*q_ops
,
996 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
997 struct mlxsw_pci_queue_type_group
*queue_group
;
1002 queue_group
= mlxsw_pci_queue_type_group_get(mlxsw_pci
, q_ops
->type
);
1003 queue_group
->q
= kcalloc(num_qs
, sizeof(*queue_group
->q
), GFP_KERNEL
);
1004 if (!queue_group
->q
)
1007 for (i
= 0; i
< num_qs
; i
++) {
1008 err
= mlxsw_pci_queue_init(mlxsw_pci
, mbox
, q_ops
,
1009 &queue_group
->q
[i
], i
);
1011 goto err_queue_init
;
1013 queue_group
->count
= num_qs
;
1015 sprintf(tmp
, "%s_stats", mlxsw_pci_queue_type_str(q_ops
->type
));
1016 debugfs_create_devm_seqfile(&pdev
->dev
, tmp
, mlxsw_pci
->dbg_dir
,
1022 for (i
--; i
>= 0; i
--)
1023 mlxsw_pci_queue_fini(mlxsw_pci
, q_ops
, &queue_group
->q
[i
]);
1024 kfree(queue_group
->q
);
1028 static void mlxsw_pci_queue_group_fini(struct mlxsw_pci
*mlxsw_pci
,
1029 const struct mlxsw_pci_queue_ops
*q_ops
)
1031 struct mlxsw_pci_queue_type_group
*queue_group
;
1034 queue_group
= mlxsw_pci_queue_type_group_get(mlxsw_pci
, q_ops
->type
);
1035 for (i
= 0; i
< queue_group
->count
; i
++)
1036 mlxsw_pci_queue_fini(mlxsw_pci
, q_ops
, &queue_group
->q
[i
]);
1037 kfree(queue_group
->q
);
1040 static int mlxsw_pci_aqs_init(struct mlxsw_pci
*mlxsw_pci
, char *mbox
)
1042 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
1053 mlxsw_cmd_mbox_zero(mbox
);
1054 err
= mlxsw_cmd_query_aq_cap(mlxsw_pci
->core
, mbox
);
1058 num_sdqs
= mlxsw_cmd_mbox_query_aq_cap_max_num_sdqs_get(mbox
);
1059 sdq_log2sz
= mlxsw_cmd_mbox_query_aq_cap_log_max_sdq_sz_get(mbox
);
1060 num_rdqs
= mlxsw_cmd_mbox_query_aq_cap_max_num_rdqs_get(mbox
);
1061 rdq_log2sz
= mlxsw_cmd_mbox_query_aq_cap_log_max_rdq_sz_get(mbox
);
1062 num_cqs
= mlxsw_cmd_mbox_query_aq_cap_max_num_cqs_get(mbox
);
1063 cq_log2sz
= mlxsw_cmd_mbox_query_aq_cap_log_max_cq_sz_get(mbox
);
1064 num_eqs
= mlxsw_cmd_mbox_query_aq_cap_max_num_eqs_get(mbox
);
1065 eq_log2sz
= mlxsw_cmd_mbox_query_aq_cap_log_max_eq_sz_get(mbox
);
1067 if ((num_sdqs
!= MLXSW_PCI_SDQS_COUNT
) ||
1068 (num_rdqs
!= MLXSW_PCI_RDQS_COUNT
) ||
1069 (num_cqs
!= MLXSW_PCI_CQS_COUNT
) ||
1070 (num_eqs
!= MLXSW_PCI_EQS_COUNT
)) {
1071 dev_err(&pdev
->dev
, "Unsupported number of queues\n");
1075 if ((1 << sdq_log2sz
!= MLXSW_PCI_WQE_COUNT
) ||
1076 (1 << rdq_log2sz
!= MLXSW_PCI_WQE_COUNT
) ||
1077 (1 << cq_log2sz
!= MLXSW_PCI_CQE_COUNT
) ||
1078 (1 << eq_log2sz
!= MLXSW_PCI_EQE_COUNT
)) {
1079 dev_err(&pdev
->dev
, "Unsupported number of async queue descriptors\n");
1083 err
= mlxsw_pci_queue_group_init(mlxsw_pci
, mbox
, &mlxsw_pci_eq_ops
,
1086 dev_err(&pdev
->dev
, "Failed to initialize event queues\n");
1090 err
= mlxsw_pci_queue_group_init(mlxsw_pci
, mbox
, &mlxsw_pci_cq_ops
,
1093 dev_err(&pdev
->dev
, "Failed to initialize completion queues\n");
1097 err
= mlxsw_pci_queue_group_init(mlxsw_pci
, mbox
, &mlxsw_pci_sdq_ops
,
1100 dev_err(&pdev
->dev
, "Failed to initialize send descriptor queues\n");
1104 err
= mlxsw_pci_queue_group_init(mlxsw_pci
, mbox
, &mlxsw_pci_rdq_ops
,
1107 dev_err(&pdev
->dev
, "Failed to initialize receive descriptor queues\n");
1111 /* We have to poll in command interface until queues are initialized */
1112 mlxsw_pci
->cmd
.nopoll
= true;
1116 mlxsw_pci_queue_group_fini(mlxsw_pci
, &mlxsw_pci_sdq_ops
);
1118 mlxsw_pci_queue_group_fini(mlxsw_pci
, &mlxsw_pci_cq_ops
);
1120 mlxsw_pci_queue_group_fini(mlxsw_pci
, &mlxsw_pci_eq_ops
);
1124 static void mlxsw_pci_aqs_fini(struct mlxsw_pci
*mlxsw_pci
)
1126 mlxsw_pci
->cmd
.nopoll
= false;
1127 mlxsw_pci_queue_group_fini(mlxsw_pci
, &mlxsw_pci_rdq_ops
);
1128 mlxsw_pci_queue_group_fini(mlxsw_pci
, &mlxsw_pci_sdq_ops
);
1129 mlxsw_pci_queue_group_fini(mlxsw_pci
, &mlxsw_pci_cq_ops
);
1130 mlxsw_pci_queue_group_fini(mlxsw_pci
, &mlxsw_pci_eq_ops
);
1134 mlxsw_pci_config_profile_swid_config(struct mlxsw_pci
*mlxsw_pci
,
1135 char *mbox
, int index
,
1136 const struct mlxsw_swid_config
*swid
)
1140 if (swid
->used_type
) {
1141 mlxsw_cmd_mbox_config_profile_swid_config_type_set(
1142 mbox
, index
, swid
->type
);
1145 if (swid
->used_properties
) {
1146 mlxsw_cmd_mbox_config_profile_swid_config_properties_set(
1147 mbox
, index
, swid
->properties
);
1150 mlxsw_cmd_mbox_config_profile_swid_config_mask_set(mbox
, index
, mask
);
1153 static int mlxsw_pci_config_profile(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
1154 const struct mlxsw_config_profile
*profile
)
1158 mlxsw_cmd_mbox_zero(mbox
);
1160 if (profile
->used_max_vepa_channels
) {
1161 mlxsw_cmd_mbox_config_profile_set_max_vepa_channels_set(
1163 mlxsw_cmd_mbox_config_profile_max_vepa_channels_set(
1164 mbox
, profile
->max_vepa_channels
);
1166 if (profile
->used_max_lag
) {
1167 mlxsw_cmd_mbox_config_profile_set_max_lag_set(
1169 mlxsw_cmd_mbox_config_profile_max_lag_set(
1170 mbox
, profile
->max_lag
);
1172 if (profile
->used_max_port_per_lag
) {
1173 mlxsw_cmd_mbox_config_profile_set_max_port_per_lag_set(
1175 mlxsw_cmd_mbox_config_profile_max_port_per_lag_set(
1176 mbox
, profile
->max_port_per_lag
);
1178 if (profile
->used_max_mid
) {
1179 mlxsw_cmd_mbox_config_profile_set_max_mid_set(
1181 mlxsw_cmd_mbox_config_profile_max_mid_set(
1182 mbox
, profile
->max_mid
);
1184 if (profile
->used_max_pgt
) {
1185 mlxsw_cmd_mbox_config_profile_set_max_pgt_set(
1187 mlxsw_cmd_mbox_config_profile_max_pgt_set(
1188 mbox
, profile
->max_pgt
);
1190 if (profile
->used_max_system_port
) {
1191 mlxsw_cmd_mbox_config_profile_set_max_system_port_set(
1193 mlxsw_cmd_mbox_config_profile_max_system_port_set(
1194 mbox
, profile
->max_system_port
);
1196 if (profile
->used_max_vlan_groups
) {
1197 mlxsw_cmd_mbox_config_profile_set_max_vlan_groups_set(
1199 mlxsw_cmd_mbox_config_profile_max_vlan_groups_set(
1200 mbox
, profile
->max_vlan_groups
);
1202 if (profile
->used_max_regions
) {
1203 mlxsw_cmd_mbox_config_profile_set_max_regions_set(
1205 mlxsw_cmd_mbox_config_profile_max_regions_set(
1206 mbox
, profile
->max_regions
);
1208 if (profile
->used_flood_tables
) {
1209 mlxsw_cmd_mbox_config_profile_set_flood_tables_set(
1211 mlxsw_cmd_mbox_config_profile_max_flood_tables_set(
1212 mbox
, profile
->max_flood_tables
);
1213 mlxsw_cmd_mbox_config_profile_max_vid_flood_tables_set(
1214 mbox
, profile
->max_vid_flood_tables
);
1216 if (profile
->used_flood_mode
) {
1217 mlxsw_cmd_mbox_config_profile_set_flood_mode_set(
1219 mlxsw_cmd_mbox_config_profile_flood_mode_set(
1220 mbox
, profile
->flood_mode
);
1222 if (profile
->used_max_ib_mc
) {
1223 mlxsw_cmd_mbox_config_profile_set_max_ib_mc_set(
1225 mlxsw_cmd_mbox_config_profile_max_ib_mc_set(
1226 mbox
, profile
->max_ib_mc
);
1228 if (profile
->used_max_pkey
) {
1229 mlxsw_cmd_mbox_config_profile_set_max_pkey_set(
1231 mlxsw_cmd_mbox_config_profile_max_pkey_set(
1232 mbox
, profile
->max_pkey
);
1234 if (profile
->used_ar_sec
) {
1235 mlxsw_cmd_mbox_config_profile_set_ar_sec_set(
1237 mlxsw_cmd_mbox_config_profile_ar_sec_set(
1238 mbox
, profile
->ar_sec
);
1240 if (profile
->used_adaptive_routing_group_cap
) {
1241 mlxsw_cmd_mbox_config_profile_set_adaptive_routing_group_cap_set(
1243 mlxsw_cmd_mbox_config_profile_adaptive_routing_group_cap_set(
1244 mbox
, profile
->adaptive_routing_group_cap
);
1247 for (i
= 0; i
< MLXSW_CONFIG_PROFILE_SWID_COUNT
; i
++)
1248 mlxsw_pci_config_profile_swid_config(mlxsw_pci
, mbox
, i
,
1249 &profile
->swid_config
[i
]);
1251 return mlxsw_cmd_config_profile_set(mlxsw_pci
->core
, mbox
);
1254 static int mlxsw_pci_boardinfo(struct mlxsw_pci
*mlxsw_pci
, char *mbox
)
1256 struct mlxsw_bus_info
*bus_info
= &mlxsw_pci
->bus_info
;
1259 mlxsw_cmd_mbox_zero(mbox
);
1260 err
= mlxsw_cmd_boardinfo(mlxsw_pci
->core
, mbox
);
1263 mlxsw_cmd_mbox_boardinfo_vsd_memcpy_from(mbox
, bus_info
->vsd
);
1264 mlxsw_cmd_mbox_boardinfo_psid_memcpy_from(mbox
, bus_info
->psid
);
1268 static int mlxsw_pci_fw_area_init(struct mlxsw_pci
*mlxsw_pci
, char *mbox
,
1271 struct mlxsw_pci_mem_item
*mem_item
;
1275 mlxsw_pci
->fw_area
.items
= kcalloc(num_pages
, sizeof(*mem_item
),
1277 if (!mlxsw_pci
->fw_area
.items
)
1279 mlxsw_pci
->fw_area
.num_pages
= num_pages
;
1281 mlxsw_cmd_mbox_zero(mbox
);
1282 for (i
= 0; i
< num_pages
; i
++) {
1283 mem_item
= &mlxsw_pci
->fw_area
.items
[i
];
1285 mem_item
->size
= MLXSW_PCI_PAGE_SIZE
;
1286 mem_item
->buf
= pci_alloc_consistent(mlxsw_pci
->pdev
,
1288 &mem_item
->mapaddr
);
1289 if (!mem_item
->buf
) {
1293 mlxsw_cmd_mbox_map_fa_pa_set(mbox
, i
, mem_item
->mapaddr
);
1294 mlxsw_cmd_mbox_map_fa_log2size_set(mbox
, i
, 0); /* 1 page */
1297 err
= mlxsw_cmd_map_fa(mlxsw_pci
->core
, mbox
, num_pages
);
1299 goto err_cmd_map_fa
;
1305 for (i
--; i
>= 0; i
--) {
1306 mem_item
= &mlxsw_pci
->fw_area
.items
[i
];
1308 pci_free_consistent(mlxsw_pci
->pdev
, mem_item
->size
,
1309 mem_item
->buf
, mem_item
->mapaddr
);
1311 kfree(mlxsw_pci
->fw_area
.items
);
1315 static void mlxsw_pci_fw_area_fini(struct mlxsw_pci
*mlxsw_pci
)
1317 struct mlxsw_pci_mem_item
*mem_item
;
1320 mlxsw_cmd_unmap_fa(mlxsw_pci
->core
);
1322 for (i
= 0; i
< mlxsw_pci
->fw_area
.num_pages
; i
++) {
1323 mem_item
= &mlxsw_pci
->fw_area
.items
[i
];
1325 pci_free_consistent(mlxsw_pci
->pdev
, mem_item
->size
,
1326 mem_item
->buf
, mem_item
->mapaddr
);
1328 kfree(mlxsw_pci
->fw_area
.items
);
1331 static irqreturn_t
mlxsw_pci_eq_irq_handler(int irq
, void *dev_id
)
1333 struct mlxsw_pci
*mlxsw_pci
= dev_id
;
1334 struct mlxsw_pci_queue
*q
;
1337 for (i
= 0; i
< MLXSW_PCI_EQS_COUNT
; i
++) {
1338 q
= mlxsw_pci_eq_get(mlxsw_pci
, i
);
1339 mlxsw_pci_queue_tasklet_schedule(q
);
1344 static int mlxsw_pci_init(void *bus_priv
, struct mlxsw_core
*mlxsw_core
,
1345 const struct mlxsw_config_profile
*profile
)
1347 struct mlxsw_pci
*mlxsw_pci
= bus_priv
;
1348 struct pci_dev
*pdev
= mlxsw_pci
->pdev
;
1353 mutex_init(&mlxsw_pci
->cmd
.lock
);
1354 init_waitqueue_head(&mlxsw_pci
->cmd
.wait
);
1356 mlxsw_pci
->core
= mlxsw_core
;
1358 mbox
= mlxsw_cmd_mbox_alloc();
1361 err
= mlxsw_cmd_query_fw(mlxsw_core
, mbox
);
1365 mlxsw_pci
->bus_info
.fw_rev
.major
=
1366 mlxsw_cmd_mbox_query_fw_fw_rev_major_get(mbox
);
1367 mlxsw_pci
->bus_info
.fw_rev
.minor
=
1368 mlxsw_cmd_mbox_query_fw_fw_rev_minor_get(mbox
);
1369 mlxsw_pci
->bus_info
.fw_rev
.subminor
=
1370 mlxsw_cmd_mbox_query_fw_fw_rev_subminor_get(mbox
);
1372 if (mlxsw_cmd_mbox_query_fw_cmd_interface_rev_get(mbox
) != 1) {
1373 dev_err(&pdev
->dev
, "Unsupported cmd interface revision ID queried from hw\n");
1377 if (mlxsw_cmd_mbox_query_fw_doorbell_page_bar_get(mbox
) != 0) {
1378 dev_err(&pdev
->dev
, "Unsupported doorbell page bar queried from hw\n");
1380 goto err_doorbell_page_bar
;
1383 mlxsw_pci
->doorbell_offset
=
1384 mlxsw_cmd_mbox_query_fw_doorbell_page_offset_get(mbox
);
1386 num_pages
= mlxsw_cmd_mbox_query_fw_fw_pages_get(mbox
);
1387 err
= mlxsw_pci_fw_area_init(mlxsw_pci
, mbox
, num_pages
);
1389 goto err_fw_area_init
;
1391 err
= mlxsw_pci_boardinfo(mlxsw_pci
, mbox
);
1395 err
= mlxsw_pci_config_profile(mlxsw_pci
, mbox
, profile
);
1397 goto err_config_profile
;
1399 err
= mlxsw_pci_aqs_init(mlxsw_pci
, mbox
);
1403 err
= request_irq(mlxsw_pci
->msix_entry
.vector
,
1404 mlxsw_pci_eq_irq_handler
, 0,
1405 mlxsw_pci_driver_name
, mlxsw_pci
);
1407 dev_err(&pdev
->dev
, "IRQ request failed\n");
1408 goto err_request_eq_irq
;
1414 mlxsw_pci_aqs_fini(mlxsw_pci
);
1418 mlxsw_pci_fw_area_fini(mlxsw_pci
);
1420 err_doorbell_page_bar
:
1424 mlxsw_cmd_mbox_free(mbox
);
1428 static void mlxsw_pci_fini(void *bus_priv
)
1430 struct mlxsw_pci
*mlxsw_pci
= bus_priv
;
1432 free_irq(mlxsw_pci
->msix_entry
.vector
, mlxsw_pci
);
1433 mlxsw_pci_aqs_fini(mlxsw_pci
);
1434 mlxsw_pci_fw_area_fini(mlxsw_pci
);
1437 static struct mlxsw_pci_queue
*
1438 mlxsw_pci_sdq_pick(struct mlxsw_pci
*mlxsw_pci
,
1439 const struct mlxsw_tx_info
*tx_info
)
1441 u8 sdqn
= tx_info
->local_port
% mlxsw_pci_sdq_count(mlxsw_pci
);
1443 return mlxsw_pci_sdq_get(mlxsw_pci
, sdqn
);
1446 static bool mlxsw_pci_skb_transmit_busy(void *bus_priv
,
1447 const struct mlxsw_tx_info
*tx_info
)
1449 struct mlxsw_pci
*mlxsw_pci
= bus_priv
;
1450 struct mlxsw_pci_queue
*q
= mlxsw_pci_sdq_pick(mlxsw_pci
, tx_info
);
1452 return !mlxsw_pci_queue_elem_info_producer_get(q
);
1455 static int mlxsw_pci_skb_transmit(void *bus_priv
, struct sk_buff
*skb
,
1456 const struct mlxsw_tx_info
*tx_info
)
1458 struct mlxsw_pci
*mlxsw_pci
= bus_priv
;
1459 struct mlxsw_pci_queue
*q
;
1460 struct mlxsw_pci_queue_elem_info
*elem_info
;
1465 if (skb_shinfo(skb
)->nr_frags
> MLXSW_PCI_WQE_SG_ENTRIES
- 1) {
1466 err
= skb_linearize(skb
);
1471 q
= mlxsw_pci_sdq_pick(mlxsw_pci
, tx_info
);
1472 spin_lock_bh(&q
->lock
);
1473 elem_info
= mlxsw_pci_queue_elem_info_producer_get(q
);
1479 elem_info
->u
.sdq
.skb
= skb
;
1481 wqe
= elem_info
->elem
;
1482 mlxsw_pci_wqe_c_set(wqe
, 1); /* always report completion */
1483 mlxsw_pci_wqe_lp_set(wqe
, !!tx_info
->is_emad
);
1484 mlxsw_pci_wqe_type_set(wqe
, MLXSW_PCI_WQE_TYPE_ETHERNET
);
1486 err
= mlxsw_pci_wqe_frag_map(mlxsw_pci
, wqe
, 0, skb
->data
,
1487 skb_headlen(skb
), DMA_TO_DEVICE
);
1491 for (i
= 0; i
< skb_shinfo(skb
)->nr_frags
; i
++) {
1492 const skb_frag_t
*frag
= &skb_shinfo(skb
)->frags
[i
];
1494 err
= mlxsw_pci_wqe_frag_map(mlxsw_pci
, wqe
, i
+ 1,
1495 skb_frag_address(frag
),
1496 skb_frag_size(frag
),
1502 /* Set unused sq entries byte count to zero. */
1503 for (i
++; i
< MLXSW_PCI_WQE_SG_ENTRIES
; i
++)
1504 mlxsw_pci_wqe_byte_count_set(wqe
, i
, 0);
1506 /* Everything is set up, ring producer doorbell to get HW going */
1507 q
->producer_counter
++;
1508 mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci
, q
);
1514 mlxsw_pci_wqe_frag_unmap(mlxsw_pci
, wqe
, i
, DMA_TO_DEVICE
);
1516 spin_unlock_bh(&q
->lock
);
1520 static int mlxsw_pci_cmd_exec(void *bus_priv
, u16 opcode
, u8 opcode_mod
,
1521 u32 in_mod
, bool out_mbox_direct
,
1522 char *in_mbox
, size_t in_mbox_size
,
1523 char *out_mbox
, size_t out_mbox_size
,
1526 struct mlxsw_pci
*mlxsw_pci
= bus_priv
;
1527 dma_addr_t in_mapaddr
= 0;
1528 dma_addr_t out_mapaddr
= 0;
1529 bool evreq
= mlxsw_pci
->cmd
.nopoll
;
1530 unsigned long timeout
= msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS
);
1531 bool *p_wait_done
= &mlxsw_pci
->cmd
.wait_done
;
1534 *p_status
= MLXSW_CMD_STATUS_OK
;
1536 err
= mutex_lock_interruptible(&mlxsw_pci
->cmd
.lock
);
1541 in_mapaddr
= pci_map_single(mlxsw_pci
->pdev
, in_mbox
,
1542 in_mbox_size
, PCI_DMA_TODEVICE
);
1543 if (unlikely(pci_dma_mapping_error(mlxsw_pci
->pdev
,
1546 goto err_in_mbox_map
;
1549 mlxsw_pci_write32(mlxsw_pci
, CIR_IN_PARAM_HI
, in_mapaddr
>> 32);
1550 mlxsw_pci_write32(mlxsw_pci
, CIR_IN_PARAM_LO
, in_mapaddr
);
1553 out_mapaddr
= pci_map_single(mlxsw_pci
->pdev
, out_mbox
,
1554 out_mbox_size
, PCI_DMA_FROMDEVICE
);
1555 if (unlikely(pci_dma_mapping_error(mlxsw_pci
->pdev
,
1558 goto err_out_mbox_map
;
1561 mlxsw_pci_write32(mlxsw_pci
, CIR_OUT_PARAM_HI
, out_mapaddr
>> 32);
1562 mlxsw_pci_write32(mlxsw_pci
, CIR_OUT_PARAM_LO
, out_mapaddr
);
1564 mlxsw_pci_write32(mlxsw_pci
, CIR_IN_MODIFIER
, in_mod
);
1565 mlxsw_pci_write32(mlxsw_pci
, CIR_TOKEN
, 0);
1567 *p_wait_done
= false;
1569 wmb(); /* all needs to be written before we write control register */
1570 mlxsw_pci_write32(mlxsw_pci
, CIR_CTRL
,
1571 MLXSW_PCI_CIR_CTRL_GO_BIT
|
1572 (evreq
? MLXSW_PCI_CIR_CTRL_EVREQ_BIT
: 0) |
1573 (opcode_mod
<< MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT
) |
1579 end
= jiffies
+ timeout
;
1581 u32 ctrl
= mlxsw_pci_read32(mlxsw_pci
, CIR_CTRL
);
1583 if (!(ctrl
& MLXSW_PCI_CIR_CTRL_GO_BIT
)) {
1584 *p_wait_done
= true;
1585 *p_status
= ctrl
>> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT
;
1589 } while (time_before(jiffies
, end
));
1591 wait_event_timeout(mlxsw_pci
->cmd
.wait
, *p_wait_done
, timeout
);
1592 *p_status
= mlxsw_pci
->cmd
.comp
.status
;
1603 if (!err
&& out_mbox
&& out_mbox_direct
) {
1604 /* Some commands does not use output param as address to mailbox
1605 * but they store output directly into registers. In that case,
1606 * copy registers into mbox buffer.
1611 tmp
= cpu_to_be32(mlxsw_pci_read32(mlxsw_pci
,
1613 memcpy(out_mbox
, &tmp
, sizeof(tmp
));
1614 tmp
= cpu_to_be32(mlxsw_pci_read32(mlxsw_pci
,
1616 memcpy(out_mbox
+ sizeof(tmp
), &tmp
, sizeof(tmp
));
1621 pci_unmap_single(mlxsw_pci
->pdev
, out_mapaddr
, out_mbox_size
,
1622 PCI_DMA_FROMDEVICE
);
1628 pci_unmap_single(mlxsw_pci
->pdev
, in_mapaddr
, in_mbox_size
,
1631 mutex_unlock(&mlxsw_pci
->cmd
.lock
);
1636 static const struct mlxsw_bus mlxsw_pci_bus
= {
1638 .init
= mlxsw_pci_init
,
1639 .fini
= mlxsw_pci_fini
,
1640 .skb_transmit_busy
= mlxsw_pci_skb_transmit_busy
,
1641 .skb_transmit
= mlxsw_pci_skb_transmit
,
1642 .cmd_exec
= mlxsw_pci_cmd_exec
,
1645 static int mlxsw_pci_sw_reset(struct mlxsw_pci
*mlxsw_pci
)
1647 mlxsw_pci_write32(mlxsw_pci
, SW_RESET
, MLXSW_PCI_SW_RESET_RST_BIT
);
1648 /* Current firware does not let us know when the reset is done.
1649 * So we just wait here for constant time and hope for the best.
1651 msleep(MLXSW_PCI_SW_RESET_TIMEOUT_MSECS
);
1655 static int mlxsw_pci_probe(struct pci_dev
*pdev
, const struct pci_device_id
*id
)
1657 struct mlxsw_pci
*mlxsw_pci
;
1660 mlxsw_pci
= kzalloc(sizeof(*mlxsw_pci
), GFP_KERNEL
);
1664 err
= pci_enable_device(pdev
);
1666 dev_err(&pdev
->dev
, "pci_enable_device failed\n");
1667 goto err_pci_enable_device
;
1670 err
= pci_request_regions(pdev
, mlxsw_pci_driver_name
);
1672 dev_err(&pdev
->dev
, "pci_request_regions failed\n");
1673 goto err_pci_request_regions
;
1676 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
1678 err
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
1680 dev_err(&pdev
->dev
, "pci_set_consistent_dma_mask failed\n");
1681 goto err_pci_set_dma_mask
;
1684 err
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(32));
1686 dev_err(&pdev
->dev
, "pci_set_dma_mask failed\n");
1687 goto err_pci_set_dma_mask
;
1691 if (pci_resource_len(pdev
, 0) < MLXSW_PCI_BAR0_SIZE
) {
1692 dev_err(&pdev
->dev
, "invalid PCI region size\n");
1694 goto err_pci_resource_len_check
;
1697 mlxsw_pci
->hw_addr
= ioremap(pci_resource_start(pdev
, 0),
1698 pci_resource_len(pdev
, 0));
1699 if (!mlxsw_pci
->hw_addr
) {
1700 dev_err(&pdev
->dev
, "ioremap failed\n");
1704 pci_set_master(pdev
);
1706 mlxsw_pci
->pdev
= pdev
;
1707 pci_set_drvdata(pdev
, mlxsw_pci
);
1709 err
= mlxsw_pci_sw_reset(mlxsw_pci
);
1711 dev_err(&pdev
->dev
, "Software reset failed\n");
1715 err
= pci_enable_msix_exact(pdev
, &mlxsw_pci
->msix_entry
, 1);
1717 dev_err(&pdev
->dev
, "MSI-X init failed\n");
1721 mlxsw_pci
->bus_info
.device_kind
= mlxsw_pci_device_kind_get(id
);
1722 mlxsw_pci
->bus_info
.device_name
= pci_name(mlxsw_pci
->pdev
);
1723 mlxsw_pci
->bus_info
.dev
= &pdev
->dev
;
1725 mlxsw_pci
->dbg_dir
= debugfs_create_dir(mlxsw_pci
->bus_info
.device_name
,
1726 mlxsw_pci_dbg_root
);
1727 if (!mlxsw_pci
->dbg_dir
) {
1728 dev_err(&pdev
->dev
, "Failed to create debugfs dir\n");
1730 goto err_dbg_create_dir
;
1733 err
= mlxsw_core_bus_device_register(&mlxsw_pci
->bus_info
,
1734 &mlxsw_pci_bus
, mlxsw_pci
);
1736 dev_err(&pdev
->dev
, "cannot register bus device\n");
1737 goto err_bus_device_register
;
1742 err_bus_device_register
:
1743 debugfs_remove_recursive(mlxsw_pci
->dbg_dir
);
1745 pci_disable_msix(mlxsw_pci
->pdev
);
1748 iounmap(mlxsw_pci
->hw_addr
);
1750 err_pci_resource_len_check
:
1751 err_pci_set_dma_mask
:
1752 pci_release_regions(pdev
);
1753 err_pci_request_regions
:
1754 pci_disable_device(pdev
);
1755 err_pci_enable_device
:
1760 static void mlxsw_pci_remove(struct pci_dev
*pdev
)
1762 struct mlxsw_pci
*mlxsw_pci
= pci_get_drvdata(pdev
);
1764 mlxsw_core_bus_device_unregister(mlxsw_pci
->core
);
1765 debugfs_remove_recursive(mlxsw_pci
->dbg_dir
);
1766 pci_disable_msix(mlxsw_pci
->pdev
);
1767 iounmap(mlxsw_pci
->hw_addr
);
1768 pci_release_regions(mlxsw_pci
->pdev
);
1769 pci_disable_device(mlxsw_pci
->pdev
);
1773 static struct pci_driver mlxsw_pci_driver
= {
1774 .name
= mlxsw_pci_driver_name
,
1775 .id_table
= mlxsw_pci_id_table
,
1776 .probe
= mlxsw_pci_probe
,
1777 .remove
= mlxsw_pci_remove
,
1780 static int __init
mlxsw_pci_module_init(void)
1784 mlxsw_pci_dbg_root
= debugfs_create_dir(mlxsw_pci_driver_name
, NULL
);
1785 if (!mlxsw_pci_dbg_root
)
1787 err
= pci_register_driver(&mlxsw_pci_driver
);
1789 goto err_register_driver
;
1792 err_register_driver
:
1793 debugfs_remove_recursive(mlxsw_pci_dbg_root
);
1797 static void __exit
mlxsw_pci_module_exit(void)
1799 pci_unregister_driver(&mlxsw_pci_driver
);
1800 debugfs_remove_recursive(mlxsw_pci_dbg_root
);
/* Module entry/exit points. */
module_init(mlxsw_pci_module_init);
module_exit(mlxsw_pci_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch PCI interface driver");
/* Exposes the PCI ID table so the module loader can autoload this driver. */
MODULE_DEVICE_TABLE(pci, mlxsw_pci_id_table);