/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef __MLX5_EN_STATS_H__
#define __MLX5_EN_STATS_H__

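/*
 * Read one counter out of a stats structure through the byte offset
 * recorded in a counter_desc table.  The _CPU variants read host-endian
 * software counters; the _BE variants byte-swap values that the device
 * returns in big-endian.  Illustrative use (roughly how the driver's
 * ethtool code consumes a descriptor table):
 *
 *	for (i = 0; i < NUM_SW_COUNTERS; i++)
 *		data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
 *						   sw_stats_desc, i);
 */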
#define MLX5E_READ_CTR64_CPU(ptr, dsc, i) \
	(*(u64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR64_BE(ptr, dsc, i) \
	be64_to_cpu(*(__be64 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))

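/*
 * Expand to the two initializers of struct counter_desc: the stringified
 * field name for ethtool and the field's byte offset within the given
 * stats structure.  The RX/TX variants carry a ring-index placeholder
 * that is filled in when the per-channel strings are generated.
 */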
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
#define MLX5E_DECLARE_TX_STAT(type, fld) "tx%d_"#fld, offsetof(type, fld)

struct counter_desc {
	char		format[ETH_GSTRING_LEN];
	int		offset; /* Byte offset */
};

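/*
 * Software counters, summed up by the driver from the per-ring
 * mlx5e_rq_stats/mlx5e_sq_stats below and reported via ethtool -S.
 */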
struct mlx5e_sw_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_tso_packets;
	u64 tx_tso_bytes;
	u64 tx_tso_inner_packets;
	u64 tx_tso_inner_bytes;
	u64 rx_lro_packets;
	u64 rx_lro_bytes;
	u64 rx_csum_unnecessary;
	u64 rx_csum_none;
	u64 rx_csum_complete;
	u64 rx_csum_unnecessary_inner;
	u64 rx_xdp_drop;
	u64 rx_xdp_tx;
	u64 rx_xdp_tx_full;
	u64 tx_csum_partial;
	u64 tx_csum_partial_inner;
	u64 tx_queue_stopped;
	u64 tx_queue_wake;
	u64 tx_queue_dropped;
	u64 tx_xmit_more;
	u64 rx_wqe_err;
	u64 rx_mpwqe_filler;
	u64 rx_buff_alloc_err;
	u64 rx_cqe_compress_blks;
	u64 rx_cqe_compress_pkts;
	u64 rx_cache_reuse;
	u64 rx_cache_full;
	u64 rx_cache_empty;
	u64 rx_cache_busy;

	/* Special handling counters */
	u64 link_down_events_phy;
};

static const struct counter_desc sw_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
};

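/*
 * Device queue (Q) counter, queried from firmware; rx_out_of_buffer
 * counts packets dropped because no receive WQE was available.
 */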
struct mlx5e_qcounter_stats {
	u32 rx_out_of_buffer;
};

static const struct counter_desc q_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
};

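/*
 * Per-vport counters returned by the QUERY_VPORT_COUNTER command.  The
 * raw command output is kept in query_vport_out; individual fields are
 * extracted with MLX5_GET64() against the query_vport_counter_out layout.
 */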
#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
						vstats->query_vport_out, c)

struct mlx5e_vport_stats {
	__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};

static const struct counter_desc vport_stats_desc[] = {
	{ "rx_vport_unicast_packets",
		VPORT_COUNTER_OFF(received_eth_unicast.packets) },
	{ "rx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(received_eth_unicast.octets) },
	{ "tx_vport_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
	{ "tx_vport_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
	{ "rx_vport_multicast_packets",
		VPORT_COUNTER_OFF(received_eth_multicast.packets) },
	{ "rx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(received_eth_multicast.octets) },
	{ "tx_vport_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
	{ "tx_vport_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
	{ "rx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
	{ "rx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
	{ "tx_vport_broadcast_packets",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
	{ "tx_vport_broadcast_bytes",
		VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
	{ "rx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(received_ib_unicast.packets) },
	{ "rx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(received_ib_unicast.octets) },
	{ "tx_vport_rdma_unicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
	{ "tx_vport_rdma_unicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
	{ "rx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(received_ib_multicast.packets) },
	{ "rx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(received_ib_multicast.octets) },
	{ "tx_vport_rdma_multicast_packets",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
	{ "tx_vport_rdma_multicast_bytes",
		VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
};

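/*
 * Physical-port counters are read from the PPCNT access register, one
 * counter group per query.  Each 64-bit counter is laid out as
 * consecutive 32-bit _high/_low fields, so MLX5_BYTE_OFF()/MLX5_GET64()
 * on the _high field yields the full big-endian 64-bit value.
 */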
#define PPORT_802_3_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_802_3_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
		   counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
		   counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
		   counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_PHY_STATISTICAL_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
	MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
		   counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_OFF(c) \
	MLX5_BYTE_OFF(ppcnt_reg, \
		      counter_set.eth_per_prio_grp_data_layout.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
	MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
		   counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO	8

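/*
 * Raw PPCNT snapshots, one register-sized buffer per counter group
 * (and one per priority for the per-priority group).
 */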
struct mlx5e_pport_stats {
	__be64 IEEE_802_3_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2863_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
	__be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};

static const struct counter_desc pport_802_3_stats_desc[] = {
	{ "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
	{ "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
	{ "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
	{ "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
	{ "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
	{ "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
	{ "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
	{ "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
	{ "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
	{ "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
	{ "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
	{ "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
	{ "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
	{ "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
	{ "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
	{ "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
	{ "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
	{ "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
};

static const struct counter_desc pport_2863_stats_desc[] = {
	{ "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
	{ "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
	{ "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
};

static const struct counter_desc pport_2819_stats_desc[] = {
	{ "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
	{ "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
	{ "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
	{ "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
	{ "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
	{ "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
	{ "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
	{ "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
	{ "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
	{ "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
	{ "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
	{ "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
	{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};

static const struct counter_desc pport_phy_statistical_stats_desc[] = {
	{ "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
	{ "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
};

static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
	{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
	{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
	{ "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
	{ "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
};

static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	/* %s is "global" or "prio{i}" */
	{ "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
	{ "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
	{ "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
	{ "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};

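/*
 * PCIe performance counters, read from the MPCNT access register.
 * Unlike the PPCNT groups above these are plain 32-bit fields, hence
 * MLX5_GET() rather than MLX5_GET64().
 */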
#define PCIE_PERF_OFF(c) \
	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
	MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
		 counter_set.pcie_perf_cntrs_grp_data_layout.c)

struct mlx5e_pcie_stats {
	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};

static const struct counter_desc pcie_perf_stats_desc[] = {
	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
};

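/*
 * Per-ring counters, updated locklessly from the RQ/SQ datapath (each
 * ring is owned by a single NAPI context) and summed into
 * mlx5e_sw_stats above.
 */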
struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
	u64 csum_complete;
	u64 csum_unnecessary_inner;
	u64 csum_none;
	u64 lro_packets;
	u64 lro_bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_tx_full;
	u64 wqe_err;
	u64 mpwqe_filler;
	u64 buff_alloc_err;
	u64 cqe_compress_blks;
	u64 cqe_compress_pkts;
	u64 cache_reuse;
	u64 cache_full;
	u64 cache_empty;
	u64 cache_busy;
};

static const struct counter_desc rq_stats_desc[] = {
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
	{ MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
};

struct mlx5e_sq_stats {
	/* commonly accessed in data path */
	u64 packets;
	u64 bytes;
	u64 xmit_more;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 csum_partial_inner;
	u64 nop;
	/* less likely accessed in data path */
	u64 csum_none;
	u64 stopped;
	u64 wake;
	u64 dropped;
};

static const struct counter_desc sq_stats_desc[] = {
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
	{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};

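/*
 * Table sizes used when building the ethtool strings/stats arrays.
 * Capability-dependent groups are multiplied by the relevant PCAM/MCAM
 * feature bit, so they contribute zero entries on devices that do not
 * support them.
 */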
#define NUM_SW_COUNTERS			ARRAY_SIZE(sw_stats_desc)
#define NUM_Q_COUNTERS			ARRAY_SIZE(q_stats_desc)
#define NUM_VPORT_COUNTERS		ARRAY_SIZE(vport_stats_desc)
#define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
	(ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
	 MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
#define NUM_PCIE_PERF_COUNTERS(priv) \
	(ARRAY_SIZE(pcie_perf_stats_desc) * \
	 MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
	ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
#define NUM_PPORT_COUNTERS(priv)	(NUM_PPORT_802_3_COUNTERS + \
					 NUM_PPORT_2863_COUNTERS + \
					 NUM_PPORT_2819_COUNTERS + \
					 NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
					 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
					 NUM_PPORT_PRIO)
#define NUM_PCIE_COUNTERS(priv)		NUM_PCIE_PERF_COUNTERS(priv)
#define NUM_RQ_STATS			ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS			ARRAY_SIZE(sq_stats_desc)

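/*
 * Top-level container for all of the above, embedded in the driver's
 * private structure (struct mlx5e_priv), one member per counter source.
 */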
struct mlx5e_stats {
	struct mlx5e_sw_stats sw;
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
	struct rtnl_link_stats64 vf_vport;
	struct mlx5e_pcie_stats pcie;
};

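/*
 * Port module event counters.  The offsets index arrays of u64 event
 * counts kept by the core driver, hence the stride of 8 bytes.
 */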
static const struct counter_desc mlx5e_pme_status_desc[] = {
	{ "module_plug", 0 },
	{ "module_unplug", 8 },
};

static const struct counter_desc mlx5e_pme_error_desc[] = {
	{ "module_pwr_budget_exd", 0 },  /* power budget exceeded */
	{ "module_long_range", 8 },      /* long range for non-MLNX cable */
	{ "module_bus_stuck", 16 },      /* bus stuck (I2C or data shorted) */
	{ "module_no_eeprom", 24 },      /* no EEPROM/retry timed out */
	{ "module_enforce_part", 32 },   /* enforced part number list */
	{ "module_unknown_id", 40 },     /* unknown identifier */
	{ "module_high_temp", 48 },      /* high temperature */
	{ "module_bad_shorted", 56 },    /* bad or shorted cable/module */
	{ "module_unknown_status", 64 },
};

#endif /* __MLX5_EN_STATS_H__ */