bcmdhd_1_77: Import A320F (A320FLXXU2CRE3) Oreo driver
LineageOS/android_kernel_samsung_universal7580.git: drivers/net/wireless/bcmdhd_1_77/dhd_msgbuf.c
1/**
2 * @file definition of host message ring functionality
3 * Provides type definitions and function prototypes used to link the
4 * DHD OS, bus, and protocol modules.
5 *
6 * Copyright (C) 1999-2018, Broadcom Corporation
7 *
8 * Unless you and Broadcom execute a separate written software license
9 * agreement governing use of this software, this software is licensed to you
10 * under the terms of the GNU General Public License version 2 (the "GPL"),
11 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12 * following added to such license:
13 *
14 * As a special exception, the copyright holders of this software give you
15 * permission to link this software with independent modules, and to copy and
16 * distribute the resulting executable under terms of your choice, provided that
17 * you also meet, for each linked independent module, the terms and conditions of
18 * the license of that module. An independent module is a module which is not
19 * derived from this software. The special exception does not apply to any
20 * modifications of the software.
21 *
22 * Notwithstanding the above, under no circumstances may you combine this
23 * software in any way with any other Broadcom software provided under a license
24 * other than the GPL, without Broadcom's express prior written consent.
25 *
26 *
27 * <<Broadcom-WL-IPTag/Open:>>
28 *
29 * $Id: dhd_msgbuf.c 733632 2017-11-29 08:46:58Z $
30 */
31
32
33#include <typedefs.h>
34#include <osl.h>
35
36#include <bcmutils.h>
37#include <bcmmsgbuf.h>
38#include <bcmendian.h>
39
40#include <dngl_stats.h>
41#include <dhd.h>
42#include <dhd_proto.h>
43
44#include <dhd_bus.h>
45
46#include <dhd_dbg.h>
47#include <siutils.h>
48#include <dhd_debug.h>
49
50#include <dhd_flowring.h>
51
52#include <pcie_core.h>
53#include <bcmpcie.h>
54#include <dhd_pcie.h>
55#ifdef DHD_TIMESYNC
56#include <dhd_timesync.h>
57#endif /* DHD_TIMESYNC */
58
59#if defined(DHD_LB)
60#include <linux/cpu.h>
61#include <bcm_ring.h>
62#define DHD_LB_WORKQ_SZ (8192)
63#define DHD_LB_WORKQ_SYNC (16)
64#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
65#endif /* DHD_LB */
66
67#include <hnd_debug.h>
68#include <hnd_armtrap.h>
69
70#ifdef DHD_PKT_LOGGING
71#include <dhd_pktlog.h>
72#endif /* DHD_PKT_LOGGING */
73
74extern char dhd_version[];
75extern char fw_version[];
76
77/**
78 * Host configures a soft doorbell for d2h rings by specifying a 32bit host
79 * address where a value must be written. Host may also request interrupt
80 * coalescing on this soft doorbell.
81 * Use Case: Hosts with network processors may register with the dongle the
82 * network processor's thread wakeup register and a value corresponding to the
83 * core/thread context. Dongle will issue a write transaction <address,value>
84 * to the PCIE RC, which will need to be routed to the mapped register space by
85 * the host.
86 */
87/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */
88
89/* Dependency Check */
90#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
91#error "DHD_USE_STATIC_CTRLBUF is NOT working with DHD_USE_OSLPKT_FOR_RESPBUF"
92#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */
93
94#define RETRIES 2 /* # of retries to retrieve matching ioctl response */
95
96#define DEFAULT_RX_BUFFERS_TO_POST 256
97#define RXBUFPOST_THRESHOLD 32
98#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */
99
100#define DHD_STOP_QUEUE_THRESHOLD 200
101#define DHD_START_QUEUE_THRESHOLD 100
102
103#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 */
104#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)
105
106/* flags for ioctl pending status */
107#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
108#define MSGBUF_IOCTL_RESP_PENDING (1<<1)
109
110#define DMA_ALIGN_LEN 4
111
112#define DMA_D2H_SCRATCH_BUF_LEN 8
113#define DMA_XFER_LEN_LIMIT 0x400000
114
115#ifdef BCM_HOST_BUF
116#ifndef DMA_HOST_BUFFER_LEN
117#define DMA_HOST_BUFFER_LEN 0x200000
118#endif
119#endif /* BCM_HOST_BUF */
120
121#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192
122
123#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
124#define DHD_FLOWRING_MAX_EVENTBUF_POST 32
125#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
126#define DHD_H2D_INFORING_MAX_BUF_POST 32
127#define DHD_MAX_TSBUF_POST 8
128
129#define DHD_PROT_FUNCS 41
130
131/* Length of buffer in host for bus throughput measurement */
132#define DHD_BUS_TPUT_BUF_LEN 2048
133
134#define TXP_FLUSH_NITEMS
135
136/* optimization to write "n" tx items at a time to ring */
137#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48
138
139#define RING_NAME_MAX_LENGTH 24
140#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
141/* Giving room before ioctl_trans_id rolls over. */
142#define BUFFER_BEFORE_ROLLOVER 300
143
144struct msgbuf_ring; /* ring context for common and flow rings */
145
146/**
147 * PCIE D2H DMA Complete Sync Modes
148 *
149 * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
150 * host system memory. A WAR using one of the following approaches is needed:
151 * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
152 * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR checksum
153 * and writes it in the last word of each work item. Each work item carries an
154 * epoch number = sequence num % 253.
155 *
156 * 3. Read Barrier: Dongle does a host memory read access prior to posting an
157 * interrupt, ensuring that the D2H data transfer has indeed completed.
158 * 4. Dongle DMA's all indices after producing items in the D2H ring, flushing
159 * ring contents before the indices.
160 *
161 * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
162 * callback (see dhd_prot_d2h_sync_none) may be bound.
163 *
164 * Dongle advertises host side sync mechanism requirements.
165 */
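/*
 * Minimal sketch of the SEQNUM check described in approach #1 above: the
 * dongle writes (seqnum % 253) into the last word of each work item and the
 * host compares it against its own expected epoch. The helper name below is
 * illustrative only; the real implementation is dhd_prot_d2h_sync_seqnum().
 */
#if 0	/* illustration only, not compiled */
static bool d2h_seqnum_marker_matches(uint32 expected_seqnum, uint32 last_word)
{
	/* e.g. expected 252 matches marker 252; the next item is expected as
	 * 253 % 253 == 0, so the epoch wraps without ambiguity.
	 */
	return (ltoh32(last_word) == (expected_seqnum % D2H_EPOCH_MODULO));
}
#endif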
166
167#define PCIE_D2H_SYNC_WAIT_TRIES (512UL)
168#define PCIE_D2H_SYNC_NUM_OF_STEPS (5UL)
169#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
170
171/**
172 * Custom callback attached based upon the D2H DMA Sync mode advertised by the dongle.
173 *
174 * On success: return cmn_msg_hdr_t::msg_type
175 * On failure: return 0 (invalid msg_type)
176 */
177typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
178 volatile cmn_msg_hdr_t *msg, int msglen);
179
180/*
181 * +----------------------------------------------------------------------------
182 *
183 * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
184 * flowids do not.
185 *
186 * Dongle advertises the max H2D rings as max_sub_queues = 'N', which includes
187 * the H2D common rings as well as the (N-BCMPCIE_H2D_COMMON_MSGRINGS) flowrings
188 *
189 * Here is a sample mapping (based on PCIE Full Dongle Rev5), where
190 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, and
191 * BCMPCIE_COMMON_MSGRINGS = 5, i.e. including the 3 D2H common rings.
192 *
193 * H2D Control Submit RingId = 0 FlowId = 0 reserved never allocated
194 * H2D RxPost Submit RingId = 1 FlowId = 1 reserved never allocated
195 *
196 * D2H Control Complete RingId = 2
197 * D2H Transmit Complete RingId = 3
198 * D2H Receive Complete RingId = 4
199 *
200 * H2D TxPost FLOWRING RingId = 5 FlowId = 2 (1st flowring)
201 * H2D TxPost FLOWRING RingId = 6 FlowId = 3 (2nd flowring)
202 * H2D TxPost FLOWRING RingId = 5 + (N-1) FlowId = (N-1) (Nth flowring)
203 *
204 * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
205 * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
206 *
207 * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
208 * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
209 * FlowId values would be in the range [2..133] and the corresponding
210 * RingId values would be in the range [5..136].
211 *
212 * The flowId allocator may choose to allocate FlowIds:
213 * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
214 * X# of uc flowids in consecutive ranges (per station Id), where X is the
215 * packet's access category (e.g. 4 uc flowids per station).
216 *
217 * CAUTION:
218 * When DMA indices array feature is used, RingId=5, corresponding to the 0th
219 * FLOWRING, will actually use the FlowId as index into the H2D DMA index,
220 * since the FlowId truly represents the index in the H2D DMA indices array.
221 *
222 * Likewise, in the D2H direction, the RingId - BCMPCIE_H2D_COMMON_MSGRINGS,
223 * will represent the index in the D2H DMA indices array.
224 *
225 * +----------------------------------------------------------------------------
226 */
227
228/* First TxPost Flowring Id */
229#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS
230
231/* Determine whether a ringid belongs to a TxPost flowring */
232#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
233 ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
234 (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))
235
236/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
237#define DHD_FLOWID_TO_RINGID(flowid) \
238 (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))
239
240/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
241#define DHD_RINGID_TO_FLOWID(ringid) \
242 (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))
243
244/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array
245 * This may be used for the H2D DMA WR index array or H2D DMA RD index array or
246 * any array of H2D rings.
247 */
248#define DHD_H2D_RING_OFFSET(ringid) \
249 (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))
250
251/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array
252 * This may be used for IFRM.
253 */
254#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
255 ((ringid) - BCMPCIE_COMMON_MSGRINGS)
256
257/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array
258 * This may be used for the D2H DMA WR index array or D2H DMA RD index array or
259 * any array of D2H rings.
260 * d2h debug ring is located at the end, i.e. after all the tx flow rings and h2d debug ring
261 * max_h2d_rings: total number of h2d rings
262 */
263#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
264 ((ringid) > (max_h2d_rings) ? \
265 ((ringid) - max_h2d_rings) : \
266 ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))
267
268/* Convert a D2H DMA Indices Offset to a RingId */
269#define DHD_D2H_RINGID(offset) \
270 ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
271
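/*
 * Worked example of the conversions above, assuming the sample mapping from
 * the comment block (BCMPCIE_H2D_COMMON_MSGRINGS == 2, BCMPCIE_COMMON_MSGRINGS
 * == 5). The function is illustrative only and is not part of the driver.
 */
#if 0	/* illustration only, not compiled */
static void dhd_flowid_ringid_example(void)
{
	/* 1st TxPost flowring: FlowId 2 maps to RingId 5 + (2 - 2) = 5 */
	ASSERT(DHD_FLOWID_TO_RINGID(2) == 5);
	/* ... and back: RingId 5 maps to FlowId 2 + (5 - 5) = 2 */
	ASSERT(DHD_RINGID_TO_FLOWID(5) == 2);
	/* H2D DMA index offset: common rings use their RingId directly,
	 * flowrings use their FlowId (RingId 5 -> offset 2).
	 */
	ASSERT(DHD_H2D_RING_OFFSET(1) == 1);
	ASSERT(DHD_H2D_RING_OFFSET(5) == 2);
	/* D2H DMA index offset 0 corresponds to RingId 2 (D2H Control Complete) */
	ASSERT(DHD_D2H_RINGID(0) == 2);
}
#endif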
272
273#define DHD_DMAH_NULL ((void*)NULL)
274
275/*
276 * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
277 * buffer does not occupy the entire cacheline, and another object is placed
278 * following the DMA-able buffer, data corruption may occur if the DMA-able
279 * buffer is used for DMAing into (e.g. D2H direction), when HW cache coherency
280 * is not available.
281 */
282#if defined(L1_CACHE_BYTES)
283#define DHD_DMA_PAD (L1_CACHE_BYTES)
284#else
285#define DHD_DMA_PAD (128)
286#endif
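/*
 * Minimal sketch of how this pad is applied at allocation time (see
 * dhd_dma_buf_alloc() below): the allocation is extended by one DHD_DMA_PAD
 * only when the requested length does not already end on a pad boundary.
 * The helper and the numbers in the comment are illustrative only.
 */
#if 0	/* illustration only, not compiled */
static uint32 dhd_dma_alloc_len_example(uint32 buf_len)
{
	uint32 dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
	/* with DHD_DMA_PAD == 128: buf_len 2048 -> 2048 (already aligned),
	 * buf_len 2052 -> 2052 + 128 = 2180
	 */
	return (buf_len + dma_pad);
}
#endif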
287
288/* Used in loopback tests */
289typedef struct dhd_dmaxfer {
290 dhd_dma_buf_t srcmem;
291 dhd_dma_buf_t dstmem;
292 uint32 srcdelay;
293 uint32 destdelay;
294 uint32 len;
295 bool in_progress;
296 uint64 start_usec;
297 uint32 d11_lpbk;
298 int status;
299} dhd_dmaxfer_t;
300
301/**
302 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
303 * buffer, the WR and RD indices, ring parameters such as the max number of items
304 * and the length of each item, and other miscellaneous runtime state.
305 * A msgbuf_ring may be used to represent a H2D or D2H common ring or a
306 * H2D TxPost ring as specified in the PCIE FullDongle Spec.
307 * Ring parameters are conveyed to the dongle, which maintains its own peer end
308 * ring state. Depending on whether the DMA Indices feature is supported, the
309 * host will update the WR/RD index in the DMA indices array in host memory or
310 * directly in dongle memory.
311 */
312typedef struct msgbuf_ring {
313 bool inited;
314 uint16 idx; /* ring id */
315 uint16 rd; /* read index */
316 uint16 curr_rd; /* read index for debug */
317 uint16 wr; /* write index */
318 uint16 max_items; /* maximum number of items in ring */
319 uint16 item_len; /* length of each item in the ring */
320 sh_addr_t base_addr; /* LITTLE ENDIAN formatted: base address */
321 dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
322 uint32 seqnum; /* next expected item's sequence number */
323#ifdef TXP_FLUSH_NITEMS
324 void *start_addr;
325 /* # of messages on ring not yet announced to dongle */
326 uint16 pend_items_count;
327#endif /* TXP_FLUSH_NITEMS */
328
329 uint8 ring_type;
330 uint8 n_completion_ids;
331 bool create_pending;
332 uint16 create_req_id;
333 uint8 current_phase;
334 uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
335 uchar name[RING_NAME_MAX_LENGTH];
336 uint32 ring_mem_allocated;
337} msgbuf_ring_t;
338
339#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
340#define DHD_RING_END_VA(ring) \
341 ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
342 (((ring)->max_items - 1) * (ring)->item_len))
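/*
 * A msgbuf ring is a flat array of fixed-size items, so the address of any
 * work item follows from the base VA and the item length. Minimal sketch of
 * that arithmetic; the helper name is illustrative only (the driver derives
 * read/write locations from the RD/WR indices in the same way).
 */
#if 0	/* illustration only, not compiled */
static void *dhd_ring_item_addr_example(msgbuf_ring_t *ring, uint16 idx)
{
	ASSERT(idx < ring->max_items);
	return ((uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)idx * ring->item_len));
}
#endif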
343
344
345
346/* This can be overwritten by module parameter defined in dhd_linux.c
347 * or by dhd iovar h2d_max_txpost.
348 */
349int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;
350
351/** DHD protocol handle. Is an opaque type to other DHD software layers. */
352typedef struct dhd_prot {
353 osl_t *osh; /* OSL handle */
354 uint16 rxbufpost;
355 uint16 max_rxbufpost;
356 uint16 max_eventbufpost;
357 uint16 max_ioctlrespbufpost;
358 uint16 max_tsbufpost;
359 uint16 max_infobufpost;
360 uint16 infobufpost;
361 uint16 cur_event_bufs_posted;
362 uint16 cur_ioctlresp_bufs_posted;
363 uint16 cur_ts_bufs_posted;
364
365 /* Flow control mechanism based on active transmits pending */
366 uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
367 uint16 h2d_max_txpost;
368 uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */
369
370 /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
371 msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
372 msgbuf_ring_t h2dring_rxp_subn; /* H2D RxBuf post ring */
373 msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
374 msgbuf_ring_t d2hring_tx_cpln; /* D2H Tx complete message ring */
375 msgbuf_ring_t d2hring_rx_cpln; /* D2H Rx complete message ring */
376 msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
377 msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */
378
379 msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
380 dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
381 uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */
382
383 uint32 rx_dataoffset;
384
385 dhd_mb_ring_t mb_ring_fn; /* called when dongle needs to be notified of new msg */
386 dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */
387
388 /* ioctl related resources */
389 uint8 ioctl_state;
390 int16 ioctl_status; /* status returned from dongle */
391 uint16 ioctl_resplen;
392 dhd_ioctl_recieved_status_t ioctl_received;
393 uint curr_ioctl_cmd;
394 dhd_dma_buf_t retbuf; /* For holding ioctl response */
395 dhd_dma_buf_t ioctbuf; /* For holding ioctl request */
396
397 dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */
398
399 /* DMA-able arrays for holding WR and RD indices */
400 uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
401 dhd_dma_buf_t h2d_dma_indx_wr_buf; /* Array of H2D WR indices */
402 dhd_dma_buf_t h2d_dma_indx_rd_buf; /* Array of H2D RD indices */
403 dhd_dma_buf_t d2h_dma_indx_wr_buf; /* Array of D2H WR indices */
404 dhd_dma_buf_t d2h_dma_indx_rd_buf; /* Array of D2H RD indices */
405 dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */
406
407 dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */
408
409 dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
410 uint32 flowring_num;
411
412 d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
413 ulong d2h_sync_wait_max; /* max number of wait loops to receive one msg */
414 ulong d2h_sync_wait_tot; /* total wait loops */
415
416 dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */
417
418 uint16 ioctl_seq_no;
419 uint16 data_seq_no;
420 uint16 ioctl_trans_id;
421 void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
422 void *pktid_rx_map; /* pktid map for rx path */
423 void *pktid_tx_map; /* pktid map for tx path */
424 void *rx_lock; /* rx pktid map and rings access protection */
425 bool metadata_dbg;
426 void *pktid_map_handle_ioctl;
427
428 /* Applications/utilities can read tx and rx metadata using IOVARs */
429 uint16 rx_metadata_offset;
430 uint16 tx_metadata_offset;
431
432
433#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
434 /* Host's soft doorbell configuration */
435 bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
436#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
437
438 /* Work Queues to be used by the producer and the consumer, and threshold
439 * when the WRITE index must be synced to consumer's workq
440 */
441#if defined(DHD_LB_TXC)
442 uint32 tx_compl_prod_sync ____cacheline_aligned;
443 bcm_workq_t tx_compl_prod, tx_compl_cons;
444#endif /* DHD_LB_TXC */
445#if defined(DHD_LB_RXC)
446 uint32 rx_compl_prod_sync ____cacheline_aligned;
447 bcm_workq_t rx_compl_prod, rx_compl_cons;
448#endif /* DHD_LB_RXC */
449
450 dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */
451
452 uint32 host_ipc_version; /* Host supported IPC rev */
453 uint32 device_ipc_version; /* FW supported IPC rev */
454 uint32 active_ipc_version; /* Host advertised IPC rev */
455 dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
456 bool hostts_req_buf_inuse;
457 bool rx_ts_log_enabled;
458 bool tx_ts_log_enabled;
459} dhd_prot_t;
460
461extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);
462
463/* Convert a dmaaddr_t to a base_addr with htol operations */
464static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);
465
466/* APIs for managing a DMA-able buffer */
467static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
468static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
469static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
470static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
471
472/* msgbuf ring management */
473static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
474 const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
475static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
476static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
477static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
478static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);
479
480/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
481static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
482static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
483static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);
484
485/* Fetch and Release a flowring msgbuf_ring from flowring pool */
486static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
487 uint16 flowid);
488/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */
489
490/* Producer: Allocate space in a msgbuf ring */
491static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
492 uint16 nitems, uint16 *alloced, bool exactly_nitems);
493static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
494 uint16 *alloced, bool exactly_nitems);
495
496/* Consumer: Determine the location where the next message may be consumed */
497static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
498 uint32 *available_len);
499
500/* Producer (WR index update) or Consumer (RD index update) indication */
501static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
502 void *p, uint16 len);
503static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);
504
505static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
506 dhd_dma_buf_t *dma_buf, uint32 bufsz);
507
508/* Set/Get a RD or WR index in the array of indices */
509/* See also: dhd_prot_dma_indx_init() */
510void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
511 uint16 ringid);
512static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);
513
514/* Locate a packet given a pktid */
515static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
516 bool free_pktid);
517/* Locate a packet given a PktId and free it. */
518static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);
519
520static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
521 void *buf, uint len, uint8 action);
522static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
523 void *buf, uint len, uint8 action);
524static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
525static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
526 void *buf, int ifidx);
527
528/* Post buffers for Rx, control ioctl response and events */
529static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
530static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
531static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
532static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
533static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
534static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);
535
536static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);
537
538
539/* D2H Message handling */
540static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);
541
542/* D2H Message handlers */
543static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
544static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
545static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
546static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
547static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
548static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
549static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);
550
551/* Loopback test with dongle */
552static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
553static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
554 uint destdelay, dhd_dmaxfer_t *dma);
555static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);
556
557/* Flowring management communication with dongle */
558static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
559static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
560static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
561static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
562static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);
563
564/* Monitor Mode */
565#ifdef WL_MONITOR
566extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
567extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
568#endif /* WL_MONITOR */
569
570/* Configure a soft doorbell per D2H ring */
571static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
572static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
573static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
574static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
575static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
576static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
577static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
578static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);
579
580typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);
581
582/** callback functions for messages generated by the dongle */
583#define MSG_TYPE_INVALID 0
584
585static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
586 dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
587 dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
588 dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
589 NULL,
590 dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
591 NULL,
592 dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
593 NULL,
594 dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
595 NULL,
596 dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
597 NULL,
598 dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
599 NULL,
600 dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
601 NULL,
602 dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
603 NULL,
604 NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
605 NULL,
606 dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
607 NULL, /* MSG_TYPE_FLOW_RING_RESUME */
608 dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
609 NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
610 dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
611 NULL, /* MSG_TYPE_INFO_BUF_POST */
612 dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
613 NULL, /* MSG_TYPE_H2D_RING_CREATE */
614 NULL, /* MSG_TYPE_D2H_RING_CREATE */
615 dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
616 dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
617 NULL, /* MSG_TYPE_H2D_RING_CONFIG */
618 NULL, /* MSG_TYPE_D2H_RING_CONFIG */
619 NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
620 dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
621 NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
622 dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
623 NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
624 NULL, /* MSG_TYPE_HOSTTIMSTAMP */
625 dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
626 dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
627};
628
629
630#ifdef DHD_RX_CHAINING
631
632#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
633 (dhd_wet_chainable(dhd) && \
634 dhd_rx_pkt_chainable((dhd), (ifidx)) && \
635 !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
636 !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
637 !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
638 !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
639 ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
640 ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
641 (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))
642
643static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
644static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
645static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);
646
647#define DHD_PKT_CTF_MAX_CHAIN_LEN 64
648
649#endif /* DHD_RX_CHAINING */
650
651#define DHD_LPBKDTDUMP_ON() (dhd_msg_level & DHD_LPBKDTDUMP_VAL)
652
653static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);
654
655/**
656 * D2H DMA to completion callback handlers. Based on the mode advertised by the
657 * dongle through the PCIE shared region, the appropriate callback will be
658 * registered in the proto layer to be invoked prior to processing any message
659 * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
660 * does not require host participation, then a noop callback handler will be
661 * bound that simply returns the msg_type.
662 */
663static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
664 uint32 tries, volatile uchar *msg, int msglen);
665static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
666 volatile cmn_msg_hdr_t *msg, int msglen);
667static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
668 volatile cmn_msg_hdr_t *msg, int msglen);
669static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
670 volatile cmn_msg_hdr_t *msg, int msglen);
671static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
672static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
673static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
674static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);
675
676bool
677dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
678{
679 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
680 uint16 rd, wr;
681 bool ret;
682
683 if (dhd->dma_d2h_ring_upd_support) {
684 wr = flow_ring->wr;
685 } else {
686 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
687 }
688 if (dhd->dma_h2d_ring_upd_support) {
689 rd = flow_ring->rd;
690 } else {
691 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
692 }
693 ret = (wr == rd) ? TRUE : FALSE;
694 return ret;
695}
696uint16
697dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
698{
699 return (uint16)h2d_max_txpost;
700}
701void
702dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
703{
704 h2d_max_txpost = max_txpost;
705}
706/**
707 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
708 * not completed, a livelock condition occurs. The host averts this livelock by
709 * dropping the message and moving on to the next one. The dropped message can
710 * lead to a packet leak, or to something worse if it happens to be a control
711 * response.
712 * Here we log this condition. One may choose to reboot the dongle.
713 *
714 */
715static void
716dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
717 volatile uchar *msg, int msglen)
718{
719 uint32 ring_seqnum = ring->seqnum;
720 DHD_ERROR((
721 "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
722 " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
723 dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum % D2H_EPOCH_MODULO, tries,
724 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
725 ring->dma_buf.va, msg, ring->curr_rd));
726 prhex("D2H MsgBuf Failure", (volatile uchar *)msg, msglen);
727
728 dhd_bus_dump_console_buffer(dhd->bus);
729 dhd_prot_debug_info_print(dhd);
730
731#ifdef DHD_FW_COREDUMP
732 if (dhd->memdump_enabled) {
733 /* collect core dump */
734 dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
735 dhd_bus_mem_dump(dhd);
736 }
737#endif /* DHD_FW_COREDUMP */
738
739 dhd_schedule_reset(dhd);
740
741#ifdef SUPPORT_LINKDOWN_RECOVERY
742#ifdef CONFIG_ARCH_MSM
743 dhd->bus->no_cfg_restore = 1;
744#endif /* CONFIG_ARCH_MSM */
745 dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
746 dhd_os_send_hang_message(dhd);
747#endif /* SUPPORT_LINKDOWN_RECOVERY */
748}
749
750/**
751 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
752 * mode. Sequence number is always in the last word of a message.
753 */
754static uint8 BCMFASTPATH
755dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
756 volatile cmn_msg_hdr_t *msg, int msglen)
757{
758 uint32 tries;
759 uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
760 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
761 volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
762 dhd_prot_t *prot = dhd->prot;
763 uint32 msg_seqnum;
764 uint32 step = 0;
765 uint32 delay = PCIE_D2H_SYNC_DELAY;
766 uint32 total_tries = 0;
767
768 ASSERT(msglen == ring->item_len);
769
770 BCM_REFERENCE(delay);
771 /*
772 * For retries we have to use some sort of stepper algorithm.
773 * We see that every time the Dongle comes out of the D3
774 * Cold state, the first D2H mem2mem DMA takes more time to
775 * complete, leading to livelock issues.
776 *
777 * Case 1 - Apart from the Host CPU, some other bus master is
778 * accessing the DDR port, probably a page close to the ring,
779 * so PCIE does not get a chance to update the memory.
780 * Solution - Increase the number of tries.
781 *
782 * Case 2 - The 50usec delay given by the Host CPU is not
783 * sufficient for the PCIe RC to start its work.
784 * In this case the breathing time of 50usec given by
785 * the Host CPU is not sufficient.
786 * Solution: Increase the delay in a stepper fashion.
787 * This is done to ensure that no unwanted extra delay is
788 * introduced in normal conditions.
789 */
790 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
791 for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
792 msg_seqnum = *marker;
793 if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma up to last word done */
794 ring->seqnum++; /* next expected sequence number */
795 goto dma_completed;
796 }
797
798 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
799
800 if (total_tries > prot->d2h_sync_wait_max)
801 prot->d2h_sync_wait_max = total_tries;
802
803 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
804 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
805 OSL_DELAY(delay * step); /* Add stepper delay */
806
807 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
808 } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
809
810 dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
811 (volatile uchar *) msg, msglen);
812
813 ring->seqnum++; /* skip this message ... leak of a pktid */
814 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
815
816dma_completed:
817
818 prot->d2h_sync_wait_tot += tries;
819 return msg->msg_type;
820}
821
822/**
823 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
824 * mode. The xorcsum is placed in the last word of a message. Dongle will also
825 * place a seqnum in the epoch field of the cmn_msg_hdr.
826 */
827static uint8 BCMFASTPATH
828dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
829 volatile cmn_msg_hdr_t *msg, int msglen)
830{
831 uint32 tries;
832 uint32 prot_checksum = 0; /* computed checksum */
833 int num_words = msglen / sizeof(uint32); /* num of 32bit words */
834 uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
835 dhd_prot_t *prot = dhd->prot;
836 uint32 step = 0;
837 uint32 delay = PCIE_D2H_SYNC_DELAY;
838 uint32 total_tries = 0;
839
840 ASSERT(msglen == ring->item_len);
841
842 BCM_REFERENCE(delay);
843 /*
844 * For retries we have to use some sort of stepper algorithm.
845 * We see that every time the Dongle comes out of the D3
846 * Cold state, the first D2H mem2mem DMA takes more time to
847 * complete, leading to livelock issues.
848 *
849 * Case 1 - Apart from the Host CPU, some other bus master is
850 * accessing the DDR port, probably a page close to the ring,
851 * so PCIE does not get a chance to update the memory.
852 * Solution - Increase the number of tries.
853 *
854 * Case 2 - The 50usec delay given by the Host CPU is not
855 * sufficient for the PCIe RC to start its work.
856 * In this case the breathing time of 50usec given by
857 * the Host CPU is not sufficient.
858 * Solution: Increase the delay in a stepper fashion.
859 * This is done to ensure that no unwanted extra delay is
860 * introduced in normal conditions.
861 */
862 for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
863 for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
864 prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
865 if (prot_checksum == 0U) { /* checksum is OK */
866 if (msg->epoch == ring_seqnum) {
867 ring->seqnum++; /* next expected sequence number */
868 goto dma_completed;
869 }
870 }
871
872 total_tries = ((step-1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;
873
874 if (total_tries > prot->d2h_sync_wait_max)
875 prot->d2h_sync_wait_max = total_tries;
876
877 OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
878 OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
879 OSL_DELAY(delay * step); /* Add stepper delay */
880
881 } /* for PCIE_D2H_SYNC_WAIT_TRIES */
882 } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */
883
884 DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
885 dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
886 (volatile uchar *) msg, msglen);
887
888 ring->seqnum++; /* skip this message ... leak of a pktid */
889 return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */
890
891dma_completed:
892
893 prot->d2h_sync_wait_tot += tries;
894 return msg->msg_type;
895}
896
897/**
898 * dhd_prot_d2h_sync_none - Dongle ensures that the DMA will complete and the
899 * host does not need to sync. This noop sync handler will be bound when the dongle
900 * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
901 */
902static uint8 BCMFASTPATH
903dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
904 volatile cmn_msg_hdr_t *msg, int msglen)
905{
906 return msg->msg_type;
907}
908
909INLINE void
910dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
911{
912 /* To synchronize with the previous memory operations call wmb() */
913 OSL_SMP_WMB();
914 dhd->prot->ioctl_received = reason;
915 /* Call another wmb() to make sure the updated value is visible before waking up the waiter */
916 OSL_SMP_WMB();
917 dhd_os_ioctl_resp_wake(dhd);
918}
919
920/**
921 * dhd_prot_d2h_sync_init - Set up the host side DMA sync mode based on what
922 * the dongle advertises.
923 */
924static void
925dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
926{
927 dhd_prot_t *prot = dhd->prot;
928 prot->d2h_sync_wait_max = 0UL;
929 prot->d2h_sync_wait_tot = 0UL;
930
931 prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
932 prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
933
934 prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
935 prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
936
937 prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
938 prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;
939
940 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
941 prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
942 DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
943 } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
944 prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
945 DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
946 } else {
947 prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
948 DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
949 }
950}
951
952/**
953 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
954 */
955static void
956dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
957{
958 dhd_prot_t *prot = dhd->prot;
959 prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
960 prot->h2dring_rxp_subn.current_phase = 0;
961
962 prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
963 prot->h2dring_ctrl_subn.current_phase = 0;
964}
965
966/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */
967
968
969/*
970 * +---------------------------------------------------------------------------+
971 * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
972 * virtual and physical addresses, the buffer length and the DMA handler.
973 * A secdma handler is also included in the dhd_dma_buf object.
974 * +---------------------------------------------------------------------------+
975 */
976
977static INLINE void
978dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
979{
980 base_addr->low_addr = htol32(PHYSADDRLO(pa));
981 base_addr->high_addr = htol32(PHYSADDRHI(pa));
982}
983
984
985/**
986 * dhd_dma_buf_audit - Perform sanity checks on a DHD DMA buffer.
987 */
988static int
989dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
990{
991 uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
992 ASSERT(dma_buf);
993 pa_lowaddr = PHYSADDRLO(dma_buf->pa);
994 ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
995 ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
996 ASSERT(dma_buf->len != 0);
997
998 /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
999 end = (pa_lowaddr + dma_buf->len); /* end address */
1000
1001 if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
1002 DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
1003 __FUNCTION__, pa_lowaddr, dma_buf->len));
1004 return BCME_ERROR;
1005 }
1006
1007 return BCME_OK;
1008}
1009
1010/**
1011 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
1012 * returns BCME_OK=0 on success
1013 * returns non-zero negative error value on failure.
1014 */
1015static int
1016dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
1017{
1018 uint32 dma_pad = 0;
1019 osl_t *osh = dhd->osh;
1020 uint16 dma_align = DMA_ALIGN_LEN;
1021
1022
1023 ASSERT(dma_buf != NULL);
1024 ASSERT(dma_buf->va == NULL);
1025 ASSERT(dma_buf->len == 0);
1026
1027 /* Pad the buffer length by one extra cacheline size.
1028 * Required for D2H direction.
1029 */
1030 dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
1031 dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
1032 dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);
1033
1034 if (dma_buf->va == NULL) {
1035 DHD_ERROR(("%s: buf_len %d, no memory available\n",
1036 __FUNCTION__, buf_len));
1037 return BCME_NOMEM;
1038 }
1039
1040 dma_buf->len = buf_len; /* not including padded len */
1041
1042 if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
1043 dhd_dma_buf_free(dhd, dma_buf);
1044 return BCME_ERROR;
1045 }
1046
1047 dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */
1048
1049 return BCME_OK;
1050}
1051
1052/**
1053 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
1054 */
1055static void
1056dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1057{
1058 if ((dma_buf == NULL) || (dma_buf->va == NULL))
1059 return;
1060
1061 (void)dhd_dma_buf_audit(dhd, dma_buf);
1062
1063 /* Zero out the entire buffer and cache flush */
1064 memset((void*)dma_buf->va, 0, dma_buf->len);
1065 OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
1066}
1067
1068/**
1069 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
1070 * dhd_dma_buf_alloc().
1071 */
1072static void
1073dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
1074{
1075 osl_t *osh = dhd->osh;
1076
1077 ASSERT(dma_buf);
1078
1079 if (dma_buf->va == NULL)
1080 return; /* Allow for free invocation, when alloc failed */
1081
1082 /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
1083 (void)dhd_dma_buf_audit(dhd, dma_buf);
1084
1085 /* dma buffer may have been padded at allocation */
1086 DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
1087 dma_buf->pa, dma_buf->dmah);
1088
1089 memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
1090}
1091
1092/**
1093 * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
1094 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
1095 */
1096void
1097dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
1098 void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
1099{
1100 dhd_dma_buf_t *dma_buf;
1101 ASSERT(dhd_dma_buf);
1102 dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
1103 dma_buf->va = va;
1104 dma_buf->len = len;
1105 dma_buf->pa = pa;
1106 dma_buf->dmah = dmah;
1107 dma_buf->secdma = secdma;
1108
1109 /* Audit user defined configuration */
1110 (void)dhd_dma_buf_audit(dhd, dma_buf);
1111}
1112
1113/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */
1114
1115/*
1116 * +---------------------------------------------------------------------------+
1117 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
1118 * Its main purpose is to save memory on the dongle; it has other purposes as well.
1119 * The packet id map, also includes storage for some packet parameters that
1120 * may be saved. A native packet pointer along with the parameters may be saved
1121 * and a unique 32bit pkt id will be returned. Later, the saved packet pointer
1122 * and the metadata may be retrieved using the previously allocated packet id.
1123 * +---------------------------------------------------------------------------+
1124 */
1125#define DHD_PCIE_PKTID
1126#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
1127#define MAX_RX_PKTID (1024)
1128#define MAX_TX_PKTID (3072 * 2)
1129
1130/* On Router, the pktptr serves as a pktid. */
1131
1132
1133#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
1134#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
1135#endif
1136
1137/* Enum for marking the buffer color based on usage */
1138typedef enum dhd_pkttype {
1139 PKTTYPE_DATA_TX = 0,
1140 PKTTYPE_DATA_RX,
1141 PKTTYPE_IOCTL_RX,
1142 PKTTYPE_EVENT_RX,
1143 PKTTYPE_INFO_RX,
1144 /* No pkttype check in dhd_prot_packet_free; covers the case where a pktid was reserved but no space was available */
1145 PKTTYPE_NO_CHECK,
1146 PKTTYPE_TSBUF_RX
1147} dhd_pkttype_t;
1148
1149#define DHD_PKTID_INVALID (0U)
1150#define DHD_IOCTL_REQ_PKTID (0xFFFE)
1151#define DHD_FAKE_PKTID (0xFACE)
1152#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
1153#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
1154#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB
1155
1156#define IS_FLOWRING(ring) \
1157 ((strncmp(ring->name, "h2dflr", sizeof("h2dflr"))) == (0))
1158
1159typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */
1160
1161/* Construct a packet id mapping table, returning an opaque map handle */
1162static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);
1163
1164/* Destroy a packet id mapping table, freeing all packets active in the table */
1165static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);
1166
1167#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
1168#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
1169#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
1170#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map))
1171
1172#ifdef MACOSX_DHD
1173#undef DHD_PCIE_PKTID
1174#define DHD_PCIE_PKTID 1
1175#endif /* MACOSX_DHD */
1176
1177#if defined(DHD_PCIE_PKTID)
1178#if defined(MACOSX_DHD) || defined(DHD_EFI)
1179#define IOCTLRESP_USE_CONSTMEM
1180static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1181static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
1182#endif
1183
1184/* Determine number of pktids that are available */
1185static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);
1186
1187/* Allocate a unique pktid against which a pkt and some metadata is saved */
1188static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1189 void *pkt, dhd_pkttype_t pkttype);
1190static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1191 void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
1192 void *dmah, void *secdma, dhd_pkttype_t pkttype);
1193static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1194 void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
1195 void *dmah, void *secdma, dhd_pkttype_t pkttype);
1196
1197/* Return an allocated pktid, retrieving previously saved pkt and metadata */
1198static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
1199 uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
1200 void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);
1201
1202/*
1203 * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
1204 *
1205 * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
1206 * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
1207 *
1208 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
1209 * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
1210 */
1211#if defined(DHD_PKTID_AUDIT_ENABLED)
1212#define USE_DHD_PKTID_AUDIT_LOCK 1
1213/* Audit the pktidmap allocator */
1214/* #define DHD_PKTID_AUDIT_MAP */
1215
1216/* Audit the pktid during production/consumption of workitems */
1217#define DHD_PKTID_AUDIT_RING
1218
1219#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
1220#error "May only enable audit of MAP or RING, one at a time."
1221#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */
1222
1223#define DHD_DUPLICATE_ALLOC 1
1224#define DHD_DUPLICATE_FREE 2
1225#define DHD_TEST_IS_ALLOC 3
1226#define DHD_TEST_IS_FREE 4
1227
1228#ifdef USE_DHD_PKTID_AUDIT_LOCK
1229#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
1230#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1231#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
1232#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
1233#else
1234#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
1235#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
1236#define DHD_PKTID_AUDIT_LOCK(lock) 0
1237#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
1238#endif /* !USE_DHD_PKTID_AUDIT_LOCK */
1239
1240#endif /* DHD_PKTID_AUDIT_ENABLED */
1241
1242/* #define USE_DHD_PKTID_LOCK 1 */
1243
1244#ifdef USE_DHD_PKTID_LOCK
1245#define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
1246#define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
1247#define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock)
1248#define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
1249#else
1250#define DHD_PKTID_LOCK_INIT(osh) (void *)(1)
1251#define DHD_PKTID_LOCK_DEINIT(osh, lock) \
1252 do { \
1253 BCM_REFERENCE(osh); \
1254 BCM_REFERENCE(lock); \
1255 } while (0)
1256#define DHD_PKTID_LOCK(lock) 0
1257#define DHD_PKTID_UNLOCK(lock, flags) \
1258 do { \
1259 BCM_REFERENCE(lock); \
1260 BCM_REFERENCE(flags); \
1261 } while (0)
1262#endif /* !USE_DHD_PKTID_LOCK */
1263
1264typedef enum dhd_locker_state {
1265 LOCKER_IS_FREE,
1266 LOCKER_IS_BUSY,
1267 LOCKER_IS_RSVD
1268} dhd_locker_state_t;
1269
1270/* Packet metadata saved in packet id mapper */
1271
1272typedef struct dhd_pktid_item {
1273 dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */
1274 uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */
1275 dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */
1276 uint16 len; /* length of mapped packet's buffer */
1277 void *pkt; /* opaque native pointer to a packet */
1278 dmaaddr_t pa; /* physical address of mapped packet's buffer */
1279 void *dmah; /* handle to OS specific DMA map */
1280 void *secdma;
1281} dhd_pktid_item_t;
1282
1283typedef uint32 dhd_pktid_key_t;
1284
1285typedef struct dhd_pktid_map {
1286 uint32 items; /* total items in map */
1287 uint32 avail; /* total available items */
1288 int failures; /* lockers unavailable count */
1289#if defined(DHD_PKTID_AUDIT_ENABLED)
1290 void *pktid_audit_lock;
1291 struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */
1292#endif /* DHD_PKTID_AUDIT_ENABLED */
1293 dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */
1294 dhd_pktid_item_t lockers[0]; /* metadata storage */
1295} dhd_pktid_map_t;
1296
1297/*
1298 * PktId (Locker) #0 is never allocated and is considered invalid.
1299 *
1300 * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a
1301 * depleted pktid pool and must not be used by the caller.
1302 *
1303 * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID.
1304 */
1305
1306#define DHD_PKTID_FREE_LOCKER (FALSE)
1307#define DHD_PKTID_RSV_LOCKER (TRUE)
1308
1309#define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t))
1310#define DHD_PKIDMAP_ITEMS(items) (items)
1311#define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \
1312 (DHD_PKTID_ITEM_SZ * ((items) + 1)))
1313#define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1))
1314
1315#define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map))
1316
1317/* Convert a packet to a pktid, and save pkt pointer in busy locker */
1318#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \
1319 dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype))
1320/* Reuse a previously reserved locker to save packet params */
1321#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \
1322 dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \
1323 (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1324 (dhd_pkttype_t)(pkttype))
1325/* Convert a packet to a pktid, and save packet params in locker */
1326#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \
1327 dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \
1328 (uint8)(dir), (void *)(dmah), (void *)(secdma), \
1329 (dhd_pkttype_t)(pkttype))
1330
1331/* Convert pktid to a packet, and free the locker */
1332#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1333 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1334 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1335 (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER)
1336
1337/* Convert the pktid to a packet, empty locker, but keep it reserved */
1338#define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
1339 dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \
1340 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
1341 (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER)
1342
1343#define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map)
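/*
 * Hedged sketch of the pktid round trip built from the macros above: a native
 * packet plus its DMA parameters are saved and a unique 32bit pktid is
 * returned for the work item; the completion path then uses that pktid to
 * recover the packet and free the locker. Names and the omission of error
 * handling are illustrative only.
 */
#if 0	/* illustration only, not compiled */
static void dhd_pktid_roundtrip_example(dhd_pub_t *dhd, void *pkt,
	dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma)
{
	dhd_prot_t *prot = dhd->prot;
	uint32 pktid;
	void *ret_pkt;

	/* Producer side: save <pkt, pa, len, ...> and obtain a unique pktid */
	pktid = DHD_NATIVE_TO_PKTID(dhd, prot->pktid_tx_map, pkt, pa, len,
		dir, dmah, secdma, PKTTYPE_DATA_TX);
	if (pktid == DHD_PKTID_INVALID)
		return;	/* pktid pool depleted; the work item must not be posted */

	/* Completion side: the pktid carried in the work item recovers the
	 * packet and its saved DMA parameters, freeing the locker for reuse.
	 */
	ret_pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_tx_map, pktid, pa, len,
		dmah, secdma, PKTTYPE_DATA_TX);
	ASSERT(ret_pkt == pkt);
}
#endif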
1344
1345#if defined(DHD_PKTID_AUDIT_ENABLED)
1346/**
1347* dhd_pktid_audit - Use the mwbmap to audit validity of a pktid.
1348*/
1349static int
1350dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid,
1351 const int test_for, const char *errmsg)
1352{
1353#define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: "
1354 struct bcm_mwbmap *handle;
1355 uint32 flags;
1356 bool ignore_audit;
1357
1358 if (pktid_map == (dhd_pktid_map_t *)NULL) {
1359 DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg));
1360 return BCME_OK;
1361 }
1362
1363 flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock);
1364
1365 handle = pktid_map->pktid_audit;
1366 if (handle == (struct bcm_mwbmap *)NULL) {
1367 DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg));
1368 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1369 return BCME_OK;
1370 }
1371
1372 /* Exclude special pktids from audit */
1373 ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) | (pktid == DHD_FAKE_PKTID);
1374 if (ignore_audit) {
1375 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1376 return BCME_OK;
1377 }
1378
1379 if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) {
1380 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid));
1381 /* lock is released in "error" */
1382 goto error;
1383 }
1384
1385 /* Perform audit */
1386 switch (test_for) {
1387 case DHD_DUPLICATE_ALLOC:
1388 if (!bcm_mwbmap_isfree(handle, pktid)) {
1389 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n",
1390 errmsg, pktid));
1391 goto error;
1392 }
1393 bcm_mwbmap_force(handle, pktid);
1394 break;
1395
1396 case DHD_DUPLICATE_FREE:
1397 if (bcm_mwbmap_isfree(handle, pktid)) {
1398 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n",
1399 errmsg, pktid));
1400 goto error;
1401 }
1402 bcm_mwbmap_free(handle, pktid);
1403 break;
1404
1405 case DHD_TEST_IS_ALLOC:
1406 if (bcm_mwbmap_isfree(handle, pktid)) {
1407 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n",
1408 errmsg, pktid));
1409 goto error;
1410 }
1411 break;
1412
1413 case DHD_TEST_IS_FREE:
1414 if (!bcm_mwbmap_isfree(handle, pktid)) {
1415 DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free",
1416 errmsg, pktid));
1417 goto error;
1418 }
1419 break;
1420
1421 default:
1422 goto error;
1423 }
1424
1425 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1426 return BCME_OK;
1427
1428error:
1429
1430 DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags);
1431 /* May insert any trap mechanism here ! */
1432 dhd_pktid_error_handler(dhd);
1433
1434 return BCME_ERROR;
1435}
1436
1437#define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \
1438 dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__)
1439
1440static int
1441dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid,
1442 const int test_for, void *msg, uint32 msg_len, const char * func)
1443{
1444 int ret = 0;
1445 ret = DHD_PKTID_AUDIT(dhdp, map, pktid, test_for);
1446 if (ret == BCME_ERROR) {
1447 prhex(func, (uchar *)msg, msg_len);
1448 }
1449 return ret;
1450}
1451#define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \
1452 dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \
1453 (pktid), (test_for), msg, msg_len, __FUNCTION__)
1454
1455#endif /* DHD_PKTID_AUDIT_ENABLED */
1456
1457
1458/**
1459 * +---------------------------------------------------------------------------+
1460 * Packet to Packet Id mapper using a <numbered_key, locker> paradigm.
1461 *
1462 * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID].
1463 *
1464 * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique
1465 * packet id is returned. This unique packet id may be used to retrieve the
1466 * previously saved packet metadata, using dhd_pktid_map_free(). On invocation
1467 * of dhd_pktid_map_free(), the unique packet id is essentially freed. A
1468 * subsequent call to dhd_pktid_map_alloc() may reuse this packet id.
1469 *
1470 * Implementation Note:
1471 * Convert this into a <key,locker> abstraction and place into bcmutils !
1472 * Locker abstraction should treat contents as opaque storage, and a
1473 * callback should be registered to handle busy lockers on destructor.
1474 *
1475 * +---------------------------------------------------------------------------+
1476 */
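/*
 * Illustrative sketch only, kept out of the build via #if 0: how a caller
 * typically pairs the alloc/free macros above around a DMA-mapped tx packet.
 * The function and parameter names below are hypothetical and exist only for
 * this example.
 */
#if 0
static void
example_pktid_locker_usage(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
	void *pkt, dmaaddr_t pa, uint32 len, void *dmah, void *secdma)
{
	uint32 pktid;

	/* native packet -> pktid; dma params are parked in the locker */
	pktid = DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, DMA_TX,
		dmah, secdma, PKTTYPE_DATA_TX);
	if (pktid == DHD_PKTID_INVALID)
		return; /* pktid pool depleted; caller must back off */

	/* ... pktid travels to the dongle in a workitem; on completion ... */

	/* pktid -> native packet; the locker is freed for reuse */
	pkt = DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len,
		dmah, secdma, PKTTYPE_DATA_TX);
}
#endif /* 0 */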
1477
1478/** Allocate and initialize a mapper of num_items <numbered_key, locker> */
1479
1480static dhd_pktid_map_handle_t *
1481dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
1482{
1483 void* osh;
1484 uint32 nkey;
1485 dhd_pktid_map_t *map;
1486 uint32 dhd_pktid_map_sz;
1487 uint32 map_items;
1488 uint32 map_keys_sz;
1489 osh = dhd->osh;
1490
1491 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items);
1492
1493 map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz);
1494 if (map == NULL) {
1495		DHD_ERROR(("%s:%d: VMALLOC failed for size %d\n",
1496 __FUNCTION__, __LINE__, dhd_pktid_map_sz));
1497 return (dhd_pktid_map_handle_t *)NULL;
1498 }
1499
1500 /* Initialize the lock that protects this structure */
1501 map->items = num_items;
1502 map->avail = num_items;
1503
1504 map_items = DHD_PKIDMAP_ITEMS(map->items);
1505
1506 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1507 map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz);
1508 if (map->keys == NULL) {
1509 DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n",
1510 __FUNCTION__, __LINE__, map_keys_sz));
1511 goto error;
1512 }
1513
1514#if defined(DHD_PKTID_AUDIT_ENABLED)
1515 /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */
1516 map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1);
1517 if (map->pktid_audit == (struct bcm_mwbmap *)NULL) {
1518 DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__));
1519 goto error;
1520 } else {
1521 DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n",
1522 __FUNCTION__, __LINE__, map_items + 1));
1523 }
1524 map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh);
1525#endif /* DHD_PKTID_AUDIT_ENABLED */
1526
1527 for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */
1528 map->keys[nkey] = nkey; /* populate with unique keys */
1529 map->lockers[nkey].state = LOCKER_IS_FREE;
1530 map->lockers[nkey].pkt = NULL; /* bzero: redundant */
1531 map->lockers[nkey].len = 0;
1532 }
1533
1534 /* Reserve pktid #0, i.e. DHD_PKTID_INVALID to be inuse */
1535 map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */
1536 map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */
1537 map->lockers[DHD_PKTID_INVALID].len = 0;
1538
1539#if defined(DHD_PKTID_AUDIT_ENABLED)
1540 /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */
1541 bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID);
1542#endif /* DHD_PKTID_AUDIT_ENABLED */
1543
1544 return (dhd_pktid_map_handle_t *)map; /* opaque handle */
1545
1546error:
1547 if (map) {
1548#if defined(DHD_PKTID_AUDIT_ENABLED)
1549 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1550 bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */
1551 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1552 if (map->pktid_audit_lock)
1553 DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock);
1554 }
1555#endif /* DHD_PKTID_AUDIT_ENABLED */
1556 if (map->keys) {
1557 MFREE(osh, map->keys, map_keys_sz);
1558 }
1559 VMFREE(osh, map, dhd_pktid_map_sz);
1560 }
1561 return (dhd_pktid_map_handle_t *)NULL;
1562}
1563
1564/**
1565 * Retrieve all allocated keys and free all <numbered_key, locker>.
1566 * Freeing implies: unmapping the buffers and freeing the native packet.
1567 * This could have been a callback registered with the pktid mapper.
1568 */
1569static void
1570dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1571{
1572 void *osh;
1573 uint32 nkey;
1574 dhd_pktid_map_t *map;
1575 dhd_pktid_item_t *locker;
1576 uint32 map_items;
1577 uint32 flags;
1578 bool data_tx = FALSE;
1579
1580 map = (dhd_pktid_map_t *)handle;
1581 DHD_GENERAL_LOCK(dhd, flags);
1582 osh = dhd->osh;
1583
1584 map_items = DHD_PKIDMAP_ITEMS(map->items);
1585 /* skip reserved KEY #0, and start from 1 */
1586
1587 for (nkey = 1; nkey <= map_items; nkey++) {
1588 if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
1589 locker = &map->lockers[nkey];
1590 locker->state = LOCKER_IS_FREE;
1591 data_tx = (locker->pkttype == PKTTYPE_DATA_TX);
1592 if (data_tx) {
1593 dhd->prot->active_tx_count--;
1594 }
1595
1596#ifdef DHD_PKTID_AUDIT_RING
1597 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1598#endif /* DHD_PKTID_AUDIT_RING */
1599
1600 {
1601 if (SECURE_DMA_ENAB(dhd->osh))
1602 SECURE_DMA_UNMAP(osh, locker->pa,
1603 locker->len, locker->dir, 0,
1604 locker->dmah, locker->secdma, 0);
1605 else
1606 DMA_UNMAP(osh, locker->pa, locker->len,
1607 locker->dir, 0, locker->dmah);
1608 }
1609 dhd_prot_packet_free(dhd, (ulong*)locker->pkt,
1610 locker->pkttype, data_tx);
1611 }
1612 else {
1613#ifdef DHD_PKTID_AUDIT_RING
1614 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1615#endif /* DHD_PKTID_AUDIT_RING */
1616 }
1617 map->keys[nkey] = nkey; /* populate with unique keys */
1618 }
1619
1620 map->avail = map_items;
1621 memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
1622 DHD_GENERAL_UNLOCK(dhd, flags);
1623}
1624
1625#ifdef IOCTLRESP_USE_CONSTMEM
1626/** Called in detach scenario. Releasing IOCTL buffers. */
1627static void
1628dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1629{
1630 uint32 nkey;
1631 dhd_pktid_map_t *map;
1632 dhd_pktid_item_t *locker;
1633 uint32 map_items;
1634 uint32 flags;
1635
1636 map = (dhd_pktid_map_t *)handle;
1637 DHD_GENERAL_LOCK(dhd, flags);
1638
1639 map_items = DHD_PKIDMAP_ITEMS(map->items);
1640 /* skip reserved KEY #0, and start from 1 */
1641 for (nkey = 1; nkey <= map_items; nkey++) {
1642 if (map->lockers[nkey].state == LOCKER_IS_BUSY) {
1643 dhd_dma_buf_t retbuf;
1644
1645#ifdef DHD_PKTID_AUDIT_RING
1646 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */
1647#endif /* DHD_PKTID_AUDIT_RING */
1648
1649 locker = &map->lockers[nkey];
1650 retbuf.va = locker->pkt;
1651 retbuf.len = locker->len;
1652 retbuf.pa = locker->pa;
1653 retbuf.dmah = locker->dmah;
1654 retbuf.secdma = locker->secdma;
1655
1656 /* This could be a callback registered with dhd_pktid_map */
1657 DHD_GENERAL_UNLOCK(dhd, flags);
1658 free_ioctl_return_buffer(dhd, &retbuf);
1659 DHD_GENERAL_LOCK(dhd, flags);
1660 }
1661 else {
1662#ifdef DHD_PKTID_AUDIT_RING
1663 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1664#endif /* DHD_PKTID_AUDIT_RING */
1665 }
1666 map->keys[nkey] = nkey; /* populate with unique keys */
1667 }
1668
1669 map->avail = map_items;
1670 memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items);
1671 DHD_GENERAL_UNLOCK(dhd, flags);
1672}
1673#endif /* IOCTLRESP_USE_CONSTMEM */
1674
1675
1676/**
1677 * Free the pktid map.
1678 */
1679static void
1680dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1681{
1682 dhd_pktid_map_t *map;
1683 uint32 dhd_pktid_map_sz;
1684 uint32 map_keys_sz;
1685
1686 /* Free any pending packets */
1687 dhd_pktid_map_reset(dhd, handle);
1688
1689 map = (dhd_pktid_map_t *)handle;
1690 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1691 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1692
1693#if defined(DHD_PKTID_AUDIT_ENABLED)
1694 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1695 bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
1696 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1697 if (map->pktid_audit_lock) {
1698 DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
1699 }
1700 }
1701#endif /* DHD_PKTID_AUDIT_ENABLED */
1702 MFREE(dhd->osh, map->keys, map_keys_sz);
1703 VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
1704}
1705#ifdef IOCTLRESP_USE_CONSTMEM
1706static void
1707dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle)
1708{
1709 dhd_pktid_map_t *map;
1710 uint32 dhd_pktid_map_sz;
1711 uint32 map_keys_sz;
1712
1713 /* Free any pending packets */
1714 dhd_pktid_map_reset_ioctl(dhd, handle);
1715
1716 map = (dhd_pktid_map_t *)handle;
1717 dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items);
1718 map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items);
1719
1720#if defined(DHD_PKTID_AUDIT_ENABLED)
1721 if (map->pktid_audit != (struct bcm_mwbmap *)NULL) {
1722 bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */
1723 map->pktid_audit = (struct bcm_mwbmap *)NULL;
1724 if (map->pktid_audit_lock) {
1725 DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock);
1726 }
1727 }
1728#endif /* DHD_PKTID_AUDIT_ENABLED */
1729
1730 MFREE(dhd->osh, map->keys, map_keys_sz);
1731 VMFREE(dhd->osh, handle, dhd_pktid_map_sz);
1732}
1733#endif /* IOCTLRESP_USE_CONSTMEM */
1734
1735/** Get the pktid free count */
1736static INLINE uint32 BCMFASTPATH
1737dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle)
1738{
1739 dhd_pktid_map_t *map;
1740 uint32 avail;
1741
1742 ASSERT(handle != NULL);
1743 map = (dhd_pktid_map_t *)handle;
1744
1745 avail = map->avail;
1746
1747 return avail;
1748}
1749
1750/**
1751 * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not
1752 * yet populated. Invoke the pktid save api to populate the packet parameters
1753 * into the locker. This function is not reentrant; serialization is the caller's
1754 * responsibility. The caller must treat a returned value of DHD_PKTID_INVALID as
1755 * a failure case, implying a depleted pool of pktids.
1756 */
1757static INLINE uint32
1758dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
1759 void *pkt, dhd_pkttype_t pkttype)
1760{
1761 uint32 nkey;
1762 dhd_pktid_map_t *map;
1763 dhd_pktid_item_t *locker;
1764
1765 ASSERT(handle != NULL);
1766 map = (dhd_pktid_map_t *)handle;
1767
1768 if ((int)(map->avail) <= 0) { /* no more pktids to allocate */
1769 map->failures++;
1770 DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__));
1771 return DHD_PKTID_INVALID; /* failed alloc request */
1772 }
1773
1774 ASSERT(map->avail <= map->items);
1775 nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */
1776
1777 if ((map->avail > map->items) || (nkey > map->items)) {
1778 map->failures++;
1779 DHD_ERROR(("%s:%d: failed to allocate a new pktid,"
1780 " map->avail<%u>, nkey<%u>, pkttype<%u>\n",
1781 __FUNCTION__, __LINE__, map->avail, nkey,
1782 pkttype));
1783 return DHD_PKTID_INVALID; /* failed alloc request */
1784 }
1785
1786 locker = &map->lockers[nkey]; /* save packet metadata in locker */
1787 map->avail--;
1788 locker->pkt = pkt; /* pkt is saved, other params not yet saved. */
1789 locker->len = 0;
1790 locker->state = LOCKER_IS_BUSY; /* reserve this locker */
1791
1792 ASSERT(nkey != DHD_PKTID_INVALID);
1793 return nkey; /* return locker's numbered key */
1794}
1795
1796/*
1797 * dhd_pktid_map_save - Save a packet's parameters into a locker
1798 * corresponding to a previously reserved unique numbered key.
1799 */
1800static INLINE void
1801dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1802 uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1803 dhd_pkttype_t pkttype)
1804{
1805 dhd_pktid_map_t *map;
1806 dhd_pktid_item_t *locker;
1807
1808 ASSERT(handle != NULL);
1809 map = (dhd_pktid_map_t *)handle;
1810
1811 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
1812 DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n",
1813 __FUNCTION__, __LINE__, nkey, pkttype));
1814#ifdef DHD_FW_COREDUMP
1815 if (dhd->memdump_enabled) {
1816 /* collect core dump */
1817 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
1818 dhd_bus_mem_dump(dhd);
1819 }
1820#else
1821 ASSERT(0);
1822#endif /* DHD_FW_COREDUMP */
1823 return;
1824 }
1825
1826 locker = &map->lockers[nkey];
1827
1828 ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) ||
1829 ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL)));
1830
1831 /* store contents in locker */
1832 locker->dir = dir;
1833 locker->pa = pa;
1834 locker->len = (uint16)len; /* 16bit len */
1835	locker->dmah = dmah; /* dma handle */
1836 locker->secdma = secdma;
1837 locker->pkttype = pkttype;
1838 locker->pkt = pkt;
1839 locker->state = LOCKER_IS_BUSY; /* make this locker busy */
1840}
1841
1842/**
1843 * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet
1844 * contents into the corresponding locker. Return the numbered key.
1845 */
1846static uint32 BCMFASTPATH
1847dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt,
1848 dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma,
1849 dhd_pkttype_t pkttype)
1850{
1851 uint32 nkey;
1852
1853 nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype);
1854 if (nkey != DHD_PKTID_INVALID) {
1855 dhd_pktid_map_save(dhd, handle, pkt, nkey, pa,
1856 len, dir, dmah, secdma, pkttype);
1857 }
1858
1859 return nkey;
1860}
1861
1862/**
1863 * dhd_pktid_map_free - Given a numbered key, return the locker contents.
1864 * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility.
1865 * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid
1866 * value. Only a previously allocated pktid may be freed.
1867 */
1868static void * BCMFASTPATH
1869dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey,
1870 dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype,
1871 bool rsv_locker)
1872{
1873 dhd_pktid_map_t *map;
1874 dhd_pktid_item_t *locker;
1875 void * pkt;
1876 unsigned long long locker_addr;
1877
1878 ASSERT(handle != NULL);
1879
1880 map = (dhd_pktid_map_t *)handle;
1881
1882 if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) {
1883 DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n",
1884 __FUNCTION__, __LINE__, nkey, pkttype));
1885#ifdef DHD_FW_COREDUMP
1886 if (dhd->memdump_enabled) {
1887 /* collect core dump */
1888 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
1889 dhd_bus_mem_dump(dhd);
1890 }
1891#else
1892 ASSERT(0);
1893#endif /* DHD_FW_COREDUMP */
1894 return NULL;
1895 }
1896
1897 locker = &map->lockers[nkey];
1898
1899#if defined(DHD_PKTID_AUDIT_MAP)
1900 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */
1901#endif /* DHD_PKTID_AUDIT_MAP */
1902
1903 /* Debug check for cloned numbered key */
1904 if (locker->state == LOCKER_IS_FREE) {
1905 DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n",
1906 __FUNCTION__, __LINE__, nkey));
1907#ifdef DHD_FW_COREDUMP
1908 if (dhd->memdump_enabled) {
1909 /* collect core dump */
1910 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
1911 dhd_bus_mem_dump(dhd);
1912 }
1913#else
1914 ASSERT(0);
1915#endif /* DHD_FW_COREDUMP */
1916 return NULL;
1917 }
1918
1919	/* Check the colour of the buffer, i.e. a buffer posted for TX should
1920	 * be freed on TX completion. Similarly, a buffer posted for IOCTL
1921	 * should be freed on IOCTL completion, and so on.
1922 */
1923 if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) {
1924
1925 DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n",
1926 __FUNCTION__, __LINE__, nkey));
1927#ifdef BCMDMA64OSL
1928 PHYSADDRTOULONG(locker->pa, locker_addr);
1929#else
1930 locker_addr = PHYSADDRLO(locker->pa);
1931#endif /* BCMDMA64OSL */
1932 DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>,"
1933 "pkttype <%d> locker->pa <0x%llx> \n",
1934 __FUNCTION__, __LINE__, locker->state, locker->pkttype,
1935 pkttype, locker_addr));
1936#ifdef DHD_FW_COREDUMP
1937 if (dhd->memdump_enabled) {
1938 /* collect core dump */
1939 dhd->memdump_type = DUMP_TYPE_PKTID_INVALID;
1940 dhd_bus_mem_dump(dhd);
1941 }
1942#else
1943 ASSERT(0);
1944#endif /* DHD_FW_COREDUMP */
1945 return NULL;
1946 }
1947
1948 if (rsv_locker == DHD_PKTID_FREE_LOCKER) {
1949 map->avail++;
1950 map->keys[map->avail] = nkey; /* make this numbered key available */
1951 locker->state = LOCKER_IS_FREE; /* open and free Locker */
1952 } else {
1953 /* pktid will be reused, but the locker does not have a valid pkt */
1954 locker->state = LOCKER_IS_RSVD;
1955 }
1956
1957#if defined(DHD_PKTID_AUDIT_MAP)
1958 DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE);
1959#endif /* DHD_PKTID_AUDIT_MAP */
1960
1961 *pa = locker->pa; /* return contents of locker */
1962 *len = (uint32)locker->len;
1963 *dmah = locker->dmah;
1964 *secdma = locker->secdma;
1965
1966 pkt = locker->pkt;
1967 locker->pkt = NULL; /* Clear pkt */
1968 locker->len = 0;
1969
1970 return pkt;
1971}
1972
1973#else /* ! DHD_PCIE_PKTID */
1974
1975
1976typedef struct pktlist {
1977 PKT_LIST *tx_pkt_list; /* list for tx packets */
1978 PKT_LIST *rx_pkt_list; /* list for rx packets */
1979 PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */
1980} pktlists_t;
1981
1982/*
1983 * Given that each workitem only uses a 32bit pktid, only 32bit hosts may avail
1984 * of a one-to-one mapping between a 32bit pktptr and a 32bit pktid.
1985 *
1986 * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail.
1987 * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by
1988 * a lock.
1989 * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined.
1990 */
1991#define DHD_PKTID32(pktptr32) ((uint32)(pktptr32))
1992#define DHD_PKTPTR32(pktid32) ((void *)(pktid32))
1993
1994
1995static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
1996 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
1997 dhd_pkttype_t pkttype);
1998static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
1999 dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2000 dhd_pkttype_t pkttype);
2001
2002static dhd_pktid_map_handle_t *
2003dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items)
2004{
2005 osl_t *osh = dhd->osh;
2006 pktlists_t *handle = NULL;
2007
2008 if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) {
2009 DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n",
2010 __FUNCTION__, __LINE__, sizeof(pktlists_t)));
2011 goto error_done;
2012 }
2013
2014 if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2015 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2016 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2017 goto error;
2018 }
2019
2020 if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2021 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2022 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2023 goto error;
2024 }
2025
2026 if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) {
2027 DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n",
2028 __FUNCTION__, __LINE__, sizeof(PKT_LIST)));
2029 goto error;
2030 }
2031
2032 PKTLIST_INIT(handle->tx_pkt_list);
2033 PKTLIST_INIT(handle->rx_pkt_list);
2034 PKTLIST_INIT(handle->ctrl_pkt_list);
2035
2036 return (dhd_pktid_map_handle_t *) handle;
2037
2038error:
2039 if (handle->ctrl_pkt_list) {
2040 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2041 }
2042
2043 if (handle->rx_pkt_list) {
2044 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2045 }
2046
2047 if (handle->tx_pkt_list) {
2048 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2049 }
2050
2051 if (handle) {
2052 MFREE(osh, handle, sizeof(pktlists_t));
2053 }
2054
2055error_done:
2056 return (dhd_pktid_map_handle_t *)NULL;
2057}
2058
2059static void
2060dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map)
2061{
2062 osl_t *osh = dhd->osh;
2063 pktlists_t *handle = (pktlists_t *) map;
2064
2065 ASSERT(handle != NULL);
2066 if (handle == (pktlists_t *)NULL)
2067 return;
2068
2069 if (handle->ctrl_pkt_list) {
2070 PKTLIST_FINI(handle->ctrl_pkt_list);
2071 MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST));
2072 }
2073
2074 if (handle->rx_pkt_list) {
2075 PKTLIST_FINI(handle->rx_pkt_list);
2076 MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST));
2077 }
2078
2079 if (handle->tx_pkt_list) {
2080 PKTLIST_FINI(handle->tx_pkt_list);
2081 MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST));
2082 }
2083
2084 if (handle) {
2085 MFREE(osh, handle, sizeof(pktlists_t));
2086 }
2087}
2088
2089/** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */
2090static INLINE uint32
2091dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32,
2092 dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma,
2093 dhd_pkttype_t pkttype)
2094{
2095 pktlists_t *handle = (pktlists_t *) map;
2096 ASSERT(pktptr32 != NULL);
2097 DHD_PKT_SET_DMA_LEN(pktptr32, dma_len);
2098 DHD_PKT_SET_DMAH(pktptr32, dmah);
2099 DHD_PKT_SET_PA(pktptr32, pa);
2100 DHD_PKT_SET_SECDMA(pktptr32, secdma);
2101
2102 if (pkttype == PKTTYPE_DATA_TX) {
2103 PKTLIST_ENQ(handle->tx_pkt_list, pktptr32);
2104 } else if (pkttype == PKTTYPE_DATA_RX) {
2105 PKTLIST_ENQ(handle->rx_pkt_list, pktptr32);
2106 } else {
2107 PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32);
2108 }
2109
2110 return DHD_PKTID32(pktptr32);
2111}
2112
2113/** Convert a pktid to pktptr and retrieve saved dma parameters from packet */
2114static INLINE void *
2115dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32,
2116 dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma,
2117 dhd_pkttype_t pkttype)
2118{
2119 pktlists_t *handle = (pktlists_t *) map;
2120 void *pktptr32;
2121
2122 ASSERT(pktid32 != 0U);
2123 pktptr32 = DHD_PKTPTR32(pktid32);
2124 *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32);
2125 *dmah = DHD_PKT_GET_DMAH(pktptr32);
2126 *pa = DHD_PKT_GET_PA(pktptr32);
2127 *secdma = DHD_PKT_GET_SECDMA(pktptr32);
2128
2129 if (pkttype == PKTTYPE_DATA_TX) {
2130 PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32);
2131 } else if (pkttype == PKTTYPE_DATA_RX) {
2132 PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32);
2133 } else {
2134 PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32);
2135 }
2136
2137 return pktptr32;
2138}
2139
2140#define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt)
2141
2142#define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \
2143 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \
2144 dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2145 (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2146 })
2147
2148#define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \
2149 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \
2150 dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \
2151 (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \
2152 })
2153
2154#define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \
2155 ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \
2156 dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \
2157 (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \
2158 (void **)&secdma, (dhd_pkttype_t)(pkttype)); \
2159 })
2160
2161#define DHD_PKTID_AVAIL(map) (~0)
2162
2163#endif /* ! DHD_PCIE_PKTID */
2164
2165/* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */
2166
2167
2168/**
2169 * The PCIE FD protocol layer is constructed in two phases:
2170 * Phase 1. dhd_prot_attach()
2171 * Phase 2. dhd_prot_init()
2172 *
2173 * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields.
2174 * All Common rings are also attached (msgbuf_ring_t objects are allocated
2175 * with DMA-able buffers).
2176 * All dhd_dma_buf_t objects are also allocated here.
2177 *
2178 * As dhd_prot_attach is invoked before the pcie_shared object is read, any
2179 * initialization of objects that requires information advertized by the dongle
2180 * may not be performed here.
2181 * E.g. the number of TxPost flowrings is not known at this point, nor do
2182 * we know which form of D2H DMA sync mechanism is advertized by the dongle, or
2183 * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H
2184 * rings (common + flow).
2185 *
2186 * dhd_prot_init() is invoked after the bus layer has fetched the information
2187 * advertized by the dongle in the pcie_shared_t.
2188 */
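/*
 * Illustrative ordering only (the exact sequencing is driven by the bus
 * layer and may differ in detail):
 *
 *   dhd_prot_attach(dhd);    // allocate prot, common rings, dhd_dma_buf_t's
 *   ...                      // dongle boots; bus layer reads pcie_shared_t
 *   dhd_prot_init(dhd);      // apply dongle-advertised config, init rings
 */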
2189int
2190dhd_prot_attach(dhd_pub_t *dhd)
2191{
2192 osl_t *osh = dhd->osh;
2193 dhd_prot_t *prot;
2194
2195 /* Allocate prot structure */
2196 if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT,
2197 sizeof(dhd_prot_t)))) {
2198 DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__));
2199 goto fail;
2200 }
2201 memset(prot, 0, sizeof(*prot));
2202
2203 prot->osh = osh;
2204 dhd->prot = prot;
2205
2206 /* DMAing ring completes supported? FALSE by default */
2207 dhd->dma_d2h_ring_upd_support = FALSE;
2208 dhd->dma_h2d_ring_upd_support = FALSE;
2209 dhd->dma_ring_upd_overwrite = FALSE;
2210
2211 dhd->idma_inited = 0;
2212 dhd->ifrm_inited = 0;
2213
2214 /* Common Ring Allocations */
2215
2216 /* Ring 0: H2D Control Submission */
2217 if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl",
2218 H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE,
2219 BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) {
2220 DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n",
2221 __FUNCTION__));
2222 goto fail;
2223 }
2224
2225 /* Ring 1: H2D Receive Buffer Post */
2226 if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp",
2227 H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE,
2228 BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) {
2229 DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n",
2230 __FUNCTION__));
2231 goto fail;
2232 }
2233
2234 /* Ring 2: D2H Control Completion */
2235 if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl",
2236 D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE,
2237 BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) {
2238 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n",
2239 __FUNCTION__));
2240 goto fail;
2241 }
2242
2243 /* Ring 3: D2H Transmit Complete */
2244 if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl",
2245 D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE,
2246 BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) {
2247 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n",
2248 __FUNCTION__));
2249 goto fail;
2250
2251 }
2252
2253 /* Ring 4: D2H Receive Complete */
2254 if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl",
2255 D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE,
2256 BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) {
2257 DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n",
2258 __FUNCTION__));
2259 goto fail;
2260
2261 }
2262
2263 /*
2264 * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able
2265 * buffers for flowrings will be instantiated, in dhd_prot_init() .
2266 * See dhd_prot_flowrings_pool_attach()
2267 */
2268 /* ioctl response buffer */
2269 if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) {
2270 goto fail;
2271 }
2272
2273 /* IOCTL request buffer */
2274 if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) {
2275 goto fail;
2276 }
2277
2278 /* Host TS request buffer one buffer for now */
2279 if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) {
2280 goto fail;
2281 }
2282 prot->hostts_req_buf_inuse = FALSE;
2283
2284 /* Scratch buffer for dma rx offset */
2285#ifdef BCM_HOST_BUF
2286 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf,
2287 ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) {
2288#else
2289 if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) {
2290
2291#endif /* BCM_HOST_BUF */
2292 goto fail;
2293 }
2294
2295 /* scratch buffer bus throughput measurement */
2296 if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) {
2297 goto fail;
2298 }
2299
2300#ifdef DHD_RX_CHAINING
2301 dhd_rxchain_reset(&prot->rxchain);
2302#endif
2303
2304 prot->rx_lock = dhd_os_spin_lock_init(dhd->osh);
2305
2306 prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID);
2307 if (prot->pktid_ctrl_map == NULL) {
2308 goto fail;
2309 }
2310
2311 prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID);
2312 if (prot->pktid_rx_map == NULL)
2313 goto fail;
2314
2315 prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID);
2316	if (prot->pktid_tx_map == NULL)
2317 goto fail;
2318
2319#ifdef IOCTLRESP_USE_CONSTMEM
2320 prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd,
2321 DHD_FLOWRING_MAX_IOCTLRESPBUF_POST);
2322 if (prot->pktid_map_handle_ioctl == NULL) {
2323 goto fail;
2324 }
2325#endif /* IOCTLRESP_USE_CONSTMEM */
2326
2327 /* Initialize the work queues to be used by the Load Balancing logic */
2328#if defined(DHD_LB_TXC)
2329 {
2330 void *buffer;
2331 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2332 bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons,
2333 buffer, DHD_LB_WORKQ_SZ);
2334 prot->tx_compl_prod_sync = 0;
2335 DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n",
2336 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2337 }
2338#endif /* DHD_LB_TXC */
2339
2340#if defined(DHD_LB_RXC)
2341 {
2342 void *buffer;
2343 buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ);
2344 bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons,
2345 buffer, DHD_LB_WORKQ_SZ);
2346 prot->rx_compl_prod_sync = 0;
2347 DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n",
2348 __FUNCTION__, buffer, DHD_LB_WORKQ_SZ));
2349 }
2350#endif /* DHD_LB_RXC */
2351 /* Initialize trap buffer */
2352 if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, BCMPCIE_EXT_TRAP_DATA_MAXLEN)) {
2353		DHD_ERROR(("%s: dhd_init_trap_buffer failed\n", __FUNCTION__));
2354 goto fail;
2355 }
2356
2357 return BCME_OK;
2358
2359fail:
2360
2361#ifndef CONFIG_DHD_USE_STATIC_BUF
2362 if (prot != NULL) {
2363 dhd_prot_detach(dhd);
2364 }
2365#endif /* CONFIG_DHD_USE_STATIC_BUF */
2366
2367 return BCME_NOMEM;
2368} /* dhd_prot_attach */
2369
2370void
2371dhd_set_host_cap(dhd_pub_t *dhd)
2372{
2373 uint32 data = 0;
2374 dhd_prot_t *prot = dhd->prot;
2375
2376 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
2377 if (dhd->h2d_phase_supported) {
2378
2379 data |= HOSTCAP_H2D_VALID_PHASE;
2380
2381 if (dhd->force_dongletrap_on_bad_h2d_phase) {
2382 data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE;
2383 }
2384 }
2385 if (prot->host_ipc_version > prot->device_ipc_version) {
2386 prot->active_ipc_version = prot->device_ipc_version;
2387 } else {
2388 prot->active_ipc_version = prot->host_ipc_version;
2389 }
2390
2391 data |= prot->active_ipc_version;
2392
2393 if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) {
2394
2395 DHD_INFO(("Advertise Hostready Capability\n"));
2396
2397 data |= HOSTCAP_H2D_ENABLE_HOSTRDY;
2398 }
2399#ifdef PCIE_INB_DW
2400 if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) {
2401 DHD_INFO(("Advertise Inband-DW Capability\n"));
2402 data |= HOSTCAP_DS_INBAND_DW;
2403 data |= HOSTCAP_DS_NO_OOB_DW;
2404 dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB);
2405 } else
2406#endif /* PCIE_INB_DW */
2407#ifdef PCIE_OOB
2408 if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) {
2409 dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB);
2410 } else
2411#endif /* PCIE_OOB */
2412 {
2413 /* Disable DS altogether */
2414 data |= HOSTCAP_DS_NO_OOB_DW;
2415 dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE);
2416 }
2417
2418 if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) {
2419
2420 DHD_ERROR(("IDMA inited\n"));
2421 data |= HOSTCAP_H2D_IDMA;
2422 dhd->idma_inited = TRUE;
2423 }
2424
2425 if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) {
2426 DHD_ERROR(("IFRM Inited\n"));
2427 data |= HOSTCAP_H2D_IFRM;
2428 dhd->ifrm_inited = TRUE;
2429 dhd->dma_h2d_ring_upd_support = FALSE;
2430 dhd_prot_dma_indx_free(dhd);
2431 }
2432
2433 /* Indicate support for TX status metadata */
2434 data |= HOSTCAP_TXSTATUS_METADATA;
2435
2436 /* Indicate support for extended trap data */
2437 data |= HOSTCAP_EXTENDED_TRAP_DATA;
2438
2439 DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n",
2440 __FUNCTION__,
2441 prot->active_ipc_version, prot->host_ipc_version,
2442 prot->device_ipc_version));
2443
2444 dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0);
2445 dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa,
2446 sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0);
2447 }
2448#ifdef HOFFLOAD_MODULES
2449 dhd_bus_cmn_writeshared(dhd->bus, &dhd->hmem.data_addr,
2450 sizeof(dhd->hmem.data_addr), WRT_HOST_MODULE_ADDR, 0);
2451#endif
2452
2453#ifdef DHD_TIMESYNC
2454 dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version);
2455#endif /* DHD_TIMESYNC */
2456}
2457
2458/**
2459 * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has
2460 * completed its initialization of the pcie_shared structure, we may now fetch
2461 * the dongle advertized features and adjust the protocol layer accordingly.
2462 *
2463 * dhd_prot_init() may be invoked again after a dhd_prot_reset().
2464 */
2465int
2466dhd_prot_init(dhd_pub_t *dhd)
2467{
2468 sh_addr_t base_addr;
2469 dhd_prot_t *prot = dhd->prot;
2470 int ret = 0;
2471
2472 /**
2473 * A user defined value can be assigned to global variable h2d_max_txpost via
2474 * 1. DHD IOVAR h2d_max_txpost, before firmware download
2475 * 2. module parameter h2d_max_txpost
2476	 * prot->h2d_max_txpost defaults to H2DRING_TXPOST_MAX_ITEM if the user
2477	 * has not set a value by one of the above methods.
2478 */
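	/*
	 * Example of method 2 above (illustrative only; the module file name
	 * is an assumption and depends on how this driver is packaged):
	 *   insmod bcmdhd.ko h2d_max_txpost=512
	 */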
2479 prot->h2d_max_txpost = (uint16)h2d_max_txpost;
2480
2481 DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost));
2482
2483 /* Read max rx packets supported by dongle */
2484 dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0);
2485 if (prot->max_rxbufpost == 0) {
2486 /* This would happen if the dongle firmware is not */
2487 /* using the latest shared structure template */
2488 prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST;
2489 }
2490 DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost));
2491
2492 /* Initialize. bzero() would blow away the dma pointers. */
2493 prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST;
2494 prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST;
2495 prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST;
2496 prot->max_tsbufpost = DHD_MAX_TSBUF_POST;
2497
2498 prot->cur_ioctlresp_bufs_posted = 0;
2499 prot->active_tx_count = 0;
2500 prot->data_seq_no = 0;
2501 prot->ioctl_seq_no = 0;
2502 prot->rxbufpost = 0;
2503 prot->cur_event_bufs_posted = 0;
2504 prot->ioctl_state = 0;
2505 prot->curr_ioctl_cmd = 0;
2506 prot->cur_ts_bufs_posted = 0;
2507 prot->infobufpost = 0;
2508
2509 prot->dmaxfer.srcmem.va = NULL;
2510 prot->dmaxfer.dstmem.va = NULL;
2511 prot->dmaxfer.in_progress = FALSE;
2512
2513 prot->metadata_dbg = FALSE;
2514 prot->rx_metadata_offset = 0;
2515 prot->tx_metadata_offset = 0;
2516 prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT;
2517
2518 /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
2519 prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
2520 prot->ioctl_state = 0;
2521 prot->ioctl_status = 0;
2522 prot->ioctl_resplen = 0;
2523 prot->ioctl_received = IOCTL_WAIT;
2524
2525 /* Register the interrupt function upfront */
2526 /* remove corerev checks in data path */
2527 prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus);
2528
2529 prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus);
2530
2531 /* Initialize Common MsgBuf Rings */
2532
2533 prot->device_ipc_version = dhd->bus->api.fw_rev;
2534 prot->host_ipc_version = PCIE_SHARED_VERSION;
2535
2536 /* Init the host API version */
2537 dhd_set_host_cap(dhd);
2538
2539 dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn);
2540 dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn);
2541 dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln);
2542
2543	/* Make it compatible with pre-rev7 Firmware */
2544 if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) {
2545 prot->d2hring_tx_cpln.item_len =
2546 D2HRING_TXCMPLT_ITEMSIZE_PREREV7;
2547 prot->d2hring_rx_cpln.item_len =
2548 D2HRING_RXCMPLT_ITEMSIZE_PREREV7;
2549 }
2550 dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln);
2551 dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln);
2552
2553 dhd_prot_d2h_sync_init(dhd);
2554
2555 dhd_prot_h2d_sync_init(dhd);
2556
2557#ifdef PCIE_INB_DW
2558 /* Set the initial DS state */
2559 if (INBAND_DW_ENAB(dhd->bus)) {
2560 dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus,
2561 DW_DEVICE_DS_ACTIVE);
2562 }
2563#endif /* PCIE_INB_DW */
2564
2565 /* init the scratch buffer */
2566 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa);
2567 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2568 D2H_DMA_SCRATCH_BUF, 0);
2569 dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len,
2570 sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0);
2571
2572 /* If supported by the host, indicate the memory block
2573 * for completion writes / submission reads to shared space
2574 */
2575 if (dhd->dma_d2h_ring_upd_support) {
2576 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa);
2577 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2578 D2H_DMA_INDX_WR_BUF, 0);
2579 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa);
2580 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2581 H2D_DMA_INDX_RD_BUF, 0);
2582 }
2583
2584 if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) {
2585 dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa);
2586 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2587 H2D_DMA_INDX_WR_BUF, 0);
2588 dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa);
2589 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2590 D2H_DMA_INDX_RD_BUF, 0);
2591
2592 }
2593
2594 /* Signal to the dongle that common ring init is complete */
2595 dhd_bus_hostready(dhd->bus);
2596
2597 /*
2598	 * If the DMA-able buffers for flowrings need to come from a specific
2599 * contiguous memory region, then setup prot->flowrings_dma_buf here.
2600 * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from
2601 * this contiguous memory region, for each of the flowrings.
2602 */
2603
2604 /* Pre-allocate pool of msgbuf_ring for flowrings */
2605 if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) {
2606 return BCME_ERROR;
2607 }
2608
2609 /* If IFRM is enabled, wait for FW to setup the DMA channel */
2610 if (IFRM_ENAB(dhd)) {
2611 dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa);
2612 dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr),
2613 H2D_IFRM_INDX_WR_BUF, 0);
2614 }
2615
2616 /* See if info rings could be created */
2617 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) {
2618 if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) {
2619			/* For now log and proceed; further clean up action may be necessary
2620 * when we have more clarity.
2621 */
2622			DHD_ERROR(("%s: Info rings couldn't be created: Err Code %d\n",
2623 __FUNCTION__, ret));
2624 }
2625 }
2626
2627 /* Host should configure soft doorbells if needed ... here */
2628
2629 /* Post to dongle host configured soft doorbells */
2630 dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd);
2631
2632 /* Post buffers for packet reception and ioctl/event responses */
2633 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
2634 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
2635 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
2636
2637 return BCME_OK;
2638} /* dhd_prot_init */
2639
2640
2641/**
2642 * dhd_prot_detach - PCIE FD protocol layer destructor.
2643 * Unlink, frees allocated protocol memory (including dhd_prot)
2644 */
2645void dhd_prot_detach(dhd_pub_t *dhd)
2646{
2647 dhd_prot_t *prot = dhd->prot;
2648
2649 /* Stop the protocol module */
2650 if (prot) {
2651
2652 /* free up all DMA-able buffers allocated during prot attach/init */
2653
2654 dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf);
2655 dhd_dma_buf_free(dhd, &prot->retbuf);
2656 dhd_dma_buf_free(dhd, &prot->ioctbuf);
2657 dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf);
2658 dhd_dma_buf_free(dhd, &prot->hostts_req_buf);
2659 dhd_dma_buf_free(dhd, &prot->fw_trap_buf);
2660
2661 /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */
2662 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
2663 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf);
2664 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf);
2665 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
2666
2667 dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf);
2668
2669 /* Common MsgBuf Rings */
2670 dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn);
2671 dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn);
2672 dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln);
2673 dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln);
2674 dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln);
2675
2676 /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */
2677 dhd_prot_flowrings_pool_detach(dhd);
2678
2679 /* detach info rings */
2680 dhd_prot_detach_info_rings(dhd);
2681
2682 /* if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs use pktid_map_handle_ioctl
2683	 * handler and PKT memory is allocated using alloc_ioctl_return_buffer(); otherwise
2684	 * they will be part of the pktid_ctrl_map handler and PKT memory is allocated using
2685	 * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTGET.
2686	 * Similarly, for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI will be used,
2687	 * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTFREE.
2688 * Else if IOCTLRESP_USE_CONSTMEM is defined IOCTL PKTs will be freed using
2689 * DHD_NATIVE_TO_PKTID_FINI_IOCTL which calls free_ioctl_return_buffer.
2690 */
2691 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map);
2692 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map);
2693 DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map);
2694#ifdef IOCTLRESP_USE_CONSTMEM
2695 DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl);
2696#endif
2697
2698 dhd_os_spin_lock_deinit(dhd->osh, prot->rx_lock);
2699
2700#ifndef CONFIG_DHD_USE_STATIC_BUF
2701 MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t));
2702#endif /* CONFIG_DHD_USE_STATIC_BUF */
2703
2704#if defined(DHD_LB_TXC)
2705 if (prot->tx_compl_prod.buffer)
2706 MFREE(dhd->osh, prot->tx_compl_prod.buffer,
2707 sizeof(void*) * DHD_LB_WORKQ_SZ);
2708#endif /* DHD_LB_TXC */
2709#if defined(DHD_LB_RXC)
2710 if (prot->rx_compl_prod.buffer)
2711 MFREE(dhd->osh, prot->rx_compl_prod.buffer,
2712 sizeof(void*) * DHD_LB_WORKQ_SZ);
2713#endif /* DHD_LB_RXC */
2714
2715 dhd->prot = NULL;
2716 }
2717} /* dhd_prot_detach */
2718
2719
2720/**
2721 * dhd_prot_reset - Reset the protocol layer without freeing any objects.
2722 * This may be invoked to soft reboot the dongle, without having to
2723 * detach and attach the entire protocol layer.
2724 *
2725 * After dhd_prot_reset(), dhd_prot_init() may be invoked
2726 * without going through a dhd_prot_attach() phase.
2727 */
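/*
 * Sketch of the soft-reboot sequence this enables (illustrative; the actual
 * control flow is driven by the bus layer):
 *
 *   dhd_prot_reset(dhd);   // rings and DMA buffers are reset, not freed
 *   ...                    // dongle reboots; pcie_shared_t is re-read
 *   dhd_prot_init(dhd);    // re-initialize, reusing the retained allocations
 */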
2728void
2729dhd_prot_reset(dhd_pub_t *dhd)
2730{
2731 struct dhd_prot *prot = dhd->prot;
2732
2733 DHD_TRACE(("%s\n", __FUNCTION__));
2734
2735 if (prot == NULL) {
2736 return;
2737 }
2738
2739 dhd_prot_flowrings_pool_reset(dhd);
2740
2741 /* Reset Common MsgBuf Rings */
2742 dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn);
2743 dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn);
2744 dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln);
2745 dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln);
2746 dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln);
2747
2748 /* Reset info rings */
2749 if (prot->h2dring_info_subn) {
2750 dhd_prot_ring_reset(dhd, prot->h2dring_info_subn);
2751 }
2752
2753 if (prot->d2hring_info_cpln) {
2754 dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln);
2755 }
2756
2757 /* Reset all DMA-able buffers allocated during prot attach */
2758 dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf);
2759 dhd_dma_buf_reset(dhd, &prot->retbuf);
2760 dhd_dma_buf_reset(dhd, &prot->ioctbuf);
2761 dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf);
2762 dhd_dma_buf_reset(dhd, &prot->hostts_req_buf);
2763 dhd_dma_buf_reset(dhd, &prot->fw_trap_buf);
2764
2765 dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf);
2766
2767 /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */
2768 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf);
2769 dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf);
2770 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf);
2771 dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf);
2772
2773
2774 prot->rx_metadata_offset = 0;
2775 prot->tx_metadata_offset = 0;
2776
2777 prot->rxbufpost = 0;
2778 prot->cur_event_bufs_posted = 0;
2779 prot->cur_ioctlresp_bufs_posted = 0;
2780
2781 prot->active_tx_count = 0;
2782 prot->data_seq_no = 0;
2783 prot->ioctl_seq_no = 0;
2784 prot->ioctl_state = 0;
2785 prot->curr_ioctl_cmd = 0;
2786 prot->ioctl_received = IOCTL_WAIT;
2787 /* To catch any rollover issues fast, starting with higher ioctl_trans_id */
2788 prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER;
2789
2790 /* dhd_flow_rings_init is located at dhd_bus_start,
2791 * so when stopping bus, flowrings shall be deleted
2792 */
2793 if (dhd->flow_rings_inited) {
2794 dhd_flow_rings_deinit(dhd);
2795 }
2796
2797 /* Reset PKTID map */
2798 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map);
2799 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map);
2800 DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map);
2801#ifdef IOCTLRESP_USE_CONSTMEM
2802 DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl);
2803#endif /* IOCTLRESP_USE_CONSTMEM */
2804#ifdef DMAMAP_STATS
2805 dhd->dma_stats.txdata = dhd->dma_stats.txdata_sz = 0;
2806 dhd->dma_stats.rxdata = dhd->dma_stats.rxdata_sz = 0;
2807#ifndef IOCTLRESP_USE_CONSTMEM
2808 dhd->dma_stats.ioctl_rx = dhd->dma_stats.ioctl_rx_sz = 0;
2809#endif /* IOCTLRESP_USE_CONSTMEM */
2810 dhd->dma_stats.event_rx = dhd->dma_stats.event_rx_sz = 0;
2811 dhd->dma_stats.info_rx = dhd->dma_stats.info_rx_sz = 0;
2812 dhd->dma_stats.tsbuf_rx = dhd->dma_stats.tsbuf_rx_sz = 0;
2813#endif /* DMAMAP_STATS */
2814} /* dhd_prot_reset */
2815
2816#if defined(DHD_LB_RXP)
2817#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp)
2818#else /* !DHD_LB_RXP */
2819#define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0)
2820#endif /* !DHD_LB_RXP */
2821
2822#if defined(DHD_LB_RXC)
2823#define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp)
2824#else /* !DHD_LB_RXC */
2825#define DHD_LB_DISPATCH_RX_COMPL(dhdp) do { /* noop */ } while (0)
2826#endif /* !DHD_LB_RXC */
2827
2828#if defined(DHD_LB_TXC)
2829#define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp)
2830#else /* !DHD_LB_TXC */
2831#define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0)
2832#endif /* !DHD_LB_TXC */
2833
2834
2835#if defined(DHD_LB)
2836/* DHD load balancing: deferral of work to another online CPU */
2837/* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */
2838extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
2839extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
2840extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
2841extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
2842
2843#if defined(DHD_LB_RXP)
2844/**
2845 * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work
2846 * to other CPU cores
2847 */
2848static INLINE void
2849dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp)
2850{
2851 dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */
2852}
2853#endif /* DHD_LB_RXP */
2854
2855#if defined(DHD_LB_TXC)
2856/**
2857 * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work
2858 * to other CPU cores
2859 */
2860static INLINE void
2861dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx)
2862{
2863 bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */
2864 dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */
2865}
2866
2867/**
2868 * DHD load balanced tx completion tasklet handler, that will perform the
2869 * freeing of packets on the selected CPU. Packet pointers are delivered to
2870 * this tasklet via the tx complete workq.
2871 */
2872void
2873dhd_lb_tx_compl_handler(unsigned long data)
2874{
2875 int elem_ix;
2876 void *pkt, **elem;
2877 dmaaddr_t pa;
2878 uint32 pa_len;
2879 dhd_pub_t *dhd = (dhd_pub_t *)data;
2880 dhd_prot_t *prot = dhd->prot;
2881 bcm_workq_t *workq = &prot->tx_compl_cons;
2882 uint32 count = 0;
2883
2884 int curr_cpu;
2885 curr_cpu = get_cpu();
2886 put_cpu();
2887
2888 DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd);
2889
2890 while (1) {
2891 elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
2892
2893 if (elem_ix == BCM_RING_EMPTY) {
2894 break;
2895 }
2896
2897 elem = WORKQ_ELEMENT(void *, workq, elem_ix);
2898 pkt = *elem;
2899
2900 DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt));
2901
2902 OSL_PREFETCH(PKTTAG(pkt));
2903 OSL_PREFETCH(pkt);
2904
2905 pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt));
2906 pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt));
2907
2908 DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0);
2909#if defined(BCMPCIE)
2910 dhd_txcomplete(dhd, pkt, true);
2911#endif
2912
2913 PKTFREE(dhd->osh, pkt, TRUE);
2914 count++;
2915 }
2916
2917 /* smp_wmb(); */
2918 bcm_workq_cons_sync(workq);
2919 DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count);
2920}
2921#endif /* DHD_LB_TXC */
2922
2923#if defined(DHD_LB_RXC)
2924
2925/**
2926 * dhd_lb_dispatch_rx_compl - load balance by dispatching Rx completion work
2927 * to other CPU cores
2928 */
2929static INLINE void
2930dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp)
2931{
2932 dhd_prot_t *prot = dhdp->prot;
2933	/* Schedule the tasklet only if we have to */
2934 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) {
2935 /* flush WR index */
2936 bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod);
2937 dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */
2938 }
2939}
2940
2941void
2942dhd_lb_rx_compl_handler(unsigned long data)
2943{
2944 dhd_pub_t *dhd = (dhd_pub_t *)data;
2945 bcm_workq_t *workq = &dhd->prot->rx_compl_cons;
2946
2947 DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd);
2948
2949 dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */
2950 bcm_workq_cons_sync(workq);
2951}
2952#endif /* DHD_LB_RXC */
2953#endif /* DHD_LB */
2954
2955void
2956dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset)
2957{
2958 dhd_prot_t *prot = dhd->prot;
2959 prot->rx_dataoffset = rx_offset;
2960}
2961
2962static int
2963dhd_check_create_info_rings(dhd_pub_t *dhd)
2964{
2965 dhd_prot_t *prot = dhd->prot;
2966 int ret = BCME_ERROR;
2967 uint16 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS;
2968
2969 if (prot->h2dring_info_subn && prot->d2hring_info_cpln) {
2970		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
2971 }
2972
2973 if (prot->h2dring_info_subn == NULL) {
2974 prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
2975
2976 if (prot->h2dring_info_subn == NULL) {
2977 DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n",
2978 __FUNCTION__));
2979 return BCME_NOMEM;
2980 }
2981
2982 DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__));
2983 ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo",
2984 H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE,
2985 ringid);
2986 if (ret != BCME_OK) {
2987 DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n",
2988 __FUNCTION__));
2989 goto err;
2990 }
2991 }
2992
2993 if (prot->d2hring_info_cpln == NULL) {
2994 prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t));
2995
2996 if (prot->d2hring_info_cpln == NULL) {
2997			DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n",
2998 __FUNCTION__));
2999 return BCME_NOMEM;
3000 }
3001
3002 /* create the debug info completion ring next to debug info submit ring
3003 * ringid = id next to debug info submit ring
3004 */
3005 ringid = ringid + 1;
3006
3007 DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__));
3008 ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo",
3009 D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE,
3010 ringid);
3011 if (ret != BCME_OK) {
3012 DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n",
3013 __FUNCTION__));
3014 dhd_prot_ring_detach(dhd, prot->h2dring_info_subn);
3015 goto err;
3016 }
3017 }
3018
3019 return ret;
3020err:
3021 MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3022 prot->h2dring_info_subn = NULL;
3023
3024 if (prot->d2hring_info_cpln) {
3025 MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3026 prot->d2hring_info_cpln = NULL;
3027 }
3028 return ret;
3029} /* dhd_check_create_info_rings */
3030
3031int
3032dhd_prot_init_info_rings(dhd_pub_t *dhd)
3033{
3034 dhd_prot_t *prot = dhd->prot;
3035 int ret = BCME_OK;
3036
3037 if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) {
3038 DHD_ERROR(("%s: info rings aren't created! \n",
3039 __FUNCTION__));
3040 return ret;
3041 }
3042
3043 if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) {
3044 DHD_INFO(("Info completion ring was created!\n"));
3045 return ret;
3046 }
3047
3048 DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx));
3049 ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln);
3050 if (ret != BCME_OK)
3051 return ret;
3052
3053 prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL;
3054
3055 DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx));
3056 prot->h2dring_info_subn->n_completion_ids = 1;
3057 prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx;
3058
3059 ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn);
3060
3061	/* Note that there is no way to delete the d2h or h2d ring if either creation fails,
3062	 * so we cannot clean up if one ring was created while the other failed.
3063 */
3064 return ret;
3065} /* dhd_prot_init_info_rings */
3066
3067static void
3068dhd_prot_detach_info_rings(dhd_pub_t *dhd)
3069{
3070 if (dhd->prot->h2dring_info_subn) {
3071 dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn);
3072 MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t));
3073 dhd->prot->h2dring_info_subn = NULL;
3074 }
3075 if (dhd->prot->d2hring_info_cpln) {
3076 dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln);
3077 MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t));
3078 dhd->prot->d2hring_info_cpln = NULL;
3079 }
3080}
3081
3082/**
3083 * Initialize protocol: sync w/dongle state.
3084 * Sets dongle media info (iswl, drv_version, mac address).
3085 */
3086int dhd_sync_with_dongle(dhd_pub_t *dhd)
3087{
3088 int ret = 0;
3089 wlc_rev_info_t revinfo;
3090
3091
3092 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3093
3094 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
3095
3096 /* Post ts buffer after shim layer is attached */
3097 ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
3098
3099
3100#ifdef DHD_FW_COREDUMP
3101 /* Check the memdump capability */
3102 dhd_get_memdump_info(dhd);
3103#endif /* DHD_FW_COREDUMP */
3104#ifdef BCMASSERT_LOG
3105 dhd_get_assert_info(dhd);
3106#endif /* BCMASSERT_LOG */
3107
3108 /* Get the device rev info */
3109 memset(&revinfo, 0, sizeof(revinfo));
3110 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0);
3111 if (ret < 0) {
3112 DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__));
3113 goto done;
3114 }
3115 DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__,
3116 revinfo.deviceid, revinfo.vendorid, revinfo.chipnum));
3117
3118 DHD_SSSR_DUMP_INIT(dhd);
3119
3120 dhd_process_cid_mac(dhd, TRUE);
3121 ret = dhd_preinit_ioctls(dhd);
3122 dhd_process_cid_mac(dhd, FALSE);
3123
3124 /* Always assumes wl for now */
3125 dhd->iswl = TRUE;
3126done:
3127 return ret;
3128} /* dhd_sync_with_dongle */
3129
3130
3131#define DHD_DBG_SHOW_METADATA 0
3132
3133#if DHD_DBG_SHOW_METADATA
3134static void BCMFASTPATH
3135dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len)
3136{
3137 uint8 tlv_t;
3138 uint8 tlv_l;
3139 uint8 *tlv_v = (uint8 *)ptr;
3140
3141 if (len <= BCMPCIE_D2H_METADATA_HDRLEN)
3142 return;
3143
3144 len -= BCMPCIE_D2H_METADATA_HDRLEN;
3145 tlv_v += BCMPCIE_D2H_METADATA_HDRLEN;
3146
3147 while (len > TLV_HDR_LEN) {
3148 tlv_t = tlv_v[TLV_TAG_OFF];
3149 tlv_l = tlv_v[TLV_LEN_OFF];
3150
3151 len -= TLV_HDR_LEN;
3152 tlv_v += TLV_HDR_LEN;
3153 if (len < tlv_l)
3154 break;
3155 if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER))
3156 break;
3157
3158 switch (tlv_t) {
3159 case WLFC_CTL_TYPE_TXSTATUS: {
3160 uint32 txs;
3161 memcpy(&txs, tlv_v, sizeof(uint32));
3162 if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) {
3163 printf("METADATA TX_STATUS: %08x\n", txs);
3164 } else {
3165 wl_txstatus_additional_info_t tx_add_info;
3166 memcpy(&tx_add_info, tlv_v + sizeof(uint32),
3167 sizeof(wl_txstatus_additional_info_t));
3168 printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]"
3169 " rate = %08x tries = %d - %d\n", txs,
3170 tx_add_info.seq, tx_add_info.entry_ts,
3171 tx_add_info.enq_ts, tx_add_info.last_ts,
3172 tx_add_info.rspec, tx_add_info.rts_cnt,
3173 tx_add_info.tx_cnt);
3174 }
3175 } break;
3176
3177 case WLFC_CTL_TYPE_RSSI: {
3178 if (tlv_l == 1)
3179 printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v);
3180 else
3181 printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n",
3182 (*(tlv_v + 3) << 8) | *(tlv_v + 2),
3183 (int8)(*tlv_v), *(tlv_v + 1));
3184 } break;
3185
3186 case WLFC_CTL_TYPE_FIFO_CREDITBACK:
3187 bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l);
3188 break;
3189
3190 case WLFC_CTL_TYPE_TX_ENTRY_STAMP:
3191 bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l);
3192 break;
3193
3194 case WLFC_CTL_TYPE_RX_STAMP: {
3195 struct {
3196 uint32 rspec;
3197 uint32 bus_time;
3198 uint32 wlan_time;
3199 } rx_tmstamp;
3200 memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp));
 3201 			printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n",
3202 rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec);
3203 } break;
3204
3205 case WLFC_CTL_TYPE_TRANS_ID:
3206 bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l);
3207 break;
3208
3209 case WLFC_CTL_TYPE_COMP_TXSTATUS:
3210 bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l);
3211 break;
3212
3213 default:
3214 bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l);
3215 break;
3216 }
3217
3218 len -= tlv_l;
3219 tlv_v += tlv_l;
3220 }
3221}
3222#endif /* DHD_DBG_SHOW_METADATA */
3223
3224static INLINE void BCMFASTPATH
3225dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send)
3226{
3227 if (pkt) {
3228 if (pkttype == PKTTYPE_IOCTL_RX ||
3229 pkttype == PKTTYPE_EVENT_RX ||
3230 pkttype == PKTTYPE_INFO_RX ||
3231 pkttype == PKTTYPE_TSBUF_RX) {
3232#ifdef DHD_USE_STATIC_CTRLBUF
3233 PKTFREE_STATIC(dhd->osh, pkt, send);
3234#else
3235 PKTFREE(dhd->osh, pkt, send);
3236#endif /* DHD_USE_STATIC_CTRLBUF */
3237 } else {
3238 PKTFREE(dhd->osh, pkt, send);
3239 }
3240 }
3241}
3242
3243/* dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle */
3244static INLINE void * BCMFASTPATH
3245dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid)
3246{
3247 void *PKTBUF;
3248 dmaaddr_t pa;
3249 uint32 len;
3250 void *dmah;
3251 void *secdma;
3252
3253#ifdef DHD_PCIE_PKTID
3254 if (free_pktid) {
3255 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map,
3256 pktid, pa, len, dmah, secdma, pkttype);
3257 } else {
3258 PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map,
3259 pktid, pa, len, dmah, secdma, pkttype);
3260 }
3261#else
3262 PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa,
3263 len, dmah, secdma, pkttype);
3264#endif /* DHD_PCIE_PKTID */
3265 if (PKTBUF) {
3266 {
3267 if (SECURE_DMA_ENAB(dhd->osh))
3268 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah,
3269 secdma, 0);
3270 else
3271 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
3272#ifdef DMAMAP_STATS
3273 switch (pkttype) {
3274#ifndef IOCTLRESP_USE_CONSTMEM
3275 case PKTTYPE_IOCTL_RX:
3276 dhd->dma_stats.ioctl_rx--;
3277 dhd->dma_stats.ioctl_rx_sz -= len;
3278 break;
3279#endif /* IOCTLRESP_USE_CONSTMEM */
3280 case PKTTYPE_EVENT_RX:
3281 dhd->dma_stats.event_rx--;
3282 dhd->dma_stats.event_rx_sz -= len;
3283 break;
3284 case PKTTYPE_INFO_RX:
3285 dhd->dma_stats.info_rx--;
3286 dhd->dma_stats.info_rx_sz -= len;
3287 break;
3288 case PKTTYPE_TSBUF_RX:
3289 dhd->dma_stats.tsbuf_rx--;
3290 dhd->dma_stats.tsbuf_rx_sz -= len;
3291 break;
3292 }
3293#endif /* DMAMAP_STATS */
3294 }
3295 }
3296
3297 return PKTBUF;
3298}
3299
3300#ifdef IOCTLRESP_USE_CONSTMEM
3301static INLINE void BCMFASTPATH
3302dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf)
3303{
3304 memset(retbuf, 0, sizeof(dhd_dma_buf_t));
3305 retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
3306 retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX);
3307
3308 return;
3309}
3310#endif
3311
3312#ifdef PCIE_INB_DW
3313static int
3314dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus)
3315{
3316 unsigned long flags = 0;
3317
3318 if (INBAND_DW_ENAB(bus)) {
3319 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
3320 bus->host_active_cnt++;
3321 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
3322 if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) {
3323 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
3324 bus->host_active_cnt--;
3325 dhd_bus_inb_ack_pending_ds_req(bus);
3326 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
3327 return BCME_ERROR;
3328 }
3329 }
3330
3331 return BCME_OK;
3332}
3333
3334static void
3335dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus)
3336{
3337 unsigned long flags = 0;
3338 if (INBAND_DW_ENAB(bus)) {
3339 DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags);
3340 bus->host_active_cnt--;
3341 dhd_bus_inb_ack_pending_ds_req(bus);
3342 DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags);
3343 }
3344}
3345#endif /* PCIE_INB_DW */
3346
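/**
 * Top up the H2D rx post ring. Buffers are posted in bursts of RX_BUF_BURST until the
 * number of outstanding host buffers reaches max_rxbufpost or the ring has no more
 * space; a retry counter bounds the loop so it cannot spin when the dongle is not
 * draining the ring.
 */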
3347static void BCMFASTPATH
3348dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid)
3349{
3350 dhd_prot_t *prot = dhd->prot;
3351 int16 fillbufs;
3352 uint16 cnt = 256;
3353 int retcount = 0;
3354
3355 fillbufs = prot->max_rxbufpost - prot->rxbufpost;
3356 while (fillbufs >= RX_BUF_BURST) {
3357 cnt--;
3358 if (cnt == 0) {
3359 /* find a better way to reschedule rx buf post if space not available */
3360 DHD_ERROR(("h2d rx post ring not available to post host buffers \n"));
3361 DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost));
3362 break;
3363 }
3364
 3365 		/* Post in bursts of at most RX_BUF_BURST buffers at a time */
3366 fillbufs = MIN(fillbufs, RX_BUF_BURST);
3367
3368 /* Post buffers */
3369 retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid);
3370
3371 if (retcount >= 0) {
3372 prot->rxbufpost += (uint16)retcount;
3373#ifdef DHD_LB_RXC
3374 /* dhd_prot_rxbuf_post returns the number of buffers posted */
3375 DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount);
3376#endif /* DHD_LB_RXC */
3377 /* how many more to post */
3378 fillbufs = prot->max_rxbufpost - prot->rxbufpost;
3379 } else {
3380 /* Make sure we don't run loop any further */
3381 fillbufs = 0;
3382 }
3383 }
3384}
3385
3386/** Post 'count' no of rx buffers to dongle */
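/*
 * Posting is done in two phases: packets are first allocated and DMA-mapped into a
 * local scratch array (va, pa and length per packet), and only then is rx_lock taken
 * to claim ring space, assign pktids and fill in the work items. Packets that could
 * not be placed on the ring are unmapped and freed in the cleanup path; the number
 * actually posted is returned.
 */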
3387static int BCMFASTPATH
3388dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid)
3389{
3390 void *p, **pktbuf;
3391 uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3392 uint8 *rxbuf_post_tmp;
3393 host_rxbuf_post_t *rxbuf_post;
3394 void *msg_start;
3395 dmaaddr_t pa, *pktbuf_pa;
3396 uint32 *pktlen;
3397 uint16 i = 0, alloced = 0;
3398 unsigned long flags;
3399 uint32 pktid;
3400 dhd_prot_t *prot = dhd->prot;
3401 msgbuf_ring_t *ring = &prot->h2dring_rxp_subn;
3402 void *lcl_buf;
3403 uint16 lcl_buf_size;
3404
3405#ifdef PCIE_INB_DW
3406 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
3407 return BCME_ERROR;
3408#endif /* PCIE_INB_DW */
3409
3410 /* allocate a local buffer to store pkt buffer va, pa and length */
3411 lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) *
3412 RX_BUF_BURST;
3413 lcl_buf = MALLOC(dhd->osh, lcl_buf_size);
3414 if (!lcl_buf) {
3415 DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__));
3416#ifdef PCIE_INB_DW
3417 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
3418#endif
3419 return 0;
3420 }
3421 pktbuf = lcl_buf;
3422 pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST);
3423 pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST);
3424
3425 for (i = 0; i < count; i++) {
3426 if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) {
3427 DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__));
3428 dhd->rx_pktgetfail++;
3429 break;
3430 }
3431
3432 pktlen[i] = PKTLEN(dhd->osh, p);
3433 if (SECURE_DMA_ENAB(dhd->osh)) {
3434 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i],
3435 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3436 }
3437#ifndef BCM_SECURE_DMA
3438 else
3439 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0);
3440#endif /* #ifndef BCM_SECURE_DMA */
3441
3442 if (PHYSADDRISZERO(pa)) {
3443 PKTFREE(dhd->osh, p, FALSE);
3444 DHD_ERROR(("Invalid phyaddr 0\n"));
3445 ASSERT(0);
3446 break;
3447 }
3448#ifdef DMAMAP_STATS
3449 dhd->dma_stats.rxdata++;
3450 dhd->dma_stats.rxdata_sz += pktlen[i];
3451#endif /* DMAMAP_STATS */
3452
3453 PKTPULL(dhd->osh, p, prot->rx_metadata_offset);
3454 pktlen[i] = PKTLEN(dhd->osh, p);
3455 pktbuf[i] = p;
3456 pktbuf_pa[i] = pa;
3457 }
3458
3459 /* only post what we have */
3460 count = i;
3461
3462 /* grab the rx lock to allocate pktid and post on ring */
3463 DHD_SPIN_LOCK(prot->rx_lock, flags);
3464
3465 /* Claim space for exactly 'count' no of messages, for mitigation purpose */
3466 msg_start = (void *)
3467 dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE);
3468 if (msg_start == NULL) {
3469 DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
3470 goto cleanup;
3471 }
 3472 	/* if msg_start != NULL, we should have alloced space for at least 1 item */
3473 ASSERT(alloced > 0);
3474
3475 rxbuf_post_tmp = (uint8*)msg_start;
3476
3477 for (i = 0; i < alloced; i++) {
3478 rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp;
3479 p = pktbuf[i];
3480 pa = pktbuf_pa[i];
3481
3482#if defined(DHD_LB_RXC)
3483 if (use_rsv_pktid == TRUE) {
3484 bcm_workq_t *workq = &prot->rx_compl_cons;
3485 int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
3486
3487 if (elem_ix == BCM_RING_EMPTY) {
3488 DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__));
3489 pktid = DHD_PKTID_INVALID;
3490 goto alloc_pkt_id;
3491 } else {
3492 uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
3493 pktid = *elem;
3494 }
3495
3496 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3497
3498 /* Now populate the previous locker with valid information */
3499 if (pktid != DHD_PKTID_INVALID) {
3500 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map,
3501 p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL,
3502 PKTTYPE_DATA_RX);
3503 }
3504 } else
3505#endif /* ! DHD_LB_RXC */
3506 {
3507#if defined(DHD_LB_RXC)
3508alloc_pkt_id:
3509#endif /* DHD_LB_RXC */
3510 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa,
3511 pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX);
3512#if defined(DHD_PCIE_PKTID)
3513 if (pktid == DHD_PKTID_INVALID) {
3514 break;
3515 }
3516#endif /* DHD_PCIE_PKTID */
3517 }
3518
3519 /* Common msg header */
3520 rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST;
3521 rxbuf_post->cmn_hdr.if_id = 0;
3522 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
3523 rxbuf_post->cmn_hdr.flags = ring->current_phase;
3524 ring->seqnum++;
3525 rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]);
3526 rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3527 rxbuf_post->data_buf_addr.low_addr =
3528 htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset);
3529
3530 if (prot->rx_metadata_offset) {
3531 rxbuf_post->metadata_buf_len = prot->rx_metadata_offset;
3532 rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3533 rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
3534 } else {
3535 rxbuf_post->metadata_buf_len = 0;
3536 rxbuf_post->metadata_buf_addr.high_addr = 0;
3537 rxbuf_post->metadata_buf_addr.low_addr = 0;
3538 }
3539
3540#ifdef DHD_PKTID_AUDIT_RING
3541 DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC);
3542#endif /* DHD_PKTID_AUDIT_RING */
3543
3544 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
3545
3546 /* Move rxbuf_post_tmp to next item */
3547 rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len;
3548 }
3549
3550 if (i < alloced) {
3551 if (ring->wr < (alloced - i))
3552 ring->wr = ring->max_items - (alloced - i);
3553 else
3554 ring->wr -= (alloced - i);
3555
3556 if (ring->wr == 0) {
3557 DHD_INFO(("%s: flipping the phase now\n", ring->name));
3558 ring->current_phase = ring->current_phase ?
3559 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3560 }
3561
3562 alloced = i;
3563 }
3564
3565 /* update ring's WR index and ring doorbell to dongle */
3566 if (alloced > 0) {
3567 unsigned long flags1;
3568 DHD_GENERAL_LOCK(dhd, flags1);
3569 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
3570 DHD_GENERAL_UNLOCK(dhd, flags1);
3571 }
3572
3573 DHD_SPIN_UNLOCK(prot->rx_lock, flags);
3574
3575cleanup:
3576 for (i = alloced; i < count; i++) {
3577 p = pktbuf[i];
3578 pa = pktbuf_pa[i];
3579
3580 if (SECURE_DMA_ENAB(dhd->osh))
3581 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0,
3582 DHD_DMAH_NULL, ring->dma_buf.secdma, 0);
3583 else
3584 DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL);
3585 PKTFREE(dhd->osh, p, FALSE);
3586 }
3587
3588 MFREE(dhd->osh, lcl_buf, lcl_buf_size);
3589#ifdef PCIE_INB_DW
3590 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
3591#endif
3592 return alloced;
 3593 } /* dhd_prot_rxbuf_post */
3594
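/**
 * Post receive buffers to the H2D info (debug) submit ring, keeping up to
 * max_infobufpost buffers outstanding. Each work item carries a pktid from the
 * control pktid map together with the host buffer address and length; the write
 * index is published and the doorbell rung once the batch is filled in.
 */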
3595static int
3596dhd_prot_infobufpost(dhd_pub_t *dhd)
3597{
3598 unsigned long flags;
3599 uint32 pktid;
3600 dhd_prot_t *prot = dhd->prot;
3601 msgbuf_ring_t *ring = prot->h2dring_info_subn;
3602 uint16 alloced = 0;
3603 uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3604 uint32 pktlen;
3605 info_buf_post_msg_t *infobuf_post;
3606 uint8 *infobuf_post_tmp;
3607 void *p;
3608 void* msg_start;
3609 uint8 i = 0;
3610 dmaaddr_t pa;
3611 int16 count;
3612
3613 if (ring == NULL)
3614 return 0;
3615
3616 if (ring->inited != TRUE)
3617 return 0;
3618 if (prot->max_infobufpost == 0)
3619 return 0;
3620
3621 count = prot->max_infobufpost - prot->infobufpost;
3622
3623 if (count <= 0) {
3624 DHD_INFO(("%s: Cannot post more than max info resp buffers\n",
3625 __FUNCTION__));
3626 return 0;
3627 }
3628
3629#ifdef PCIE_INB_DW
3630 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
3631 return BCME_ERROR;
3632#endif /* PCIE_INB_DW */
3633
3634 DHD_GENERAL_LOCK(dhd, flags);
3635 /* Claim space for exactly 'count' no of messages, for mitigation purpose */
3636 msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE);
3637 DHD_GENERAL_UNLOCK(dhd, flags);
3638
3639 if (msg_start == NULL) {
3640 DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__));
3641#ifdef PCIE_INB_DW
3642 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
3643#endif
3644 return -1;
3645 }
3646
 3647 	/* if msg_start != NULL, we should have alloced space for at least 1 item */
3648 ASSERT(alloced > 0);
3649
3650 infobuf_post_tmp = (uint8*) msg_start;
3651
3652 /* loop through each allocated message in the host ring */
3653 for (i = 0; i < alloced; i++) {
3654 infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp;
3655 /* Create a rx buffer */
3656#ifdef DHD_USE_STATIC_CTRLBUF
3657 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
3658#else
3659 p = PKTGET(dhd->osh, pktsz, FALSE);
3660#endif /* DHD_USE_STATIC_CTRLBUF */
3661 if (p == NULL) {
3662 DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__));
3663 dhd->rx_pktgetfail++;
3664 break;
3665 }
3666 pktlen = PKTLEN(dhd->osh, p);
3667 if (SECURE_DMA_ENAB(dhd->osh)) {
3668 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
3669 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3670 }
3671#ifndef BCM_SECURE_DMA
3672 else
3673 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
3674#endif /* #ifndef BCM_SECURE_DMA */
3675 if (PHYSADDRISZERO(pa)) {
3676 if (SECURE_DMA_ENAB(dhd->osh)) {
3677 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3678 ring->dma_buf.secdma, 0);
3679 }
3680 else
3681 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3682#ifdef DHD_USE_STATIC_CTRLBUF
3683 PKTFREE_STATIC(dhd->osh, p, FALSE);
3684#else
3685 PKTFREE(dhd->osh, p, FALSE);
3686#endif /* DHD_USE_STATIC_CTRLBUF */
3687 DHD_ERROR(("Invalid phyaddr 0\n"));
3688 ASSERT(0);
3689 break;
3690 }
3691#ifdef DMAMAP_STATS
3692 dhd->dma_stats.info_rx++;
3693 dhd->dma_stats.info_rx_sz += pktlen;
3694#endif /* DMAMAP_STATS */
3695 pktlen = PKTLEN(dhd->osh, p);
3696
3697 /* Common msg header */
3698 infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST;
3699 infobuf_post->cmn_hdr.if_id = 0;
3700 infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
3701 infobuf_post->cmn_hdr.flags = ring->current_phase;
3702 ring->seqnum++;
3703
3704#if defined(DHD_PCIE_PKTID)
3705 /* get the lock before calling DHD_NATIVE_TO_PKTID */
3706 DHD_GENERAL_LOCK(dhd, flags);
3707#endif /* DHD_PCIE_PKTID */
3708
3709 pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa,
3710 pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX);
3711
3712
3713#if defined(DHD_PCIE_PKTID)
3714 /* free lock */
3715 DHD_GENERAL_UNLOCK(dhd, flags);
3716
3717 if (pktid == DHD_PKTID_INVALID) {
3718 if (SECURE_DMA_ENAB(dhd->osh)) {
3719 DHD_GENERAL_LOCK(dhd, flags);
3720 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0,
3721 ring->dma_buf.secdma, 0);
3722 DHD_GENERAL_UNLOCK(dhd, flags);
3723 } else
3724 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0);
3725
3726#ifdef DHD_USE_STATIC_CTRLBUF
3727 PKTFREE_STATIC(dhd->osh, p, FALSE);
3728#else
3729 PKTFREE(dhd->osh, p, FALSE);
3730#endif /* DHD_USE_STATIC_CTRLBUF */
3731 DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
3732 break;
3733 }
3734#endif /* DHD_PCIE_PKTID */
3735
3736 infobuf_post->host_buf_len = htol16((uint16)pktlen);
3737 infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
3738 infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
3739
3740#ifdef DHD_PKTID_AUDIT_RING
3741 DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC);
3742#endif /* DHD_PKTID_AUDIT_RING */
3743
3744 DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n",
3745 infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr,
3746 infobuf_post->host_buf_addr.high_addr));
3747
3748 infobuf_post->cmn_hdr.request_id = htol32(pktid);
3749 /* Move rxbuf_post_tmp to next item */
3750 infobuf_post_tmp = infobuf_post_tmp + ring->item_len;
3751 }
3752
3753 if (i < alloced) {
3754 if (ring->wr < (alloced - i))
3755 ring->wr = ring->max_items - (alloced - i);
3756 else
3757 ring->wr -= (alloced - i);
3758
3759 alloced = i;
3760 if (alloced && ring->wr == 0) {
3761 DHD_INFO(("%s: flipping the phase now\n", ring->name));
3762 ring->current_phase = ring->current_phase ?
3763 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3764 }
3765 }
3766
3767 /* Update the write pointer in TCM & ring bell */
3768 if (alloced > 0) {
3769 prot->infobufpost += alloced;
3770 DHD_INFO(("allocated %d buffers for info ring\n", alloced));
3771 DHD_GENERAL_LOCK(dhd, flags);
3772 dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced);
3773 DHD_GENERAL_UNLOCK(dhd, flags);
3774 }
3775#ifdef PCIE_INB_DW
3776 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
3777#endif
3778 return alloced;
3779} /* dhd_prot_infobufpost */
3780
3781#ifdef IOCTLRESP_USE_CONSTMEM
3782static int
3783alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
3784{
3785 int err;
3786 memset(retbuf, 0, sizeof(dhd_dma_buf_t));
3787
3788 if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) {
3789 DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err));
3790 ASSERT(0);
3791 return BCME_NOMEM;
3792 }
3793
3794 return BCME_OK;
3795}
3796
3797static void
3798free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf)
3799{
3800 /* retbuf (declared on stack) not fully populated ... */
3801 if (retbuf->va) {
3802 uint32 dma_pad;
3803 dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
3804 retbuf->len = IOCT_RETBUF_SIZE;
3805 retbuf->_alloced = retbuf->len + dma_pad;
3806 }
3807
3808 dhd_dma_buf_free(dhd, retbuf);
3809 return;
3810}
3811#endif /* IOCTLRESP_USE_CONSTMEM */
3812
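/**
 * Post a single control-path receive buffer (ioctl response, event or timestamp
 * buffer, selected by msg_type) to the H2D control submit ring. With
 * IOCTLRESP_USE_CONSTMEM, ioctl response buffers come from a dedicated DMA-able
 * allocation instead of the packet pool. Returns 1 on success, -1 on failure.
 */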
3813static int
3814dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type)
3815{
3816 void *p;
3817 uint16 pktsz;
3818 ioctl_resp_evt_buf_post_msg_t *rxbuf_post;
3819 dmaaddr_t pa;
3820 uint32 pktlen;
3821 dhd_prot_t *prot = dhd->prot;
3822 uint16 alloced = 0;
3823 unsigned long flags;
3824 dhd_dma_buf_t retbuf;
3825 void *dmah = NULL;
3826 uint32 pktid;
3827 void *map_handle;
3828 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
3829 bool non_ioctl_resp_buf = 0;
3830 dhd_pkttype_t buf_type;
3831
3832 if (dhd->busstate == DHD_BUS_DOWN) {
3833 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
3834 return -1;
3835 }
3836 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
3837
3838 if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST)
3839 buf_type = PKTTYPE_IOCTL_RX;
3840 else if (msg_type == MSG_TYPE_EVENT_BUF_POST)
3841 buf_type = PKTTYPE_EVENT_RX;
3842 else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)
3843 buf_type = PKTTYPE_TSBUF_RX;
3844 else {
3845 DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type));
3846 return -1;
3847 }
3848
3849
3850 if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST))
3851 non_ioctl_resp_buf = TRUE;
3852 else
3853 non_ioctl_resp_buf = FALSE;
3854
3855 if (non_ioctl_resp_buf) {
 3856 		/* Allocate packet for a non-ioctl-response (event/timestamp) buffer post */
3857 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ;
3858 } else {
3859 /* Allocate packet for ctrl/ioctl buffer post */
3860 pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ;
3861 }
3862
3863#ifdef IOCTLRESP_USE_CONSTMEM
3864 if (!non_ioctl_resp_buf) {
3865 if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) {
3866 DHD_ERROR(("Could not allocate IOCTL response buffer\n"));
3867 return -1;
3868 }
3869 ASSERT(retbuf.len == IOCT_RETBUF_SIZE);
3870 p = retbuf.va;
3871 pktlen = retbuf.len;
3872 pa = retbuf.pa;
3873 dmah = retbuf.dmah;
3874 } else
3875#endif /* IOCTLRESP_USE_CONSTMEM */
3876 {
3877#ifdef DHD_USE_STATIC_CTRLBUF
3878 p = PKTGET_STATIC(dhd->osh, pktsz, FALSE);
3879#else
3880 p = PKTGET(dhd->osh, pktsz, FALSE);
3881#endif /* DHD_USE_STATIC_CTRLBUF */
3882 if (p == NULL) {
3883 DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n",
3884 __FUNCTION__, __LINE__, non_ioctl_resp_buf ?
3885 "EVENT" : "IOCTL RESP"));
3886 dhd->rx_pktgetfail++;
3887 return -1;
3888 }
3889
3890 pktlen = PKTLEN(dhd->osh, p);
3891
3892 if (SECURE_DMA_ENAB(dhd->osh)) {
3893 DHD_GENERAL_LOCK(dhd, flags);
3894 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen,
3895 DMA_RX, p, 0, ring->dma_buf.secdma, 0);
3896 DHD_GENERAL_UNLOCK(dhd, flags);
3897 }
3898#ifndef BCM_SECURE_DMA
3899 else
3900 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0);
3901#endif /* #ifndef BCM_SECURE_DMA */
3902
3903 if (PHYSADDRISZERO(pa)) {
3904 DHD_ERROR(("Invalid physaddr 0\n"));
3905 ASSERT(0);
3906 goto free_pkt_return;
3907 }
3908
3909#ifdef DMAMAP_STATS
3910 switch (buf_type) {
3911#ifndef IOCTLRESP_USE_CONSTMEM
3912 case PKTTYPE_IOCTL_RX:
3913 dhd->dma_stats.ioctl_rx++;
3914 dhd->dma_stats.ioctl_rx_sz += pktlen;
3915 break;
3916#endif /* !IOCTLRESP_USE_CONSTMEM */
3917 case PKTTYPE_EVENT_RX:
3918 dhd->dma_stats.event_rx++;
3919 dhd->dma_stats.event_rx_sz += pktlen;
3920 break;
3921 case PKTTYPE_TSBUF_RX:
3922 dhd->dma_stats.tsbuf_rx++;
3923 dhd->dma_stats.tsbuf_rx_sz += pktlen;
3924 break;
3925 default:
3926 break;
3927 }
3928#endif /* DMAMAP_STATS */
3929
3930 }
3931#ifdef PCIE_INB_DW
3932 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
3933 return BCME_ERROR;
3934#endif /* PCIE_INB_DW */
3935
3936 DHD_GENERAL_LOCK(dhd, flags);
3937
3938 rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *)
3939 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
3940
3941 if (rxbuf_post == NULL) {
3942 DHD_GENERAL_UNLOCK(dhd, flags);
3943 DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n",
3944 __FUNCTION__, __LINE__));
3945
3946#ifdef IOCTLRESP_USE_CONSTMEM
3947 if (non_ioctl_resp_buf)
3948#endif /* IOCTLRESP_USE_CONSTMEM */
3949 {
3950 if (SECURE_DMA_ENAB(dhd->osh)) {
3951 DHD_GENERAL_LOCK(dhd, flags);
3952 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
3953 ring->dma_buf.secdma, 0);
3954 DHD_GENERAL_UNLOCK(dhd, flags);
3955 } else {
3956 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3957 }
3958 }
3959 goto free_pkt_return;
3960 }
3961
3962 /* CMN msg header */
3963 rxbuf_post->cmn_hdr.msg_type = msg_type;
3964
3965#ifdef IOCTLRESP_USE_CONSTMEM
3966 if (!non_ioctl_resp_buf) {
3967 map_handle = dhd->prot->pktid_map_handle_ioctl;
3968 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah,
3969 ring->dma_buf.secdma, buf_type);
3970 } else
3971#endif /* IOCTLRESP_USE_CONSTMEM */
3972 {
3973 map_handle = dhd->prot->pktid_ctrl_map;
3974 pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle,
3975 p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma,
3976 buf_type);
3977 }
3978
3979 if (pktid == DHD_PKTID_INVALID) {
3980 if (ring->wr == 0) {
3981 ring->wr = ring->max_items - 1;
3982 } else {
3983 ring->wr--;
3984 if (ring->wr == 0) {
3985 ring->current_phase = ring->current_phase ? 0 :
3986 BCMPCIE_CMNHDR_PHASE_BIT_INIT;
3987 }
3988 }
3989 DHD_GENERAL_UNLOCK(dhd, flags);
3990 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
3991 DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
3992 goto free_pkt_return;
3993 }
3994
3995#ifdef DHD_PKTID_AUDIT_RING
3996 DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC);
3997#endif /* DHD_PKTID_AUDIT_RING */
3998
3999 rxbuf_post->cmn_hdr.request_id = htol32(pktid);
4000 rxbuf_post->cmn_hdr.if_id = 0;
4001 rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
4002 ring->seqnum++;
4003 rxbuf_post->cmn_hdr.flags = ring->current_phase;
4004
4005#if defined(DHD_PCIE_PKTID)
4006 if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) {
4007 if (ring->wr == 0) {
4008 ring->wr = ring->max_items - 1;
4009 } else {
4010 if (ring->wr == 0) {
4011 ring->current_phase = ring->current_phase ? 0 :
4012 BCMPCIE_CMNHDR_PHASE_BIT_INIT;
4013 }
4014 }
4015 DHD_GENERAL_UNLOCK(dhd, flags);
4016#ifdef IOCTLRESP_USE_CONSTMEM
4017 if (non_ioctl_resp_buf)
4018#endif /* IOCTLRESP_USE_CONSTMEM */
4019 {
4020 if (SECURE_DMA_ENAB(dhd->osh)) {
4021 DHD_GENERAL_LOCK(dhd, flags);
4022 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL,
4023 ring->dma_buf.secdma, 0);
4024 DHD_GENERAL_UNLOCK(dhd, flags);
4025 } else
4026 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL);
4027 }
4028 goto free_pkt_return;
4029 }
4030#endif /* DHD_PCIE_PKTID */
4031
4032#ifndef IOCTLRESP_USE_CONSTMEM
4033 rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p));
4034#else
4035 rxbuf_post->host_buf_len = htol16((uint16)pktlen);
4036#endif /* IOCTLRESP_USE_CONSTMEM */
4037 rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
4038 rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
4039
4040 /* update ring's WR index and ring doorbell to dongle */
4041 dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1);
4042 DHD_GENERAL_UNLOCK(dhd, flags);
4043
4044#ifdef PCIE_INB_DW
4045 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
4046#endif
4047
4048 return 1;
4049
4050free_pkt_return:
4051#ifdef IOCTLRESP_USE_CONSTMEM
4052 if (!non_ioctl_resp_buf) {
4053 free_ioctl_return_buffer(dhd, &retbuf);
4054 } else
4055#endif
4056 {
4057 dhd_prot_packet_free(dhd, p, buf_type, FALSE);
4058 }
4059
4060#ifdef PCIE_INB_DW
4061 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
4062#endif
4063
4064 return -1;
4065} /* dhd_prot_rxbufpost_ctrl */
4066
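/**
 * Post up to 'max_to_post' control-path buffers of the given msg_type, stopping at
 * the first failure. Returns the number actually posted so the caller can update
 * its posted-buffer accounting.
 */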
4067static uint16
4068dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post)
4069{
4070 uint32 i = 0;
4071 int32 ret_val;
4072
 4073 	DHD_INFO(("max to post %d, msg_type %d\n", max_to_post, msg_type));
4074
4075 if (dhd->busstate == DHD_BUS_DOWN) {
4076 DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__));
4077 return 0;
4078 }
4079
4080 while (i < max_to_post) {
4081 ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type);
4082 if (ret_val < 0)
4083 break;
4084 i++;
4085 }
4086 DHD_INFO(("posted %d buffers of type %d\n", i, msg_type));
4087 return (uint16)i;
4088}
4089
4090static void
4091dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd)
4092{
4093 dhd_prot_t *prot = dhd->prot;
4094 int max_to_post;
4095
4096 DHD_INFO(("ioctl resp buf post\n"));
4097 max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted;
4098 if (max_to_post <= 0) {
4099 DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n",
4100 __FUNCTION__));
4101 return;
4102 }
4103 prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
4104 MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post);
4105}
4106
4107static void
4108dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd)
4109{
4110 dhd_prot_t *prot = dhd->prot;
4111 int max_to_post;
4112
4113 max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted;
4114 if (max_to_post <= 0) {
4115 DHD_ERROR(("%s: Cannot post more than max event buffers\n",
4116 __FUNCTION__));
4117 return;
4118 }
4119 prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
4120 MSG_TYPE_EVENT_BUF_POST, max_to_post);
4121}
4122
4123static int
4124dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd)
4125{
4126#ifdef DHD_TIMESYNC
4127 dhd_prot_t *prot = dhd->prot;
4128 int max_to_post;
4129
4130 if (prot->active_ipc_version < 7) {
 4131 		DHD_ERROR(("not posting ts buffers, device ipc rev is %d, needs to be at least 7\n",
4132 prot->active_ipc_version));
4133 return 0;
4134 }
4135
4136 max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted;
4137 if (max_to_post <= 0) {
4138 DHD_INFO(("%s: Cannot post more than max ts buffers\n",
4139 __FUNCTION__));
4140 return 0;
4141 }
4142
4143 prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd,
4144 MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post);
4145#endif /* DHD_TIMESYNC */
4146 return 0;
4147}
4148
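/**
 * Drain the D2H info completion ring, dispatching each work item through
 * dhd_prot_process_msgtype(). At most 'bound' items are consumed per call; the
 * return value indicates whether more work may be pending so the caller can
 * reschedule.
 */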
4149bool BCMFASTPATH
4150dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound)
4151{
4152 dhd_prot_t *prot = dhd->prot;
4153 bool more = TRUE;
4154 uint n = 0;
4155 msgbuf_ring_t *ring = prot->d2hring_info_cpln;
4156
4157 if (ring == NULL)
4158 return FALSE;
4159 if (ring->inited != TRUE)
4160 return FALSE;
4161
4162 /* Process all the messages - DTOH direction */
4163 while (!dhd_is_device_removed(dhd)) {
4164 uint8 *msg_addr;
4165 uint32 msg_len;
4166
4167 if (dhd->hang_was_sent) {
4168 more = FALSE;
4169 break;
4170 }
4171
4172 /* Get the message from ring */
4173 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4174 if (msg_addr == NULL) {
4175 more = FALSE;
4176 break;
4177 }
4178
4179 /* Prefetch data to populate the cache */
4180 OSL_PREFETCH(msg_addr);
4181
4182 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
 4183 			DHD_ERROR(("%s: Error processing info cpl msgbuf of len %d\n",
4184 __FUNCTION__, msg_len));
4185 }
4186
4187 /* Update read pointer */
4188 dhd_prot_upd_read_idx(dhd, ring);
4189
4190 /* After batch processing, check RX bound */
4191 n += msg_len / ring->item_len;
4192 if (n >= bound) {
4193 break;
4194 }
4195 }
4196
4197 return more;
4198}
4199
4200/** called when DHD needs to check for 'receive complete' messages from the dongle */
4201bool BCMFASTPATH
4202dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound)
4203{
4204 bool more = FALSE;
4205 uint n = 0;
4206 dhd_prot_t *prot = dhd->prot;
4207 msgbuf_ring_t *ring = &prot->d2hring_rx_cpln;
4208 uint16 item_len = ring->item_len;
4209 host_rxbuf_cmpl_t *msg = NULL;
4210 uint8 *msg_addr;
4211 uint32 msg_len;
4212 uint16 pkt_cnt, pkt_cnt_newidx;
4213 unsigned long flags;
4214 dmaaddr_t pa;
4215 uint32 len;
4216 void *dmah;
4217 void *secdma;
4218 int ifidx = 0, if_newidx = 0;
4219 void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt;
4220 uint32 pktid;
4221 int i;
4222 uint8 sync;
4223
4224 while (1) {
4225 if (dhd_is_device_removed(dhd))
4226 break;
4227
4228 if (dhd->hang_was_sent)
4229 break;
4230
4231 pkt_cnt = 0;
4232 pktqhead = pkt_newidx = NULL;
4233 pkt_cnt_newidx = 0;
4234
4235 DHD_SPIN_LOCK(prot->rx_lock, flags);
4236
4237 /* Get the address of the next message to be read from ring */
4238 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4239 if (msg_addr == NULL) {
4240 DHD_SPIN_UNLOCK(prot->rx_lock, flags);
4241 break;
4242 }
4243
4244 while (msg_len > 0) {
4245 msg = (host_rxbuf_cmpl_t *)msg_addr;
4246
4247 /* Wait until DMA completes, then fetch msg_type */
4248 sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len);
4249 /*
4250 * Update the curr_rd to the current index in the ring, from where
4251 * the work item is fetched. This way if the fetched work item
4252 * fails in LIVELOCK, we can print the exact read index in the ring
4253 * that shows up the corrupted work item.
4254 */
4255 if ((ring->curr_rd + 1) >= ring->max_items) {
4256 ring->curr_rd = 0;
4257 } else {
4258 ring->curr_rd += 1;
4259 }
4260
4261 if (!sync) {
4262 msg_len -= item_len;
4263 msg_addr += item_len;
4264 continue;
4265 }
4266
4267 pktid = ltoh32(msg->cmn_hdr.request_id);
4268
4269#ifdef DHD_PKTID_AUDIT_RING
4270 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid,
4271 DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE);
4272#endif /* DHD_PKTID_AUDIT_RING */
4273
4274 pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa,
4275 len, dmah, secdma, PKTTYPE_DATA_RX);
4276 if (!pkt) {
4277 msg_len -= item_len;
4278 msg_addr += item_len;
4279 continue;
4280 }
4281
4282 if (SECURE_DMA_ENAB(dhd->osh))
4283 SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0,
4284 dmah, secdma, 0);
4285 else
4286 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
4287
4288#ifdef DMAMAP_STATS
4289 dhd->dma_stats.rxdata--;
4290 dhd->dma_stats.rxdata_sz -= len;
4291#endif /* DMAMAP_STATS */
4292 DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, "
4293 "pktdata %p, metalen %d\n",
4294 ltoh32(msg->cmn_hdr.request_id),
4295 ltoh16(msg->data_offset),
4296 ltoh16(msg->data_len), msg->cmn_hdr.if_id,
4297 msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt),
4298 ltoh16(msg->metadata_len)));
4299
4300 pkt_cnt++;
4301 msg_len -= item_len;
4302 msg_addr += item_len;
4303
4304#if DHD_DBG_SHOW_METADATA
4305 if (prot->metadata_dbg && prot->rx_metadata_offset &&
4306 msg->metadata_len) {
4307 uchar *ptr;
4308 ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset);
4309 /* header followed by data */
4310 bcm_print_bytes("rxmetadata", ptr, msg->metadata_len);
4311 dhd_prot_print_metadata(dhd, ptr, msg->metadata_len);
4312 }
4313#endif /* DHD_DBG_SHOW_METADATA */
4314
4315 /* data_offset from buf start */
4316 if (ltoh16(msg->data_offset)) {
4317 /* data offset given from dongle after split rx */
4318 PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset));
4319 }
4320 else if (prot->rx_dataoffset) {
4321 /* DMA RX offset updated through shared area */
4322 PKTPULL(dhd->osh, pkt, prot->rx_dataoffset);
4323 }
4324 /* Actual length of the packet */
4325 PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len));
4326#if defined(WL_MONITOR)
4327 if (dhd_monitor_enabled(dhd, ifidx) &&
4328 (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) {
4329 dhd_rx_mon_pkt(dhd, msg, pkt, ifidx);
4330 continue;
4331 }
4332#endif
4333
4334 if (!pktqhead) {
4335 pktqhead = prevpkt = pkt;
4336 ifidx = msg->cmn_hdr.if_id;
4337 } else {
4338 if (ifidx != msg->cmn_hdr.if_id) {
4339 pkt_newidx = pkt;
4340 if_newidx = msg->cmn_hdr.if_id;
4341 pkt_cnt--;
4342 pkt_cnt_newidx = 1;
4343 break;
4344 } else {
4345 PKTSETNEXT(dhd->osh, prevpkt, pkt);
4346 prevpkt = pkt;
4347 }
4348 }
4349
4350#ifdef DHD_TIMESYNC
4351 if (dhd->prot->rx_ts_log_enabled) {
4352 ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts;
4353 dhd_timesync_log_rx_timestamp(dhd->ts, ifidx, ts->low, ts->high);
4354 }
4355#endif /* DHD_TIMESYNC */
4356 }
4357
4358 /* roll back read pointer for unprocessed message */
4359 if (msg_len > 0) {
4360 if (ring->rd < msg_len / item_len)
4361 ring->rd = ring->max_items - msg_len / item_len;
4362 else
4363 ring->rd -= msg_len / item_len;
4364 }
4365
4366 /* Update read pointer */
4367 dhd_prot_upd_read_idx(dhd, ring);
4368
4369 DHD_SPIN_UNLOCK(prot->rx_lock, flags);
4370
4371 pkt = pktqhead;
4372 for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) {
4373 nextpkt = PKTNEXT(dhd->osh, pkt);
4374 PKTSETNEXT(dhd->osh, pkt, NULL);
4375#ifdef DHD_LB_RXP
4376 dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx);
4377#elif defined(DHD_RX_CHAINING)
4378 dhd_rxchain_frame(dhd, pkt, ifidx);
4379#else
4380 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
4381#endif /* DHD_LB_RXP */
4382 }
4383
4384 if (pkt_newidx) {
4385#ifdef DHD_LB_RXP
4386 dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx);
4387#elif defined(DHD_RX_CHAINING)
4388 dhd_rxchain_frame(dhd, pkt_newidx, if_newidx);
4389#else
4390 dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1);
4391#endif /* DHD_LB_RXP */
4392 }
4393
4394 pkt_cnt += pkt_cnt_newidx;
4395
4396 /* Post another set of rxbufs to the device */
4397 dhd_prot_return_rxbuf(dhd, 0, pkt_cnt);
4398
4399 /* After batch processing, check RX bound */
4400 n += pkt_cnt;
4401 if (n >= bound) {
4402 more = TRUE;
4403 break;
4404 }
4405 }
4406
4407 /* Call lb_dispatch only if packets are queued */
4408 if (n) {
4409 DHD_LB_DISPATCH_RX_COMPL(dhd);
4410 DHD_LB_DISPATCH_RX_PROCESS(dhd);
4411 }
4412
4413 return more;
4414}
4415
4416/**
4417 * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring)
4418 */
4419void
4420dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring)
4421{
4422 msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring;
4423
4424 if (ring == NULL) {
4425 DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__));
4426 return;
4427 }
4428 /* Update read pointer */
4429 if (dhd->dma_d2h_ring_upd_support) {
4430 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
4431 }
4432
4433 DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n",
4434 ring->idx, flowid, ring->wr, ring->rd));
4435
4436 /* Need more logic here, but for now use it directly */
4437 dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */
4438}
4439
4440/** called when DHD needs to check for 'transmit complete' messages from the dongle */
4441bool BCMFASTPATH
4442dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound)
4443{
4444 bool more = TRUE;
4445 uint n = 0;
4446 msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln;
4447
4448 /* Process all the messages - DTOH direction */
4449 while (!dhd_is_device_removed(dhd)) {
4450 uint8 *msg_addr;
4451 uint32 msg_len;
4452
4453 if (dhd->hang_was_sent) {
4454 more = FALSE;
4455 break;
4456 }
4457
4458 /* Get the address of the next message to be read from ring */
4459 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4460 if (msg_addr == NULL) {
4461 more = FALSE;
4462 break;
4463 }
4464
4465 /* Prefetch data to populate the cache */
4466 OSL_PREFETCH(msg_addr);
4467
4468 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
4469 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
4470 __FUNCTION__, ring->name, msg_addr, msg_len));
4471 }
4472
4473 /* Write to dngl rd ptr */
4474 dhd_prot_upd_read_idx(dhd, ring);
4475
4476 /* After batch processing, check bound */
4477 n += msg_len / ring->item_len;
4478 if (n >= bound) {
4479 break;
4480 }
4481 }
4482
4483 DHD_LB_DISPATCH_TX_COMPL(dhd);
4484
4485 return more;
4486}
4487
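/**
 * Check the firmware trap buffer shared with the dongle. A word with D2H_DEV_FWHALT
 * set means the firmware has trapped; if extended trap data is flagged and a host
 * buffer was provisioned, the extended trap record is copied out for later analysis.
 * Returns the trap data word, or 0 if no trap is pending.
 */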
4488int BCMFASTPATH
4489dhd_prot_process_trapbuf(dhd_pub_t *dhd)
4490{
4491 uint32 data;
4492 dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf;
4493
4494 /* Interrupts can come in before this struct
4495 * has been initialized.
4496 */
4497 if (trap_addr->va == NULL) {
4498 DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__));
4499 return 0;
4500 }
4501
4502 OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32));
4503 data = *(uint32 *)(trap_addr->va);
4504
4505 if (data & D2H_DEV_FWHALT) {
4506 DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data));
4507 if (data & D2H_DEV_EXT_TRAP_DATA)
4508 {
4509 if (dhd->extended_trap_data) {
4510 OSL_CACHE_INV((void *)trap_addr->va,
4511 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
4512 memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va,
4513 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
4514 }
4515 DHD_ERROR(("Extended trap data available\n"));
4516 }
4517 return data;
4518 }
4519 return 0;
4520}
4521
4522/** called when DHD needs to check for 'ioctl complete' messages from the dongle */
4523int BCMFASTPATH
4524dhd_prot_process_ctrlbuf(dhd_pub_t *dhd)
4525{
4526 dhd_prot_t *prot = dhd->prot;
4527 msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln;
4528
4529 /* Process all the messages - DTOH direction */
4530 while (!dhd_is_device_removed(dhd)) {
4531 uint8 *msg_addr;
4532 uint32 msg_len;
4533
4534 if (dhd->hang_was_sent) {
4535 break;
4536 }
4537
4538 /* Get the address of the next message to be read from ring */
4539 msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len);
4540 if (msg_addr == NULL) {
4541 break;
4542 }
4543
4544 /* Prefetch data to populate the cache */
4545 OSL_PREFETCH(msg_addr);
4546 if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) {
4547 DHD_ERROR(("%s: process %s msg addr %p len %d\n",
4548 __FUNCTION__, ring->name, msg_addr, msg_len));
4549 }
4550
4551 /* Write to dngl rd ptr */
4552 dhd_prot_upd_read_idx(dhd, ring);
4553 }
4554
4555 return 0;
4556}
4557
4558/**
4559 * Consume messages out of the D2H ring. Ensure that the message's DMA to host
4560 * memory has completed, before invoking the message handler via a table lookup
4561 * of the cmn_msg_hdr::msg_type.
4562 */
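/*
 * The per-ring d2h_sync_cb returns the message type only once the work item is known
 * to be valid in host memory; items are then dispatched through the table_lookup[]
 * handler array indexed by msg_type. curr_rd is advanced before dispatch so that a
 * LIVELOCK on a corrupted work item can report the exact read index involved.
 */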
4563static int BCMFASTPATH
4564dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len)
4565{
4566 uint32 buf_len = len;
4567 uint16 item_len;
4568 uint8 msg_type;
4569 cmn_msg_hdr_t *msg = NULL;
4570 int ret = BCME_OK;
4571
4572 ASSERT(ring);
4573 item_len = ring->item_len;
4574 if (item_len == 0) {
4575 DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n",
4576 __FUNCTION__, ring->idx, item_len, buf_len));
4577 return BCME_ERROR;
4578 }
4579
4580 while (buf_len > 0) {
4581 if (dhd->hang_was_sent) {
4582 ret = BCME_ERROR;
4583 goto done;
4584 }
4585
4586 msg = (cmn_msg_hdr_t *)buf;
4587
4588 /* Wait until DMA completes, then fetch msg_type */
4589 msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len);
4590
4591 /*
4592 * Update the curr_rd to the current index in the ring, from where
4593 * the work item is fetched. This way if the fetched work item
4594 * fails in LIVELOCK, we can print the exact read index in the ring
4595 * that shows up the corrupted work item.
4596 */
4597 if ((ring->curr_rd + 1) >= ring->max_items) {
4598 ring->curr_rd = 0;
4599 } else {
4600 ring->curr_rd += 1;
4601 }
4602
4603 /* Prefetch data to populate the cache */
4604 OSL_PREFETCH(buf + item_len);
4605
4606 DHD_INFO(("msg_type %d item_len %d buf_len %d\n",
4607 msg_type, item_len, buf_len));
4608
4609 if (msg_type == MSG_TYPE_LOOPBACK) {
4610 bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len);
4611 DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len));
4612 }
4613
4614 ASSERT(msg_type < DHD_PROT_FUNCS);
4615 if (msg_type >= DHD_PROT_FUNCS) {
4616 DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n",
4617 __FUNCTION__, msg_type, item_len, buf_len));
4618 ret = BCME_ERROR;
4619 goto done;
4620 }
4621
4622 if (table_lookup[msg_type]) {
4623 table_lookup[msg_type](dhd, buf);
4624 }
4625
4626 if (buf_len < item_len) {
4627 ret = BCME_ERROR;
4628 goto done;
4629 }
4630 buf_len = buf_len - item_len;
4631 buf = buf + item_len;
4632 }
4633
4634done:
4635
4636#ifdef DHD_RX_CHAINING
4637 dhd_rxchain_commit(dhd);
4638#endif
4639
4640 return ret;
4641} /* dhd_prot_process_msgtype */
4642
4643static void
4644dhd_prot_noop(dhd_pub_t *dhd, void *msg)
4645{
4646 return;
4647}
4648
4649/** called on MSG_TYPE_RING_STATUS message received from dongle */
4650static void
4651dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg)
4652{
4653 pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg;
4654 uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id);
4655 uint16 status = ltoh16(ring_status->compl_hdr.status);
4656 uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id);
4657
4658 DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n",
4659 request_id, status, ring_id, ltoh16(ring_status->write_idx)));
4660
4661 if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT)
4662 return;
4663 if (status == BCMPCIE_BAD_PHASE) {
 4664 		/* bad phase reported by the dongle */
4665 DHD_ERROR(("Bad phase\n"));
4666 }
4667 if (status != BCMPCIE_BADOPTION)
4668 return;
4669
4670 if (request_id == DHD_H2D_DBGRING_REQ_PKTID) {
4671 if (dhd->prot->h2dring_info_subn != NULL) {
4672 if (dhd->prot->h2dring_info_subn->create_pending == TRUE) {
4673 DHD_ERROR(("H2D ring create failed for info ring\n"));
4674 dhd->prot->h2dring_info_subn->create_pending = FALSE;
4675 }
4676 else
 4677 				DHD_ERROR(("ring create status for info submit ring, but create not pending\n"));
4678 } else {
4679 DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__));
4680 }
4681 }
4682 else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) {
4683 if (dhd->prot->d2hring_info_cpln != NULL) {
4684 if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) {
4685 DHD_ERROR(("D2H ring create failed for info ring\n"));
4686 dhd->prot->d2hring_info_cpln->create_pending = FALSE;
4687 }
4688 else
4689 DHD_ERROR(("ring create ID for info ring, create not pending\n"));
4690 } else {
4691 DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__));
4692 }
4693 }
4694 else {
 4695 		DHD_ERROR(("don't know how to pair with original request\n"));
4696 }
4697 /* How do we track this to pair it with ??? */
4698 return;
4699}
4700
4701/** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */
4702static void
4703dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg)
4704{
4705 pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg;
4706 DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n",
4707 gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status,
4708 gen_status->compl_hdr.flow_ring_id));
4709
4710 /* How do we track this to pair it with ??? */
4711 return;
4712}
4713
4714/**
4715 * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the
4716 * dongle received the ioctl message in dongle memory.
4717 */
4718static void
4719dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg)
4720{
4721 ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg;
4722 unsigned long flags;
4723#ifdef DHD_PKTID_AUDIT_RING
4724 uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id);
4725
 4726 	/* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */
4727 if (pktid != DHD_IOCTL_REQ_PKTID) {
4728#ifndef IOCTLRESP_USE_CONSTMEM
4729 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
4730 DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4731#else
4732 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid,
4733 DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4734#endif /* !IOCTLRESP_USE_CONSTMEM */
4735 }
4736#endif /* DHD_PKTID_AUDIT_RING */
4737
4738 DHD_GENERAL_LOCK(dhd, flags);
4739 if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) &&
4740 (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
4741 dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING;
4742 } else {
4743 DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n",
4744 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
4745 prhex("dhd_prot_ioctack_process:",
4746 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4747 }
4748 DHD_GENERAL_UNLOCK(dhd, flags);
4749
4750 DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n",
4751 ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status,
4752 ioct_ack->compl_hdr.flow_ring_id));
4753 if (ioct_ack->compl_hdr.status != 0) {
4754 DHD_ERROR(("got an error status for the ioctl request...need to handle that\n"));
4755 }
4756#ifdef REPORT_FATAL_TIMEOUTS
4757 else {
4758 dhd_stop_bus_timer(dhd);
4759 }
4760#endif /* REPORT_FATAL_TIMEOUTS */
4761}
4762
4763/** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */
4764static void
4765dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg)
4766{
4767 dhd_prot_t *prot = dhd->prot;
4768 uint32 pkt_id, xt_id;
4769 ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg;
4770 void *pkt;
4771 unsigned long flags;
4772 dhd_dma_buf_t retbuf;
4773#ifdef REPORT_FATAL_TIMEOUTS
4774 uint16 dhd_xt_id;
4775#endif
4776
4777 memset(&retbuf, 0, sizeof(dhd_dma_buf_t));
4778
4779 pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id);
4780
4781#ifdef DHD_PKTID_AUDIT_RING
4782#ifndef IOCTLRESP_USE_CONSTMEM
4783 DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id,
4784 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4785#else
4786 DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id,
4787 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4788#endif /* !IOCTLRESP_USE_CONSTMEM */
4789#endif /* DHD_PKTID_AUDIT_RING */
4790
4791 DHD_GENERAL_LOCK(dhd, flags);
4792 if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) ||
4793 !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) {
4794 DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n",
4795 __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id));
4796 prhex("dhd_prot_ioctcmplt_process:",
4797 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4798 DHD_GENERAL_UNLOCK(dhd, flags);
4799 return;
4800 }
4801
4802 /* Clear Response pending bit */
4803 prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING;
4804
4805#ifndef IOCTLRESP_USE_CONSTMEM
4806 pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE);
4807#else
4808 dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf);
4809 pkt = retbuf.va;
4810#endif /* !IOCTLRESP_USE_CONSTMEM */
4811 if (!pkt) {
4812 DHD_GENERAL_UNLOCK(dhd, flags);
4813 DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__));
4814 prhex("dhd_prot_ioctcmplt_process:",
4815 (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
4816 return;
4817 }
4818 DHD_GENERAL_UNLOCK(dhd, flags);
4819
4820 prot->ioctl_resplen = ltoh16(ioct_resp->resp_len);
4821 prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status);
4822 xt_id = ltoh16(ioct_resp->trans_id);
4823
4824 if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) {
4825 DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n",
4826 __FUNCTION__, xt_id, prot->ioctl_trans_id,
4827 prot->curr_ioctl_cmd, ioct_resp->cmd));
4828#ifdef REPORT_FATAL_TIMEOUTS
4829 dhd_stop_cmd_timer(dhd);
4830#endif /* REPORT_FATAL_TIMEOUTS */
4831 dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR);
4832 dhd_prot_debug_info_print(dhd);
4833#ifdef DHD_FW_COREDUMP
4834 if (dhd->memdump_enabled) {
4835 /* collect core dump */
4836 dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH;
4837 dhd_bus_mem_dump(dhd);
4838 }
4839#else
4840 ASSERT(0);
4841#endif /* DHD_FW_COREDUMP */
4842 dhd_schedule_reset(dhd);
4843 goto exit;
4844 }
4845#ifdef REPORT_FATAL_TIMEOUTS
4846 dhd_xt_id = dhd_get_request_id(dhd);
4847 if (xt_id == dhd_xt_id) {
4848 dhd_stop_cmd_timer(dhd);
4849 } else {
 4850 		DHD_ERROR(("%s: Cmd timer not stopped, received xt_id %d stored xt_id %d\n",
4851 __FUNCTION__, xt_id, dhd_xt_id));
4852 }
4853#endif /* REPORT_FATAL_TIMEOUTS */
4854 DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n",
4855 pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen));
4856
4857 if (prot->ioctl_resplen > 0) {
4858#ifndef IOCTLRESP_USE_CONSTMEM
4859 bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen);
4860#else
4861 bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen);
4862#endif /* !IOCTLRESP_USE_CONSTMEM */
4863 }
4864
4865 /* wake up any dhd_os_ioctl_resp_wait() */
4866 dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS);
4867
4868exit:
4869#ifndef IOCTLRESP_USE_CONSTMEM
4870 dhd_prot_packet_free(dhd, pkt,
4871 PKTTYPE_IOCTL_RX, FALSE);
4872#else
4873 free_ioctl_return_buffer(dhd, &retbuf);
4874#endif /* !IOCTLRESP_USE_CONSTMEM */
4875
4876 /* Post another ioctl buf to the device */
4877 if (prot->cur_ioctlresp_bufs_posted > 0) {
4878 prot->cur_ioctlresp_bufs_posted--;
4879 }
4880
4881 dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd);
4882}
4883
4884/** called on MSG_TYPE_TX_STATUS message received from dongle */
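/*
 * The completion's pktid is translated back to the native packet, its DMA mapping is
 * torn down and the packet is freed; with DHD_LB_TXC the packet is instead handed to
 * the tx-completion workq for the load-balancing tasklet to consume. Flow ring
 * tx-status accounting and optional timesync logging are updated along the way.
 */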
4885static void BCMFASTPATH
4886dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg)
4887{
4888 dhd_prot_t *prot = dhd->prot;
4889 host_txbuf_cmpl_t * txstatus;
4890 unsigned long flags;
4891 uint32 pktid;
4892 void *pkt;
4893 dmaaddr_t pa;
4894 uint32 len;
4895 void *dmah;
4896 void *secdma;
4897 bool pkt_fate;
4898#ifdef DEVICE_TX_STUCK_DETECT
4899 flow_ring_node_t *flow_ring_node;
4900 uint16 flowid;
4901#endif /* DEVICE_TX_STUCK_DETECT */
4902
4903
4904 txstatus = (host_txbuf_cmpl_t *)msg;
4905#ifdef DEVICE_TX_STUCK_DETECT
4906 flowid = txstatus->compl_hdr.flow_ring_id;
4907 flow_ring_node = DHD_FLOW_RING(dhd, flowid);
4908 /**
4909 * Since we got a completion message on this flowid,
4910 * update tx_cmpl time stamp
4911 */
4912 flow_ring_node->tx_cmpl = OSL_SYSUPTIME();
4913#endif /* DEVICE_TX_STUCK_DETECT */
4914
4915 /* locks required to protect circular buffer accesses */
4916 DHD_GENERAL_LOCK(dhd, flags);
4917 pktid = ltoh32(txstatus->cmn_hdr.request_id);
4918 pkt_fate = TRUE;
4919
4920#ifdef DHD_PKTID_AUDIT_RING
4921 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid,
4922 DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE);
4923#endif /* DHD_PKTID_AUDIT_RING */
4924
4925 DHD_INFO(("txstatus for pktid 0x%04x\n", pktid));
4926 if (prot->active_tx_count) {
4927 prot->active_tx_count--;
4928
4929 /* Release the Lock when no more tx packets are pending */
4930 if (prot->active_tx_count == 0)
4931 DHD_TXFL_WAKE_UNLOCK(dhd);
4932 } else {
 4933 		DHD_ERROR(("Extra tx completion received, active_tx_count is already 0\n"));
4934 }
4935
4936 ASSERT(pktid != 0);
4937#if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA)
4938 {
4939 int elem_ix;
4940 void **elem;
4941 bcm_workq_t *workq;
4942 dmaaddr_t pa;
4943 uint32 pa_len;
4944
4945 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map,
4946 pktid, pa, pa_len, dmah, secdma, PKTTYPE_DATA_TX);
4947
4948 workq = &prot->tx_compl_prod;
4949 /*
4950 * Produce the packet into the tx_compl workq for the tx compl tasklet
4951 * to consume.
4952 */
4953 OSL_PREFETCH(PKTTAG(pkt));
4954
4955 /* fetch next available slot in workq */
4956 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
4957
4958 DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa);
4959 DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), pa_len);
4960
4961 if (elem_ix == BCM_RING_FULL) {
4962 DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n"));
4963 goto workq_ring_full;
4964 }
4965
4966 elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix);
4967 *elem = pkt;
4968
4969 smp_wmb();
4970
4971 /* Sync WR index to consumer if the SYNC threshold has been reached */
4972 if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
4973 bcm_workq_prod_sync(workq);
4974 prot->tx_compl_prod_sync = 0;
4975 }
4976
4977 DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n",
4978 __FUNCTION__, pkt, prot->tx_compl_prod_sync));
4979
4980 DHD_GENERAL_UNLOCK(dhd, flags);
4981
4982 return;
4983 }
4984
4985workq_ring_full:
4986
4987#endif /* DHD_LB_TXC && !BCM_SECURE_DMA */
4988
4989 pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
4990 pa, len, dmah, secdma, PKTTYPE_DATA_TX);
4991
4992 if (pkt) {
4993 if (SECURE_DMA_ENAB(dhd->osh)) {
4994 int offset = 0;
4995 BCM_REFERENCE(offset);
4996
4997 if (dhd->prot->tx_metadata_offset)
4998 offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN;
4999 SECURE_DMA_UNMAP(dhd->osh, (uint) pa,
5000 (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah,
5001 secdma, offset);
5002 } else
5003 DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah);
5004#ifdef DMAMAP_STATS
5005 dhd->dma_stats.txdata--;
5006 dhd->dma_stats.txdata_sz -= len;
5007#endif /* DMAMAP_STATS */
5008#if defined(DBG_PKT_MON) || defined(DHD_PKT_LOGGING)
5009 if (dhd->d11_tx_status) {
5010 uint16 tx_status;
5011
5012 tx_status = ltoh16(txstatus->compl_hdr.status) &
5013 WLFC_CTL_PKTFLAG_MASK;
5014 pkt_fate = (tx_status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE;
5015
5016 DHD_DBG_PKT_MON_TX_STATUS(dhd, pkt, pktid, tx_status);
5017#ifdef DHD_PKT_LOGGING
5018 DHD_PKTLOG_TXS(dhd, pkt, pktid, tx_status);
5019#endif /* DHD_PKT_LOGGING */
 5020 }
 5021#endif /* DBG_PKT_MON || DHD_PKT_LOGGING */
5022#if defined(BCMPCIE)
5023 dhd_txcomplete(dhd, pkt, pkt_fate);
5024#endif
5025
5026#if DHD_DBG_SHOW_METADATA
5027 if (dhd->prot->metadata_dbg &&
5028 dhd->prot->tx_metadata_offset && txstatus->metadata_len) {
5029 uchar *ptr;
5030 /* The Ethernet header of TX frame was copied and removed.
5031 * Here, move the data pointer forward by Ethernet header size.
5032 */
5033 PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN);
5034 ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset);
5035 bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len);
5036 dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len);
5037 }
5038#endif /* DHD_DBG_SHOW_METADATA */
5039 DHD_GENERAL_UNLOCK(dhd, flags);
5040 PKTFREE(dhd->osh, pkt, TRUE);
5041 DHD_GENERAL_LOCK(dhd, flags);
5042 DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id,
5043 txstatus->tx_status);
5044
5045#ifdef DHD_TIMESYNC
5046 if (dhd->prot->tx_ts_log_enabled) {
5047 ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts);
5048 dhd_timesync_log_tx_timestamp(dhd->ts,
5049 txstatus->compl_hdr.flow_ring_id,
5050 txstatus->cmn_hdr.if_id,
5051 ts->low, ts->high);
5052 }
5053#endif /* DHD_TIMESYNC */
5054 }
5055
5056 DHD_GENERAL_UNLOCK(dhd, flags);
5057
5058 return;
5059} /* dhd_prot_txstatus_process */
5060
5061/** called on MSG_TYPE_WL_EVENT message received from dongle */
5062static void
5063dhd_prot_event_process(dhd_pub_t *dhd, void *msg)
5064{
5065 wlevent_req_msg_t *evnt;
5066 uint32 bufid;
5067 uint16 buflen;
5068 int ifidx = 0;
5069 void* pkt;
5070 unsigned long flags;
5071 dhd_prot_t *prot = dhd->prot;
5072
5073 /* Event complete header */
5074 evnt = (wlevent_req_msg_t *)msg;
5075 bufid = ltoh32(evnt->cmn_hdr.request_id);
5076
5077#ifdef DHD_PKTID_AUDIT_RING
5078 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid,
5079 DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE);
5080#endif /* DHD_PKTID_AUDIT_RING */
5081
5082 buflen = ltoh16(evnt->event_data_len);
5083
5084 ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr);
5085
5086 /* Post another rxbuf to the device */
5087 if (prot->cur_event_bufs_posted)
5088 prot->cur_event_bufs_posted--;
5089 dhd_msgbuf_rxbuf_post_event_bufs(dhd);
5090
5091 /* locks required to protect pktid_map */
5092 DHD_GENERAL_LOCK(dhd, flags);
5093 pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE);
5094 DHD_GENERAL_UNLOCK(dhd, flags);
5095
5096 if (!pkt) {
5097 DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid));
5098 return;
5099 }
5100
5101 /* DMA RX offset updated through shared area */
5102 if (dhd->prot->rx_dataoffset)
5103 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
5104
5105 PKTSETLEN(dhd->osh, pkt, buflen);
5106
 5107 dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1);
5108}
5109
5110/** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */
5111static void BCMFASTPATH
5112dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf)
5113{
5114 info_buf_resp_t *resp;
5115 uint32 pktid;
5116 uint16 buflen;
5117 void * pkt;
5118 unsigned long flags;
5119
5120 resp = (info_buf_resp_t *)buf;
5121 pktid = ltoh32(resp->cmn_hdr.request_id);
5122 buflen = ltoh16(resp->info_data_len);
5123
5124#ifdef DHD_PKTID_AUDIT_RING
5125 DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid,
5126 DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE);
5127#endif /* DHD_PKTID_AUDIT_RING */
5128
5129 DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n",
5130 pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum),
5131 dhd->prot->rx_dataoffset));
5132
5133 if (!dhd->prot->infobufpost) {
5134 DHD_ERROR(("infobuf posted are zero, but there is a completion\n"));
5135 return;
5136 }
5137
5138 dhd->prot->infobufpost--;
5139 dhd_prot_infobufpost(dhd);
5140
5141 DHD_GENERAL_LOCK(dhd, flags);
5142 pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE);
5143 DHD_GENERAL_UNLOCK(dhd, flags);
5144
5145 if (!pkt)
5146 return;
5147
5148 /* DMA RX offset updated through shared area */
5149 if (dhd->prot->rx_dataoffset)
5150 PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset);
5151
5152 PKTSETLEN(dhd->osh, pkt, buflen);
5153
 5154 /* info ring "debug" data, which is not an 802.3 frame, is handed up with a
 5155 * special ifidx (DHD_EVENT_IF). This is just internal to dhd to get the data
 5156 * to dhd_linux.c:dhd_rx_frame() from here.
 5157 */
 5158 dhd_bus_rx_frame(dhd->bus, pkt, DHD_EVENT_IF /* ifidx HACK */, 1);
5159}
5160
5161/** Stop protocol: sync w/dongle state. */
5162void dhd_prot_stop(dhd_pub_t *dhd)
5163{
5164 ASSERT(dhd);
5165 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5166
5167}
5168
5169/* Add any protocol-specific data header.
5170 * Caller must reserve prot_hdrlen prepend space.
5171 */
5172void BCMFASTPATH
5173dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF)
5174{
5175 return;
5176}
5177
5178uint
5179dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF)
5180{
5181 return 0;
5182}
5183
5184
5185#define PKTBUF pktbuf
5186
5187/**
5188 * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in
5189 * the corresponding flow ring.
5190 */
5191int BCMFASTPATH
5192dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx)
5193{
5194 unsigned long flags;
5195 dhd_prot_t *prot = dhd->prot;
5196 host_txbuf_post_t *txdesc = NULL;
5197 dmaaddr_t pa, meta_pa;
5198 uint8 *pktdata;
5199 uint32 pktlen;
5200 uint32 pktid;
5201 uint8 prio;
5202 uint16 flowid = 0;
5203 uint16 alloced = 0;
5204 uint16 headroom;
5205 msgbuf_ring_t *ring;
5206 flow_ring_table_t *flow_ring_table;
5207 flow_ring_node_t *flow_ring_node;
5208
5209 if (dhd->flow_ring_table == NULL) {
5210 return BCME_NORESOURCE;
5211 }
5212
5213 flowid = DHD_PKT_GET_FLOWID(PKTBUF);
5214 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
5215 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
5216
5217 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
5218
5219#ifdef PCIE_INB_DW
5220 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5221 return BCME_ERROR;
5222#endif /* PCIE_INB_DW */
5223
5224 DHD_GENERAL_LOCK(dhd, flags);
5225
5226 /* Create a unique 32-bit packet id */
5227 pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map,
5228 PKTBUF, PKTTYPE_DATA_TX);
5229#if defined(DHD_PCIE_PKTID)
5230 if (pktid == DHD_PKTID_INVALID) {
5231 DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__));
5232 /*
5233 * If we return error here, the caller would queue the packet
5234 * again. So we'll just free the skb allocated in DMA Zone.
5235 * Since we have not freed the original SKB yet the caller would
5236 * requeue the same.
5237 */
5238 goto err_no_res_pktfree;
5239 }
5240#endif /* DHD_PCIE_PKTID */
5241
5242 /* Reserve space in the circular buffer */
5243 txdesc = (host_txbuf_post_t *)
5244 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5245 if (txdesc == NULL) {
5246 DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n",
5247 __FUNCTION__, __LINE__, prot->active_tx_count));
 5248 goto err_free_pktid;
5249 }
5250
5251#ifdef DBG_PKT_MON
5252 DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid);
5253#endif /* DBG_PKT_MON */
5254#ifdef DHD_PKT_LOGGING
5255 DHD_PKTLOG_TX(dhd, PKTBUF, pktid);
5256#endif /* DHD_PKT_LOGGING */
5257
5258
5259 /* Extract the data pointer and length information */
5260 pktdata = PKTDATA(dhd->osh, PKTBUF);
5261 pktlen = PKTLEN(dhd->osh, PKTBUF);
5262
5263 /* Ethernet header: Copy before we cache flush packet using DMA_MAP */
5264 bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN);
5265
5266 /* Extract the ethernet header and adjust the data pointer and length */
5267 pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN);
5268 pktlen -= ETHER_HDR_LEN;
5269
5270 /* Map the data pointer to a DMA-able address */
5271 if (SECURE_DMA_ENAB(dhd->osh)) {
5272 int offset = 0;
5273 BCM_REFERENCE(offset);
5274
5275 if (prot->tx_metadata_offset)
5276 offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
5277
5278 pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen,
5279 DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset);
5280 }
5281#ifndef BCM_SECURE_DMA
5282 else
5283 pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0);
5284#endif /* #ifndef BCM_SECURE_DMA */
5285
5286 if (PHYSADDRISZERO(pa)) {
5287 DHD_ERROR(("%s: Something really bad, unless 0 is "
5288 "a valid phyaddr for pa\n", __FUNCTION__));
 5289 ASSERT(0);
 5290 goto err_rollback_idx;
5291 }
5292
5293#ifdef DMAMAP_STATS
5294 dhd->dma_stats.txdata++;
5295 dhd->dma_stats.txdata_sz += pktlen;
5296#endif /* DMAMAP_STATS */
5297 /* No need to lock. Save the rest of the packet's metadata */
5298 DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid,
5299 pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX);
5300
5301#ifdef TXP_FLUSH_NITEMS
5302 if (ring->pend_items_count == 0)
5303 ring->start_addr = (void *)txdesc;
5304 ring->pend_items_count++;
5305#endif
5306
5307 /* Form the Tx descriptor message buffer */
5308
5309 /* Common message hdr */
5310 txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST;
5311 txdesc->cmn_hdr.if_id = ifidx;
5312 txdesc->cmn_hdr.flags = ring->current_phase;
5313
5314 txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3;
5315 prio = (uint8)PKTPRIO(PKTBUF);
5316
5317
5318 txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT;
5319 txdesc->seg_cnt = 1;
5320
5321 txdesc->data_len = htol16((uint16) pktlen);
5322 txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa));
5323 txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa));
5324
5325 /* Move data pointer to keep ether header in local PKTBUF for later reference */
5326 PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN);
5327
5328 /* Handle Tx metadata */
5329 headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF);
5330 if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset))
5331 DHD_ERROR(("No headroom for Metadata tx %d %d\n",
5332 prot->tx_metadata_offset, headroom));
5333
5334 if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) {
5335 DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset));
5336
5337 /* Adjust the data pointer to account for meta data in DMA_MAP */
5338 PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset);
5339
5340 if (SECURE_DMA_ENAB(dhd->osh)) {
5341 meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
5342 prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF,
5343 0, ring->dma_buf.secdma);
5344 }
5345#ifndef BCM_SECURE_DMA
5346 else
5347 meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF),
5348 prot->tx_metadata_offset, DMA_RX, PKTBUF, 0);
5349#endif /* #ifndef BCM_SECURE_DMA */
5350
5351 if (PHYSADDRISZERO(meta_pa)) {
5352 /* Unmap the data pointer to a DMA-able address */
5353 if (SECURE_DMA_ENAB(dhd->osh)) {
5354
5355 int offset = 0;
5356 BCM_REFERENCE(offset);
5357
5358 if (prot->tx_metadata_offset) {
5359 offset = prot->tx_metadata_offset + ETHER_HDR_LEN;
5360 }
5361
5362 SECURE_DMA_UNMAP(dhd->osh, pa, pktlen,
5363 DMA_TX, 0, DHD_DMAH_NULL, ring->dma_buf.secdma, offset);
5364 }
5365#ifndef BCM_SECURE_DMA
5366 else {
5367 DMA_UNMAP(dhd->osh, pa, pktlen, DMA_TX, 0, DHD_DMAH_NULL);
5368 }
5369#endif /* #ifndef BCM_SECURE_DMA */
5370#ifdef TXP_FLUSH_NITEMS
5371 /* update pend_items_count */
5372 ring->pend_items_count--;
5373#endif /* TXP_FLUSH_NITEMS */
5374
5375 DHD_ERROR(("%s: Something really bad, unless 0 is "
5376 "a valid phyaddr for meta_pa\n", __FUNCTION__));
 5377 ASSERT(0);
 5378 goto err_rollback_idx;
5379 }
5380
5381 /* Adjust the data pointer back to original value */
5382 PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset);
5383
5384 txdesc->metadata_buf_len = prot->tx_metadata_offset;
5385 txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa));
5386 txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa));
5387 } else {
5388 txdesc->metadata_buf_len = htol16(0);
5389 txdesc->metadata_buf_addr.high_addr = 0;
5390 txdesc->metadata_buf_addr.low_addr = 0;
5391 }
5392
5393#ifdef DHD_PKTID_AUDIT_RING
5394 DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC);
5395#endif /* DHD_PKTID_AUDIT_RING */
5396
5397 txdesc->cmn_hdr.request_id = htol32(pktid);
5398
5399 DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len,
5400 txdesc->cmn_hdr.request_id));
5401
5402 /* Update the write pointer in TCM & ring bell */
5403#ifdef TXP_FLUSH_NITEMS
5404 /* Flush if we have either hit the txp_threshold or if this msg is */
5405 /* occupying the last slot in the flow_ring - before wrap around. */
5406 if ((ring->pend_items_count == prot->txp_threshold) ||
5407 ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) {
5408 dhd_prot_txdata_write_flush(dhd, flowid, TRUE);
5409 }
5410#else
5411 /* update ring's WR index and ring doorbell to dongle */
5412 dhd_prot_ring_write_complete(dhd, ring, txdesc, 1);
5413#endif
5414
5415 prot->active_tx_count++;
5416
5417 /*
 5418 * Take a wake lock, do not sleep if we have at least one packet
5419 * to finish.
5420 */
5421 if (prot->active_tx_count >= 1)
5422 DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT);
5423
5424 DHD_GENERAL_UNLOCK(dhd, flags);
5425
5426#ifdef PCIE_INB_DW
5427 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5428#endif
5429
5430 return BCME_OK;
5431
5432err_rollback_idx:
5433 /* roll back write pointer for unprocessed message */
5434 if (ring->wr == 0) {
5435 ring->wr = ring->max_items - 1;
5436 } else {
5437 ring->wr--;
5438 if (ring->wr == 0) {
5439 DHD_INFO(("%s: flipping the phase now\n", ring->name));
5440 ring->current_phase = ring->current_phase ?
5441 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
5442 }
5443 }
5444
5445err_free_pktid:
5446#if defined(DHD_PCIE_PKTID)
5447 {
5448 void *dmah;
5449 void *secdma;
5450 /* Free up the PKTID. physaddr and pktlen will be garbage. */
5451 DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid,
5452 pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK);
5453 }
5454
 5455err_no_res_pktfree:
 5456#endif /* DHD_PCIE_PKTID */
5457
5458
5459
5460 DHD_GENERAL_UNLOCK(dhd, flags);
5461#ifdef PCIE_INB_DW
5462 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5463#endif
5464 return BCME_NORESOURCE;
5465} /* dhd_prot_txdata */
5466
5467/* called with a lock */
5468/** optimization to write "n" tx items at a time to ring */
5469void BCMFASTPATH
5470dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock)
5471{
5472#ifdef TXP_FLUSH_NITEMS
5473 unsigned long flags = 0;
5474 flow_ring_table_t *flow_ring_table;
5475 flow_ring_node_t *flow_ring_node;
5476 msgbuf_ring_t *ring;
5477
5478 if (dhd->flow_ring_table == NULL) {
5479 return;
5480 }
5481
5482 if (!in_lock) {
5483 DHD_GENERAL_LOCK(dhd, flags);
5484 }
5485
5486 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
5487 flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid];
5488 ring = (msgbuf_ring_t *)flow_ring_node->prot_info;
5489
5490 if (ring->pend_items_count) {
5491 /* update ring's WR index and ring doorbell to dongle */
5492 dhd_prot_ring_write_complete(dhd, ring, ring->start_addr,
5493 ring->pend_items_count);
5494 ring->pend_items_count = 0;
5495 ring->start_addr = NULL;
5496 }
5497
5498 if (!in_lock) {
5499 DHD_GENERAL_UNLOCK(dhd, flags);
5500 }
5501#endif /* TXP_FLUSH_NITEMS */
5502}
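
/*
 * Usage sketch (editorial note, not part of the driver): with TXP_FLUSH_NITEMS
 * defined, dhd_prot_txdata() batches tx descriptors and only rings the dongle
 * doorbell once txp_threshold items are pending or the ring is about to wrap.
 * A caller that has just queued the tail of a burst can therefore force the
 * doorbell explicitly. `dhdp`, `flowid`, `ifidx` and dequeue_next_pkt() are
 * placeholders, not driver symbols.
 *
 *	while ((pkt = dequeue_next_pkt(flowid)) != NULL)
 *		dhd_prot_txdata(dhdp, pkt, ifidx);
 *	dhd_prot_txdata_write_flush(dhdp, flowid, FALSE);	// FALSE: lock not held
 */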
5503
5504#undef PKTBUF /* Only defined in the above routine */
5505
5506int BCMFASTPATH
5507dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len)
5508{
5509 return 0;
5510}
5511
5512/** post a set of receive buffers to the dongle */
5513static void BCMFASTPATH
5514dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt)
5515{
5516 dhd_prot_t *prot = dhd->prot;
5517#if defined(DHD_LB_RXC)
5518 int elem_ix;
5519 uint32 *elem;
5520 bcm_workq_t *workq;
5521
5522 workq = &prot->rx_compl_prod;
5523
5524 /* Produce the work item */
5525 elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ);
5526 if (elem_ix == BCM_RING_FULL) {
5527 DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__));
5528 ASSERT(0);
5529 return;
5530 }
5531
5532 elem = WORKQ_ELEMENT(uint32, workq, elem_ix);
5533 *elem = pktid;
5534
5535 smp_wmb();
5536
5537 /* Sync WR index to consumer if the SYNC threshold has been reached */
5538 if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) {
5539 bcm_workq_prod_sync(workq);
5540 prot->rx_compl_prod_sync = 0;
5541 }
5542
5543 DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n",
5544 __FUNCTION__, pktid, prot->rx_compl_prod_sync));
5545
5546#endif /* DHD_LB_RXC */
5547
5548 if (prot->rxbufpost >= rxcnt) {
5549 prot->rxbufpost -= (uint16)rxcnt;
5550 } else {
5551 /* ASSERT(0); */
5552 prot->rxbufpost = 0;
5553 }
5554
5555#if !defined(DHD_LB_RXC)
5556 if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD))
5557 dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */
5558#endif /* !DHD_LB_RXC */
5559 return;
5560}
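
/*
 * Worked example (editorial note): rx buffers are re-posted in batches rather
 * than one per completion. The refill above only runs once the posted count
 * has fallen at least RXBUFPOST_THRESHOLD below max_rxbufpost; e.g. if
 * max_rxbufpost were 256 and the threshold 32, completions would accumulate
 * until rxbufpost drops to 224 or below before dhd_msgbuf_rxbuf_post() tops
 * the ring back up.
 */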
5561
5562/* called before an ioctl is sent to the dongle */
5563static void
5564dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf)
5565{
5566 dhd_prot_t *prot = dhd->prot;
5567
5568 if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) {
5569 int slen = 0;
5570 pcie_bus_tput_params_t *tput_params;
5571
5572 slen = strlen("pcie_bus_tput") + 1;
5573 tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen);
5574 bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr,
5575 sizeof(tput_params->host_buf_addr));
5576 tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN;
5577 }
5578}
5579
5580#ifdef DHD_PM_CONTROL_FROM_FILE
5581extern bool g_pm_control;
5582#endif /* DHD_PM_CONTROL_FROM_FILE */
5583
5584/** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */
5585int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len)
5586{
5587 int ret = -1;
5588 uint8 action;
5589
5590 if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) {
5591 DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n",
5592 __FUNCTION__, dhd->busstate, dhd->hang_was_sent));
5593 goto done;
5594 }
5595
5596 if (dhd->busstate == DHD_BUS_SUSPEND) {
5597 DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__));
5598 goto done;
5599 }
5600
5601 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5602
5603 if (ioc->cmd == WLC_SET_PM) {
5604#ifdef DHD_PM_CONTROL_FROM_FILE
5605 if (g_pm_control == TRUE) {
5606 DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n",
5607 __FUNCTION__, buf ? *(char *)buf : 0));
5608 goto done;
5609 }
5610#endif /* DHD_PM_CONTROL_FROM_FILE */
5611 DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0));
5612 }
5613
5614 ASSERT(len <= WLC_IOCTL_MAXLEN);
5615
5616 if (len > WLC_IOCTL_MAXLEN)
5617 goto done;
5618
5619 action = ioc->set;
5620
5621 dhd_prot_wlioctl_intercept(dhd, ioc, buf);
5622
5623 if (action & WL_IOCTL_ACTION_SET) {
5624 ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
5625 } else {
5626 ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action);
5627 if (ret > 0)
5628 ioc->used = ret;
5629 }
5630
5631 /* Too many programs assume ioctl() returns 0 on success */
5632 if (ret >= 0) {
5633 ret = 0;
5634 } else {
5635 dhd->dongle_error = ret;
5636 }
5637
5638 if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) {
5639 /* Intercept the wme_dp ioctl here */
5640 if (!strcmp(buf, "wme_dp")) {
5641 int slen, val = 0;
5642
5643 slen = strlen("wme_dp") + 1;
5644 if (len >= (int)(slen + sizeof(int)))
5645 bcopy(((char *)buf + slen), &val, sizeof(int));
5646 dhd->wme_dp = (uint8) ltoh32(val);
5647 }
5648
5649 }
5650
5651done:
5652 return ret;
5653
5654} /* dhd_prot_ioctl */
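
/*
 * Caller sketch (editorial note, not part of the driver): issuing a GET iovar
 * through dhd_prot_ioctl(). `dhdp` is a placeholder for a valid dhd_pub_t and
 * "some_iovar" is a hypothetical iovar name; on a 0 return the response has
 * been copied back into `buf` by dhd_msgbuf_wait_ioctl_cmplt().
 *
 *	wl_ioctl_t ioc;
 *	char buf[64] = "some_iovar";
 *	memset(&ioc, 0, sizeof(ioc));
 *	ioc.cmd = WLC_GET_VAR;
 *	ioc.set = 0;	// query path, WL_IOCTL_ACTION_SET not set
 *	if (dhd_prot_ioctl(dhdp, 0, &ioc, buf, sizeof(buf)) == 0)
 *		DHD_ERROR(("iovar response: %s\n", buf));
 */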
5655
5656/** test / loopback */
5657
5658int
5659dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len)
5660{
5661 unsigned long flags;
5662 dhd_prot_t *prot = dhd->prot;
5663 uint16 alloced = 0;
5664
5665 ioct_reqst_hdr_t *ioct_rqst;
5666
5667 uint16 hdrlen = sizeof(ioct_reqst_hdr_t);
5668 uint16 msglen = len + hdrlen;
5669 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
5670
5671 msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN);
5672 msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE);
5673
5674#ifdef PCIE_INB_DW
5675 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
5676 return BCME_ERROR;
5677#endif /* PCIE_INB_DW */
5678
5679 DHD_GENERAL_LOCK(dhd, flags);
5680
5681 ioct_rqst = (ioct_reqst_hdr_t *)
5682 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5683
5684 if (ioct_rqst == NULL) {
5685 DHD_GENERAL_UNLOCK(dhd, flags);
5686#ifdef PCIE_INB_DW
5687 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5688#endif
5689 return 0;
5690 }
5691
5692 {
5693 uint8 *ptr;
5694 uint16 i;
5695
5696 ptr = (uint8 *)ioct_rqst;
5697 for (i = 0; i < msglen; i++) {
5698 ptr[i] = i % 256;
5699 }
5700 }
5701
5702 /* Common msg buf hdr */
5703 ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5704 ring->seqnum++;
5705
5706 ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK;
5707 ioct_rqst->msg.if_id = 0;
5708 ioct_rqst->msg.flags = ring->current_phase;
5709
5710 bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen);
5711
5712 /* update ring's WR index and ring doorbell to dongle */
5713 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
5714 DHD_GENERAL_UNLOCK(dhd, flags);
5715#ifdef PCIE_INB_DW
5716 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
5717#endif
5718
5719 return 0;
5720}
5721
5722/** test / loopback */
5723void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer)
5724{
5725 if (dmaxfer == NULL)
5726 return;
5727
5728 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
5729 dhd_dma_buf_free(dhd, &dmaxfer->dstmem);
5730}
5731
5732/** test / loopback */
5733int
5734dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp)
5735{
5736 dhd_prot_t *prot = dhdp->prot;
5737 dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer;
5738 dmaxref_mem_map_t *dmap = NULL;
5739
5740 dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t));
5741 if (!dmap) {
5742 DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__));
5743 goto mem_alloc_fail;
5744 }
5745 dmap->srcmem = &(dmaxfer->srcmem);
5746 dmap->dstmem = &(dmaxfer->dstmem);
5747
5748 DMAXFER_FREE(dhdp, dmap);
5749 return BCME_OK;
5750
5751mem_alloc_fail:
5752 if (dmap) {
5753 MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t));
5754 dmap = NULL;
5755 }
5756 return BCME_NOMEM;
5757} /* dhd_prepare_schedule_dmaxfer_free */
5758
5759
5760/** test / loopback */
5761void
5762dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
5763{
5764
5765 dhd_dma_buf_free(dhdp, dmmap->srcmem);
5766 dhd_dma_buf_free(dhdp, dmmap->dstmem);
5767
5768 MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t));
5769 dmmap = NULL;
5770
5771} /* dmaxfer_free_prev_dmaaddr */
5772
5773
5774/** test / loopback */
5775int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len,
5776 uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer)
5777{
 5778 uint i = 0, j = 0;
5779 if (!dmaxfer)
5780 return BCME_ERROR;
5781
5782 /* First free up existing buffers */
5783 dmaxfer_free_dmaaddr(dhd, dmaxfer);
5784
5785 if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) {
5786 return BCME_NOMEM;
5787 }
5788
5789 if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) {
5790 dhd_dma_buf_free(dhd, &dmaxfer->srcmem);
5791 return BCME_NOMEM;
5792 }
5793
5794 dmaxfer->len = len;
5795
5796 /* Populate source with a pattern like below
5797 * 0x00000000
5798 * 0x01010101
5799 * 0x02020202
5800 * 0x03030303
5801 * 0x04040404
5802 * 0x05050505
5803 * ...
5804 * 0xFFFFFFFF
5805 */
5806 while (i < dmaxfer->len) {
5807 ((uint8*)dmaxfer->srcmem.va)[i] = j % 256;
5808 i++;
5809 if (i % 4 == 0) {
5810 j++;
5811 }
 5812 }
 5813
5814 OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len);
5815
5816 dmaxfer->srcdelay = srcdelay;
5817 dmaxfer->destdelay = destdelay;
5818
5819 return BCME_OK;
5820} /* dmaxfer_prepare_dmaaddr */
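
/*
 * Worked example (editorial note) of the source fill pattern above: byte i of
 * srcmem takes the value (i / 4) % 256, so the buffer begins
 *	offset 0..3   : 00 00 00 00
 *	offset 4..7   : 01 01 01 01
 *	offset 8..11  : 02 02 02 02
 * and wraps back to 0x00 at offset 1024. A mismatch against dstmem after the
 * transfer is what dhd_msgbuf_dmaxfer_process() reports as a loopback failure.
 */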
5821
5822static void
5823dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg)
5824{
5825 dhd_prot_t *prot = dhd->prot;
5826 uint64 end_usec;
5827 pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg;
5828
5829 BCM_REFERENCE(cmplt);
5830 end_usec = OSL_SYSUPTIME_US();
5831
5832 DHD_ERROR(("DMA loopback status: %d\n", cmplt->compl_hdr.status));
5833 prot->dmaxfer.status = cmplt->compl_hdr.status;
5834 OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
5835 if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) {
5836 if (memcmp(prot->dmaxfer.srcmem.va,
5837 prot->dmaxfer.dstmem.va, prot->dmaxfer.len) ||
5838 cmplt->compl_hdr.status != BCME_OK) {
5839 DHD_ERROR(("DMA loopback failed\n"));
 5840 prhex("XFER SRC: ",
 5841 prot->dmaxfer.srcmem.va, prot->dmaxfer.len);
 5842 prhex("XFER DST: ",
5843 prot->dmaxfer.dstmem.va, prot->dmaxfer.len);
5844 prot->dmaxfer.status = BCME_ERROR;
5845 }
5846 else {
5847 switch (prot->dmaxfer.d11_lpbk) {
5848 case M2M_DMA_LPBK: {
5849 DHD_ERROR(("DMA successful pcie m2m DMA loopback\n"));
5850 } break;
5851 case D11_LPBK: {
 5852 DHD_ERROR(("DMA successful with d11 loopback\n"));
5853 } break;
5854 case BMC_LPBK: {
5855 DHD_ERROR(("DMA successful with bmc loopback\n"));
5856 } break;
5857 case M2M_NON_DMA_LPBK: {
5858 DHD_ERROR(("DMA successful pcie m2m NON DMA loopback\n"));
5859 } break;
5860 case D11_HOST_MEM_LPBK: {
5861 DHD_ERROR(("DMA successful d11 host mem loopback\n"));
5862 } break;
5863 case BMC_HOST_MEM_LPBK: {
5864 DHD_ERROR(("DMA successful bmc host mem loopback\n"));
5865 } break;
5866 default: {
5867 DHD_ERROR(("Invalid loopback option\n"));
5868 } break;
5869 }
5870
5871 if (DHD_LPBKDTDUMP_ON()) {
5872 /* debug info print of the Tx and Rx buffers */
5873 dhd_prhex("XFER SRC: ", prot->dmaxfer.srcmem.va,
5874 prot->dmaxfer.len, DHD_INFO_VAL);
5875 dhd_prhex("XFER DST: ", prot->dmaxfer.dstmem.va,
5876 prot->dmaxfer.len, DHD_INFO_VAL);
5877 }
5878 }
5879 }
 5880
5881 dhd_prepare_schedule_dmaxfer_free(dhd);
5882 end_usec -= prot->dmaxfer.start_usec;
5883 if (end_usec)
5884 DHD_ERROR(("DMA loopback %d bytes in %lu usec, %u kBps\n",
5885 prot->dmaxfer.len, (unsigned long)end_usec,
5886 (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)end_usec)));
 5887 dhd->prot->dmaxfer.in_progress = FALSE;
5888
5889 dhd->bus->dmaxfer_complete = TRUE;
5890 dhd_os_dmaxfer_wake(dhd);
5891}
5892
5893/** Test functionality.
5894 * Transfers bytes from host to dongle and to host again using DMA
5895 * This function is not reentrant, as prot->dmaxfer.in_progress is not protected
5896 * by a spinlock.
5897 */
5898int
5899dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay,
5900 uint d11_lpbk, uint core_num)
5901{
5902 unsigned long flags;
5903 int ret = BCME_OK;
5904 dhd_prot_t *prot = dhd->prot;
5905 pcie_dma_xfer_params_t *dmap;
5906 uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT);
5907 uint16 alloced = 0;
5908 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
5909
5910 if (prot->dmaxfer.in_progress) {
5911 DHD_ERROR(("DMA is in progress...\n"));
5912 return BCME_ERROR;
5913 }
5914
5915 if (d11_lpbk >= MAX_LPBK) {
5916 DHD_ERROR(("loopback mode should be either"
5917 " 0-PCIE_M2M_DMA, 1-D11, 2-BMC or 3-PCIE_M2M_NonDMA\n"));
5918 return BCME_ERROR;
5919 }
5920
5921 DHD_GENERAL_LOCK(dhd, flags);
5922
5923 prot->dmaxfer.in_progress = TRUE;
5924 if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay,
4c205efb 5925 &prot->dmaxfer)) != BCME_OK) {
3c2a0909 5926 prot->dmaxfer.in_progress = FALSE;
4c205efb 5927 DHD_GENERAL_UNLOCK(dhd, flags);
3c2a0909
S
5928 return ret;
5929 }
5930
5931 dmap = (pcie_dma_xfer_params_t *)
5932 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
5933
5934 if (dmap == NULL) {
5935 dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer);
5936 prot->dmaxfer.in_progress = FALSE;
5937 DHD_GENERAL_UNLOCK(dhd, flags);
5938 return BCME_NOMEM;
5939 }
5940
5941 /* Common msg buf hdr */
5942 dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER;
5943 dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID);
5944 dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
5945 dmap->cmn_hdr.flags = ring->current_phase;
5946 ring->seqnum++;
5947
5948 dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa));
5949 dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa));
5950 dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa));
5951 dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa));
5952 dmap->xfer_len = htol32(prot->dmaxfer.len);
5953 dmap->srcdelay = htol32(prot->dmaxfer.srcdelay);
5954 dmap->destdelay = htol32(prot->dmaxfer.destdelay);
5955 prot->dmaxfer.d11_lpbk = d11_lpbk;
5956 dmap->flags = (((core_num & PCIE_DMA_XFER_FLG_CORE_NUMBER_MASK)
5957 << PCIE_DMA_XFER_FLG_CORE_NUMBER_SHIFT) |
5958 ((prot->dmaxfer.d11_lpbk & PCIE_DMA_XFER_FLG_D11_LPBK_MASK)
5959 << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT));
5960 prot->dmaxfer.start_usec = OSL_SYSUPTIME_US();
5961
5962 /* update ring's WR index and ring doorbell to dongle */
 5963 dhd_prot_ring_write_complete(dhd, ring, dmap, 1);
 5964
 5965 DHD_GENERAL_UNLOCK(dhd, flags);
 5966
 5967 DHD_ERROR(("DMA loopback Started...\n"));
5968
5969 return BCME_OK;
5970} /* dhdmsgbuf_dmaxfer_req */
5971
5972dma_xfer_status_t
5973dhdmsgbuf_dmaxfer_status(dhd_pub_t *dhd)
5974{
5975 dhd_prot_t *prot = dhd->prot;
5976
5977 if (prot->dmaxfer.in_progress)
5978 return DMA_XFER_IN_PROGRESS;
5979 else if (prot->dmaxfer.status == BCME_OK)
5980 return DMA_XFER_SUCCESS;
5981 else
5982 return DMA_XFER_FAILED;
5983}
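
/*
 * Illustrative sketch (editorial note, not part of the driver): starting a
 * 4 KB pcie m2m DMA loopback and polling for the result. `dhdp` is a
 * placeholder handle and OSL_SLEEP() is assumed to be the usual OSL msec
 * sleep helper; the real completion path instead wakes the waiter via
 * dhd_os_dmaxfer_wake() from dhd_msgbuf_dmaxfer_process().
 *
 *	if (dhdmsgbuf_dmaxfer_req(dhdp, 4096, 0, 0, M2M_DMA_LPBK, 0) == BCME_OK) {
 *		while (dhdmsgbuf_dmaxfer_status(dhdp) == DMA_XFER_IN_PROGRESS)
 *			OSL_SLEEP(10);
 *		if (dhdmsgbuf_dmaxfer_status(dhdp) == DMA_XFER_SUCCESS)
 *			DHD_ERROR(("loopback passed\n"));
 *	}
 */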
5984
5985/** Called in the process of submitting an ioctl to the dongle */
5986static int
5987dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
5988{
5989 int ret = 0;
5990 uint copylen = 0;
5991
5992 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5993
5994 if (cmd == WLC_GET_VAR && buf)
5995 {
5996 if (!len || !*(uint8 *)buf) {
5997 DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__));
5998 ret = BCME_BADARG;
5999 goto done;
6000 }
6001
6002 /* Respond "bcmerror" and "bcmerrorstr" with local cache */
6003 copylen = MIN(len, BCME_STRLEN);
6004
6005 if ((len >= strlen("bcmerrorstr")) &&
6006 (!strcmp((char *)buf, "bcmerrorstr"))) {
6007
6008 strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen);
6009 *(uint8 *)((uint8 *)buf + (copylen - 1)) = '\0';
6010
6011 goto done;
6012 } else if ((len >= strlen("bcmerror")) &&
6013 !strcmp((char *)buf, "bcmerror")) {
6014
6015 *(uint32 *)(uint32 *)buf = dhd->dongle_error;
6016
6017 goto done;
6018 }
6019 }
6020
6021
6022 DHD_CTL(("query_ioctl: ACTION %d ifdix %d cmd %d len %d \n",
6023 action, ifidx, cmd, len));
6024#ifdef REPORT_FATAL_TIMEOUTS
6025 /*
6026 * These timers "should" be started before sending H2D interrupt.
6027 * Think of the scenario where H2D interrupt is fired and the Dongle
6028 * responds back immediately. From the DPC we would stop the cmd, bus
6029 * timers. But the process context could have switched out leading to
6030 * a situation where the timers are Not started yet, but are actually stopped.
6031 *
6032 * Disable preemption from the time we start the timer until we are done
 6033 * with sending H2D interrupts.
6034 */
6035 OSL_DISABLE_PREEMPTION(dhd->osh);
6036 dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
6037 dhd_start_cmd_timer(dhd);
6038 dhd_start_bus_timer(dhd);
6039#endif /* REPORT_FATAL_TIMEOUTS */
6040
6041 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
6042
6043#ifdef REPORT_FATAL_TIMEOUTS
6044 /* For some reason if we fail to ring door bell, stop the timers */
6045 if (ret < 0) {
6046 DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
6047 dhd_stop_cmd_timer(dhd);
6048 dhd_stop_bus_timer(dhd);
6049 OSL_ENABLE_PREEMPTION(dhd->osh);
6050 goto done;
6051 }
6052 OSL_ENABLE_PREEMPTION(dhd->osh);
6053#else
6054 if (ret < 0) {
6055 DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
6056 goto done;
6057 }
6058#endif /* REPORT_FATAL_TIMEOUTS */
6059
6060 /* wait for IOCTL completion message from dongle and get first fragment */
6061 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
6062
6063done:
6064 return ret;
6065}
6066
6067/**
6068 * Waits for IOCTL completion message from the dongle, copies this into caller
6069 * provided parameter 'buf'.
6070 */
6071static int
6072dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf)
6073{
6074 dhd_prot_t *prot = dhd->prot;
6075 int timeleft;
6076 unsigned long flags;
6077 int ret = 0;
6078
6079 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
6080
6081 if (dhd_query_bus_erros(dhd)) {
6082 ret = -EIO;
6083 goto out;
6084 }
6085
6086 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
6087
6088#ifdef DHD_RECOVER_TIMEOUT
6089 if (prot->ioctl_received == 0) {
6090 uint32 intstatus = 0;
6091 uint32 intmask = 0;
6092 intstatus = si_corereg(dhd->bus->sih,
6093 dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6094 intmask = si_corereg(dhd->bus->sih,
6095 dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0);
6096 if ((intstatus) && (!intmask) && (timeleft == 0) && (!dhd_query_bus_erros(dhd)))
6097 {
6098 DHD_ERROR(("%s: iovar timeout trying again intstatus=%x intmask=%x\n",
6099 __FUNCTION__, intstatus, intmask));
6100 DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n"));
6101 DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
6102 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
6103 "dpc_return_busdown_count=%lu\n",
6104 dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
6105 dhd->bus->isr_intr_disable_count,
6106 dhd->bus->suspend_intr_disable_count,
6107 dhd->bus->dpc_return_busdown_count));
6108
6109 dhd_prot_process_ctrlbuf(dhd);
6110
6111 timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received);
6112 /* Enable Back Interrupts using IntMask */
6113 dhdpcie_bus_intr_enable(dhd->bus);
6114 }
6115 }
6116#endif /* DHD_RECOVER_TIMEOUT */
6117
6118 if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) {
6119 uint32 intstatus;
6120
6121 dhd->rxcnt_timeout++;
6122 dhd->rx_ctlerrs++;
6123 dhd->iovar_timeout_occured = TRUE;
6124 DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d "
6125 "trans_id %d state %d busstate=%d ioctl_received=%d\n",
6126 __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd,
6127 prot->ioctl_trans_id, prot->ioctl_state,
6128 dhd->busstate, prot->ioctl_received));
6129 if (prot->curr_ioctl_cmd == WLC_SET_VAR ||
6130 prot->curr_ioctl_cmd == WLC_GET_VAR) {
6131 char iovbuf[32];
6132 int i;
6133 int dump_size = 128;
6134 uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va;
6135 memset(iovbuf, 0, sizeof(iovbuf));
6136 strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1);
6137 iovbuf[sizeof(iovbuf) - 1] = '\0';
6138 DHD_ERROR(("Current IOVAR (%s): %s\n",
6139 prot->curr_ioctl_cmd == WLC_SET_VAR ?
6140 "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf));
6141 DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n"));
6142 for (i = 0; i < dump_size; i++) {
6143 DHD_ERROR(("%02X ", ioctl_buf[i]));
6144 if ((i % 32) == 31) {
6145 DHD_ERROR(("\n"));
6146 }
6147 }
6148 DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n"));
6149 }
6150
6151 /* Check the PCIe link status by reading intstatus register */
6152 intstatus = si_corereg(dhd->bus->sih,
6153 dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0);
6154 if (intstatus == (uint32)-1) {
6155 DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__));
6156 dhd->bus->is_linkdown = TRUE;
6157 }
6158
6159 dhd_bus_dump_console_buffer(dhd->bus);
6160 dhd_prot_debug_info_print(dhd);
6161
6162#ifdef DHD_FW_COREDUMP
6163 /* Collect socram dump */
6164 if (dhd->memdump_enabled) {
6165 /* collect core dump */
6166 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT;
6167 dhd_bus_mem_dump(dhd);
6168 }
6169#endif /* DHD_FW_COREDUMP */
6170#ifdef SUPPORT_LINKDOWN_RECOVERY
6171#ifdef CONFIG_ARCH_MSM
6172 dhd->bus->no_cfg_restore = 1;
6173#endif /* CONFIG_ARCH_MSM */
6174#endif /* SUPPORT_LINKDOWN_RECOVERY */
6175 ret = -ETIMEDOUT;
6176 goto out;
6177 } else {
6178 if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) {
6179 DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n",
6180 __FUNCTION__, prot->ioctl_received));
6181 ret = -EINVAL;
6182 goto out;
6183 }
6184 dhd->rxcnt_timeout = 0;
6185 dhd->rx_ctlpkts++;
6186 DHD_CTL(("%s: ioctl resp resumed, got %d\n",
6187 __FUNCTION__, prot->ioctl_resplen));
6188 }
6189
6190 if (dhd->prot->ioctl_resplen > len)
6191 dhd->prot->ioctl_resplen = (uint16)len;
6192 if (buf)
6193 bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen);
6194
6195 ret = (int)(dhd->prot->ioctl_status);
6196
6197out:
6198 DHD_GENERAL_LOCK(dhd, flags);
6199 dhd->prot->ioctl_state = 0;
6200 dhd->prot->ioctl_resplen = 0;
6201 dhd->prot->ioctl_received = IOCTL_WAIT;
6202 dhd->prot->curr_ioctl_cmd = 0;
6203 DHD_GENERAL_UNLOCK(dhd, flags);
6204
6205 return ret;
6206} /* dhd_msgbuf_wait_ioctl_cmplt */
6207
6208static int
6209dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action)
6210{
6211 int ret = 0;
6212
6213 DHD_TRACE(("%s: Enter \n", __FUNCTION__));
6214
6215 if (dhd->busstate == DHD_BUS_DOWN) {
6216 DHD_ERROR(("%s : bus is down. we have nothing to do\n", __FUNCTION__));
6217 return -EIO;
6218 }
6219
6220 /* don't talk to the dongle if fw is about to be reloaded */
6221 if (dhd->hang_was_sent) {
6222 DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n",
6223 __FUNCTION__));
6224 return -EIO;
6225 }
6226
6227 DHD_CTL(("ACTION %d ifdix %d cmd %d len %d \n",
6228 action, ifidx, cmd, len));
6229
6230#ifdef REPORT_FATAL_TIMEOUTS
6231 /*
6232 * These timers "should" be started before sending H2D interrupt.
6233 * Think of the scenario where H2D interrupt is fired and the Dongle
6234 * responds back immediately. From the DPC we would stop the cmd, bus
6235 * timers. But the process context could have switched out leading to
6236 * a situation where the timers are Not started yet, but are actually stopped.
6237 *
6238 * Disable preemption from the time we start the timer until we are done
 6239 * with sending H2D interrupts.
6240 */
6241 OSL_DISABLE_PREEMPTION(dhd->osh);
6242 dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd);
6243 dhd_start_cmd_timer(dhd);
6244 dhd_start_bus_timer(dhd);
6245#endif /* REPORT_FATAL_TIMEOUTS */
6246
6247 /* Fill up msgbuf for ioctl req */
6248 ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx);
6249
6250#ifdef REPORT_FATAL_TIMEOUTS
6251 /* For some reason if we fail to ring door bell, stop the timers */
6252 if (ret < 0) {
6253 DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
6254 dhd_stop_cmd_timer(dhd);
6255 dhd_stop_bus_timer(dhd);
6256 OSL_ENABLE_PREEMPTION(dhd->osh);
6257 goto done;
6258 }
6259
6260 OSL_ENABLE_PREEMPTION(dhd->osh);
6261#else
6262 if (ret < 0) {
6263 DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__));
6264 goto done;
6265 }
6266#endif /* REPORT_FATAL_TIMEOUTS */
6267
6268 ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf);
6269
6270done:
6271 return ret;
6272}
6273
6274/** Called by upper DHD layer. Handles a protocol control response asynchronously. */
6275int dhd_prot_ctl_complete(dhd_pub_t *dhd)
6276{
6277 return 0;
6278}
6279
6280/** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */
6281int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name,
6282 void *params, int plen, void *arg, int len, bool set)
6283{
6284 return BCME_UNSUPPORTED;
6285}
6286
6287/** Add prot dump output to a buffer */
6288void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
6289{
6290
6291 if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM)
6292 bcm_bprintf(b, "\nd2h_sync: SEQNUM:");
6293 else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM)
6294 bcm_bprintf(b, "\nd2h_sync: XORCSUM:");
6295 else
6296 bcm_bprintf(b, "\nd2h_sync: NONE:");
6297 bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n",
6298 dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot);
6299
6300 bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n",
6301 dhd->dma_h2d_ring_upd_support,
6302 dhd->dma_d2h_ring_upd_support,
6303 dhd->prot->rw_index_sz);
6304 bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
6305 h2d_max_txpost, dhd->prot->h2d_max_txpost);
6306}
6307
6308/* Update local copy of dongle statistics */
6309void dhd_prot_dstats(dhd_pub_t *dhd)
6310{
6311 return;
6312}
6313
6314/** Called by upper DHD layer */
6315int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf,
6316 uint reorder_info_len, void **pkt, uint32 *free_buf_count)
6317{
6318 return 0;
6319}
6320
6321/** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */
6322int
6323dhd_post_dummy_msg(dhd_pub_t *dhd)
6324{
6325 unsigned long flags;
6326 hostevent_hdr_t *hevent = NULL;
6327 uint16 alloced = 0;
6328
6329 dhd_prot_t *prot = dhd->prot;
6330 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6331
6332#ifdef PCIE_INB_DW
6333 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
6334 return BCME_ERROR;
6335#endif /* PCIE_INB_DW */
6336
6337 DHD_GENERAL_LOCK(dhd, flags);
6338
6339 hevent = (hostevent_hdr_t *)
6340 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6341
6342 if (hevent == NULL) {
6343 DHD_GENERAL_UNLOCK(dhd, flags);
6344#ifdef PCIE_INB_DW
6345 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6346#endif
6347 return -1;
6348 }
6349
6350 /* CMN msg header */
6351 hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6352 ring->seqnum++;
6353 hevent->msg.msg_type = MSG_TYPE_HOST_EVNT;
6354 hevent->msg.if_id = 0;
6355 hevent->msg.flags = ring->current_phase;
6356
6357 /* Event payload */
6358 hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD);
6359
6360 /* Since, we are filling the data directly into the bufptr obtained
6361 * from the msgbuf, we can directly call the write_complete
6362 */
6363 dhd_prot_ring_write_complete(dhd, ring, hevent, 1);
6364 DHD_GENERAL_UNLOCK(dhd, flags);
6365#ifdef PCIE_INB_DW
6366 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6367#endif
6368
6369 return 0;
6370}
6371
6372/**
6373 * If exactly_nitems is true, this function will allocate space for nitems or fail
6374 * If exactly_nitems is false, this function will allocate space for nitems or less
6375 */
6376static void * BCMFASTPATH
6377dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
6378 uint16 nitems, uint16 * alloced, bool exactly_nitems)
6379{
6380 void * ret_buf;
6381
6382 /* Alloc space for nitems in the ring */
6383 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
6384
6385 if (ret_buf == NULL) {
6386 /* if alloc failed , invalidate cached read ptr */
6387 if (dhd->dma_d2h_ring_upd_support) {
6388 ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx);
6389 } else {
6390 dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx);
6391#ifdef SUPPORT_LINKDOWN_RECOVERY
6392 /* Check if ring->rd is valid */
6393 if (ring->rd >= ring->max_items) {
6394 dhd->bus->read_shm_fail = TRUE;
6395 DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd));
6396 return NULL;
6397 }
6398#endif /* SUPPORT_LINKDOWN_RECOVERY */
6399 }
6400
6401 /* Try allocating once more */
6402 ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems);
6403
6404 if (ret_buf == NULL) {
6405 DHD_INFO(("%s: Ring space not available \n", ring->name));
6406 return NULL;
6407 }
6408 }
6409
6410 if (ret_buf == HOST_RING_BASE(ring)) {
6411 DHD_INFO(("%s: setting the phase now\n", ring->name));
6412 ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT;
6413 }
6414
6415 /* Return alloced space */
6416 return ret_buf;
6417}
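
/*
 * Producer pattern sketch (editorial note): every H2D submission in this file
 * follows the same three steps under DHD_GENERAL_LOCK: reserve a slot with
 * dhd_prot_alloc_ring_space(), fill the message in place, then publish it with
 * a single doorbell via dhd_prot_ring_write_complete(). The local names
 * (`msg`, `ring`, `alloced`, `flags`) stand in for caller state.
 *
 *	DHD_GENERAL_LOCK(dhd, flags);
 *	msg = dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
 *	if (msg == NULL) {	// ring still full even after the RD index refresh above
 *		DHD_GENERAL_UNLOCK(dhd, flags);
 *		return BCME_NOMEM;
 *	}
 *	// ... set cmn_hdr.msg_type, epoch and phase from the ring state ...
 *	dhd_prot_ring_write_complete(dhd, ring, msg, 1);
 *	DHD_GENERAL_UNLOCK(dhd, flags);
 */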
6418
6419/**
6420 * Non inline ioct request.
6421 * Form a ioctl request first as per ioctptr_reqst_hdr_t header in the circular buffer
6422 * Form a separate request buffer where a 4 byte cmn header is added in the front
6423 * buf contents from parent function is copied to remaining section of this buffer
6424 */
6425static int
6426dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx)
6427{
6428 dhd_prot_t *prot = dhd->prot;
6429 ioctl_req_msg_t *ioct_rqst;
6430 void * ioct_buf; /* For ioctl payload */
6431 uint16 rqstlen, resplen;
6432 unsigned long flags;
6433 uint16 alloced = 0;
6434 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
6435
6436 if (dhd_query_bus_erros(dhd)) {
6437 return -EIO;
6438 }
6439
6440 rqstlen = len;
6441 resplen = len;
6442
6443 /* Limit ioct request to MSGBUF_MAX_MSG_SIZE bytes including hdrs */
6444 /* 8K allocation of dongle buffer fails */
 6445 /* dhd doesn't give separate input & output buf lens */
6446 /* so making the assumption that input length can never be more than 1.5k */
6447 rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE);
6448
6449#ifdef PCIE_INB_DW
6450 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
6451 return BCME_ERROR;
6452#endif /* PCIE_INB_DW */
6453
6454 DHD_GENERAL_LOCK(dhd, flags);
6455
6456 if (prot->ioctl_state) {
6457 DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state));
6458 DHD_GENERAL_UNLOCK(dhd, flags);
6459#ifdef PCIE_INB_DW
6460 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6461#endif
6462 return BCME_BUSY;
6463 } else {
6464 prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING;
6465 }
6466
6467 /* Request for cbuf space */
6468 ioct_rqst = (ioctl_req_msg_t*)
6469 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
6470 if (ioct_rqst == NULL) {
6471 DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n"));
6472 prot->ioctl_state = 0;
6473 prot->curr_ioctl_cmd = 0;
6474 prot->ioctl_received = IOCTL_WAIT;
6475 DHD_GENERAL_UNLOCK(dhd, flags);
6476#ifdef PCIE_INB_DW
6477 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6478#endif
6479 return -1;
6480 }
6481
6482 /* Common msg buf hdr */
6483 ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ;
6484 ioct_rqst->cmn_hdr.if_id = (uint8)ifidx;
6485 ioct_rqst->cmn_hdr.flags = ring->current_phase;
6486 ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID);
6487 ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO;
6488 ring->seqnum++;
6489
6490 ioct_rqst->cmd = htol32(cmd);
6491 prot->curr_ioctl_cmd = cmd;
6492 ioct_rqst->output_buf_len = htol16(resplen);
6493 prot->ioctl_trans_id++;
6494 ioct_rqst->trans_id = prot->ioctl_trans_id;
6495
6496 /* populate ioctl buffer info */
6497 ioct_rqst->input_buf_len = htol16(rqstlen);
6498 ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa));
6499 ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa));
6500 /* copy ioct payload */
6501 ioct_buf = (void *) prot->ioctbuf.va;
6502
6503 if (buf)
6504 memcpy(ioct_buf, buf, len);
6505
6506 OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len);
6507
6508 if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN))
6509 DHD_ERROR(("host ioct address unaligned !!!!! \n"));
6510
6511 DHD_CTL(("submitted IOCTL request request_id %d, cmd %d, output_buf_len %d, tx_id %d\n",
6512 ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len,
6513 ioct_rqst->trans_id));
6514
6515 /* update ring's WR index and ring doorbell to dongle */
6516 dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1);
6517 DHD_GENERAL_UNLOCK(dhd, flags);
6518#ifdef PCIE_INB_DW
6519 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
6520#endif
6521
6522 return 0;
6523} /* dhd_fillup_ioct_reqst */
6524
6525
6526/**
6527 * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a
6528 * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring
6529 * information is posted to the dongle.
6530 *
6531 * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for
6532 * each flowring in pool of flowrings.
6533 *
6534 * returns BCME_OK=0 on success
6535 * returns non-zero negative error value on failure.
6536 */
6537static int
6538dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name,
6539 uint16 max_items, uint16 item_len, uint16 ringid)
6540{
6541 int dma_buf_alloced = BCME_NOMEM;
6542 uint32 dma_buf_len = max_items * item_len;
6543 dhd_prot_t *prot = dhd->prot;
6544 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
6545
6546 ASSERT(ring);
6547 ASSERT(name);
6548 ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF));
6549
6550 /* Init name */
6551 strncpy(ring->name, name, RING_NAME_MAX_LENGTH);
6552 ring->name[RING_NAME_MAX_LENGTH - 1] = '\0';
6553
6554 ring->idx = ringid;
6555
6556 ring->max_items = max_items;
6557 ring->item_len = item_len;
6558
6559 /* A contiguous space may be reserved for all flowrings */
6560 if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) {
6561 /* Carve out from the contiguous DMA-able flowring buffer */
6562 uint16 flowid;
6563 uint32 base_offset;
6564
6565 dhd_dma_buf_t *dma_buf = &ring->dma_buf;
6566 dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf;
6567
6568 flowid = DHD_RINGID_TO_FLOWID(ringid);
6569 base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len;
6570
6571 ASSERT(base_offset + dma_buf_len <= rsv_buf->len);
6572
6573 dma_buf->len = dma_buf_len;
6574 dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset);
6575 PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa));
6576 PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset);
6577
6578 /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */
6579 ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa));
6580
6581 dma_buf->dmah = rsv_buf->dmah;
6582 dma_buf->secdma = rsv_buf->secdma;
6583
6584 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
6585 } else {
6586 /* Allocate a dhd_dma_buf */
6587 dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len);
6588 if (dma_buf_alloced != BCME_OK) {
6589 return BCME_NOMEM;
6590 }
6591 }
6592
6593 /* CAUTION: Save ring::base_addr in little endian format! */
6594 dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa);
6595
6596#ifdef BCM_SECURE_DMA
6597 if (SECURE_DMA_ENAB(prot->osh)) {
6598 ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t));
6599 if (ring->dma_buf.secdma == NULL) {
6600 goto free_dma_buf;
6601 }
6602 }
6603#endif /* BCM_SECURE_DMA */
6604
6605 DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d "
6606 "ring start %p buf phys addr %x:%x \n",
6607 ring->name, ring->max_items, ring->item_len,
6608 dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
6609 ltoh32(ring->base_addr.low_addr)));
6610
6611 return BCME_OK;
6612
6613#ifdef BCM_SECURE_DMA
6614free_dma_buf:
6615 if (dma_buf_alloced == BCME_OK) {
6616 dhd_dma_buf_free(dhd, &ring->dma_buf);
6617 }
6618#endif /* BCM_SECURE_DMA */
6619
6620 return BCME_NOMEM;
6621
6622} /* dhd_prot_ring_attach */
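
/*
 * Worked example (editorial note) of the flowring carve-out above: each ring
 * needs dma_buf_len = max_items * item_len bytes, and a flowring carved from
 * the contiguous reservation starts at
 *	base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len
 * so, for a hypothetical 1024-item ring with 48-byte work items (48 KB per
 * ring), the flowring at pool offset 2 would begin 96 KB into the reserved
 * region.
 */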
6623
6624
6625/**
6626 * dhd_prot_ring_init - Post the common ring information to dongle.
6627 *
6628 * Used only for common rings.
6629 *
6630 * The flowrings information is passed via the create flowring control message
6631 * (tx_flowring_create_request_t) sent over the H2D control submission common
6632 * ring.
6633 */
6634static void
6635dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring)
6636{
6637 ring->wr = 0;
6638 ring->rd = 0;
6639 ring->curr_rd = 0;
6640
6641 /* CAUTION: ring::base_addr already in Little Endian */
6642 dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr,
6643 sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx);
6644 dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items,
6645 sizeof(uint16), RING_MAX_ITEMS, ring->idx);
6646 dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len,
6647 sizeof(uint16), RING_ITEM_LEN, ring->idx);
6648
6649 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
6650 sizeof(uint16), RING_WR_UPD, ring->idx);
6651 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
6652 sizeof(uint16), RING_RD_UPD, ring->idx);
6653
6654 /* ring inited */
6655 ring->inited = TRUE;
6656
6657} /* dhd_prot_ring_init */
6658
6659
6660/**
6661 * dhd_prot_ring_reset - bzero a ring's DMA-ble buffer and cache flush
6662 * Reset WR and RD indices to 0.
6663 */
6664static void
6665dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring)
6666{
6667 DHD_TRACE(("%s\n", __FUNCTION__));
6668
6669 dhd_dma_buf_reset(dhd, &ring->dma_buf);
6670
6671 ring->rd = ring->wr = 0;
6672 ring->curr_rd = 0;
6673 ring->inited = FALSE;
6674 ring->create_pending = FALSE;
6675}
6676
6677
6678/**
6679 * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects
6680 * hanging off the msgbuf_ring.
6681 */
6682static void
6683dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring)
6684{
6685 dhd_prot_t *prot = dhd->prot;
6686 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
6687 ASSERT(ring);
6688
6689 ring->inited = FALSE;
6690 /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */
6691
6692#ifdef BCM_SECURE_DMA
6693 if (SECURE_DMA_ENAB(prot->osh)) {
6694 if (ring->dma_buf.secdma) {
6695 SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma);
6696 MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t));
6697 ring->dma_buf.secdma = NULL;
6698 }
6699 }
6700#endif /* BCM_SECURE_DMA */
6701
6702 /* If the DMA-able buffer was carved out of a pre-reserved contiguous
6703 * memory, then simply stop using it.
6704 */
6705 if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) {
6706 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
6707 memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t));
6708 } else {
6709 dhd_dma_buf_free(dhd, &ring->dma_buf);
6710 }
6711
6712} /* dhd_prot_ring_detach */
6713
6714
6715/*
6716 * +----------------------------------------------------------------------------
6717 * Flowring Pool
6718 *
6719 * Unlike common rings, which are attached very early on (dhd_prot_attach),
6720 * flowrings are dynamically instantiated. Moreover, flowrings may require a
6721 * larger DMA-able buffer. To avoid issues with fragmented cache coherent
6722 * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once.
6723 * The DMA-able buffers are attached to these pre-allocated msgbuf_ring.
6724 *
6725 * Each DMA-able buffer may be allocated independently, or may be carved out
6726 * of a single large contiguous region that is registered with the protocol
6727 * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region
6728 * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic).
6729 *
6730 * No flowring pool action is performed in dhd_prot_attach(), as the number
6731 * of h2d rings is not yet known.
6732 *
6733 * In dhd_prot_init(), the number of h2d rings advertized by the dongle is used
6734 * to determine the number of flowrings required; a pool of msgbuf_rings is then
6735 * allocated and a DMA-able buffer (carved or allocated) is attached to each.
6736 * See: dhd_prot_flowrings_pool_attach()
6737 *
6738 * A flowring msgbuf_ring object may be fetched from this pool during flowring
6739 * creation, using the flowid. Likewise, flowrings may be freed back into the
6740 * pool on flowring deletion.
6741 * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release()
6742 *
6743 * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers
6744 * are detached (returned back to the carved region or freed), and the pool of
6745 * msgbuf_ring and any objects allocated against it are freed.
6746 * See: dhd_prot_flowrings_pool_detach()
6747 *
6748 * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a
6749 * state as-if upon an attach. All DMA-able buffers are retained.
6750 * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring
6751 * pool attach will notice that the pool persists and continue to use it. This
6752 * will avoid the case of a fragmented DMA-able region.
6753 *
6754 * +----------------------------------------------------------------------------
6755 */
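/*
 * The sketch below walks the pool lifecycle described above in call order.
 * It is illustration only (kept under "#if 0"), assumes the driver's usual
 * init/reset/detach entry points, and the flowid chosen is arbitrary.
 */
#if 0
static void flowring_pool_lifecycle_sketch(dhd_pub_t *dhd)
{
	uint16 flowid = DHD_FLOWRING_START_FLOWID; /* first flowring slot */
	msgbuf_ring_t *ring;

	/* dhd_prot_init(): allocate the pool and attach DMA-able buffers */
	(void)dhd_prot_flowrings_pool_attach(dhd);

	/* flowring create: borrow a pre-initialized ring for this flowid */
	ring = dhd_prot_flowrings_pool_fetch(dhd, flowid);

	/* flowring delete: return the ring to the pool */
	dhd_prot_flowrings_pool_release(dhd, flowid, ring);

	/* dhd_prot_reset(): reset ring state, but keep the DMA-able buffers */
	dhd_prot_flowrings_pool_reset(dhd);

	/* dhd_prot_detach(): free the DMA-able buffers and the pool itself */
	dhd_prot_flowrings_pool_detach(dhd);
}
#endif /* 0 */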
6756
6757/* Conversion of a flowid to a flowring pool index */
6758#define DHD_FLOWRINGS_POOL_OFFSET(flowid) \
6759 ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS)
6760
6761/* Fetch the msgbuf_ring_t from the flowring pool given a flowid */
6762#define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \
6763 (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \
6764 DHD_FLOWRINGS_POOL_OFFSET(flowid)
6765
6766/* Traverse each flowring in the flowring pool, assigning ring and flowid */
6767#define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \
6768 for ((flowid) = DHD_FLOWRING_START_FLOWID, \
6769 (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \
6770 (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \
6771 (ring)++, (flowid)++)
6772
6773/* Fetch number of H2D flowrings given the total number of h2d rings */
6774static uint16
6775dhd_get_max_flow_rings(dhd_pub_t *dhd)
6776{
6777 if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6)
6778 return dhd->bus->max_tx_flowrings;
6779 else
6780 return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS);
6781}
6782
6783/**
6784 * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t.
6785 *
6786 * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings.
6787 * Dongle includes common rings when it advertizes the number of H2D rings.
6788 * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to
6789 * allocate the DMA-able buffer and initialize each msgbuf_ring_t object.
6790 *
6791 * dhd_prot_ring_attach is invoked to perform the actual initialization and
6792 * attaching the DMA-able buffer.
6793 *
6794 * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and
6795 * initialized msgbuf_ring_t object.
6796 *
6797 * returns BCME_OK=0 on success
6798 * returns non-zero negative error value on failure.
6799 */
6800static int
6801dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd)
6802{
6803 uint16 flowid;
6804 msgbuf_ring_t *ring;
6805 uint16 h2d_flowrings_total; /* exclude H2D common rings */
6806 dhd_prot_t *prot = dhd->prot;
6807 char ring_name[RING_NAME_MAX_LENGTH];
6808
6809 if (prot->h2d_flowrings_pool != NULL)
6810		return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */
6811
6812 ASSERT(prot->h2d_rings_total == 0);
6813
6814 /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */
6815 prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus);
6816
6817 if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) {
6818 DHD_ERROR(("%s: h2d_rings_total advertized as %u\n",
6819 __FUNCTION__, prot->h2d_rings_total));
6820 return BCME_ERROR;
6821 }
6822
6823 /* Subtract number of H2D common rings, to determine number of flowrings */
6824 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
6825
6826 DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total));
6827
6828 /* Allocate pool of msgbuf_ring_t objects for all flowrings */
6829 prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh,
6830 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
6831
6832 if (prot->h2d_flowrings_pool == NULL) {
6833 DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n",
6834 __FUNCTION__, h2d_flowrings_total));
6835 goto fail;
6836 }
6837
6838 /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */
6839 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
6840 snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid);
6841 if (dhd_prot_ring_attach(dhd, ring, ring_name,
6842 prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE,
6843 DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) {
6844 goto attach_fail;
6845 }
6846 }
6847
6848 return BCME_OK;
6849
6850attach_fail:
6851 dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */
6852
6853fail:
6854 prot->h2d_rings_total = 0;
6855 return BCME_NOMEM;
6856
6857} /* dhd_prot_flowrings_pool_attach */
6858
6859
6860/**
6861 * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool.
6862 * Invokes dhd_prot_ring_reset to perform the actual reset.
6863 *
6864 * The DMA-able buffer is not freed during reset and neither is the flowring
6865 * pool freed.
6866 *
6867 * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following
6868 * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool
6869 * from a previous flowring pool instantiation will be reused.
6870 *
6871 * This will avoid a fragmented DMA-able memory condition, if multiple
6872 * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach
6873 * cycle.
6874 */
6875static void
6876dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd)
6877{
6878 uint16 flowid, h2d_flowrings_total;
6879 msgbuf_ring_t *ring;
6880 dhd_prot_t *prot = dhd->prot;
6881
6882 if (prot->h2d_flowrings_pool == NULL) {
6883 ASSERT(prot->h2d_rings_total == 0);
6884 return;
6885 }
6886 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
6887 /* Reset each flowring in the flowring pool */
6888 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
6889 dhd_prot_ring_reset(dhd, ring);
6890 ring->inited = FALSE;
6891 }
6892
6893	/* Flowring pool state is now as if dhd_prot_flowrings_pool_attach() had just run */
6894}
6895
6896
6897/**
6898 * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with
6899 * DMA-able buffers for flowrings.
6900 * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any
6901 * de-initialization of each msgbuf_ring_t.
6902 */
6903static void
6904dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd)
6905{
6906 int flowid;
6907 msgbuf_ring_t *ring;
6908 uint16 h2d_flowrings_total; /* exclude H2D common rings */
6909 dhd_prot_t *prot = dhd->prot;
6910
6911 if (prot->h2d_flowrings_pool == NULL) {
6912 ASSERT(prot->h2d_rings_total == 0);
6913 return;
6914 }
6915
6916 h2d_flowrings_total = dhd_get_max_flow_rings(dhd);
6917 /* Detach the DMA-able buffer for each flowring in the flowring pool */
6918 FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) {
6919 dhd_prot_ring_detach(dhd, ring);
6920 }
6921
6922
6923 MFREE(prot->osh, prot->h2d_flowrings_pool,
6924 (h2d_flowrings_total * sizeof(msgbuf_ring_t)));
6925
6926 prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL;
6927 prot->h2d_rings_total = 0;
6928
6929} /* dhd_prot_flowrings_pool_detach */
6930
6931
6932/**
6933 * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized
6934 * msgbuf_ring from the flowring pool, and assign it.
6935 *
6936 * Unlike common rings, which use dhd_prot_ring_init() to pass the common
6937 * ring information to the dongle, a flowring's information is passed via a
6938 * flowring create control message.
6939 *
6940 * Only the ring state (the WR and RD indices) is initialized here.
6941 */
6942static msgbuf_ring_t *
6943dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid)
6944{
6945 msgbuf_ring_t *ring;
6946 dhd_prot_t *prot = dhd->prot;
6947
6948 ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
6949 ASSERT(flowid < prot->h2d_rings_total);
6950 ASSERT(prot->h2d_flowrings_pool != NULL);
6951
6952 ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
6953
6954 /* ASSERT flow_ring->inited == FALSE */
6955
6956 ring->wr = 0;
6957 ring->rd = 0;
6958 ring->curr_rd = 0;
6959 ring->inited = TRUE;
6960 /**
6961 * Every time a flowring starts dynamically, initialize current_phase with 0
6962 * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT
6963 */
6964 ring->current_phase = 0;
6965 return ring;
6966}
6967
6968
6969/**
6970 * dhd_prot_flowrings_pool_release - release a previously fetched flowring's
6971 * msgbuf_ring back to the flow_ring pool.
6972 */
6973void
6974dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring)
6975{
6976 msgbuf_ring_t *ring;
6977 dhd_prot_t *prot = dhd->prot;
6978
6979 ASSERT(flowid >= DHD_FLOWRING_START_FLOWID);
6980 ASSERT(flowid < prot->h2d_rings_total);
6981 ASSERT(prot->h2d_flowrings_pool != NULL);
6982
6983 ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid);
6984
6985 ASSERT(ring == (msgbuf_ring_t*)flow_ring);
6986 /* ASSERT flow_ring->inited == TRUE */
6987
6988 (void)dhd_dma_buf_audit(dhd, &ring->dma_buf);
6989
6990 ring->wr = 0;
6991 ring->rd = 0;
6992 ring->inited = FALSE;
6993
6994 ring->curr_rd = 0;
6995}
6996
6997
6998/* Assumes only one index is updated at a time. */
6999/* If exactly_nitems is TRUE, allocate space for exactly nitems or fail -- except */
7000/* near the wrap-around, where the last few items of the ring are handed out to prevent a hang. */
7001/* If exactly_nitems is FALSE, allocate space for up to nitems (possibly fewer). */
7002static void *BCMFASTPATH
7003dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced,
7004 bool exactly_nitems)
7005{
7006 void *ret_ptr = NULL;
7007 uint16 ring_avail_cnt;
7008
7009 ASSERT(nitems <= ring->max_items);
7010
7011 ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items);
7012
7013 if ((ring_avail_cnt == 0) ||
7014 (exactly_nitems && (ring_avail_cnt < nitems) &&
7015 ((ring->max_items - ring->wr) >= nitems))) {
7016 DHD_INFO(("Space not available: ring %s items %d write %d read %d\n",
7017 ring->name, nitems, ring->wr, ring->rd));
7018 return NULL;
7019 }
7020 *alloced = MIN(nitems, ring_avail_cnt);
7021
7022 /* Return next available space */
7023 ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len);
7024
7025 /* Update write index */
7026 if ((ring->wr + *alloced) == ring->max_items)
7027 ring->wr = 0;
7028 else if ((ring->wr + *alloced) < ring->max_items)
7029 ring->wr += *alloced;
7030 else {
7031 /* Should never hit this */
7032 ASSERT(0);
7033 return NULL;
7034 }
7035
7036 return ret_ptr;
7037} /* dhd_prot_get_ring_space */
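/*
 * Minimal producer sketch showing how the functions above and below pair up:
 * claim ring space, fill the work item, then publish the new WR index with
 * dhd_prot_ring_write_complete(). Real callers go through
 * dhd_prot_alloc_ring_space(); the lock usage mirrors them. Illustration only.
 */
#if 0
static int ring_produce_sketch(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	unsigned long flags;
	uint16 alloced = 0;
	void *msg;

	DHD_GENERAL_LOCK(dhd, flags);

	/* Claim exactly one work item; NULL means no space is available */
	msg = dhd_prot_get_ring_space(ring, 1, &alloced, FALSE);
	if (msg == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		return BCME_NOMEM;
	}

	/* ... fill the work item pointed to by msg ... */

	/* Cache flush, publish the new WR index and ring the H2D doorbell */
	dhd_prot_ring_write_complete(dhd, ring, msg, alloced);

	DHD_GENERAL_UNLOCK(dhd, flags);
	return BCME_OK;
}
#endif /* 0 */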
7038
7039
7040/**
7041 * dhd_prot_ring_write_complete - Host updates the new WR index on producing
7042 * new messages in a H2D ring. The messages are flushed from cache prior to
7043 * posting the new WR index. The new WR index will be updated in the DMA index
7044 * array or directly in the dongle's ring state memory.
7045 * A PCIE doorbell will be generated to wake up the dongle.
7046 * This is a non-atomic function, make sure the callers
7047 * always hold appropriate locks.
7048 */
7049static void BCMFASTPATH
7050dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p,
7051 uint16 nitems)
7052{
7053 dhd_prot_t *prot = dhd->prot;
7054 uint8 db_index;
7055 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
7056
7057 /* cache flush */
7058 OSL_CACHE_FLUSH(p, ring->item_len * nitems);
7059
7060 if (IDMA_DS_ACTIVE(dhd) && IDMA_ACTIVE(dhd)) {
7061 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
7062 sizeof(uint16), RING_WR_UPD, ring->idx);
7063 } else if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
7064 dhd_prot_dma_indx_set(dhd, ring->wr,
7065 H2D_DMA_INDX_WR_UPD, ring->idx);
7066 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) {
7067 dhd_prot_dma_indx_set(dhd, ring->wr,
7068 H2D_IFRM_INDX_WR_UPD, ring->idx);
7069 } else {
7070 dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr),
7071 sizeof(uint16), RING_WR_UPD, ring->idx);
7072 }
7073
7074 /* raise h2d interrupt */
7075 if (IDMA_ACTIVE(dhd) ||
7076 (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) {
7077 if (IDMA_DS_ACTIVE(dhd)) {
7078 prot->mb_ring_fn(dhd->bus, ring->wr);
7079 } else {
7080 db_index = IDMA_IDX0;
7081 prot->mb_2_ring_fn(dhd->bus, db_index, TRUE);
7082 }
7083 } else {
7084 prot->mb_ring_fn(dhd->bus, ring->wr);
7085 }
7086}
7087
7088/**
7089 * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages
7090 * from a D2H ring. The new RD index will be updated in the DMA Index array or
7091 * directly in dongle's ring state memory.
7092 */
7093static void
7094dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring)
7095{
7096 dhd_prot_t *prot = dhd->prot;
7097 uint8 db_index;
7098
7099	/* Update the read index:
7100	 * if DMA'ing of ring indices is supported,
7101	 * update the RD index in host memory;
7102	 * otherwise write it directly to the dongle's TCM.
7103	 */
7104 if (IDMA_ACTIVE(dhd)) {
7105 dhd_prot_dma_indx_set(dhd, ring->rd,
7106 D2H_DMA_INDX_RD_UPD, ring->idx);
7107 if (IDMA_DS_ACTIVE(dhd)) {
7108 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
7109 sizeof(uint16), RING_RD_UPD, ring->idx);
7110 } else {
7111 db_index = IDMA_IDX1;
7112 prot->mb_2_ring_fn(dhd->bus, db_index, FALSE);
7113 }
7114 } else if (dhd->dma_h2d_ring_upd_support) {
7115 dhd_prot_dma_indx_set(dhd, ring->rd,
7116 D2H_DMA_INDX_RD_UPD, ring->idx);
7117 } else {
7118 dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd),
7119 sizeof(uint16), RING_RD_UPD, ring->idx);
7120 }
7121}
7122
7123static int
7124dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create)
7125{
7126 unsigned long flags;
7127 d2h_ring_create_req_t *d2h_ring;
7128 uint16 alloced = 0;
7129 int ret = BCME_OK;
7130 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7131
7132#ifdef PCIE_INB_DW
7133 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7134 return BCME_ERROR;
7135#endif /* PCIE_INB_DW */
7136 DHD_GENERAL_LOCK(dhd, flags);
7137
7138 DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__));
7139
7140 if (ring_to_create == NULL) {
7141 DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
7142 ret = BCME_ERROR;
7143 goto err;
7144 }
7145
7146 /* Request for ring buffer space */
7147 d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd,
7148 &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7149 &alloced, FALSE);
7150
7151 if (d2h_ring == NULL) {
7152 DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n",
7153 __FUNCTION__));
7154 ret = BCME_NOMEM;
7155 goto err;
7156 }
7157 ring_to_create->create_req_id = DHD_D2H_DBGRING_REQ_PKTID;
7158 ring_to_create->create_pending = TRUE;
7159
7160 /* Common msg buf hdr */
7161 d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE;
7162 d2h_ring->msg.if_id = 0;
7163 d2h_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
7164 d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id);
7165 d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings));
7166 d2h_ring->ring_type = BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL;
7167 d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM);
7168 d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE);
7169 d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
7170 d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
7171
7172 d2h_ring->flags = 0;
7173 d2h_ring->msg.epoch =
7174 dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
7175 dhd->prot->h2dring_ctrl_subn.seqnum++;
7176
7177 /* Update the flow_ring's WRITE index */
7178 dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, d2h_ring,
7179 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
7180
7181err:
7182 DHD_GENERAL_UNLOCK(dhd, flags);
7183#ifdef PCIE_INB_DW
7184 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7185#endif
7186 return ret;
7187}
7188
7189static int
7190dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create)
7191{
7192 unsigned long flags;
7193 h2d_ring_create_req_t *h2d_ring;
7194 uint16 alloced = 0;
7195 uint8 i = 0;
7196 int ret = BCME_OK;
7197
7198
7199#ifdef PCIE_INB_DW
7200 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7201 return BCME_ERROR;
7202#endif /* PCIE_INB_DW */
7203 DHD_GENERAL_LOCK(dhd, flags);
7204
7205 DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__));
7206
7207 if (ring_to_create == NULL) {
7208 DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__));
7209 ret = BCME_ERROR;
7210 goto err;
7211 }
7212
7213 /* Request for ring buffer space */
7214 h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd,
7215 &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7216 &alloced, FALSE);
7217
7218 if (h2d_ring == NULL) {
7219 DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n",
7220 __FUNCTION__));
7221 ret = BCME_NOMEM;
7222 goto err;
7223 }
7224 ring_to_create->create_req_id = DHD_H2D_DBGRING_REQ_PKTID;
7225 ring_to_create->create_pending = TRUE;
7226
7227 /* Common msg buf hdr */
7228 h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE;
7229 h2d_ring->msg.if_id = 0;
7230 h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id);
7231 h2d_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
7232 h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx));
7233 h2d_ring->ring_type = BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT;
7234 h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM);
7235 h2d_ring->n_completion_ids = ring_to_create->n_completion_ids;
7236 h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE);
7237 h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr;
7238 h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr;
7239
7240 for (i = 0; i < ring_to_create->n_completion_ids; i++) {
7241 h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]);
7242 }
7243
7244 h2d_ring->flags = 0;
7245 h2d_ring->msg.epoch =
7246 dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
7247 dhd->prot->h2dring_ctrl_subn.seqnum++;
7248
7249 /* Update the flow_ring's WRITE index */
7250 dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_ring,
7251 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
7252
7253err:
7254 DHD_GENERAL_UNLOCK(dhd, flags);
7255#ifdef PCIE_INB_DW
7256 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7257#endif
7258 return ret;
7259}
7260
7261/**
7262 * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array.
7263 * Dongle will DMA the entire array (if DMA_INDX feature is enabled).
7264 * See dhd_prot_dma_indx_init()
7265 */
7266void
7267dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid)
7268{
7269 uint8 *ptr;
7270 uint16 offset;
7271 dhd_prot_t *prot = dhd->prot;
7272 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7273
7274 switch (type) {
7275 case H2D_DMA_INDX_WR_UPD:
7276 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
7277 offset = DHD_H2D_RING_OFFSET(ringid);
7278 break;
7279
7280 case D2H_DMA_INDX_RD_UPD:
7281 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
7282 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7283 break;
7284
7285 case H2D_IFRM_INDX_WR_UPD:
7286 ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va);
7287 offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid);
7288 break;
7289
7290 default:
7291 DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
7292 __FUNCTION__));
7293 return;
7294 }
7295
7296 ASSERT(prot->rw_index_sz != 0);
7297 ptr += offset * prot->rw_index_sz;
7298
7299 *(uint16*)ptr = htol16(new_index);
7300
7301 OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz);
7302
7303 DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
7304 __FUNCTION__, new_index, type, ringid, ptr, offset));
7305
7306} /* dhd_prot_dma_indx_set */
7307
7308
7309/**
7310 * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index
7311 * array.
7312 * Dongle DMAes an entire array to host memory (if the feature is enabled).
7313 * See dhd_prot_dma_indx_init()
7314 */
7315static uint16
7316dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid)
7317{
7318 uint8 *ptr;
7319 uint16 data;
7320 uint16 offset;
7321 dhd_prot_t *prot = dhd->prot;
7322 uint16 max_h2d_rings = dhd->bus->max_submission_rings;
7323
7324 switch (type) {
7325 case H2D_DMA_INDX_WR_UPD:
7326 ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
7327 offset = DHD_H2D_RING_OFFSET(ringid);
7328 break;
7329
7330 case H2D_DMA_INDX_RD_UPD:
7331 ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va);
7332 offset = DHD_H2D_RING_OFFSET(ringid);
7333 break;
7334
7335 case D2H_DMA_INDX_WR_UPD:
7336 ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va);
7337 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7338 break;
7339
7340 case D2H_DMA_INDX_RD_UPD:
7341 ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va);
7342 offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings);
7343 break;
7344
7345 default:
7346 DHD_ERROR(("%s: Invalid option for DMAing read/write index\n",
7347 __FUNCTION__));
7348 return 0;
7349 }
7350
7351 ASSERT(prot->rw_index_sz != 0);
7352 ptr += offset * prot->rw_index_sz;
7353
7354 OSL_CACHE_INV((void *)ptr, prot->rw_index_sz);
7355
7356 data = LTOH16(*((uint16*)ptr));
7357
7358 DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n",
7359 __FUNCTION__, data, type, ringid, ptr, offset));
7360
7361 return (data);
7362
7363} /* dhd_prot_dma_indx_get */
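/*
 * Worked example of the index-array addressing used by both
 * dhd_prot_dma_indx_set() and dhd_prot_dma_indx_get(): every ring owns one
 * slot of rw_index_sz bytes at (array base + ring offset * rw_index_sz).
 * Illustration only; the H2D write-index array is used as the example.
 */
#if 0
static uint16 *dma_indx_slot_sketch(dhd_prot_t *prot, uint16 ringid)
{
	uint8 *ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va);
	uint16 offset = DHD_H2D_RING_OFFSET(ringid);

	/* e.g. with rw_index_sz == 2 and offset == 3, this selects bytes 6..7 */
	return (uint16 *)(ptr + (offset * prot->rw_index_sz));
}
#endif /* 0 */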
7364
7365/**
7366 * An array of DMA read/write indices, describing the state of the host rings, can be
7367 * maintained either in host memory or in device memory, depending on preprocessor options.
7368 * When host memory is used, this function is called during driver initialization. It
7369 * reserves and initializes blocks of DMA-able host memory, each holding an array of DMA
7370 * read or DMA write indices. The physical addresses of these blocks are communicated to
7371 * the dongle later on; by reading this host memory, the dongle learns the state of the host rings.
7372 */
7373
7374static INLINE int
7375dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
7376 dhd_dma_buf_t *dma_buf, uint32 bufsz)
7377{
7378 int rc;
7379
7380 if ((dma_buf->len == bufsz) || (dma_buf->va != NULL))
7381 return BCME_OK;
7382
7383 rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz);
7384
7385 return rc;
7386}
7387
7388int
7389dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length)
7390{
7391 uint32 bufsz;
7392 dhd_prot_t *prot = dhd->prot;
7393 dhd_dma_buf_t *dma_buf;
7394
7395 if (prot == NULL) {
7396 DHD_ERROR(("prot is not inited\n"));
7397 return BCME_ERROR;
7398 }
7399
7400 /* Dongle advertizes 2B or 4B RW index size */
7401 ASSERT(rw_index_sz != 0);
7402 prot->rw_index_sz = rw_index_sz;
7403
7404 bufsz = rw_index_sz * length;
7405
7406 switch (type) {
7407 case H2D_DMA_INDX_WR_BUF:
7408 dma_buf = &prot->h2d_dma_indx_wr_buf;
7409 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7410 goto ret_no_mem;
7411 DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n",
7412 dma_buf->len, rw_index_sz, length));
7413 break;
7414
7415 case H2D_DMA_INDX_RD_BUF:
7416 dma_buf = &prot->h2d_dma_indx_rd_buf;
7417 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7418 goto ret_no_mem;
7419 DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n",
7420 dma_buf->len, rw_index_sz, length));
7421 break;
7422
7423 case D2H_DMA_INDX_WR_BUF:
7424 dma_buf = &prot->d2h_dma_indx_wr_buf;
7425 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7426 goto ret_no_mem;
7427 DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n",
7428 dma_buf->len, rw_index_sz, length));
7429 break;
7430
7431 case D2H_DMA_INDX_RD_BUF:
7432 dma_buf = &prot->d2h_dma_indx_rd_buf;
7433 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7434 goto ret_no_mem;
7435 DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n",
7436 dma_buf->len, rw_index_sz, length));
7437 break;
7438
7439 case H2D_IFRM_INDX_WR_BUF:
7440 dma_buf = &prot->h2d_ifrm_indx_wr_buf;
7441 if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz))
7442 goto ret_no_mem;
7443 DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n",
7444 dma_buf->len, rw_index_sz, length));
7445 break;
7446
7447 default:
7448 DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__));
7449 return BCME_BADOPTION;
7450 }
7451
7452 return BCME_OK;
7453
7454ret_no_mem:
7455 DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n",
7456 __FUNCTION__, type, bufsz));
7457 return BCME_NOMEM;
7458
7459} /* dhd_prot_dma_indx_init */
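/*
 * Sketch of how the index arrays can be set up, one dhd_prot_dma_indx_init()
 * call per array type, once the dongle has advertised the index size and the
 * ring counts. The counts passed in are placeholders for whatever the shared
 * area reports; illustration only.
 */
#if 0
static int dma_indx_init_sketch(dhd_pub_t *dhd, uint32 rw_index_sz,
	uint32 h2d_rings, uint32 d2h_rings)
{
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_WR_BUF, h2d_rings))
		return BCME_NOMEM;
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz, H2D_DMA_INDX_RD_BUF, h2d_rings))
		return BCME_NOMEM;
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz, D2H_DMA_INDX_WR_BUF, d2h_rings))
		return BCME_NOMEM;
	if (dhd_prot_dma_indx_init(dhd, rw_index_sz, D2H_DMA_INDX_RD_BUF, d2h_rings))
		return BCME_NOMEM;
	return BCME_OK;
}
#endif /* 0 */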
7460
7461
7462/**
7463 * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read
7464 * from, or NULL if there are no more messages to read.
7465 */
7466static uint8*
7467dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len)
7468{
7469 uint16 wr;
7470 uint16 rd;
7471 uint16 depth;
7472 uint16 items;
7473 void *read_addr = NULL; /* address of next msg to be read in ring */
7474 uint16 d2h_wr = 0;
7475
7476 DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n",
7477 __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va),
7478 (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va)));
7479
7480	/* Remember the current read index in a separate variable, because
7481	 * ring->rd is updated at the end of this function; without this copy
7482	 * the exact read index the message was read from could not be
7483	 * reported later.
7484	 */
7485 ring->curr_rd = ring->rd;
7486
7487 /* update write pointer */
7488 if (dhd->dma_d2h_ring_upd_support) {
7489 /* DMAing write/read indices supported */
7490 d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx);
7491 ring->wr = d2h_wr;
7492 } else {
7493 dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx);
7494 }
7495
7496 wr = ring->wr;
7497 rd = ring->rd;
7498 depth = ring->max_items;
7499
7500 /* check for avail space, in number of ring items */
7501 items = READ_AVAIL_SPACE(wr, rd, depth);
7502 if (items == 0)
7503 return NULL;
7504
7505 /*
7506	 * Note that in some builds ASSERT() translates to just a printk, so
7507	 * even if this condition were hit the driver would never halt.
7508	 * dhd_prot_process_msgtype could then get stuck in a very long loop
7509	 * if this happens.
7510 */
7511 if (items > ring->max_items) {
7512 DHD_ERROR(("\r\n======================= \r\n"));
7513 DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n",
7514 __FUNCTION__, ring, ring->name, ring->max_items, items));
7515 DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth));
7516 DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n",
7517 dhd->busstate, dhd->bus->wait_for_d3_ack));
7518 DHD_ERROR(("\r\n======================= \r\n"));
7519#ifdef SUPPORT_LINKDOWN_RECOVERY
7520 if (wr >= ring->max_items) {
7521 dhd->bus->read_shm_fail = TRUE;
7522 }
7523#else
7524#ifdef DHD_FW_COREDUMP
7525 if (dhd->memdump_enabled) {
7526 /* collect core dump */
7527 dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR;
7528 dhd_bus_mem_dump(dhd);
7529
7530 }
7531#endif /* DHD_FW_COREDUMP */
7532#endif /* SUPPORT_LINKDOWN_RECOVERY */
7533
7534 *available_len = 0;
7535 dhd_schedule_reset(dhd);
7536
7537 return NULL;
7538 }
7539
7540 /* if space is available, calculate address to be read */
7541 read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len);
7542
7543 /* update read pointer */
7544 if ((ring->rd + items) >= ring->max_items)
7545 ring->rd = 0;
7546 else
7547 ring->rd += items;
7548
7549 ASSERT(ring->rd < ring->max_items);
7550
7551 /* convert items to bytes : available_len must be 32bits */
7552 *available_len = (uint32)(items * ring->item_len);
7553
7554 OSL_CACHE_INV(read_addr, *available_len);
7555
7556 /* return read address */
7557 return read_addr;
7558
7559} /* dhd_prot_get_read_addr */
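/*
 * Minimal consumer sketch for a D2H completion ring: drain whatever the dongle
 * has produced via dhd_prot_get_read_addr(), then publish the new RD index
 * with dhd_prot_upd_read_idx(). The per-message handler is hypothetical; the
 * real driver dispatches on the message type. Illustration only.
 */
#if 0
static void ring_consume_sketch(dhd_pub_t *dhd, msgbuf_ring_t *ring)
{
	uint32 len = 0;
	uint8 *msg = dhd_prot_get_read_addr(dhd, ring, &len);

	if (msg == NULL)
		return; /* nothing new to read */

	while (len >= ring->item_len) {
		/* process_one_msg(dhd, msg); -- hypothetical handler */
		msg += ring->item_len;
		len -= ring->item_len;
	}

	/* Tell the dongle how far the host has read */
	dhd_prot_upd_read_idx(dhd, ring);
}
#endif /* 0 */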
7560
7561/**
7562 * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function,
7563 * make sure the callers always hold appropriate locks.
7564 */
7565int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data)
7566{
7567 h2d_mailbox_data_t *h2d_mb_data;
7568 uint16 alloced = 0;
7569 int num_post = 1;
7570 int i;
7571
7572 DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n",
7573 __FUNCTION__, mb_data));
7574 if (!dhd->prot->h2dring_ctrl_subn.inited) {
7575 DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__));
7576 return BCME_ERROR;
7577 }
7578#ifdef PCIE_INB_DW
7579 if ((INBAND_DW_ENAB(dhd->bus)) &&
7580 (dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) ==
7581 DW_DEVICE_DS_DEV_SLEEP)) {
7582 if (mb_data == H2D_HOST_CONS_INT) {
7583 /* One additional device_wake post needed */
7584 num_post = 2;
7585 }
7586 }
7587#endif /* PCIE_INB_DW */
7588
7589 for (i = 0; i < num_post; i ++) {
7590 /* Request for ring buffer space */
7591 h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd,
7592 &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D,
7593 &alloced, FALSE);
7594
7595 if (h2d_mb_data == NULL) {
7596 DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n",
7597 __FUNCTION__));
7598 return BCME_NOMEM;
7599 }
7600
7601 memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t));
7602 /* Common msg buf hdr */
7603 h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA;
7604 h2d_mb_data->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase;
7605
7606 h2d_mb_data->msg.epoch =
7607 dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
7608 dhd->prot->h2dring_ctrl_subn.seqnum++;
7609
7610#ifdef PCIE_INB_DW
7611 /* post device_wake first */
7612 if ((num_post == 2) && (i == 0)) {
7613 h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE);
7614 } else
7615#endif /* PCIE_INB_DW */
7616 {
7617 h2d_mb_data->mail_box_data = htol32(mb_data);
7618 }
7619
7620 DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data));
7621
7622 /* upd wrt ptr and raise interrupt */
7623 /* caller of dhd_prot_h2d_mbdata_send_ctrlmsg already holding general lock */
7624 dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_mb_data,
7625 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
7626#ifdef PCIE_INB_DW
7627 /* Add a delay if device_wake is posted */
7628 if ((num_post == 2) && (i == 0)) {
7629 OSL_DELAY(1000);
7630 }
7631#endif /* PCIE_INB_DW */
7632 }
7633
7634 return 0;
7635}
7636
7637/** Creates a flow ring and informs dongle of this event */
7638int
7639dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
7640{
7641 tx_flowring_create_request_t *flow_create_rqst;
7642 msgbuf_ring_t *flow_ring;
7643 dhd_prot_t *prot = dhd->prot;
7644 unsigned long flags;
7645 uint16 alloced = 0;
7646 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
7647 uint16 max_flowrings = dhd->bus->max_tx_flowrings;
7648
7649 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
7650 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
7651 if (flow_ring == NULL) {
7652 DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
7653 __FUNCTION__, flow_ring_node->flowid));
7654 return BCME_NOMEM;
7655 }
7656
7657#ifdef PCIE_INB_DW
7658 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7659 return BCME_ERROR;
7660#endif /* PCIE_INB_DW */
7661 DHD_GENERAL_LOCK(dhd, flags);
7662
7663 /* Request for ctrl_ring buffer space */
7664 flow_create_rqst = (tx_flowring_create_request_t *)
7665 dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
7666
7667 if (flow_create_rqst == NULL) {
7668 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
7669 DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n",
7670 __FUNCTION__, flow_ring_node->flowid));
7671 DHD_GENERAL_UNLOCK(dhd, flags);
7672#ifdef PCIE_INB_DW
7673 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7674#endif
7675 return BCME_NOMEM;
7676 }
7677
7678 flow_ring_node->prot_info = (void *)flow_ring;
7679
7680 /* Common msg buf hdr */
7681 flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE;
7682 flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
7683 flow_create_rqst->msg.request_id = htol32(0); /* TBD */
7684 flow_create_rqst->msg.flags = ctrl_ring->current_phase;
7685
7686 flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
7687 ctrl_ring->seqnum++;
7688
7689 /* Update flow create message */
7690 flow_create_rqst->tid = flow_ring_node->flow_info.tid;
7691 flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
7692 memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa));
7693 memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da));
7694 /* CAUTION: ring::base_addr already in Little Endian */
7695 flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr;
7696 flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr;
7697 flow_create_rqst->max_items = htol16(prot->h2d_max_txpost);
7698 flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE);
7699
7700	/* Definition of the ifrm mask: bit0 = d11ac core, bit1 = d11ad core.
7701	 * It is currently not used for priority, so the field carries only the ifrm mask.
7702	 */
7703 if (IFRM_ACTIVE(dhd))
7704 flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0);
7705
7706 DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG
7707 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
7708 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
7709 flow_ring_node->flow_info.ifindex));
7710
7711 /* Update the flow_ring's WRITE index */
7712 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
7713 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
7714 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
7715 } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) {
7716 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
7717 H2D_IFRM_INDX_WR_UPD, flow_ring->idx);
7718 } else {
7719 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
7720 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
7721 }
7722
7723 /* update control subn ring's WR index and ring doorbell to dongle */
7724 dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1);
7725
7726 DHD_GENERAL_UNLOCK(dhd, flags);
7727#ifdef PCIE_INB_DW
7728 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7729#endif
7730
7731 return BCME_OK;
7732} /* dhd_prot_flow_ring_create */
7733
7734/** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */
7735static void
7736dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg)
7737{
7738 tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg;
7739
7740 DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__,
7741 ltoh16(flow_create_resp->cmplt.status),
7742 ltoh16(flow_create_resp->cmplt.flow_ring_id)));
7743
7744 dhd_bus_flow_ring_create_response(dhd->bus,
7745 ltoh16(flow_create_resp->cmplt.flow_ring_id),
7746 ltoh16(flow_create_resp->cmplt.status));
7747}
7748
7749static void
7750dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf)
7751{
7752 h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf;
7753 DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
7754 ltoh16(resp->cmplt.status),
7755 ltoh16(resp->cmplt.ring_id),
7756 ltoh32(resp->cmn_hdr.request_id)));
7757 if (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) {
7758 DHD_ERROR(("invalid request ID with h2d ring create complete\n"));
7759 return;
7760 }
7761 if (!dhd->prot->h2dring_info_subn->create_pending) {
7762 DHD_ERROR(("info ring create status for not pending submit ring\n"));
7763 }
7764
7765 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
7766 DHD_ERROR(("info ring create failed with status %d\n",
7767 ltoh16(resp->cmplt.status)));
7768 return;
7769 }
7770 dhd->prot->h2dring_info_subn->create_pending = FALSE;
7771 dhd->prot->h2dring_info_subn->inited = TRUE;
7772 dhd_prot_infobufpost(dhd);
7773}
7774
7775static void
7776dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf)
7777{
7778 d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf;
7779 DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__,
7780 ltoh16(resp->cmplt.status),
7781 ltoh16(resp->cmplt.ring_id),
7782 ltoh32(resp->cmn_hdr.request_id)));
7783 if (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) {
7784 DHD_ERROR(("invalid request ID with d2h ring create complete\n"));
7785 return;
7786 }
7787 if (!dhd->prot->d2hring_info_cpln->create_pending) {
7788 DHD_ERROR(("info ring create status for not pending cpl ring\n"));
7789 return;
7790 }
7791
7792 if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) {
7793 DHD_ERROR(("info cpl ring create failed with status %d\n",
7794 ltoh16(resp->cmplt.status)));
7795 return;
7796 }
7797 dhd->prot->d2hring_info_cpln->create_pending = FALSE;
7798 dhd->prot->d2hring_info_cpln->inited = TRUE;
7799}
7800
7801static void
7802dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf)
7803{
7804 d2h_mailbox_data_t *d2h_data;
7805
7806 d2h_data = (d2h_mailbox_data_t *)buf;
7807 DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__,
7808 d2h_data->d2h_mailbox_data));
7809 dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data);
7810}
7811
7812static void
7813dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf)
7814{
7815#ifdef DHD_TIMESYNC
7816 host_timestamp_msg_cpl_t *host_ts_cpl;
7817 uint32 pktid;
7818 dhd_prot_t *prot = dhd->prot;
7819
7820 host_ts_cpl = (host_timestamp_msg_cpl_t *)buf;
7821 DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__,
7822 host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id));
7823
7824 pktid = ltoh32(host_ts_cpl->msg.request_id);
7825 if (prot->hostts_req_buf_inuse == FALSE) {
7826 DHD_ERROR(("No Pending Host TS req, but completion\n"));
7827 return;
7828 }
7829 prot->hostts_req_buf_inuse = FALSE;
7830 if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) {
7831 DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n",
7832 pktid, DHD_H2D_HOSTTS_REQ_PKTID));
7833 return;
7834 }
7835 dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id,
7836 host_ts_cpl->cmplt.status);
7837#else /* DHD_TIMESYNC */
7838	DHD_ERROR(("Timesync feature not compiled in but GOT HOST_TS_COMPLETE\n"));
7839#endif /* DHD_TIMESYNC */
7840
7841}
7842
7843/** called on e.g. flow ring delete */
7844void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info)
7845{
7846 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
7847 dhd_prot_ring_detach(dhd, flow_ring);
7848 DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__));
7849}
7850
7851void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info,
7852 struct bcmstrbuf *strbuf, const char * fmt)
7853{
7854 const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x"
7855 " WORK ITEM SIZE %d MAX WORK ITEMS %d SIZE %d\n";
7856 msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info;
7857 uint16 rd, wr;
7858 uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len;
7859
7860 if (fmt == NULL) {
7861 fmt = default_fmt;
7862 }
7863 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
7864 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
7865 bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va,
7866 ltoh32(flow_ring->base_addr.high_addr),
7867 ltoh32(flow_ring->base_addr.low_addr),
7868 flow_ring->item_len, flow_ring->max_items, dma_buf_len);
7869}
7870
7871void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf)
7872{
7873 dhd_prot_t *prot = dhd->prot;
7874 bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n",
7875 dhd->prot->device_ipc_version,
7876 dhd->prot->host_ipc_version,
7877 dhd->prot->active_ipc_version);
7878
7879 bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n",
7880 dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted);
7881 bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n",
7882 dhd->prot->max_infobufpost, dhd->prot->infobufpost);
7883 bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n",
7884 dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted);
7885 bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n",
7886 dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted);
7887 bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n",
7888 dhd->prot->max_rxbufpost, dhd->prot->rxbufpost);
7889
7890 bcm_bprintf(strbuf,
7891 "%14s %5s %5s %17s %17s %14s %14s %10s\n",
7892 "Type", "RD", "WR", "BASE(VA)", "BASE(PA)",
7893 "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE");
7894 bcm_bprintf(strbuf, "%14s", "H2DCtrlPost");
7895 dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf,
7896 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7897 bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl");
7898 dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf,
7899 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7900	bcm_bprintf(strbuf, "%14s", "H2DRxPost");
7901 dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf,
7902 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7903 bcm_bprintf(strbuf, "%14s", "D2HRxCpl");
7904 dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf,
7905 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7906 bcm_bprintf(strbuf, "%14s", "D2HTxCpl");
7907 dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf,
7908 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7909 if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) {
7910 bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub");
7911 dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf,
7912 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7913 bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl");
7914 dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf,
7915 " %5d %5d %17p %8x:%8x %14d %14d %10d\n");
7916 }
7917
7918 bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n",
7919 dhd->prot->active_tx_count,
7920 DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map),
7921 DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map),
7922 DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map));
7923
7924}
7925
7926int
7927dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
7928{
7929 tx_flowring_delete_request_t *flow_delete_rqst;
7930 dhd_prot_t *prot = dhd->prot;
7931 unsigned long flags;
7932 uint16 alloced = 0;
7933 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
7934
7935#ifdef PCIE_INB_DW
7936 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
7937 return BCME_ERROR;
7938#endif /* PCIE_INB_DW */
7939
7940 DHD_GENERAL_LOCK(dhd, flags);
7941
7942 /* Request for ring buffer space */
7943 flow_delete_rqst = (tx_flowring_delete_request_t *)
7944 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
7945
7946 if (flow_delete_rqst == NULL) {
7947 DHD_GENERAL_UNLOCK(dhd, flags);
7948 DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__));
7949#ifdef PCIE_INB_DW
7950 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7951#endif
7952 return BCME_NOMEM;
7953 }
7954
7955 /* Common msg buf hdr */
7956 flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE;
7957 flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
7958 flow_delete_rqst->msg.request_id = htol32(0); /* TBD */
7959 flow_delete_rqst->msg.flags = ring->current_phase;
7960
7961 flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
7962 ring->seqnum++;
7963
7964 /* Update Delete info */
7965 flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
7966 flow_delete_rqst->reason = htol16(BCME_OK);
7967
7968 DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG
7969 " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid,
7970 MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid,
7971 flow_ring_node->flow_info.ifindex));
7972
7973 /* update ring's WR index and ring doorbell to dongle */
7974 dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1);
7975 DHD_GENERAL_UNLOCK(dhd, flags);
7976#ifdef PCIE_INB_DW
7977 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
7978#endif
7979
7980 return BCME_OK;
7981}
7982
7983static void
7984dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg)
7985{
7986 tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg;
7987
7988 DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__,
7989 flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id));
7990
7991 dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id,
7992 flow_delete_resp->cmplt.status);
7993}
7994
7995static void
7996dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg)
7997{
7998#ifdef IDLE_TX_FLOW_MGMT
7999 tx_idle_flowring_resume_response_t *flow_resume_resp =
8000 (tx_idle_flowring_resume_response_t *)msg;
8001
8002 DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__,
8003 flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id));
8004
8005 dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id,
8006 flow_resume_resp->cmplt.status);
8007#endif /* IDLE_TX_FLOW_MGMT */
8008}
8009
8010static void
8011dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg)
8012{
8013#ifdef IDLE_TX_FLOW_MGMT
8014 int16 status;
8015 tx_idle_flowring_suspend_response_t *flow_suspend_resp =
8016 (tx_idle_flowring_suspend_response_t *)msg;
8017 status = flow_suspend_resp->cmplt.status;
8018
8019 DHD_ERROR(("%s Flow id %d suspend Response status = %d\n",
8020 __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id,
8021 status));
8022
8023 if (status != BCME_OK) {
8024
8025 DHD_ERROR(("%s Error in Suspending Flow rings!!"
8026 "Dongle will still be polling idle rings!!Status = %d \n",
8027 __FUNCTION__, status));
8028 }
8029#endif /* IDLE_TX_FLOW_MGMT */
8030}
8031
8032int
8033dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
8034{
8035 tx_flowring_flush_request_t *flow_flush_rqst;
8036 dhd_prot_t *prot = dhd->prot;
8037 unsigned long flags;
8038 uint16 alloced = 0;
8039 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8040
8041#ifdef PCIE_INB_DW
8042 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
8043 return BCME_ERROR;
8044#endif /* PCIE_INB_DW */
8045
8046 DHD_GENERAL_LOCK(dhd, flags);
8047
8048 /* Request for ring buffer space */
8049 flow_flush_rqst = (tx_flowring_flush_request_t *)
8050 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8051 if (flow_flush_rqst == NULL) {
8052 DHD_GENERAL_UNLOCK(dhd, flags);
8053 DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__));
8054#ifdef PCIE_INB_DW
8055 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8056#endif
8057 return BCME_NOMEM;
8058 }
8059
8060 /* Common msg buf hdr */
8061 flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH;
8062 flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
8063 flow_flush_rqst->msg.request_id = htol32(0); /* TBD */
8064 flow_flush_rqst->msg.flags = ring->current_phase;
8065 flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8066 ring->seqnum++;
8067
8068 flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
8069 flow_flush_rqst->reason = htol16(BCME_OK);
8070
8071 DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__));
8072
8073 /* update ring's WR index and ring doorbell to dongle */
8074 dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1);
8075 DHD_GENERAL_UNLOCK(dhd, flags);
8076#ifdef PCIE_INB_DW
8077 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8078#endif
8079
8080 return BCME_OK;
8081} /* dhd_prot_flow_ring_flush */
8082
8083static void
8084dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg)
8085{
8086 tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg;
8087
8088 DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__,
8089 flow_flush_resp->cmplt.status));
8090
8091 dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id,
8092 flow_flush_resp->cmplt.status);
8093}
8094
8095/**
8096 * Request dongle to configure soft doorbells for D2H rings. Host populated soft
8097 * doorbell information is transferred to dongle via the d2h ring config control
8098 * message.
8099 */
8100void
8101dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd)
8102{
8103#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
8104 uint16 ring_idx;
8105 uint8 *msg_next;
8106 void *msg_start;
8107 uint16 alloced = 0;
8108 unsigned long flags;
8109 dhd_prot_t *prot = dhd->prot;
8110 ring_config_req_t *ring_config_req;
8111 bcmpcie_soft_doorbell_t *soft_doorbell;
8112 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
8113 const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS;
8114
8115#ifdef PCIE_INB_DW
8116 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
8117		return;
8118#endif /* PCIE_INB_DW */
8119 /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */
8120 DHD_GENERAL_LOCK(dhd, flags);
8121
8122 msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE);
8123
8124 if (msg_start == NULL) {
8125 DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n",
8126 __FUNCTION__, d2h_rings));
8127 DHD_GENERAL_UNLOCK(dhd, flags);
8128#ifdef PCIE_INB_DW
8129 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8130#endif
8131 return;
8132 }
8133
8134 msg_next = (uint8*)msg_start;
8135
8136 for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) {
8137
8138 /* position the ring_config_req into the ctrl subm ring */
8139 ring_config_req = (ring_config_req_t *)msg_next;
8140
8141 /* Common msg header */
8142 ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG;
8143 ring_config_req->msg.if_id = 0;
8144 ring_config_req->msg.flags = 0;
8145
8146 ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8147 ctrl_ring->seqnum++;
8148
8149 ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */
8150
8151 /* Ring Config subtype and d2h ring_id */
8152 ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL);
8153 ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx));
8154
8155 /* Host soft doorbell configuration */
8156 soft_doorbell = &prot->soft_doorbell[ring_idx];
8157
8158 ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value);
8159 ring_config_req->soft_doorbell.haddr.high =
8160 htol32(soft_doorbell->haddr.high);
8161 ring_config_req->soft_doorbell.haddr.low =
8162 htol32(soft_doorbell->haddr.low);
8163 ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items);
8164 ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs);
8165
8166 DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n",
8167 __FUNCTION__, ring_config_req->soft_doorbell.haddr.high,
8168 ring_config_req->soft_doorbell.haddr.low,
8169 ring_config_req->soft_doorbell.value));
8170
8171 msg_next = msg_next + ctrl_ring->item_len;
8172 }
8173
8174 /* update control subn ring's WR index and ring doorbell to dongle */
8175 dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings);
8176 DHD_GENERAL_UNLOCK(dhd, flags);
8177#ifdef PCIE_INB_DW
8178 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8179#endif
8180#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */
8181}
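/*
 * Sketch of populating one soft doorbell entry before the request above is
 * built. The register address and wakeup value are placeholders for whatever
 * the host's network processor requires, and this only applies when
 * DHD_D2H_SOFT_DOORBELL_SUPPORT is compiled in. Illustration only.
 */
#if 0
static void soft_doorbell_setup_sketch(dhd_pub_t *dhd, uint16 ring_idx,
	uint32 haddr_hi, uint32 haddr_lo, uint32 wake_value)
{
	bcmpcie_soft_doorbell_t *db = &dhd->prot->soft_doorbell[ring_idx];

	db->haddr.high = haddr_hi;	/* host address the dongle writes to */
	db->haddr.low = haddr_lo;
	db->value = wake_value;		/* value written on D2H completion */
	db->items = 0;			/* interrupt coalescing: item count */
	db->msecs = 0;			/* interrupt coalescing: timeout */
}
#endif /* 0 */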
8182
8183static void
8184dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg)
8185{
8186 DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n",
8187 __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status),
8188 ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id)));
8189}
8190
8191int
8192dhd_prot_debug_dma_info_print(dhd_pub_t *dhd)
8193{
8194 if (dhd->bus->is_linkdown) {
8195 DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers "
8196 "due to PCIe link down ------- \r\n"));
8197 return 0;
8198 }
8199
8200 DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n"));
8201
8202 //HostToDev
8203 DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
8204 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0),
8205 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0)));
8206 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
8207 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0),
8208 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0)));
8209 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
8210 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0),
8211 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0)));
8212
8213 DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
8214 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0),
8215 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0)));
8216 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
8217 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0),
8218 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0)));
8219 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
8220 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0),
8221 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0)));
8222
8223 //DevToHost
8224 DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n",
8225 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0),
8226 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0)));
8227 DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n",
8228 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0),
8229 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0)));
8230 DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n",
8231 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0),
8232 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0)));
8233
8234 DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n",
8235 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0),
8236 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0)));
8237 DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n",
8238 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0),
8239 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0)));
8240 DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n",
8241 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0),
8242 si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0)));
8243
8244 return 0;
8245}
8246
8247int
8248dhd_prot_debug_info_print(dhd_pub_t *dhd)
8249{
8250 dhd_prot_t *prot = dhd->prot;
8251 msgbuf_ring_t *ring;
8252 uint16 rd, wr;
8253 uint32 intstatus = 0;
8254 uint32 intmask = 0;
8255 uint32 mbintstatus = 0;
8256 uint32 d2h_mb_data = 0;
8257 uint32 dma_buf_len;
8258
8259 DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n"));
8260 DHD_ERROR(("DHD: %s\n", dhd_version));
8261 DHD_ERROR(("Firmware: %s\n", fw_version));
8262
8263 DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n"));
8264	DHD_ERROR(("IPCrevs: Dev %d, Host %d, active %d\n",
8265 prot->device_ipc_version,
8266 prot->host_ipc_version,
8267 prot->active_ipc_version));
8268 DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n",
8269 prot->max_tsbufpost, prot->cur_ts_bufs_posted));
8270 DHD_ERROR(("max INFO bufs to post: %d, posted %d\n",
8271 prot->max_infobufpost, prot->infobufpost));
8272 DHD_ERROR(("max event bufs to post: %d, posted %d\n",
8273 prot->max_eventbufpost, prot->cur_event_bufs_posted));
8274 DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n",
8275 prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted));
8276 DHD_ERROR(("max RX bufs to post: %d, posted %d\n",
8277 prot->max_rxbufpost, prot->rxbufpost));
8278 DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n",
8279 h2d_max_txpost, prot->h2d_max_txpost));
8280
8281 DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n"));
8282
8283 ring = &prot->h2dring_ctrl_subn;
8284 dma_buf_len = ring->max_items * ring->item_len;
8285 DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8286 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8287 ltoh32(ring->base_addr.low_addr), dma_buf_len));
8288 DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8289 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8290 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8291 DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8292 DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8293
8294 ring = &prot->d2hring_ctrl_cpln;
8295 dma_buf_len = ring->max_items * ring->item_len;
8296 DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8297 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8298 ltoh32(ring->base_addr.low_addr), dma_buf_len));
8299 DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8300 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8301 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8302 DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8303 DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8304
8305 ring = prot->h2dring_info_subn;
8306 if (ring) {
8307 dma_buf_len = ring->max_items * ring->item_len;
8308 DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8309 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8310 ltoh32(ring->base_addr.low_addr), dma_buf_len));
8311 DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8312 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8313 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8314 DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8315 DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8316 }
8317 ring = prot->d2hring_info_cpln;
8318 if (ring) {
8319 dma_buf_len = ring->max_items * ring->item_len;
8320 DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n",
8321 ring->dma_buf.va, ltoh32(ring->base_addr.high_addr),
8322 ltoh32(ring->base_addr.low_addr), dma_buf_len));
8323 DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr));
8324 dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx);
8325 dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx);
8326 DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr));
8327 DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO));
8328 }
8329
8330 DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n",
8331 __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted));
8332
8333 if (!dhd->bus->is_linkdown && dhd->bus->intstatus != (uint32)-1) {
8334 DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n"));
8335 intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8336 PCIMailBoxInt, 0, 0);
8337 intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8338 PCIMailBoxMask, 0, 0);
8339 mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx,
8340 PCID2H_MailBox, 0, 0);
8341 dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0);
8342
8343 DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n",
8344 intstatus, intmask, mbintstatus));
8345 DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data,
8346 dhd->bus->def_intmask));
8347
8348 DHD_ERROR(("host pcie_irq enabled = %d\n", dhdpcie_irq_enabled(dhd->bus)));
8349
8350 DHD_ERROR(("\n ------- DUMPING PCIE Registers ------- \r\n"));
8351 /* hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */
8352 DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x\n",
8353 PCIECFGREG_STATUS_CMD,
8354 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)),
8355 PCIECFGREG_BASEADDR0,
8356 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32))));
8357 DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x "
8358 "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL,
8359 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL,
8360 sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2,
8361 dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2,
8362 sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1,
8363 dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1,
8364 sizeof(uint32))));
8365
8366 /* hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/
8367 * CurrentPcieGen2ProgramGuide/pcie_ep.htm
8368 */
8369 DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x "
8370 "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0,
8371 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0),
8372 PCIECFGREG_PHY_DBG_CLKREQ1,
8373 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1),
8374 PCIECFGREG_PHY_DBG_CLKREQ2,
8375 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2),
8376 PCIECFGREG_PHY_DBG_CLKREQ3,
8377 dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3)));
8378
8379 DHD_ERROR(("Pcie RC Error Status Val=0x%x\n",
8380 dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR,
8381 PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0)));
8382
8383 DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n",
8384 dhd_debug_get_rc_linkcap(dhd->bus)));
8385
8386 DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n"));
8387 DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n"
8388 "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n"
8389 "dpc_return_busdown_count=%lu\n",
8390 dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count,
8391 dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count,
8392 dhd->bus->dpc_return_busdown_count));
8393
8394 }
8395 dhd_prot_debug_dma_info_print(dhd);
8396#ifdef DHD_FW_COREDUMP
8397 if (dhd->memdump_enabled) {
8398#ifdef DHD_SSSR_DUMP
8399 if (dhd->sssr_inited) {
8400 dhdpcie_sssr_dump(dhd);
8401 }
8402#endif /* DHD_SSSR_DUMP */
8403 }
8404#endif /* DHD_FW_COREDUMP */
8405 return 0;
8406}
8407
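/**
 * Dump the host-resident DMA index blocks: the RPTR block for the H2D
 * common rings and flow rings (from d2h_dma_indx_wr_buf) and the WPTR block
 * for the D2H completion rings (from h2d_dma_indx_rd_buf). Each buffer is
 * cache-invalidated before reading so the dump reflects what the dongle
 * last wrote.
 */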
8408int
8409dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b)
8410{
8411 uint32 *ptr;
8412 uint32 value;
8413 uint32 i;
8414 uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus);
8415
8416 OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va,
8417 dhd->prot->d2h_dma_indx_wr_buf.len);
8418
8419 ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va);
8420
8421 bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues);
8422
8423	bcm_bprintf(b, "\nRPTR block H2D common rings, %p\n", ptr);
8424 value = ltoh32(*ptr);
8425 bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value);
8426 ptr++;
8427 value = ltoh32(*ptr);
8428 bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value);
8429
8430 ptr++;
8431	bcm_bprintf(b, "RPTR block Flow rings, %p\n", ptr);
8432 for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) {
8433 value = ltoh32(*ptr);
8434 bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value);
8435 ptr++;
8436 }
8437
8438 OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va,
8439 dhd->prot->h2d_dma_indx_rd_buf.len);
8440
8441 ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va);
8442
8443	bcm_bprintf(b, "\nWPTR block D2H common rings, %p\n", ptr);
8444 value = ltoh32(*ptr);
8445 bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value);
8446 ptr++;
8447 value = ltoh32(*ptr);
8448 bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value);
8449 ptr++;
8450 value = ltoh32(*ptr);
8451 bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value);
8452
8453 return 0;
8454}
8455
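/** Enable/disable metadata debug dumps (only effective when built with
 *  DHD_DBG_SHOW_METADATA); both accessors return the current setting.
 */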
8456uint32
8457dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val)
8458{
8459 dhd_prot_t *prot = dhd->prot;
8460#if DHD_DBG_SHOW_METADATA
8461 prot->metadata_dbg = val;
8462#endif
8463 return (uint32)prot->metadata_dbg;
8464}
8465
8466uint32
8467dhd_prot_metadata_dbg_get(dhd_pub_t *dhd)
8468{
8469 dhd_prot_t *prot = dhd->prot;
8470 return (uint32)prot->metadata_dbg;
8471}
8472
8473uint32
8474dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx)
8475{
8476 dhd_prot_t *prot = dhd->prot;
8477 if (rx)
8478 prot->rx_metadata_offset = (uint16)val;
8479 else
8480 prot->tx_metadata_offset = (uint16)val;
8481 return dhd_prot_metadatalen_get(dhd, rx);
8482}
8483
8484uint32
8485dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx)
8486{
8487 dhd_prot_t *prot = dhd->prot;
8488 if (rx)
8489 return prot->rx_metadata_offset;
8490 else
8491 return prot->tx_metadata_offset;
8492}
8493
8494/** optimization to write "n" tx items at a time to ring */
8495uint32
8496dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val)
8497{
8498 dhd_prot_t *prot = dhd->prot;
8499 if (set)
8500 prot->txp_threshold = (uint16)val;
8501 val = prot->txp_threshold;
8502 return val;
8503}
8504
8505#ifdef DHD_RX_CHAINING
8506
8507static INLINE void BCMFASTPATH
8508dhd_rxchain_reset(rxchain_info_t *rxchain)
8509{
8510 rxchain->pkt_count = 0;
8511}
8512
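/**
 * Add one received packet to the current rx chain. The existing chain is
 * committed first when the packet's interface, priority or ethernet
 * source/destination no longer match the chain head. Packets that are
 * multicast or not IPv4/IPv6 force an immediate commit, as does reaching
 * DHD_PKT_CTF_MAX_CHAIN_LEN chained packets.
 */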
8513static void BCMFASTPATH
8514dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx)
8515{
8516 uint8 *eh;
8517 uint8 prio;
8518 dhd_prot_t *prot = dhd->prot;
8519 rxchain_info_t *rxchain = &prot->rxchain;
8520
8521 ASSERT(!PKTISCHAINED(pkt));
8522 ASSERT(PKTCLINK(pkt) == NULL);
8523 ASSERT(PKTCGETATTR(pkt) == 0);
8524
8525 eh = PKTDATA(dhd->osh, pkt);
8526 prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT;
8527
8528 if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa,
8529 rxchain->h_da, rxchain->h_prio))) {
8530 /* Different flow - First release the existing chain */
8531 dhd_rxchain_commit(dhd);
8532 }
8533
8534 /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */
8535 /* so that the chain can be handed off to CTF bridge as is. */
8536 if (rxchain->pkt_count == 0) {
8537 /* First packet in chain */
8538 rxchain->pkthead = rxchain->pkttail = pkt;
8539
8540 /* Keep a copy of ptr to ether_da, ether_sa and prio */
8541 rxchain->h_da = ((struct ether_header *)eh)->ether_dhost;
8542 rxchain->h_sa = ((struct ether_header *)eh)->ether_shost;
8543 rxchain->h_prio = prio;
8544 rxchain->ifidx = ifidx;
8545 rxchain->pkt_count++;
8546 } else {
8547 /* Same flow - keep chaining */
8548 PKTSETCLINK(rxchain->pkttail, pkt);
8549 rxchain->pkttail = pkt;
8550 rxchain->pkt_count++;
8551 }
8552
8553 if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) &&
8554 ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) ||
8555 (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) {
8556 PKTSETCHAINED(dhd->osh, pkt);
8557 PKTCINCRCNT(rxchain->pkthead);
8558 PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt));
8559 } else {
8560 dhd_rxchain_commit(dhd);
8561 return;
8562 }
8563
8564 /* If we have hit the max chain length, dispatch the chain and reset */
8565 if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) {
8566 dhd_rxchain_commit(dhd);
8567 }
8568}
8569
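/** Hand the accumulated rx chain to dhd_bus_rx_frame() and reset the chain state. */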
8570static void BCMFASTPATH
8571dhd_rxchain_commit(dhd_pub_t *dhd)
8572{
8573 dhd_prot_t *prot = dhd->prot;
8574 rxchain_info_t *rxchain = &prot->rxchain;
8575
8576	if (rxchain->pkt_count == 0)
8577		return;
8578
8579	/* Release the packets to dhd_linux */
8580	dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count);
8581
8582 /* Reset the chain */
8583 dhd_rxchain_reset(rxchain);
8584}
8585
8586#endif /* DHD_RX_CHAINING */
8587
8588
8589#ifdef IDLE_TX_FLOW_MGMT
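/**
 * Resume an idle TX flow ring: fetch a pre-initialized msgbuf ring from the
 * flowring pool, send a MSG_TYPE_FLOW_RING_RESUME request on the H2D control
 * submission ring, and update the flow ring's and control ring's WRITE
 * indices (via DMA index, IFRM index or shared memory, as configured).
 */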
8590int
8591dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
8592{
8593 tx_idle_flowring_resume_request_t *flow_resume_rqst;
8594 msgbuf_ring_t *flow_ring;
8595 dhd_prot_t *prot = dhd->prot;
8596 unsigned long flags;
8597 uint16 alloced = 0;
8598 msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;
8599
8600 /* Fetch a pre-initialized msgbuf_ring from the flowring pool */
8601 flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
8602 if (flow_ring == NULL) {
8603 DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
8604 __FUNCTION__, flow_ring_node->flowid));
8605 return BCME_NOMEM;
8606 }
8607#ifdef PCIE_INB_DW
8608 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
8609 return BCME_ERROR;
8610#endif /* PCIE_INB_DW */
8611
8612 DHD_GENERAL_LOCK(dhd, flags);
8613
8614 /* Request for ctrl_ring buffer space */
8615 flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
8616 dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);
8617
8618 if (flow_resume_rqst == NULL) {
8619 dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
8620 DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
8621 __FUNCTION__, flow_ring_node->flowid));
8622 DHD_GENERAL_UNLOCK(dhd, flags);
8623#ifdef PCIE_INB_DW
8624 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8625#endif
8626 return BCME_NOMEM;
8627 }
8628
8629 flow_ring_node->prot_info = (void *)flow_ring;
8630
8631 /* Common msg buf hdr */
8632 flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
8633 flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
8634 flow_resume_rqst->msg.request_id = htol32(0); /* TBD */
8635
8636 flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
8637 ctrl_ring->seqnum++;
8638
8639 flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
8640 DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
8641 __FUNCTION__, flow_ring_node->flowid));
8642
8643 /* Update the flow_ring's WRITE index */
8644 if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
8645 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
8646 H2D_DMA_INDX_WR_UPD, flow_ring->idx);
8647 } else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
8648 dhd_prot_dma_indx_set(dhd, flow_ring->wr,
8649 H2D_IFRM_INDX_WR_UPD,
8650 (flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
8651 } else {
8652 dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
8653 sizeof(uint16), RING_WR_UPD, flow_ring->idx);
8654 }
8655
8656 /* update control subn ring's WR index and ring doorbell to dongle */
8657 dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);
8658
8659 DHD_GENERAL_UNLOCK(dhd, flags);
8660#ifdef PCIE_INB_DW
8661 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8662#endif
8663
8664 return BCME_OK;
8665} /* dhd_prot_flow_ring_resume */
8666
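/**
 * Request suspension of a batch of idle TX flow rings: post a single
 * MSG_TYPE_FLOW_RING_SUSPEND work item on the H2D control submission ring
 * carrying 'count' ring IDs.
 */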
8667int
8668dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
8669{
8670 tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
8671 dhd_prot_t *prot = dhd->prot;
8672 unsigned long flags;
8673 uint16 index;
8674 uint16 alloced = 0;
8675 msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;
8676
8677#ifdef PCIE_INB_DW
8678 if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
8679 return BCME_ERROR;
8680#endif /* PCIE_INB_DW */
8681
8682 DHD_GENERAL_LOCK(dhd, flags);
8683
8684 /* Request for ring buffer space */
8685 flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
8686 dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);
8687
8688 if (flow_suspend_rqst == NULL) {
8689 DHD_GENERAL_UNLOCK(dhd, flags);
8690 DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
8691#ifdef PCIE_INB_DW
8692 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8693#endif
8694 return BCME_NOMEM;
8695 }
8696
8697 /* Common msg buf hdr */
8698 flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
8699 /* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
8700 flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */
8701
8702 flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
8703 ring->seqnum++;
8704
8705 /* Update flow id info */
8706 for (index = 0; index < count; index++)
8707 {
8708 flow_suspend_rqst->ring_id[index] = ringid[index];
8709 }
8710 flow_suspend_rqst->num = count;
8711
8712	DHD_ERROR(("%s: sending batch suspend request for %d flow rings\n", __FUNCTION__, count));
8713
8714 /* update ring's WR index and ring doorbell to dongle */
8715 dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
8716 DHD_GENERAL_UNLOCK(dhd, flags);
8717#ifdef PCIE_INB_DW
8718 dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
8719#endif
8720
8721 return BCME_OK;
8722}
8723#endif /* IDLE_TX_FLOW_MGMT */
8724
8725
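/**
 * Pretty-print the dongle's extended trap data into 'b'. If no extended data
 * is present, only the raw dongle_trap_data word is printed; with 'raw' set
 * the whole blob is hex dumped, otherwise the TRAP_SIGNATURE (register file),
 * TRAP_STACK and TRAP_BACKPLANE TLVs are decoded.
 */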
8726int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
8727{
8728 uint32 i;
8729 uint32 *ext_data;
8730 hnd_ext_trap_hdr_t *hdr;
8731 bcm_tlv_t *tlv;
8732 trap_t *tr;
8733 uint32 *stack;
8734 hnd_ext_trap_bp_err_t *bpe;
8735 uint32 raw_len;
8736
8737 ext_data = dhdp->extended_trap_data;
8738
8739 /* return if there is no extended trap data */
8740 if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA))
8741 {
8742 bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
8743 return BCME_OK;
8744 }
8745
8746 bcm_bprintf(b, "Extended trap data\n");
8747
8748 /* First word is original trap_data */
8749 bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
8750 ext_data++;
8751
8752 /* Followed by the extended trap data header */
8753 hdr = (hnd_ext_trap_hdr_t *)ext_data;
8754 bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);
8755
8756 if (raw)
8757 {
8758 raw_len = sizeof(hnd_ext_trap_hdr_t) + (hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
8759 for (i = 0; i < raw_len; i++)
8760 {
8761 bcm_bprintf(b, "0x%08x ", ext_data[i]);
8762 if (i % 4 == 3)
8763 bcm_bprintf(b, "\n");
8764 }
8765 return BCME_OK;
8766 }
8767
8768 /* Extract the various supported TLVs from the extended trap data */
8769 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
8770 if (tlv)
8771 {
8772 bcm_bprintf(b, "\nTAG_TRAP_SIGNATURE len: %d\n", tlv->len);
8773 tr = (trap_t *)tlv->data;
8774
8775 bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
8776 tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
8777 bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
8778 tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
8779 bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
8780 tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
8781 }
8782
8783 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
8784 if (tlv)
8785 {
8786 bcm_bprintf(b, "\nTAG_TRAP_STACK len: %d\n", tlv->len);
8787 stack = (uint32 *)tlv->data;
8788 for (i = 0; i < (uint32)(tlv->len / 4); i++)
8789 {
8790 bcm_bprintf(b, " 0x%08x\n", *stack);
8791 stack++;
8792 }
8793 }
8794
8795 tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
8796 if (tlv)
8797 {
8798 bcm_bprintf(b, "\nTAG_TRAP_BACKPLANE len: %d\n", tlv->len);
8799 bpe = (hnd_ext_trap_bp_err_t *)tlv->data;
8800 bcm_bprintf(b, " error: %x\n", bpe->error);
8801 bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
8802 bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
8803 bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
8804 bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
8805 bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
8806 bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
8807 bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
8808 bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
8809 bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
8810 bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
8811 bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
8812 bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
8813 bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
8814 bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
8815 }
8816
8817 return BCME_OK;
8818}
8819
8820
8821#ifdef BCMPCIE
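/**
 * Send a host timestamp (MSG_TYPE_HOSTTIMSTAMP) work item to the dongle.
 * The TLV payload is copied into the pre-allocated hostts_req_buf DMA buffer
 * and its address/length are conveyed in the request; only one host TS
 * request may be outstanding at a time.
 */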
8822int
8823dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
8824 uint16 seqnum, uint16 xt_id)
8825{
8826 dhd_prot_t *prot = dhdp->prot;
8827 host_timestamp_msg_t *ts_req;
8828 unsigned long flags;
8829 uint16 alloced = 0;
8830 uchar *ts_tlv_buf;
8831
8832 if ((tlvs == NULL) || (tlv_len == 0)) {
8833 DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
8834 __FUNCTION__, tlvs, tlv_len));
8835 return -1;
8836 }
8837#ifdef PCIE_INB_DW
8838 if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
8839 return BCME_ERROR;
8840#endif /* PCIE_INB_DW */
8841
8842 DHD_GENERAL_LOCK(dhdp, flags);
8843
8844 /* if Host TS req already pending go away */
8845 if (prot->hostts_req_buf_inuse == TRUE) {
8846 DHD_ERROR(("one host TS request already pending at device\n"));
8847 DHD_GENERAL_UNLOCK(dhdp, flags);
8848#ifdef PCIE_INB_DW
8849 dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
8850#endif
8851 return -1;
8852 }
8853
8854 /* Request for cbuf space */
8855 ts_req = (host_timestamp_msg_t*)dhd_prot_alloc_ring_space(dhdp, &prot->h2dring_ctrl_subn,
8856 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
8857 if (ts_req == NULL) {
8858 DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
8859 DHD_GENERAL_UNLOCK(dhdp, flags);
8860#ifdef PCIE_INB_DW
8861 dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
8862#endif
8863 return -1;
8864 }
8865
8866 /* Common msg buf hdr */
8867 ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
8868 ts_req->msg.if_id = 0;
8869 ts_req->msg.flags = prot->h2dring_ctrl_subn.current_phase;
8870 ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;
8871
8872 ts_req->msg.epoch = prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
8873 prot->h2dring_ctrl_subn.seqnum++;
8874
8875 ts_req->xt_id = xt_id;
8876 ts_req->seqnum = seqnum;
8877 /* populate TS req buffer info */
8878 ts_req->input_data_len = htol16(tlv_len);
8879 ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
8880 ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
8881	/* copy the host TS request TLV payload */
8882 ts_tlv_buf = (void *) prot->hostts_req_buf.va;
8883 prot->hostts_req_buf_inuse = TRUE;
8884 memcpy(ts_tlv_buf, tlvs, tlv_len);
8885
8886 OSL_CACHE_FLUSH((void *) prot->hostts_req_buf.va, tlv_len);
8887
8888 if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
8889		DHD_ERROR(("host TS req buffer address is not DMA aligned\n"));
8890 }
8891
8892	DHD_CTL(("submitted Host TS request: request_id %d, data_len %d, xt_id %d, seq %d\n",
8893 ts_req->msg.request_id, ts_req->input_data_len,
8894 ts_req->xt_id, ts_req->seqnum));
8895
8896
8897 /* upd wrt ptr and raise interrupt */
8898 dhd_prot_ring_write_complete(dhdp, &prot->h2dring_ctrl_subn, ts_req,
8899 DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
8900 DHD_GENERAL_UNLOCK(dhdp, flags);
8901#ifdef PCIE_INB_DW
8902 dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
8903#endif
8904
8905 return 0;
8906} /* dhd_prot_send_host_timestamp */
8907
8908
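/** The two accessors below return, or update when 'set' is TRUE, the datapath
 *  TX/RX timestamp logging enables.
 */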
8909bool
8910dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
8911{
8912 if (set)
8913 dhd->prot->tx_ts_log_enabled = enable;
8914
8915 return dhd->prot->tx_ts_log_enabled;
8916}
8917
8918bool
8919dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
8920{
8921 if (set)
8922 dhd->prot->rx_ts_log_enabled = enable;
8923
8924 return dhd->prot->rx_ts_log_enabled;
8925}
8926#endif /* BCMPCIE */
8927
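/** Free the host DMA index buffers: the H2D write-index and D2H read-index arrays. */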
8928void
8929dhd_prot_dma_indx_free(dhd_pub_t *dhd)
8930{
8931 dhd_prot_t *prot = dhd->prot;
8932
8933 dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
8934 dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
8935}
8936
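/**
 * Completion handler for a firmware timestamp event: audits and frees the
 * associated pktid, replenishes the posted TS buffers if needed and passes
 * the payload to dhd_timesync_handle_fw_timestamp(). When DHD_TIMESYNC is
 * not compiled in, the message is only logged as unexpected.
 */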
8937static void BCMFASTPATH
8938dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf)
8939{
8940#ifdef DHD_TIMESYNC
8941 fw_timestamp_event_msg_t *resp;
8942 uint32 pktid;
8943 uint16 buflen, seqnum;
8944 void * pkt;
8945 unsigned long flags;
8946
8947 resp = (fw_timestamp_event_msg_t *)buf;
8948 pktid = ltoh32(resp->msg.request_id);
8949 buflen = ltoh16(resp->buf_len);
8950 seqnum = ltoh16(resp->seqnum);
8951
8952#if defined(DHD_PKTID_AUDIT_RING)
8953 DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
8954 DHD_DUPLICATE_FREE);
8955#endif /* DHD_PKTID_AUDIT_RING */
8956
8957 DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
8958 pktid, buflen, resp->msg.flags, ltoh16(resp->seqnum)));
8959
8960 if (!dhd->prot->cur_ts_bufs_posted) {
8961		DHD_ERROR(("no ts bufs posted, but received a completion\n"));
8962 return;
8963 }
8964
8965 dhd->prot->cur_ts_bufs_posted--;
8966 if (dhd->prot->max_tsbufpost > 0)
8967 dhd_msgbuf_rxbuf_post_ts_bufs(dhd);
8968
8969 DHD_GENERAL_LOCK(dhd, flags);
8970 pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
8971 DHD_GENERAL_UNLOCK(dhd, flags);
8972
8973 if (!pkt) {
8974 DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
8975 return;
8976 }
8977
8978 PKTSETLEN(dhd->osh, pkt, buflen);
8979 dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
8980#ifdef DHD_USE_STATIC_CTRLBUF
8981 PKTFREE_STATIC(dhd->osh, pkt, TRUE);
8982#else
8983 PKTFREE(dhd->osh, pkt, TRUE);
8984#endif /* DHD_USE_STATIC_CTRLBUF */
8985#else /* DHD_TIMESYNC */
8986	DHD_ERROR(("Timesync feature not compiled in, but got an FW TS message\n"));
8987#endif /* DHD_TIMESYNC */
8988
8989}