/**
 * @file definition of host message ring functionality
 * Provides type definitions and function prototypes used to link the
 * DHD OS, bus, and protocol modules.
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_msgbuf.c 685101 2017-02-15 11:02:19Z $
 */


#include <typedefs.h>
#include <osl.h>

#include <bcmutils.h>
#include <bcmmsgbuf.h>
#include <bcmendian.h>

#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_proto.h>

#include <dhd_bus.h>

#include <dhd_dbg.h>
#include <siutils.h>
#include <dhd_debug.h>

#include <dhd_flowring.h>

#include <pcie_core.h>
#include <bcmpcie.h>
#include <dhd_pcie.h>
#ifdef DHD_TIMESYNC
#include <dhd_timesync.h>
#endif /* DHD_TIMESYNC */

#if defined(DHD_LB)
#include <linux/cpu.h>
#include <bcm_ring.h>
#define DHD_LB_WORKQ_SZ (8192)
#define DHD_LB_WORKQ_SYNC (16)
#define DHD_LB_WORK_SCHED (DHD_LB_WORKQ_SYNC * 2)
#endif /* DHD_LB */

#include <hnd_debug.h>
#include <hnd_armtrap.h>

extern char dhd_version[];
extern char fw_version[];
/**
 * Host configures a soft doorbell for d2h rings, by specifying a 32bit host
 * address where a value must be written. Host may also apply interrupt
 * coalescing on this soft doorbell.
 * Use Case: Hosts with network processors may register with the dongle the
 * network processor's thread wakeup register and a value corresponding to the
 * core/thread context. Dongle will issue a write transaction <address,value>
 * to the PCIE RC, which the host will need to route to the mapped register
 * space.
 */
/* #define DHD_D2H_SOFT_DOORBELL_SUPPORT */

/* Dependency Check */
#if defined(IOCTLRESP_USE_CONSTMEM) && defined(DHD_USE_STATIC_CTRLBUF)
#error "DHD_USE_STATIC_CTRLBUF does NOT work with IOCTLRESP_USE_CONSTMEM"
#endif /* IOCTLRESP_USE_CONSTMEM && DHD_USE_STATIC_CTRLBUF */

#define RETRIES 2 /* # of retries to retrieve matching ioctl response */

#define DEFAULT_RX_BUFFERS_TO_POST 256
#define RXBUFPOST_THRESHOLD 32
#define RX_BUF_BURST 32 /* Rx buffers for MSDU Data */

#define DHD_STOP_QUEUE_THRESHOLD 200
#define DHD_START_QUEUE_THRESHOLD 100
#define RX_DMA_OFFSET 8 /* Mem2mem DMA inserts an extra 8 bytes */
#define IOCT_RETBUF_SIZE (RX_DMA_OFFSET + WLC_IOCTL_MAXLEN)

/* flags for ioctl pending status */
#define MSGBUF_IOCTL_ACK_PENDING (1<<0)
#define MSGBUF_IOCTL_RESP_PENDING (1<<1)

#define DMA_ALIGN_LEN 4

#define DMA_D2H_SCRATCH_BUF_LEN 8
#define DMA_XFER_LEN_LIMIT 0x400000

#ifdef BCM_HOST_BUF
#ifndef DMA_HOST_BUFFER_LEN
#define DMA_HOST_BUFFER_LEN 0x200000
#endif
#endif /* BCM_HOST_BUF */

#define DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ 8192

#define DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D 1
#define DHD_FLOWRING_MAX_EVENTBUF_POST 32
#define DHD_FLOWRING_MAX_IOCTLRESPBUF_POST 8
#define DHD_H2D_INFORING_MAX_BUF_POST 32
#define DHD_MAX_TSBUF_POST 8

#define DHD_PROT_FUNCS 41

/* Length of buffer in host for bus throughput measurement */
#define DHD_BUS_TPUT_BUF_LEN 2048

#define TXP_FLUSH_NITEMS

/* optimization to write "n" tx items at a time to ring */
#define TXP_FLUSH_MAX_ITEMS_FLUSH_CNT 48

#define RING_NAME_MAX_LENGTH 24
#define CTRLSUB_HOSTTS_MEESAGE_SIZE 1024
/* Giving room before ioctl_trans_id rolls over. */
#define BUFFER_BEFORE_ROLLOVER 300

struct msgbuf_ring; /* ring context for common and flow rings */

/**
 * PCIE D2H DMA Complete Sync Modes
 *
 * Firmware may interrupt the host before the D2H Mem2Mem DMA completes into
 * host system memory. A WAR using one of 4 approaches is needed:
 * 1. Dongle places a modulo-253 seqnum in the last word of each D2H message.
 * 2. XOR Checksum, with epoch# in each work item. Dongle builds an XOR
 *    checksum and writes it in the last word of each work item. Each work
 *    item also carries a seqnum = sequence number % 253.
 * 3. Read Barrier: Dongle does a host memory read access prior to posting an
 *    interrupt, ensuring that the D2H data transfer has indeed completed.
 * 4. Dongle DMAs all indices after producing items in the D2H ring, flushing
 *    ring contents before the indices.
 *
 * Host does not sync for DMA to complete with option #3 or #4, and a noop sync
 * callback (see dhd_prot_d2h_sync_none) may be bound.
 *
 * Dongle advertises host side sync mechanism requirements.
 */

#define PCIE_D2H_SYNC_WAIT_TRIES (512UL)
#define PCIE_D2H_SYNC_NUM_OF_STEPS (5UL)
#define PCIE_D2H_SYNC_DELAY (100UL) /* in terms of usecs */
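
/*
 * Editor's illustrative arithmetic (not in the original source): with the
 * stepper scheme used by the sync callbacks below, a try in step 's' delays
 * PCIE_D2H_SYNC_DELAY * s usecs, so the worst-case wait before declaring a
 * livelock is
 *   PCIE_D2H_SYNC_WAIT_TRIES * PCIE_D2H_SYNC_DELAY * (1+2+3+4+5)
 *   = 512 * 100us * 15 = 768,000us, i.e. roughly 768 ms,
 * excluding the cache-invalidate and cpu-relax overhead of each try.
 */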

/**
 * Custom callback attached based upon D2H DMA Sync mode advertised by dongle.
 *
 * On success: return cmn_msg_hdr_t::msg_type
 * On failure: return 0 (invalid msg_type)
 */
typedef uint8 (* d2h_sync_cb_t)(dhd_pub_t *dhd, struct msgbuf_ring *ring,
    volatile cmn_msg_hdr_t *msg, int msglen);

/*
 * +----------------------------------------------------------------------------
 *
 * RingIds and FlowId are not equivalent as ringids include D2H rings whereas
 * flowids do not.
 *
 * Dongle advertises the max H2D rings, as max_sub_queues = 'N', which includes
 * the H2D common rings as well as the (N - BCMPCIE_H2D_COMMON_MSGRINGS)
 * flowrings.
 *
 * Here is a sample mapping (based on PCIE Full Dongle Rev5), where
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2, i.e. 2 H2D common rings, and
 * BCMPCIE_COMMON_MSGRINGS = 5, i.e. including the 3 D2H common rings.
 *
 * H2D Control Submit    RingId = 0         FlowId = 0 reserved never allocated
 * H2D RxPost Submit     RingId = 1         FlowId = 1 reserved never allocated
 *
 * D2H Control Complete  RingId = 2
 * D2H Transmit Complete RingId = 3
 * D2H Receive Complete  RingId = 4
 *
 * H2D TxPost FLOWRING   RingId = 5         FlowId = 2         (1st flowring)
 * H2D TxPost FLOWRING   RingId = 6         FlowId = 3         (2nd flowring)
 * H2D TxPost FLOWRING   RingId = 5 + (N-1) FlowId = 2 + (N-1) (Nth flowring)
 *
 * When TxPost FlowId(s) are allocated, the FlowIds [0..FLOWID_RESERVED) are
 * unused, where FLOWID_RESERVED is BCMPCIE_H2D_COMMON_MSGRINGS.
 *
 * Example: when a system supports 4 bc/mc and 128 uc flowrings, with
 * BCMPCIE_H2D_COMMON_MSGRINGS = 2 and BCMPCIE_COMMON_MSGRINGS = 5, the
 * FlowId values would be in the range [2..133] and the corresponding
 * RingId values would be in the range [5..136].
 *
 * The flowid allocator may choose to allocate flowids as:
 * bc/mc (per virtual interface) in one consecutive range [2..(2+VIFS))
 * X# of uc flowids in consecutive ranges (per station Id), where X is the
 * packet's access category (e.g. 4 uc flowids per station).
 *
 * CAUTION:
 * When the DMA indices array feature is used, RingId=5, corresponding to the
 * 0th FLOWRING, will actually use the FlowId as index into the H2D DMA index
 * array, since the FlowId truly represents the index in the H2D DMA indices
 * array.
 *
 * Likewise, in the D2H direction, RingId - BCMPCIE_H2D_COMMON_MSGRINGS
 * will represent the index in the D2H DMA indices array.
 *
 * +----------------------------------------------------------------------------
 */

/* First TxPost Flowring Id */
#define DHD_FLOWRING_START_FLOWID BCMPCIE_H2D_COMMON_MSGRINGS

/* Determine whether a ringid belongs to a TxPost flowring */
#define DHD_IS_FLOWRING(ringid, max_flow_rings) \
    ((ringid) >= BCMPCIE_COMMON_MSGRINGS && \
    (ringid) < ((max_flow_rings) + BCMPCIE_COMMON_MSGRINGS))

/* Convert a H2D TxPost FlowId to a MsgBuf RingId */
#define DHD_FLOWID_TO_RINGID(flowid) \
    (BCMPCIE_COMMON_MSGRINGS + ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS))

/* Convert a MsgBuf RingId to a H2D TxPost FlowId */
#define DHD_RINGID_TO_FLOWID(ringid) \
    (BCMPCIE_H2D_COMMON_MSGRINGS + ((ringid) - BCMPCIE_COMMON_MSGRINGS))

/* Convert a H2D MsgBuf RingId to an offset index into the H2D DMA indices array.
 * This may be used for the H2D DMA WR index array, the H2D DMA RD index array,
 * or any array of H2D rings.
 */
#define DHD_H2D_RING_OFFSET(ringid) \
    (((ringid) >= BCMPCIE_COMMON_MSGRINGS) ? DHD_RINGID_TO_FLOWID(ringid) : (ringid))

/* Convert a H2D MsgBuf Flowring Id to an offset index into the H2D DMA indices array.
 * This may be used for IFRM.
 */
#define DHD_H2D_FRM_FLOW_RING_OFFSET(ringid) \
    ((ringid) - BCMPCIE_COMMON_MSGRINGS)

/* Convert a D2H MsgBuf RingId to an offset index into the D2H DMA indices array.
 * This may be used for the D2H DMA WR index array, the D2H DMA RD index array,
 * or any array of D2H rings.
 * The d2h debug ring is located at the end, i.e. after all the tx flow rings
 * and the h2d debug ring.
 * max_h2d_rings: total number of h2d rings
 */
#define DHD_D2H_RING_OFFSET(ringid, max_h2d_rings) \
    ((ringid) > (max_h2d_rings) ? \
    ((ringid) - (max_h2d_rings)) : \
    ((ringid) - BCMPCIE_H2D_COMMON_MSGRINGS))

/* Convert a D2H DMA Indices Offset to a RingId */
#define DHD_D2H_RINGID(offset) \
    ((offset) + BCMPCIE_H2D_COMMON_MSGRINGS)
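
/*
 * Editor's illustrative self-test sketch (not in the original source): with
 * the sample Rev5 values quoted above (BCMPCIE_H2D_COMMON_MSGRINGS = 2,
 * BCMPCIE_COMMON_MSGRINGS = 5), the conversion macros round-trip as shown.
 * DHD_RING_MAP_SELFTEST is a hypothetical guard so this never builds into the
 * driver; the negative-array-size idiom fails compilation if a check is wrong.
 */
#ifdef DHD_RING_MAP_SELFTEST
typedef char dhd_chk_flowid2ring[(DHD_FLOWID_TO_RINGID(2) == 5) ? 1 : -1];
typedef char dhd_chk_ring2flowid[(DHD_RINGID_TO_FLOWID(5) == 2) ? 1 : -1];
typedef char dhd_chk_h2d_offset[(DHD_H2D_RING_OFFSET(5) == 2) ? 1 : -1];
typedef char dhd_chk_d2h_ringid[(DHD_D2H_RINGID(0) == 2) ? 1 : -1];
#endif /* DHD_RING_MAP_SELFTEST */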


#define DHD_DMAH_NULL ((void*)NULL)

/*
 * Pad a DMA-able buffer by an additional cacheline. If the end of the DMA-able
 * buffer does not occupy the entire cacheline, and another object is placed
 * following the DMA-able buffer, data corruption may occur if the DMA-able
 * buffer is used for DMAing into (e.g. D2H direction), when HW cache coherency
 * is not available.
 */
#if defined(L1_CACHE_BYTES)
#define DHD_DMA_PAD (L1_CACHE_BYTES)
#else
#define DHD_DMA_PAD (128)
#endif

/* Used in loopback tests */
typedef struct dhd_dmaxfer {
    dhd_dma_buf_t srcmem;
    dhd_dma_buf_t dstmem;
    uint32 srcdelay;
    uint32 destdelay;
    uint32 len;
    bool in_progress;
    uint64 start_usec;
    uint32 d11_lpbk;
} dhd_dmaxfer_t;

/**
 * msgbuf_ring : This object manages the host side ring that includes a DMA-able
 * buffer, the WR and RD indices, ring parameters such as the maximum number of
 * items and the length of each item, and other miscellaneous runtime state.
 * A msgbuf_ring may be used to represent a H2D or D2H common ring, or a
 * H2D TxPost ring as specified in the PCIE FullDongle Spec.
 * Ring parameters are conveyed to the dongle, which maintains its own peer end
 * ring state. Depending on whether the DMA Indices feature is supported, the
 * host will update the WR/RD index in the DMA indices array in host memory or
 * directly in dongle memory.
 */
typedef struct msgbuf_ring {
    bool inited;
    uint16 idx;       /* ring id */
    uint16 rd;        /* read index */
    uint16 curr_rd;   /* read index for debug */
    uint16 wr;        /* write index */
    uint16 max_items; /* maximum number of items in ring */
    uint16 item_len;  /* length of each item in the ring */
    sh_addr_t base_addr;   /* LITTLE ENDIAN formatted: base address */
    dhd_dma_buf_t dma_buf; /* DMA-able buffer: pa, va, len, dmah, secdma */
    uint32 seqnum;    /* next expected item's sequence number */
#ifdef TXP_FLUSH_NITEMS
    void *start_addr;
    /* # of messages on ring not yet announced to dongle */
    uint16 pend_items_count;
#endif /* TXP_FLUSH_NITEMS */

    uint8 ring_type;
    uint8 n_completion_ids;
    bool create_pending;
    uint16 create_req_id;
    uint8 current_phase;
    uint16 compeltion_ring_ids[MAX_COMPLETION_RING_IDS_ASSOCIATED];
    uchar name[RING_NAME_MAX_LENGTH];
    uint32 ring_mem_allocated;
} msgbuf_ring_t;

#define DHD_RING_BGN_VA(ring) ((ring)->dma_buf.va)
#define DHD_RING_END_VA(ring) \
    ((uint8 *)(DHD_RING_BGN_VA((ring))) + \
    (((ring)->max_items - 1) * (ring)->item_len))

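/*
 * Editor's illustrative sketch (not in the original source): computing the
 * virtual address of the work item at ring index 'idx' from the macros above.
 * Note DHD_RING_END_VA is the start of the *last* item, not one past the end.
 * The guard macro and helper name are hypothetical.
 */
#ifdef DHD_RING_VA_SKETCH
static INLINE void *
dhd_ring_item_va_sketch(msgbuf_ring_t *ring, uint16 idx)
{
    ASSERT(idx < ring->max_items);
    return (uint8 *)DHD_RING_BGN_VA(ring) + ((uint32)idx * ring->item_len);
}
#endif /* DHD_RING_VA_SKETCH */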

/* This can be overwritten by module parameter defined in dhd_linux.c
 * or by dhd iovar h2d_max_txpost.
 */
int h2d_max_txpost = H2DRING_TXPOST_MAX_ITEM;

/** DHD protocol handle. Is an opaque type to other DHD software layers. */
typedef struct dhd_prot {
    osl_t *osh; /* OSL handle */
    uint16 rxbufpost;
    uint16 max_rxbufpost;
    uint16 max_eventbufpost;
    uint16 max_ioctlrespbufpost;
    uint16 max_tsbufpost;
    uint16 max_infobufpost;
    uint16 infobufpost;
    uint16 cur_event_bufs_posted;
    uint16 cur_ioctlresp_bufs_posted;
    uint16 cur_ts_bufs_posted;

    /* Flow control mechanism based on active transmits pending */
    uint16 active_tx_count; /* increments on every packet tx, and decrements on tx_status */
    uint16 h2d_max_txpost;
    uint16 txp_threshold; /* optimization to write "n" tx items at a time to ring */

    /* MsgBuf Ring info: has a dhd_dma_buf that is dynamically allocated */
    msgbuf_ring_t h2dring_ctrl_subn; /* H2D ctrl message submission ring */
    msgbuf_ring_t h2dring_rxp_subn;  /* H2D RxBuf post ring */
    msgbuf_ring_t d2hring_ctrl_cpln; /* D2H ctrl completion ring */
    msgbuf_ring_t d2hring_tx_cpln;   /* D2H Tx complete message ring */
    msgbuf_ring_t d2hring_rx_cpln;   /* D2H Rx complete message ring */
    msgbuf_ring_t *h2dring_info_subn; /* H2D info submission ring */
    msgbuf_ring_t *d2hring_info_cpln; /* D2H info completion ring */

    msgbuf_ring_t *h2d_flowrings_pool; /* Pool of preallocated flowrings */
    dhd_dma_buf_t flowrings_dma_buf; /* Contiguous DMA buffer for flowrings */
    uint16 h2d_rings_total; /* total H2D (common rings + flowrings) */

    uint32 rx_dataoffset;

    dhd_mb_ring_t mb_ring_fn;     /* called when dongle needs to be notified of new msg */
    dhd_mb_ring_2_t mb_2_ring_fn; /* called when dongle needs to be notified of new msg */

    /* ioctl related resources */
    uint8 ioctl_state;
    int16 ioctl_status; /* status returned from dongle */
    uint16 ioctl_resplen;
    dhd_ioctl_recieved_status_t ioctl_received;
    uint curr_ioctl_cmd;
    dhd_dma_buf_t retbuf;  /* For holding ioctl response */
    dhd_dma_buf_t ioctbuf; /* For holding ioctl request */

    dhd_dma_buf_t d2h_dma_scratch_buf; /* For holding d2h scratch */

    /* DMA-able arrays for holding WR and RD indices */
    uint32 rw_index_sz; /* Size of a RD or WR index in dongle */
    dhd_dma_buf_t h2d_dma_indx_wr_buf;  /* Array of H2D WR indices */
    dhd_dma_buf_t h2d_dma_indx_rd_buf;  /* Array of H2D RD indices */
    dhd_dma_buf_t d2h_dma_indx_wr_buf;  /* Array of D2H WR indices */
    dhd_dma_buf_t d2h_dma_indx_rd_buf;  /* Array of D2H RD indices */
    dhd_dma_buf_t h2d_ifrm_indx_wr_buf; /* Array of H2D WR indices for ifrm */

    dhd_dma_buf_t host_bus_throughput_buf; /* bus throughput measure buffer */

    dhd_dma_buf_t *flowring_buf; /* pool of flow ring buf */
    uint32 flowring_num;

    d2h_sync_cb_t d2h_sync_cb; /* Sync on D2H DMA done: SEQNUM or XORCSUM */
    ulong d2h_sync_wait_max;   /* max number of wait loops to receive one msg */
    ulong d2h_sync_wait_tot;   /* total wait loops */

    dhd_dmaxfer_t dmaxfer; /* for test/DMA loopback */

    uint16 ioctl_seq_no;
    uint16 data_seq_no;
    uint16 ioctl_trans_id;
    void *pktid_ctrl_map; /* a pktid maps to a packet and its metadata */
    void *pktid_rx_map;   /* pktid map for rx path */
    void *pktid_tx_map;   /* pktid map for tx path */
    void *rx_lock;        /* rx pktid map and rings access protection */
    bool metadata_dbg;
    void *pktid_map_handle_ioctl;

    /* Applications/utilities can read tx and rx metadata using IOVARs */
    uint16 rx_metadata_offset;
    uint16 tx_metadata_offset;


#if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT)
    /* Host's soft doorbell configuration */
    bcmpcie_soft_doorbell_t soft_doorbell[BCMPCIE_D2H_COMMON_MSGRINGS];
#endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */

    /* Work Queues to be used by the producer and the consumer, and threshold
     * when the WRITE index must be synced to consumer's workq
     */
#if defined(DHD_LB_TXC)
    uint32 tx_compl_prod_sync ____cacheline_aligned;
    bcm_workq_t tx_compl_prod, tx_compl_cons;
#endif /* DHD_LB_TXC */
#if defined(DHD_LB_RXC)
    uint32 rx_compl_prod_sync ____cacheline_aligned;
    bcm_workq_t rx_compl_prod, rx_compl_cons;
#endif /* DHD_LB_RXC */

    dhd_dma_buf_t fw_trap_buf; /* firmware trap buffer */

    uint32 host_ipc_version;   /* Host supported IPC rev */
    uint32 device_ipc_version; /* FW supported IPC rev */
    uint32 active_ipc_version; /* Host advertised IPC rev */
    dhd_dma_buf_t hostts_req_buf; /* For holding host timestamp request buf */
    bool hostts_req_buf_inuse;
    bool rx_ts_log_enabled;
    bool tx_ts_log_enabled;
} dhd_prot_t;

extern void dhd_schedule_dmaxfer_free(dhd_pub_t* dhdp, dmaxref_mem_map_t *dmmap);

/* Convert a dmaaddr_t to a base_addr with htol operations */
static INLINE void dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa);

/* APIs for managing a DMA-able buffer */
static int dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static int dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len);
static void dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);
static void dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf);

/* msgbuf ring management */
static int dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    const char *name, uint16 max_items, uint16 len_item, uint16 ringid);
static void dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring);
static void dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void* buf);

/* Pool of pre-allocated msgbuf_ring_t with DMA-able buffers for Flowrings */
static int dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd);
static void dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd);

/* Fetch and Release a flowring msgbuf_ring from flowring pool */
static msgbuf_ring_t *dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd,
    uint16 flowid);
/* see also dhd_prot_flowrings_pool_release() in dhd_prot.h */

/* Producer: Allocate space in a msgbuf ring */
static void* dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    uint16 nitems, uint16 *alloced, bool exactly_nitems);
static void* dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems,
    uint16 *alloced, bool exactly_nitems);

/* Consumer: Determine the location where the next message may be consumed */
static uint8* dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    uint32 *available_len);

/* Producer (WR index update) or Consumer (RD index update) indication */
static void dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    void *p, uint16 len);
static void dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t *ring);

static INLINE int dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type,
    dhd_dma_buf_t *dma_buf, uint32 bufsz);

/* Set/Get a RD or WR index in the array of indices */
/* See also: dhd_prot_dma_indx_init() */
void dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type,
    uint16 ringid);
static uint16 dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid);

/* Locate a packet given a pktid */
static INLINE void *dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype,
    bool free_pktid);
/* Locate a packet given a PktId and free it. */
static INLINE void dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send);

static int dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
    void *buf, uint len, uint8 action);
static int dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd,
    void *buf, uint len, uint8 action);
static int dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf);
static int dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd,
    void *buf, int ifidx);

/* Post buffers for Rx, control ioctl response and events */
static uint16 dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msgid, uint32 max_to_post);
static void dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *pub);
static void dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid);
static int dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid);
static int dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *pub);

static void dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt);


/* D2H Message handling */
static int dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len);

/* D2H Message handlers */
static void dhd_prot_noop(dhd_pub_t *dhd, void *msg);
static void dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_event_process(dhd_pub_t *dhd, void *msg);

/* Loopback test with dongle */
static void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dma);
static int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, uint srcdelay,
    uint destdelay, dhd_dmaxfer_t *dma);
static void dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg);

/* Flowring management communication with dongle */
static void dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg);
static void dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg);
static void dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg);

/* Monitor Mode */
#ifdef WL_MONITOR
extern bool dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx);
extern void dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx);
#endif /* WL_MONITOR */

/* Configure a soft doorbell per D2H ring */
static void dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd);
static void dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg);
static void dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf);
static void dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf);
static void dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf);
static void dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf);
static void dhd_prot_detach_info_rings(dhd_pub_t *dhd);
static void dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf);

typedef void (*dhd_msgbuf_func_t)(dhd_pub_t *dhd, void *msg);

/** callback functions for messages generated by the dongle */
#define MSG_TYPE_INVALID 0

static dhd_msgbuf_func_t table_lookup[DHD_PROT_FUNCS] = {
    dhd_prot_noop, /* 0 is MSG_TYPE_INVALID */
    dhd_prot_genstatus_process, /* MSG_TYPE_GEN_STATUS */
    dhd_prot_ringstatus_process, /* MSG_TYPE_RING_STATUS */
    NULL,
    dhd_prot_flow_ring_create_response_process, /* MSG_TYPE_FLOW_RING_CREATE_CMPLT */
    NULL,
    dhd_prot_flow_ring_delete_response_process, /* MSG_TYPE_FLOW_RING_DELETE_CMPLT */
    NULL,
    dhd_prot_flow_ring_flush_response_process, /* MSG_TYPE_FLOW_RING_FLUSH_CMPLT */
    NULL,
    dhd_prot_ioctack_process, /* MSG_TYPE_IOCTLPTR_REQ_ACK */
    NULL,
    dhd_prot_ioctcmplt_process, /* MSG_TYPE_IOCTL_CMPLT */
    NULL,
    dhd_prot_event_process, /* MSG_TYPE_WL_EVENT */
    NULL,
    dhd_prot_txstatus_process, /* MSG_TYPE_TX_STATUS */
    NULL,
    NULL, /* MSG_TYPE_RX_CMPLT use dedicated handler */
    NULL,
    dhd_msgbuf_dmaxfer_process, /* MSG_TYPE_LPBK_DMAXFER_CMPLT */
    NULL, /* MSG_TYPE_FLOW_RING_RESUME */
    dhd_prot_process_flow_ring_resume_response, /* MSG_TYPE_FLOW_RING_RESUME_CMPLT */
    NULL, /* MSG_TYPE_FLOW_RING_SUSPEND */
    dhd_prot_process_flow_ring_suspend_response, /* MSG_TYPE_FLOW_RING_SUSPEND_CMPLT */
    NULL, /* MSG_TYPE_INFO_BUF_POST */
    dhd_prot_process_infobuf_complete, /* MSG_TYPE_INFO_BUF_CMPLT */
    NULL, /* MSG_TYPE_H2D_RING_CREATE */
    NULL, /* MSG_TYPE_D2H_RING_CREATE */
    dhd_prot_process_h2d_ring_create_complete, /* MSG_TYPE_H2D_RING_CREATE_CMPLT */
    dhd_prot_process_d2h_ring_create_complete, /* MSG_TYPE_D2H_RING_CREATE_CMPLT */
    NULL, /* MSG_TYPE_H2D_RING_CONFIG */
    NULL, /* MSG_TYPE_D2H_RING_CONFIG */
    NULL, /* MSG_TYPE_H2D_RING_CONFIG_CMPLT */
    dhd_prot_process_d2h_ring_config_complete, /* MSG_TYPE_D2H_RING_CONFIG_CMPLT */
    NULL, /* MSG_TYPE_H2D_MAILBOX_DATA */
    dhd_prot_process_d2h_mb_data, /* MSG_TYPE_D2H_MAILBOX_DATA */
    NULL, /* MSG_TYPE_TIMSTAMP_BUFPOST */
    NULL, /* MSG_TYPE_HOSTTIMSTAMP */
    dhd_prot_process_d2h_host_ts_complete, /* MSG_TYPE_HOSTTIMSTAMP_CMPLT */
    dhd_prot_process_fw_timestamp, /* MSG_TYPE_FIRMWARE_TIMESTAMP */
};
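
/*
 * Editor's illustrative dispatch sketch (not in the original source): how a
 * verified message's msg_type indexes table_lookup. The guard macro and
 * function name are hypothetical; the real dispatch, including the dedicated
 * RX_CMPLT handling noted above, lives in dhd_prot_process_msgtype().
 */
#ifdef DHD_MSGTYPE_DISPATCH_SKETCH
static void
dhd_msgtype_dispatch_sketch(dhd_pub_t *dhd, cmn_msg_hdr_t *msg)
{
    uint8 msg_type = msg->msg_type;

    /* In-range msg_types with a non-NULL entry get their handler invoked */
    if ((msg_type < DHD_PROT_FUNCS) && (table_lookup[msg_type] != NULL)) {
        table_lookup[msg_type](dhd, (void *)msg);
    }
}
#endif /* DHD_MSGTYPE_DISPATCH_SKETCH */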


#ifdef DHD_RX_CHAINING

#define PKT_CTF_CHAINABLE(dhd, ifidx, evh, prio, h_sa, h_da, h_prio) \
    (dhd_wet_chainable(dhd) && \
    dhd_rx_pkt_chainable((dhd), (ifidx)) && \
    !ETHER_ISNULLDEST(((struct ether_header *)(evh))->ether_dhost) && \
    !ETHER_ISMULTI(((struct ether_header *)(evh))->ether_dhost) && \
    !eacmp((h_da), ((struct ether_header *)(evh))->ether_dhost) && \
    !eacmp((h_sa), ((struct ether_header *)(evh))->ether_shost) && \
    ((h_prio) == (prio)) && (dhd_ctf_hotbrc_check((dhd), (evh), (ifidx))) && \
    ((((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IP)) || \
    (((struct ether_header *)(evh))->ether_type == HTON16(ETHER_TYPE_IPV6))))

static INLINE void BCMFASTPATH dhd_rxchain_reset(rxchain_info_t *rxchain);
static void BCMFASTPATH dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx);
static void BCMFASTPATH dhd_rxchain_commit(dhd_pub_t *dhd);

#define DHD_PKT_CTF_MAX_CHAIN_LEN 64

#endif /* DHD_RX_CHAINING */

static void dhd_prot_h2d_sync_init(dhd_pub_t *dhd);

/**
 * D2H DMA to completion callback handlers. Based on the mode advertised by the
 * dongle through the PCIE shared region, the appropriate callback will be
 * registered in the proto layer to be invoked prior to processing any message
 * from a D2H DMA ring. If the dongle uses a read barrier or another mode that
 * does not require host participation, then a noop callback handler will be
 * bound that simply returns the msg_type.
 */
static void dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring,
    uint32 tries, volatile uchar *msg, int msglen);
static uint8 dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    volatile cmn_msg_hdr_t *msg, int msglen);
static uint8 dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    volatile cmn_msg_hdr_t *msg, int msglen);
static void dhd_prot_d2h_sync_init(dhd_pub_t *dhd);
static int dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
static int dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create);
static uint16 dhd_get_max_flow_rings(dhd_pub_t *dhd);

bool
dhd_prot_is_cmpl_ring_empty(dhd_pub_t *dhd, void *prot_info)
{
    msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)prot_info;
    uint16 rd, wr;
    bool ret;

    if (dhd->dma_d2h_ring_upd_support) {
        wr = flow_ring->wr;
    } else {
        dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx);
    }
    if (dhd->dma_h2d_ring_upd_support) {
        rd = flow_ring->rd;
    } else {
        dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx);
    }
    ret = (wr == rd) ? TRUE : FALSE;
    return ret;
}

uint16
dhd_prot_get_h2d_max_txpost(dhd_pub_t *dhd)
{
    return (uint16)h2d_max_txpost;
}

void
dhd_prot_set_h2d_max_txpost(dhd_pub_t *dhd, uint16 max_txpost)
{
    h2d_max_txpost = max_txpost;
}

/**
 * dhd_prot_d2h_sync_livelock - when the host determines that a DMA transfer has
 * not completed, a livelock condition occurs. The host averts this livelock by
 * dropping this message and moving on to the next. The dropped message can lead
 * to a packet leak, or to something disastrous in case the dropped message
 * happens to be a control response.
 * Here we log this condition. One may choose to reboot the dongle.
 */
static void
dhd_prot_d2h_sync_livelock(dhd_pub_t *dhd, uint32 msg_seqnum, msgbuf_ring_t *ring, uint32 tries,
    volatile uchar *msg, int msglen)
{
    uint32 ring_seqnum = ring->seqnum;
    DHD_ERROR((
        "LIVELOCK DHD<%p> ring<%s> msg_seqnum<%u> ring_seqnum<%u:%u> tries<%u> max<%lu>"
        " tot<%lu> dma_buf va<%p> msg<%p> curr_rd<%d>\n",
        dhd, ring->name, msg_seqnum, ring_seqnum, ring_seqnum % D2H_EPOCH_MODULO, tries,
        dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot,
        ring->dma_buf.va, msg, ring->curr_rd));
    prhex("D2H MsgBuf Failure", (volatile uchar *)msg, msglen);

    dhd_bus_dump_console_buffer(dhd->bus);
    dhd_prot_debug_info_print(dhd);

#ifdef DHD_FW_COREDUMP
    if (dhd->memdump_enabled) {
        /* collect core dump */
        dhd->memdump_type = DUMP_TYPE_BY_LIVELOCK;
        dhd_bus_mem_dump(dhd);
    }
#endif /* DHD_FW_COREDUMP */

    dhd_schedule_reset(dhd);

#ifdef SUPPORT_LINKDOWN_RECOVERY
#ifdef CONFIG_ARCH_MSM
    dhd->bus->no_cfg_restore = 1;
#endif /* CONFIG_ARCH_MSM */
    dhd->hang_reason = HANG_REASON_MSGBUF_LIVELOCK;
    dhd_os_send_hang_message(dhd);
#endif /* SUPPORT_LINKDOWN_RECOVERY */
}

/**
 * dhd_prot_d2h_sync_seqnum - Sync on a D2H DMA completion using the SEQNUM
 * mode. The sequence number is always in the last word of a message.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_seqnum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    volatile cmn_msg_hdr_t *msg, int msglen)
{
    uint32 tries;
    uint32 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
    int num_words = msglen / sizeof(uint32); /* num of 32bit words */
    volatile uint32 *marker = (volatile uint32 *)msg + (num_words - 1); /* last word */
    dhd_prot_t *prot = dhd->prot;
    uint32 msg_seqnum;
    uint32 step = 0;
    uint32 delay = PCIE_D2H_SYNC_DELAY;
    uint32 total_tries = 0;

    ASSERT(msglen == ring->item_len);

    BCM_REFERENCE(delay);
    /*
     * For retries we have to use some sort of stepper algorithm.
     * We see that every time the dongle comes out of the D3 Cold
     * state, the first D2H mem2mem DMA takes more time to complete,
     * leading to livelock issues.
     *
     * Case 1 - Apart from the host CPU, some other bus master is
     * accessing the DDR port, probably a page close to the ring,
     * so the PCIE does not get a chance to update the memory.
     * Solution - Increase the number of tries.
     *
     * Case 2 - The 50usec delay given by the host CPU is not
     * sufficient for the PCIe RC to start its work.
     * In this case the breathing time of 50usec given by
     * the host CPU is not sufficient.
     * Solution: Increase the delay in a stepper fashion.
     * This is done to ensure that no unwanted extra delay is
     * introduced in normal conditions.
     */
    for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
        for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
            msg_seqnum = *marker;
            if (ltoh32(msg_seqnum) == ring_seqnum) { /* dma up to last word done */
                ring->seqnum++; /* next expected sequence number */
                goto dma_completed;
            }

            total_tries = ((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;

            if (total_tries > prot->d2h_sync_wait_max)
                prot->d2h_sync_wait_max = total_tries;

            OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
            OSL_CPU_RELAX(); /* CPU relax for msg_seqnum value to update */
            OSL_DELAY(delay * step); /* Add stepper delay */

        } /* for PCIE_D2H_SYNC_WAIT_TRIES */
    } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */

    dhd_prot_d2h_sync_livelock(dhd, msg_seqnum, ring, total_tries,
        (volatile uchar *)msg, msglen);

    ring->seqnum++; /* skip this message ... leak of a pktid */
    return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */

dma_completed:

    prot->d2h_sync_wait_tot += tries;
    return msg->msg_type;
}

/**
 * dhd_prot_d2h_sync_xorcsum - Sync on a D2H DMA completion using the XORCSUM
 * mode. The xorcsum is placed in the last word of a message. Dongle will also
 * place a seqnum in the epoch field of the cmn_msg_hdr.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_xorcsum(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    volatile cmn_msg_hdr_t *msg, int msglen)
{
    uint32 tries;
    uint32 prot_checksum = 0; /* computed checksum */
    int num_words = msglen / sizeof(uint32); /* num of 32bit words */
    uint8 ring_seqnum = ring->seqnum % D2H_EPOCH_MODULO;
    dhd_prot_t *prot = dhd->prot;
    uint32 step = 0;
    uint32 delay = PCIE_D2H_SYNC_DELAY;
    uint32 total_tries = 0;

    ASSERT(msglen == ring->item_len);

    BCM_REFERENCE(delay);
    /*
     * Retry with a stepped-up delay; see the detailed rationale in
     * dhd_prot_d2h_sync_seqnum() above (the first D2H mem2mem DMA after a
     * D3 Cold exit is slower to complete).
     */
    for (step = 1; step <= PCIE_D2H_SYNC_NUM_OF_STEPS; step++) {
        for (tries = 0; tries < PCIE_D2H_SYNC_WAIT_TRIES; tries++) {
            prot_checksum = bcm_compute_xor32((volatile uint32 *)msg, num_words);
            if (prot_checksum == 0U) { /* checksum is OK */
                if (msg->epoch == ring_seqnum) {
                    ring->seqnum++; /* next expected sequence number */
                    goto dma_completed;
                }
            }

            total_tries = ((step - 1) * PCIE_D2H_SYNC_WAIT_TRIES) + tries;

            if (total_tries > prot->d2h_sync_wait_max)
                prot->d2h_sync_wait_max = total_tries;

            OSL_CACHE_INV(msg, msglen); /* invalidate and try again */
            OSL_CPU_RELAX(); /* CPU relax for the epoch/checksum to update */
            OSL_DELAY(delay * step); /* Add stepper delay */

        } /* for PCIE_D2H_SYNC_WAIT_TRIES */
    } /* for PCIE_D2H_SYNC_NUM_OF_STEPS */

    DHD_ERROR(("%s: prot_checksum = 0x%x\n", __FUNCTION__, prot_checksum));
    dhd_prot_d2h_sync_livelock(dhd, msg->epoch, ring, total_tries,
        (volatile uchar *)msg, msglen);

    ring->seqnum++; /* skip this message ... leak of a pktid */
    return MSG_TYPE_INVALID; /* invalid msg_type 0 -> noop callback */

dma_completed:

    prot->d2h_sync_wait_tot += tries;
    return msg->msg_type;
}

/**
 * dhd_prot_d2h_sync_none - The dongle ensures that the DMA will complete, so
 * the host need not sync. This noop sync handler will be bound when the dongle
 * advertises that neither the SEQNUM nor XORCSUM mode of DMA sync is required.
 */
static uint8 BCMFASTPATH
dhd_prot_d2h_sync_none(dhd_pub_t *dhd, msgbuf_ring_t *ring,
    volatile cmn_msg_hdr_t *msg, int msglen)
{
    return msg->msg_type;
}

INLINE void
dhd_wakeup_ioctl_event(dhd_pub_t *dhd, dhd_ioctl_recieved_status_t reason)
{
    /* To synchronize with the previous memory operations, call wmb() */
    OSL_SMP_WMB();
    dhd->prot->ioctl_received = reason;
    /* Call another wmb() to make sure ioctl_received is updated before
     * waking up the waiter.
     */
    OSL_SMP_WMB();
    dhd_os_ioctl_resp_wake(dhd);
}

/**
 * dhd_prot_d2h_sync_init - Setup the host side DMA sync mode based on what
 * the dongle advertises.
 */
static void
dhd_prot_d2h_sync_init(dhd_pub_t *dhd)
{
    dhd_prot_t *prot = dhd->prot;
    prot->d2h_sync_wait_max = 0UL;
    prot->d2h_sync_wait_tot = 0UL;

    prot->d2hring_ctrl_cpln.seqnum = D2H_EPOCH_INIT_VAL;
    prot->d2hring_ctrl_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

    prot->d2hring_tx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
    prot->d2hring_tx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

    prot->d2hring_rx_cpln.seqnum = D2H_EPOCH_INIT_VAL;
    prot->d2hring_rx_cpln.current_phase = BCMPCIE_CMNHDR_PHASE_BIT_INIT;

    if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) {
        prot->d2h_sync_cb = dhd_prot_d2h_sync_seqnum;
        DHD_ERROR(("%s(): D2H sync mechanism is SEQNUM \r\n", __FUNCTION__));
    } else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) {
        prot->d2h_sync_cb = dhd_prot_d2h_sync_xorcsum;
        DHD_ERROR(("%s(): D2H sync mechanism is XORCSUM \r\n", __FUNCTION__));
    } else {
        prot->d2h_sync_cb = dhd_prot_d2h_sync_none;
        DHD_ERROR(("%s(): D2H sync mechanism is NONE \r\n", __FUNCTION__));
    }
}

/**
 * dhd_prot_h2d_sync_init - Per H2D common ring, setup the msgbuf ring seqnum
 */
static void
dhd_prot_h2d_sync_init(dhd_pub_t *dhd)
{
    dhd_prot_t *prot = dhd->prot;
    prot->h2dring_rxp_subn.seqnum = H2D_EPOCH_INIT_VAL;
    prot->h2dring_rxp_subn.current_phase = 0;

    prot->h2dring_ctrl_subn.seqnum = H2D_EPOCH_INIT_VAL;
    prot->h2dring_ctrl_subn.current_phase = 0;
}

/* +----------------- End of PCIE DHD H2D DMA SYNC ------------------------+ */

/*
 * +---------------------------------------------------------------------------+
 * PCIE DMA-able buffer. Sets up a dhd_dma_buf_t object, which includes the
 * virtual and physical addresses, the buffer length and the DMA handler.
 * A secdma handler is also included in the dhd_dma_buf object.
 * +---------------------------------------------------------------------------+
 */

static INLINE void
dhd_base_addr_htolpa(sh_addr_t *base_addr, dmaaddr_t pa)
{
    base_addr->low_addr = htol32(PHYSADDRLO(pa));
    base_addr->high_addr = htol32(PHYSADDRHI(pa));
}

/**
 * dhd_dma_buf_audit - Any audits on a DHD DMA Buffer.
 */
static int
dhd_dma_buf_audit(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
    uint32 pa_lowaddr, end; /* dongle uses 32bit ptr arithmetic */
    ASSERT(dma_buf);
    pa_lowaddr = PHYSADDRLO(dma_buf->pa);
    ASSERT(PHYSADDRLO(dma_buf->pa) || PHYSADDRHI(dma_buf->pa));
    ASSERT(ISALIGNED(pa_lowaddr, DMA_ALIGN_LEN));
    ASSERT(dma_buf->len != 0);

    /* test 32bit offset arithmetic over dma buffer for loss of carry-over */
    end = (pa_lowaddr + dma_buf->len); /* end address */

    if ((end & 0xFFFFFFFF) < (pa_lowaddr & 0xFFFFFFFF)) { /* exclude carryover */
        DHD_ERROR(("%s: dma_buf %x len %d spans dongle 32bit ptr arithmetic\n",
            __FUNCTION__, pa_lowaddr, dma_buf->len));
        return BCME_ERROR;
    }

    return BCME_OK;
}

/**
 * dhd_dma_buf_alloc - Allocate a cache coherent DMA-able buffer.
 * Returns BCME_OK=0 on success.
 * Returns a non-zero negative error value on failure.
 */
static int
dhd_dma_buf_alloc(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf, uint32 buf_len)
{
    uint32 dma_pad = 0;
    osl_t *osh = dhd->osh;
    uint16 dma_align = DMA_ALIGN_LEN;


    ASSERT(dma_buf != NULL);
    ASSERT(dma_buf->va == NULL);
    ASSERT(dma_buf->len == 0);

    /* Pad the buffer length by one extra cacheline size.
     * Required for D2H direction.
     */
    dma_pad = (buf_len % DHD_DMA_PAD) ? DHD_DMA_PAD : 0;
    dma_buf->va = DMA_ALLOC_CONSISTENT(osh, buf_len + dma_pad,
        dma_align, &dma_buf->_alloced, &dma_buf->pa, &dma_buf->dmah);

    if (dma_buf->va == NULL) {
        DHD_ERROR(("%s: buf_len %d, no memory available\n",
            __FUNCTION__, buf_len));
        return BCME_NOMEM;
    }

    dma_buf->len = buf_len; /* not including padded len */

    if (dhd_dma_buf_audit(dhd, dma_buf) != BCME_OK) { /* audit dma buf */
        dhd_dma_buf_free(dhd, dma_buf);
        return BCME_ERROR;
    }

    dhd_dma_buf_reset(dhd, dma_buf); /* zero out and cache flush */

    return BCME_OK;
}
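
/*
 * Editor's illustrative usage sketch (not in the original source): the
 * typical lifetime of a dhd_dma_buf_t using the helpers above. The guard
 * macro and function name are hypothetical.
 */
#ifdef DHD_DMA_BUF_USAGE_SKETCH
static int
dhd_dma_buf_usage_sketch(dhd_pub_t *dhd)
{
    dhd_dma_buf_t scratch;

    memset(&scratch, 0, sizeof(scratch)); /* alloc asserts va == NULL, len == 0 */

    if (dhd_dma_buf_alloc(dhd, &scratch, DMA_D2H_SCRATCH_BUF_LEN) != BCME_OK)
        return BCME_NOMEM;

    /* ... hand scratch.pa to the dongle, access scratch.va on the host ... */

    dhd_dma_buf_free(dhd, &scratch); /* also safe when va is NULL */
    return BCME_OK;
}
#endif /* DHD_DMA_BUF_USAGE_SKETCH */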

/**
 * dhd_dma_buf_reset - Reset a cache coherent DMA-able buffer.
 */
static void
dhd_dma_buf_reset(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
    if ((dma_buf == NULL) || (dma_buf->va == NULL))
        return;

    (void)dhd_dma_buf_audit(dhd, dma_buf);

    /* Zero out the entire buffer and cache flush */
    memset((void*)dma_buf->va, 0, dma_buf->len);
    OSL_CACHE_FLUSH((void *)dma_buf->va, dma_buf->len);
}

/**
 * dhd_dma_buf_free - Free a DMA-able buffer that was previously allocated using
 * dhd_dma_buf_alloc().
 */
static void
dhd_dma_buf_free(dhd_pub_t *dhd, dhd_dma_buf_t *dma_buf)
{
    osl_t *osh = dhd->osh;

    ASSERT(dma_buf);

    if (dma_buf->va == NULL)
        return; /* Allow for free invocation, when alloc failed */

    /* DEBUG: dhd_dma_buf_reset(dhd, dma_buf) */
    (void)dhd_dma_buf_audit(dhd, dma_buf);

    /* dma buffer may have been padded at allocation */
    DMA_FREE_CONSISTENT(osh, dma_buf->va, dma_buf->_alloced,
        dma_buf->pa, dma_buf->dmah);

    memset(dma_buf, 0, sizeof(dhd_dma_buf_t));
}

/**
 * dhd_dma_buf_init - Initialize a dhd_dma_buf with specified values.
 * Do not use dhd_dma_buf_init to zero out a dhd_dma_buf_t object. Use memset 0.
 */
void
dhd_dma_buf_init(dhd_pub_t *dhd, void *dhd_dma_buf,
    void *va, uint32 len, dmaaddr_t pa, void *dmah, void *secdma)
{
    dhd_dma_buf_t *dma_buf;
    ASSERT(dhd_dma_buf);
    dma_buf = (dhd_dma_buf_t *)dhd_dma_buf;
    dma_buf->va = va;
    dma_buf->len = len;
    dma_buf->pa = pa;
    dma_buf->dmah = dmah;
    dma_buf->secdma = secdma;

    /* Audit user defined configuration */
    (void)dhd_dma_buf_audit(dhd, dma_buf);
}

/* +------------------ End of PCIE DHD DMA BUF ADT ------------------------+ */

/*
 * +---------------------------------------------------------------------------+
 * PktId Map: Provides a native packet pointer to unique 32bit PktId mapping.
 * Its main purpose is to save memory on the dongle, though it has other uses
 * as well. The packet id map also includes storage for some packet parameters
 * that may be saved. A native packet pointer along with the parameters may be
 * saved, and a unique 32bit pkt id will be returned. Later, the saved packet
 * pointer and the metadata may be retrieved using the previously allocated
 * packet id.
 * +---------------------------------------------------------------------------+
 */
#define DHD_PCIE_PKTID
#define MAX_CTRL_PKTID (1024) /* Maximum number of pktids supported */
#define MAX_RX_PKTID (1024)
#define MAX_TX_PKTID (3072 * 2)

/* On Router, the pktptr serves as a pktid. */


#if defined(PROP_TXSTATUS) && !defined(DHD_PCIE_PKTID)
#error "PKTIDMAP must be supported with PROP_TXSTATUS/WLFC"
#endif

/* Enum for marking the buffer color based on usage */
typedef enum dhd_pkttype {
    PKTTYPE_DATA_TX = 0,
    PKTTYPE_DATA_RX,
    PKTTYPE_IOCTL_RX,
    PKTTYPE_EVENT_RX,
    PKTTYPE_INFO_RX,
    /* dhd_prot_packet_free skips the pkttype check, for the case where a
     * pktid was reserved but no space was available
     */
    PKTTYPE_NO_CHECK,
    PKTTYPE_TSBUF_RX
} dhd_pkttype_t;

#define DHD_PKTID_INVALID (0U)
#define DHD_IOCTL_REQ_PKTID (0xFFFE)
#define DHD_FAKE_PKTID (0xFACE)
#define DHD_H2D_DBGRING_REQ_PKTID 0xFFFD
#define DHD_D2H_DBGRING_REQ_PKTID 0xFFFC
#define DHD_H2D_HOSTTS_REQ_PKTID 0xFFFB

#define IS_FLOWRING(ring) \
    ((strncmp((ring)->name, "h2dflr", sizeof("h2dflr"))) == (0))

typedef void * dhd_pktid_map_handle_t; /* opaque handle to a pktid map */

/* Construct a packet id mapping table, returning an opaque map handle */
static dhd_pktid_map_handle_t *dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items);

/* Destroy a packet id mapping table, freeing all packets active in the table */
static void dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map);

#define DHD_NATIVE_TO_PKTID_INIT(dhd, items) dhd_pktid_map_init((dhd), (items))
#define DHD_NATIVE_TO_PKTID_RESET(dhd, map) dhd_pktid_map_reset((dhd), (map))
#define DHD_NATIVE_TO_PKTID_FINI(dhd, map) dhd_pktid_map_fini((dhd), (map))
#define DHD_NATIVE_TO_PKTID_FINI_IOCTL(osh, map) dhd_pktid_map_fini_ioctl((osh), (map))

#ifdef MACOSX_DHD
#undef DHD_PCIE_PKTID
#define DHD_PCIE_PKTID 1
#endif /* MACOSX_DHD */

#if defined(DHD_PCIE_PKTID)
#if defined(MACOSX_DHD) || defined(DHD_EFI)
#define IOCTLRESP_USE_CONSTMEM
static void free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
static int alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf);
#endif

/* Determine number of pktids that are available */
static INLINE uint32 dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle);

/* Allocate a unique pktid against which a pkt and some metadata is saved */
static INLINE uint32 dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
    void *pkt, dhd_pkttype_t pkttype);
static INLINE void dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle,
    void *pkt, uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dma,
    void *dmah, void *secdma, dhd_pkttype_t pkttype);
static uint32 dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
    void *pkt, dmaaddr_t pa, uint32 len, uint8 dma,
    void *dmah, void *secdma, dhd_pkttype_t pkttype);

/* Return an allocated pktid, retrieving previously saved pkt and metadata */
static void *dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map,
    uint32 id, dmaaddr_t *pa, uint32 *len, void **dmah,
    void **secdma, dhd_pkttype_t pkttype, bool rsv_locker);

/*
 * DHD_PKTID_AUDIT_ENABLED: Audit of PktIds in DHD for duplicate alloc and frees
 *
 * DHD_PKTID_AUDIT_MAP: Audit the LIFO or FIFO PktIdMap allocator
 * DHD_PKTID_AUDIT_RING: Audit the pktid during producer/consumer ring operation
 *
 * CAUTION: When DHD_PKTID_AUDIT_ENABLED is defined,
 * either DHD_PKTID_AUDIT_MAP or DHD_PKTID_AUDIT_RING may be selected.
 */
#if defined(DHD_PKTID_AUDIT_ENABLED)
#define USE_DHD_PKTID_AUDIT_LOCK 1
/* Audit the pktidmap allocator */
/* #define DHD_PKTID_AUDIT_MAP */

/* Audit the pktid during production/consumption of workitems */
#define DHD_PKTID_AUDIT_RING

#if defined(DHD_PKTID_AUDIT_MAP) && defined(DHD_PKTID_AUDIT_RING)
#error "Only one of MAP or RING audit may be enabled at a time."
#endif /* DHD_PKTID_AUDIT_MAP && DHD_PKTID_AUDIT_RING */

#define DHD_DUPLICATE_ALLOC 1
#define DHD_DUPLICATE_FREE 2
#define DHD_TEST_IS_ALLOC 3
#define DHD_TEST_IS_FREE 4

#ifdef USE_DHD_PKTID_AUDIT_LOCK
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
#define DHD_PKTID_AUDIT_LOCK(lock) dhd_os_spin_lock(lock)
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
#else
#define DHD_PKTID_AUDIT_LOCK_INIT(osh) (void *)(1)
#define DHD_PKTID_AUDIT_LOCK_DEINIT(osh, lock) do { /* noop */ } while (0)
#define DHD_PKTID_AUDIT_LOCK(lock) 0
#define DHD_PKTID_AUDIT_UNLOCK(lock, flags) do { /* noop */ } while (0)
#endif /* !USE_DHD_PKTID_AUDIT_LOCK */

#endif /* DHD_PKTID_AUDIT_ENABLED */

1235 | /* #define USE_DHD_PKTID_LOCK 1 */ | |
1236 | ||
1237 | #ifdef USE_DHD_PKTID_LOCK | |
1238 | #define DHD_PKTID_LOCK_INIT(osh) dhd_os_spin_lock_init(osh) | |
1239 | #define DHD_PKTID_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock) | |
1240 | #define DHD_PKTID_LOCK(lock) dhd_os_spin_lock(lock) | |
1241 | #define DHD_PKTID_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags) | |
1242 | #else | |
1243 | #define DHD_PKTID_LOCK_INIT(osh) (void *)(1) | |
1244 | #define DHD_PKTID_LOCK_DEINIT(osh, lock) \ | |
1245 | do { \ | |
1246 | BCM_REFERENCE(osh); \ | |
1247 | BCM_REFERENCE(lock); \ | |
1248 | } while (0) | |
1249 | #define DHD_PKTID_LOCK(lock) 0 | |
1250 | #define DHD_PKTID_UNLOCK(lock, flags) \ | |
1251 | do { \ | |
1252 | BCM_REFERENCE(lock); \ | |
1253 | BCM_REFERENCE(flags); \ | |
1254 | } while (0) | |
1255 | #endif /* !USE_DHD_PKTID_LOCK */ | |
1256 | ||
1257 | typedef enum dhd_locker_state { | |
1258 | LOCKER_IS_FREE, | |
1259 | LOCKER_IS_BUSY, | |
1260 | LOCKER_IS_RSVD | |
1261 | } dhd_locker_state_t; | |
1262 | ||
1263 | /* Packet metadata saved in packet id mapper */ | |
1264 | ||
1265 | typedef struct dhd_pktid_item { | |
1266 | dhd_locker_state_t state; /* tag a locker to be free, busy or reserved */ | |
1267 | uint8 dir; /* dma map direction (Tx=flush or Rx=invalidate) */ | |
1268 | dhd_pkttype_t pkttype; /* pktlists are maintained based on pkttype */ | |
1269 | uint16 len; /* length of mapped packet's buffer */ | |
1270 | void *pkt; /* opaque native pointer to a packet */ | |
1271 | dmaaddr_t pa; /* physical address of mapped packet's buffer */ | |
1272 | void *dmah; /* handle to OS specific DMA map */ | |
1273 | void *secdma; | |
1274 | } dhd_pktid_item_t; | |
1275 | ||
1276 | typedef uint32 dhd_pktid_key_t; | |
1277 | ||
1278 | typedef struct dhd_pktid_map { | |
1279 | uint32 items; /* total items in map */ | |
1280 | uint32 avail; /* total available items */ | |
1281 | int failures; /* lockers unavailable count */ | |
1282 | #if defined(DHD_PKTID_AUDIT_ENABLED) | |
1283 | void *pktid_audit_lock; | |
1284 | struct bcm_mwbmap *pktid_audit; /* multi word bitmap based audit */ | |
1285 | #endif /* DHD_PKTID_AUDIT_ENABLED */ | |
1286 | dhd_pktid_key_t *keys; /* map_items +1 unique pkt ids */ | |
1287 | dhd_pktid_item_t lockers[0]; /* metadata storage */ | |
1288 | } dhd_pktid_map_t; | |
1289 | ||
1290 | /* | |
1291 | * PktId (Locker) #0 is never allocated and is considered invalid. | |
1292 | * | |
1293 | * On request for a pktid, a value DHD_PKTID_INVALID must be treated as a | |
1294 | * depleted pktid pool and must not be used by the caller. | |
1295 | * | |
1296 | * Likewise, a caller must never free a pktid of value DHD_PKTID_INVALID. | |
1297 | */ | |
1298 | ||
1299 | #define DHD_PKTID_FREE_LOCKER (FALSE) | |
1300 | #define DHD_PKTID_RSV_LOCKER (TRUE) | |
1301 | ||
1302 | #define DHD_PKTID_ITEM_SZ (sizeof(dhd_pktid_item_t)) | |
1303 | #define DHD_PKIDMAP_ITEMS(items) (items) | |
1304 | #define DHD_PKTID_MAP_SZ(items) (sizeof(dhd_pktid_map_t) + \ | |
1305 | (DHD_PKTID_ITEM_SZ * ((items) + 1))) | |
1306 | #define DHD_PKTIDMAP_KEYS_SZ(items) (sizeof(dhd_pktid_key_t) * ((items) + 1)) | |
1307 | ||
1308 | #define DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, map) dhd_pktid_map_reset_ioctl((dhd), (map)) | |
1309 | ||
1310 | /* Convert a packet to a pktid, and save pkt pointer in busy locker */ | |
1311 | #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) \ | |
1312 | dhd_pktid_map_reserve((dhd), (map), (pkt), (pkttype)) | |
1313 | /* Reuse a previously reserved locker to save packet params */ | |
1314 | #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dir, dmah, secdma, pkttype) \ | |
1315 | dhd_pktid_map_save((dhd), (map), (void *)(pkt), (nkey), (pa), (uint32)(len), \ | |
1316 | (uint8)(dir), (void *)(dmah), (void *)(secdma), \ | |
1317 | (dhd_pkttype_t)(pkttype)) | |
1318 | /* Convert a packet to a pktid, and save packet params in locker */ | |
1319 | #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dir, dmah, secdma, pkttype) \ | |
1320 | dhd_pktid_map_alloc((dhd), (map), (void *)(pkt), (pa), (uint32)(len), \ | |
1321 | (uint8)(dir), (void *)(dmah), (void *)(secdma), \ | |
1322 | (dhd_pkttype_t)(pkttype)) | |
1323 | ||
1324 | /* Convert pktid to a packet, and free the locker */ | |
1325 | #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ | |
1326 | dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ | |
1327 | (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ | |
1328 | (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_FREE_LOCKER) | |
1329 | ||
1330 | /* Convert the pktid to a packet, empty locker, but keep it reserved */ | |
1331 | #define DHD_PKTID_TO_NATIVE_RSV(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ | |
1332 | dhd_pktid_map_free((dhd), (map), (uint32)(pktid), \ | |
1333 | (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ | |
1334 | (void **)&(secdma), (dhd_pkttype_t)(pkttype), DHD_PKTID_RSV_LOCKER) | |
1335 | ||
1336 | #define DHD_PKTID_AVAIL(map) dhd_pktid_map_avail_cnt(map) | |
1337 | ||
1338 | #if defined(DHD_PKTID_AUDIT_ENABLED) | |
1339 | /** | |
1340 | * dhd_pktid_audit - Use the mwbmap to audit validity of a pktid. | |
1341 | */ | |
1342 | static int | |
1343 | dhd_pktid_audit(dhd_pub_t *dhd, dhd_pktid_map_t *pktid_map, uint32 pktid, | |
1344 | const int test_for, const char *errmsg) | |
1345 | { | |
1346 | #define DHD_PKT_AUDIT_STR "ERROR: %16s Host PktId Audit: " | |
1347 | struct bcm_mwbmap *handle; | |
1348 | uint32 flags; | |
1349 | bool ignore_audit; | |
1350 | ||
1351 | if (pktid_map == (dhd_pktid_map_t *)NULL) { | |
1352 | DHD_ERROR((DHD_PKT_AUDIT_STR "Pkt id map NULL\n", errmsg)); | |
1353 | return BCME_OK; | |
1354 | } | |
1355 | ||
1356 | flags = DHD_PKTID_AUDIT_LOCK(pktid_map->pktid_audit_lock); | |
1357 | ||
1358 | handle = pktid_map->pktid_audit; | |
1359 | if (handle == (struct bcm_mwbmap *)NULL) { | |
1360 | DHD_ERROR((DHD_PKT_AUDIT_STR "Handle NULL\n", errmsg)); | |
1361 | DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); | |
1362 | return BCME_OK; | |
1363 | } | |
1364 | ||
1365 | /* Exclude special pktids from audit */ | |
1366 | ignore_audit = (pktid == DHD_IOCTL_REQ_PKTID) || (pktid == DHD_FAKE_PKTID); | |
1367 | if (ignore_audit) { | |
1368 | DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); | |
1369 | return BCME_OK; | |
1370 | } | |
1371 | ||
1372 | if ((pktid == DHD_PKTID_INVALID) || (pktid > pktid_map->items)) { | |
1373 | DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> invalid\n", errmsg, pktid)); | |
1374 | /* lock is released in "error" */ | |
1375 | goto error; | |
1376 | } | |
1377 | ||
1378 | /* Perform audit */ | |
1379 | switch (test_for) { | |
1380 | case DHD_DUPLICATE_ALLOC: | |
1381 | if (!bcm_mwbmap_isfree(handle, pktid)) { | |
1382 | DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> alloc duplicate\n", | |
1383 | errmsg, pktid)); | |
1384 | goto error; | |
1385 | } | |
1386 | bcm_mwbmap_force(handle, pktid); | |
1387 | break; | |
1388 | ||
1389 | case DHD_DUPLICATE_FREE: | |
1390 | if (bcm_mwbmap_isfree(handle, pktid)) { | |
1391 | DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> free duplicate\n", | |
1392 | errmsg, pktid)); | |
1393 | goto error; | |
1394 | } | |
1395 | bcm_mwbmap_free(handle, pktid); | |
1396 | break; | |
1397 | ||
1398 | case DHD_TEST_IS_ALLOC: | |
1399 | if (bcm_mwbmap_isfree(handle, pktid)) { | |
1400 | DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not allocated\n", | |
1401 | errmsg, pktid)); | |
1402 | goto error; | |
1403 | } | |
1404 | break; | |
1405 | ||
1406 | case DHD_TEST_IS_FREE: | |
1407 | if (!bcm_mwbmap_isfree(handle, pktid)) { | |
1408 | DHD_ERROR((DHD_PKT_AUDIT_STR "PktId<%d> is not free", | |
1409 | errmsg, pktid)); | |
1410 | goto error; | |
1411 | } | |
1412 | break; | |
1413 | ||
1414 | default: | |
1415 | goto error; | |
1416 | } | |
1417 | ||
1418 | DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); | |
1419 | return BCME_OK; | |
1420 | ||
1421 | error: | |
1422 | ||
1423 | DHD_PKTID_AUDIT_UNLOCK(pktid_map->pktid_audit_lock, flags); | |
1424 | /* A trap mechanism may be inserted here. */ | |
1425 | dhd_pktid_error_handler(dhd); | |
1426 | ||
1427 | return BCME_ERROR; | |
1428 | } | |
1429 | ||
1430 | #define DHD_PKTID_AUDIT(dhdp, map, pktid, test_for) \ | |
1431 | dhd_pktid_audit((dhdp), (dhd_pktid_map_t *)(map), (pktid), (test_for), __FUNCTION__) | |
1432 | ||
1433 | static int | |
1434 | dhd_pktid_audit_ring_debug(dhd_pub_t *dhdp, dhd_pktid_map_t *map, uint32 pktid, | |
1435 | const int test_for, void *msg, uint32 msg_len, const char * func) | |
1436 | { | |
1437 | int ret = 0; | |
1438 | ret = DHD_PKTID_AUDIT(dhdp, map, pktid, test_for); | |
1439 | if (ret == BCME_ERROR) { | |
1440 | prhex(func, (uchar *)msg, msg_len); | |
1441 | } | |
1442 | return ret; | |
1443 | } | |
1444 | #define DHD_PKTID_AUDIT_RING_DEBUG(dhdp, map, pktid, test_for, msg, msg_len) \ | |
1445 | dhd_pktid_audit_ring_debug((dhdp), (dhd_pktid_map_t *)(map), \ | |
1446 | (pktid), (test_for), msg, msg_len, __FUNCTION__) | |
1447 | ||
1448 | #endif /* DHD_PKTID_AUDIT_ENABLED */ | |
1449 | ||
1450 | ||
1451 | /** | |
1452 | * +---------------------------------------------------------------------------+ | |
1453 | * Packet to Packet Id mapper using a <numbered_key, locker> paradigm. | |
1454 | * | |
1455 | * dhd_pktid_map manages a set of unique Packet Ids range[1..MAX_xxx_PKTID]. | |
1456 | * | |
1457 | * dhd_pktid_map_alloc() may be used to save some packet metadata, and a unique | |
1458 | * packet id is returned. This unique packet id may be used to retrieve the | |
1459 | * previously saved packet metadata, using dhd_pktid_map_free(). On invocation | |
1460 | * of dhd_pktid_map_free(), the unique packet id is essentially freed. A | |
1461 | * subsequent call to dhd_pktid_map_alloc() may reuse this packet id. | |
1462 | * | |
1463 | * Implementation Note: | |
1464 | * Convert this into a <key,locker> abstraction and move it into bcmutils. | |
1465 | * The locker abstraction should treat contents as opaque storage, and a | |
1466 | * callback should be registered to handle busy lockers in the destructor. | |
1467 | * | |
1468 | * +---------------------------------------------------------------------------+ | |
1469 | */ | |
1470 | ||
1471 | /** Allocate and initialize a mapper of num_items <numbered_key, locker> */ | |
1472 | ||
1473 | static dhd_pktid_map_handle_t * | |
1474 | dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items) | |
1475 | { | |
1476 | void* osh; | |
1477 | uint32 nkey; | |
1478 | dhd_pktid_map_t *map; | |
1479 | uint32 dhd_pktid_map_sz; | |
1480 | uint32 map_items; | |
1481 | uint32 map_keys_sz; | |
1482 | osh = dhd->osh; | |
1483 | ||
1484 | dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(num_items); | |
1485 | ||
1486 | map = (dhd_pktid_map_t *)VMALLOC(osh, dhd_pktid_map_sz); | |
1487 | if (map == NULL) { | |
1488 | DHD_ERROR(("%s:%d: MALLOC failed for size %d\n", | |
1489 | __FUNCTION__, __LINE__, dhd_pktid_map_sz)); | |
1490 | return (dhd_pktid_map_handle_t *)NULL; | |
1491 | } | |
1492 | ||
1493 | /* Initialize the map's item accounting */ | |
1494 | map->items = num_items; | |
1495 | map->avail = num_items; | |
1496 | ||
1497 | map_items = DHD_PKIDMAP_ITEMS(map->items); | |
1498 | ||
1499 | map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); | |
1500 | map->keys = (dhd_pktid_key_t *)MALLOC(osh, map_keys_sz); | |
1501 | if (map->keys == NULL) { | |
1502 | DHD_ERROR(("%s:%d: MALLOC failed for map->keys size %d\n", | |
1503 | __FUNCTION__, __LINE__, map_keys_sz)); | |
1504 | goto error; | |
1505 | } | |
1506 | ||
1507 | #if defined(DHD_PKTID_AUDIT_ENABLED) | |
1508 | /* Incarnate a hierarchical multiword bitmap for auditing pktid allocator */ | |
1509 | map->pktid_audit = bcm_mwbmap_init(osh, map_items + 1); | |
1510 | if (map->pktid_audit == (struct bcm_mwbmap *)NULL) { | |
1511 | DHD_ERROR(("%s:%d: pktid_audit init failed\r\n", __FUNCTION__, __LINE__)); | |
1512 | goto error; | |
1513 | } else { | |
1514 | DHD_ERROR(("%s:%d: pktid_audit init succeeded %d\n", | |
1515 | __FUNCTION__, __LINE__, map_items + 1)); | |
1516 | } | |
1517 | map->pktid_audit_lock = DHD_PKTID_AUDIT_LOCK_INIT(osh); | |
1518 | #endif /* DHD_PKTID_AUDIT_ENABLED */ | |
1519 | ||
1520 | for (nkey = 1; nkey <= map_items; nkey++) { /* locker #0 is reserved */ | |
1521 | map->keys[nkey] = nkey; /* populate with unique keys */ | |
1522 | map->lockers[nkey].state = LOCKER_IS_FREE; | |
1523 | map->lockers[nkey].pkt = NULL; /* bzero: redundant */ | |
1524 | map->lockers[nkey].len = 0; | |
1525 | } | |
1526 | ||
1527 | /* Reserve pktid #0, i.e. DHD_PKTID_INVALID, as in use */ | |
1528 | map->lockers[DHD_PKTID_INVALID].state = LOCKER_IS_BUSY; /* tag locker #0 as inuse */ | |
1529 | map->lockers[DHD_PKTID_INVALID].pkt = NULL; /* bzero: redundant */ | |
1530 | map->lockers[DHD_PKTID_INVALID].len = 0; | |
1531 | ||
1532 | #if defined(DHD_PKTID_AUDIT_ENABLED) | |
1533 | /* do not use dhd_pktid_audit() here, use bcm_mwbmap_force directly */ | |
1534 | bcm_mwbmap_force(map->pktid_audit, DHD_PKTID_INVALID); | |
1535 | #endif /* DHD_PKTID_AUDIT_ENABLED */ | |
1536 | ||
1537 | return (dhd_pktid_map_handle_t *)map; /* opaque handle */ | |
1538 | ||
1539 | error: | |
1540 | if (map) { | |
1541 | #if defined(DHD_PKTID_AUDIT_ENABLED) | |
1542 | if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { | |
1543 | bcm_mwbmap_fini(osh, map->pktid_audit); /* Destruct pktid_audit */ | |
1544 | map->pktid_audit = (struct bcm_mwbmap *)NULL; | |
1545 | if (map->pktid_audit_lock) | |
1546 | DHD_PKTID_AUDIT_LOCK_DEINIT(osh, map->pktid_audit_lock); | |
1547 | } | |
1548 | #endif /* DHD_PKTID_AUDIT_ENABLED */ | |
1549 | if (map->keys) { | |
1550 | MFREE(osh, map->keys, map_keys_sz); | |
1551 | } | |
1552 | VMFREE(osh, map, dhd_pktid_map_sz); | |
1553 | } | |
1554 | return (dhd_pktid_map_handle_t *)NULL; | |
1555 | } | |
1556 | ||
1557 | /** | |
1558 | * Retrieve all allocated keys and free all <numbered_key, locker>. | |
1559 | * Freeing implies unmapping the buffers and freeing the native packets. | |
1560 | * This could have been a callback registered with the pktid mapper. | |
1561 | */ | |
1562 | static void | |
1563 | dhd_pktid_map_reset(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) | |
1564 | { | |
1565 | void *osh; | |
1566 | uint32 nkey; | |
1567 | dhd_pktid_map_t *map; | |
1568 | dhd_pktid_item_t *locker; | |
1569 | uint32 map_items; | |
1570 | uint32 flags; | |
1571 | bool data_tx = FALSE; | |
1572 | ||
1573 | map = (dhd_pktid_map_t *)handle; | |
1574 | DHD_GENERAL_LOCK(dhd, flags); | |
1575 | osh = dhd->osh; | |
1576 | ||
1577 | map_items = DHD_PKIDMAP_ITEMS(map->items); | |
1578 | /* skip reserved KEY #0, and start from 1 */ | |
1579 | ||
1580 | for (nkey = 1; nkey <= map_items; nkey++) { | |
1581 | if (map->lockers[nkey].state == LOCKER_IS_BUSY) { | |
1582 | locker = &map->lockers[nkey]; | |
1583 | locker->state = LOCKER_IS_FREE; | |
1584 | data_tx = (locker->pkttype == PKTTYPE_DATA_TX); | |
1585 | if (data_tx) { | |
1586 | dhd->prot->active_tx_count--; | |
1587 | } | |
1588 | ||
1589 | #ifdef DHD_PKTID_AUDIT_RING | |
1590 | DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ | |
1591 | #endif /* DHD_PKTID_AUDIT_RING */ | |
1592 | ||
1593 | { | |
1594 | if (SECURE_DMA_ENAB(dhd->osh)) | |
1595 | SECURE_DMA_UNMAP(osh, locker->pa, | |
1596 | locker->len, locker->dir, 0, | |
1597 | locker->dmah, locker->secdma, 0); | |
1598 | else | |
1599 | DMA_UNMAP(osh, locker->pa, locker->len, | |
1600 | locker->dir, 0, locker->dmah); | |
1601 | } | |
1602 | dhd_prot_packet_free(dhd, (ulong*)locker->pkt, | |
1603 | locker->pkttype, data_tx); | |
1604 | } | |
1605 | else { | |
1606 | #ifdef DHD_PKTID_AUDIT_RING | |
1607 | DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); | |
1608 | #endif /* DHD_PKTID_AUDIT_RING */ | |
1609 | } | |
1610 | map->keys[nkey] = nkey; /* populate with unique keys */ | |
1611 | } | |
1612 | ||
1613 | map->avail = map_items; | |
1614 | memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); | |
1615 | DHD_GENERAL_UNLOCK(dhd, flags); | |
1616 | } | |
1617 | ||
1618 | #ifdef IOCTLRESP_USE_CONSTMEM | |
1619 | /** Called in detach scenario. Releasing IOCTL buffers. */ | |
1620 | static void | |
1621 | dhd_pktid_map_reset_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) | |
1622 | { | |
1623 | uint32 nkey; | |
1624 | dhd_pktid_map_t *map; | |
1625 | dhd_pktid_item_t *locker; | |
1626 | uint32 map_items; | |
1627 | uint32 flags; | |
1628 | ||
1629 | map = (dhd_pktid_map_t *)handle; | |
1630 | DHD_GENERAL_LOCK(dhd, flags); | |
1631 | ||
1632 | map_items = DHD_PKIDMAP_ITEMS(map->items); | |
1633 | /* skip reserved KEY #0, and start from 1 */ | |
1634 | for (nkey = 1; nkey <= map_items; nkey++) { | |
1635 | if (map->lockers[nkey].state == LOCKER_IS_BUSY) { | |
1636 | dhd_dma_buf_t retbuf; | |
1637 | ||
1638 | #ifdef DHD_PKTID_AUDIT_RING | |
1639 | DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* duplicate frees */ | |
1640 | #endif /* DHD_PKTID_AUDIT_RING */ | |
1641 | ||
1642 | locker = &map->lockers[nkey]; | |
1643 | retbuf.va = locker->pkt; | |
1644 | retbuf.len = locker->len; | |
1645 | retbuf.pa = locker->pa; | |
1646 | retbuf.dmah = locker->dmah; | |
1647 | retbuf.secdma = locker->secdma; | |
1648 | ||
1649 | /* This could be a callback registered with dhd_pktid_map */ | |
1650 | DHD_GENERAL_UNLOCK(dhd, flags); | |
1651 | free_ioctl_return_buffer(dhd, &retbuf); | |
1652 | DHD_GENERAL_LOCK(dhd, flags); | |
1653 | } | |
1654 | else { | |
1655 | #ifdef DHD_PKTID_AUDIT_RING | |
1656 | DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); | |
1657 | #endif /* DHD_PKTID_AUDIT_RING */ | |
1658 | } | |
1659 | map->keys[nkey] = nkey; /* populate with unique keys */ | |
1660 | } | |
1661 | ||
1662 | map->avail = map_items; | |
1663 | memset(&map->lockers[1], 0, sizeof(dhd_pktid_item_t) * map_items); | |
1664 | DHD_GENERAL_UNLOCK(dhd, flags); | |
1665 | } | |
1666 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
1667 | ||
1668 | ||
1669 | /** | |
1670 | * Free the pktid map. | |
1671 | */ | |
1672 | static void | |
1673 | dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) | |
1674 | { | |
1675 | dhd_pktid_map_t *map; | |
1676 | uint32 dhd_pktid_map_sz; | |
1677 | uint32 map_keys_sz; | |
1678 | ||
1679 | /* Free any pending packets */ | |
1680 | dhd_pktid_map_reset(dhd, handle); | |
1681 | ||
1682 | map = (dhd_pktid_map_t *)handle; | |
1683 | dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); | |
1684 | map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); | |
1685 | ||
1686 | #if defined(DHD_PKTID_AUDIT_ENABLED) | |
1687 | if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { | |
1688 | bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ | |
1689 | map->pktid_audit = (struct bcm_mwbmap *)NULL; | |
1690 | if (map->pktid_audit_lock) { | |
1691 | DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); | |
1692 | } | |
1693 | } | |
1694 | #endif /* DHD_PKTID_AUDIT_ENABLED */ | |
1695 | MFREE(dhd->osh, map->keys, map_keys_sz); | |
1696 | VMFREE(dhd->osh, handle, dhd_pktid_map_sz); | |
1697 | } | |
1698 | #ifdef IOCTLRESP_USE_CONSTMEM | |
1699 | static void | |
1700 | dhd_pktid_map_fini_ioctl(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle) | |
1701 | { | |
1702 | dhd_pktid_map_t *map; | |
1703 | uint32 dhd_pktid_map_sz; | |
1704 | uint32 map_keys_sz; | |
1705 | ||
1706 | /* Free any pending packets */ | |
1707 | dhd_pktid_map_reset_ioctl(dhd, handle); | |
1708 | ||
1709 | map = (dhd_pktid_map_t *)handle; | |
1710 | dhd_pktid_map_sz = DHD_PKTID_MAP_SZ(map->items); | |
1711 | map_keys_sz = DHD_PKTIDMAP_KEYS_SZ(map->items); | |
1712 | ||
1713 | #if defined(DHD_PKTID_AUDIT_ENABLED) | |
1714 | if (map->pktid_audit != (struct bcm_mwbmap *)NULL) { | |
1715 | bcm_mwbmap_fini(dhd->osh, map->pktid_audit); /* Destruct pktid_audit */ | |
1716 | map->pktid_audit = (struct bcm_mwbmap *)NULL; | |
1717 | if (map->pktid_audit_lock) { | |
1718 | DHD_PKTID_AUDIT_LOCK_DEINIT(dhd->osh, map->pktid_audit_lock); | |
1719 | } | |
1720 | } | |
1721 | #endif /* DHD_PKTID_AUDIT_ENABLED */ | |
1722 | ||
1723 | MFREE(dhd->osh, map->keys, map_keys_sz); | |
1724 | VMFREE(dhd->osh, handle, dhd_pktid_map_sz); | |
1725 | } | |
1726 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
1727 | ||
1728 | /** Get the pktid free count */ | |
1729 | static INLINE uint32 BCMFASTPATH | |
1730 | dhd_pktid_map_avail_cnt(dhd_pktid_map_handle_t *handle) | |
1731 | { | |
1732 | dhd_pktid_map_t *map; | |
1733 | uint32 avail; | |
1734 | ||
1735 | ASSERT(handle != NULL); | |
1736 | map = (dhd_pktid_map_t *)handle; | |
1737 | ||
1738 | avail = map->avail; | |
1739 | ||
1740 | return avail; | |
1741 | } | |
1742 | ||
1743 | /** | |
1744 | * dhd_pktid_map_reserve - reserve a unique numbered key. Reserved locker is not | |
1745 | * yet populated. Invoke the pktid save api to populate the packet parameters | |
1746 | * into the locker. This function is not reentrant; serialization is the | |
1747 | * caller's responsibility. The caller must treat a returned value of | |
1748 | * DHD_PKTID_INVALID as a failure case, implying a depleted pool of pktids. | |
1749 | */ | |
1750 | static INLINE uint32 | |
1751 | dhd_pktid_map_reserve(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, | |
1752 | void *pkt, dhd_pkttype_t pkttype) | |
1753 | { | |
1754 | uint32 nkey; | |
1755 | dhd_pktid_map_t *map; | |
1756 | dhd_pktid_item_t *locker; | |
1757 | ||
1758 | ASSERT(handle != NULL); | |
1759 | map = (dhd_pktid_map_t *)handle; | |
1760 | ||
1761 | if ((int)(map->avail) <= 0) { /* no more pktids to allocate */ | |
1762 | map->failures++; | |
1763 | DHD_INFO(("%s:%d: failed, no free keys\n", __FUNCTION__, __LINE__)); | |
1764 | return DHD_PKTID_INVALID; /* failed alloc request */ | |
1765 | } | |
1766 | ||
1767 | ASSERT(map->avail <= map->items); | |
1768 | nkey = map->keys[map->avail]; /* fetch a free locker, pop stack */ | |
1769 | ||
1770 | if ((map->avail > map->items) || (nkey > map->items)) { | |
1771 | map->failures++; | |
1772 | DHD_ERROR(("%s:%d: failed to allocate a new pktid," | |
1773 | " map->avail<%u>, nkey<%u>, pkttype<%u>\n", | |
1774 | __FUNCTION__, __LINE__, map->avail, nkey, | |
1775 | pkttype)); | |
1776 | return DHD_PKTID_INVALID; /* failed alloc request */ | |
1777 | } | |
1778 | ||
1779 | locker = &map->lockers[nkey]; /* save packet metadata in locker */ | |
1780 | map->avail--; | |
1781 | locker->pkt = pkt; /* pkt is saved, other params not yet saved. */ | |
1782 | locker->len = 0; | |
1783 | locker->state = LOCKER_IS_BUSY; /* reserve this locker */ | |
1784 | ||
1785 | ASSERT(nkey != DHD_PKTID_INVALID); | |
1786 | return nkey; /* return locker's numbered key */ | |
1787 | } | |
1788 | ||
1789 | /* | |
1790 | * dhd_pktid_map_save - Save a packet's parameters into a locker | |
1791 | * corresponding to a previously reserved unique numbered key. | |
1792 | */ | |
1793 | static INLINE void | |
1794 | dhd_pktid_map_save(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, | |
1795 | uint32 nkey, dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, | |
1796 | dhd_pkttype_t pkttype) | |
1797 | { | |
1798 | dhd_pktid_map_t *map; | |
1799 | dhd_pktid_item_t *locker; | |
1800 | ||
1801 | ASSERT(handle != NULL); | |
1802 | map = (dhd_pktid_map_t *)handle; | |
1803 | ||
1804 | if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { | |
1805 | DHD_ERROR(("%s:%d: Error! saving invalid pktid<%u> pkttype<%u>\n", | |
1806 | __FUNCTION__, __LINE__, nkey, pkttype)); | |
1807 | #ifdef DHD_FW_COREDUMP | |
1808 | if (dhd->memdump_enabled) { | |
1809 | /* collect core dump */ | |
1810 | dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; | |
1811 | dhd_bus_mem_dump(dhd); | |
1812 | } | |
1813 | #else | |
1814 | ASSERT(0); | |
1815 | #endif /* DHD_FW_COREDUMP */ | |
1816 | return; | |
1817 | } | |
1818 | ||
1819 | locker = &map->lockers[nkey]; | |
1820 | ||
1821 | ASSERT(((locker->state == LOCKER_IS_BUSY) && (locker->pkt == pkt)) || | |
1822 | ((locker->state == LOCKER_IS_RSVD) && (locker->pkt == NULL))); | |
1823 | ||
1824 | /* store contents in locker */ | |
1825 | locker->dir = dir; | |
1826 | locker->pa = pa; | |
1827 | locker->len = (uint16)len; /* 16bit len */ | |
1828 | locker->dmah = dmah; /* OS specific DMA map handle */ | |
1829 | locker->secdma = secdma; | |
1830 | locker->pkttype = pkttype; | |
1831 | locker->pkt = pkt; | |
1832 | locker->state = LOCKER_IS_BUSY; /* make this locker busy */ | |
1833 | } | |
1834 | ||
1835 | /** | |
1836 | * dhd_pktid_map_alloc - Allocate a unique numbered key and save the packet | |
1837 | * contents into the corresponding locker. Return the numbered key. | |
1838 | */ | |
1839 | static uint32 BCMFASTPATH | |
1840 | dhd_pktid_map_alloc(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, void *pkt, | |
1841 | dmaaddr_t pa, uint32 len, uint8 dir, void *dmah, void *secdma, | |
1842 | dhd_pkttype_t pkttype) | |
1843 | { | |
1844 | uint32 nkey; | |
1845 | ||
1846 | nkey = dhd_pktid_map_reserve(dhd, handle, pkt, pkttype); | |
1847 | if (nkey != DHD_PKTID_INVALID) { | |
1848 | dhd_pktid_map_save(dhd, handle, pkt, nkey, pa, | |
1849 | len, dir, dmah, secdma, pkttype); | |
1850 | } | |
1851 | ||
1852 | return nkey; | |
1853 | } | |
1854 | ||
1855 | /** | |
1856 | * dhd_pktid_map_free - Given a numbered key, return the locker contents. | |
1857 | * dhd_pktid_map_free() is not reentrant; serialization is the caller's responsibility. | |
1858 | * Caller may not free a pktid value DHD_PKTID_INVALID or an arbitrary pktid | |
1859 | * value. Only a previously allocated pktid may be freed. | |
1860 | */ | |
1861 | static void * BCMFASTPATH | |
1862 | dhd_pktid_map_free(dhd_pub_t *dhd, dhd_pktid_map_handle_t *handle, uint32 nkey, | |
1863 | dmaaddr_t *pa, uint32 *len, void **dmah, void **secdma, dhd_pkttype_t pkttype, | |
1864 | bool rsv_locker) | |
1865 | { | |
1866 | dhd_pktid_map_t *map; | |
1867 | dhd_pktid_item_t *locker; | |
1868 | void * pkt; | |
1869 | unsigned long long locker_addr; | |
1870 | ||
1871 | ASSERT(handle != NULL); | |
1872 | ||
1873 | map = (dhd_pktid_map_t *)handle; | |
1874 | ||
1875 | if ((nkey == DHD_PKTID_INVALID) || (nkey > DHD_PKIDMAP_ITEMS(map->items))) { | |
1876 | DHD_ERROR(("%s:%d: Error! Try to free invalid pktid<%u>, pkttype<%d>\n", | |
1877 | __FUNCTION__, __LINE__, nkey, pkttype)); | |
1878 | #ifdef DHD_FW_COREDUMP | |
1879 | if (dhd->memdump_enabled) { | |
1880 | /* collect core dump */ | |
1881 | dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; | |
1882 | dhd_bus_mem_dump(dhd); | |
1883 | } | |
1884 | #else | |
1885 | ASSERT(0); | |
1886 | #endif /* DHD_FW_COREDUMP */ | |
1887 | return NULL; | |
1888 | } | |
1889 | ||
1890 | locker = &map->lockers[nkey]; | |
1891 | ||
1892 | #if defined(DHD_PKTID_AUDIT_MAP) | |
1893 | DHD_PKTID_AUDIT(dhd, map, nkey, DHD_DUPLICATE_FREE); /* Audit duplicate FREE */ | |
1894 | #endif /* DHD_PKTID_AUDIT_MAP */ | |
1895 | ||
1896 | /* Debug check for cloned numbered key */ | |
1897 | if (locker->state == LOCKER_IS_FREE) { | |
1898 | DHD_ERROR(("%s:%d: Error! freeing already freed invalid pktid<%u>\n", | |
1899 | __FUNCTION__, __LINE__, nkey)); | |
1900 | #ifdef DHD_FW_COREDUMP | |
1901 | if (dhd->memdump_enabled) { | |
1902 | /* collect core dump */ | |
1903 | dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; | |
1904 | dhd_bus_mem_dump(dhd); | |
1905 | } | |
1906 | #else | |
1907 | ASSERT(0); | |
1908 | #endif /* DHD_FW_COREDUMP */ | |
1909 | return NULL; | |
1910 | } | |
1911 | ||
1912 | /* Check the "colour" of the buffer, i.e. a buffer posted for TX | |
1913 | * must be freed on TX completion. Similarly, a buffer posted for | |
1914 | * IOCTL must be freed on IOCTL completion, etc. | |
1915 | */ | |
1916 | if ((pkttype != PKTTYPE_NO_CHECK) && (locker->pkttype != pkttype)) { | |
1917 | ||
1918 | DHD_ERROR(("%s:%d: Error! Invalid Buffer Free for pktid<%u> \n", | |
1919 | __FUNCTION__, __LINE__, nkey)); | |
1920 | #ifdef BCMDMA64OSL | |
1921 | PHYSADDRTOULONG(locker->pa, locker_addr); | |
1922 | #else | |
1923 | locker_addr = PHYSADDRLO(locker->pa); | |
1924 | #endif /* BCMDMA64OSL */ | |
1925 | DHD_ERROR(("%s:%d: locker->state <%d>, locker->pkttype <%d>," | |
1926 | "pkttype <%d> locker->pa <0x%llx> \n", | |
1927 | __FUNCTION__, __LINE__, locker->state, locker->pkttype, | |
1928 | pkttype, locker_addr)); | |
1929 | #ifdef DHD_FW_COREDUMP | |
1930 | if (dhd->memdump_enabled) { | |
1931 | /* collect core dump */ | |
1932 | dhd->memdump_type = DUMP_TYPE_PKTID_INVALID; | |
1933 | dhd_bus_mem_dump(dhd); | |
1934 | } | |
1935 | #else | |
1936 | ASSERT(0); | |
1937 | #endif /* DHD_FW_COREDUMP */ | |
1938 | return NULL; | |
1939 | } | |
1940 | ||
1941 | if (rsv_locker == DHD_PKTID_FREE_LOCKER) { | |
1942 | map->avail++; | |
1943 | map->keys[map->avail] = nkey; /* make this numbered key available */ | |
1944 | locker->state = LOCKER_IS_FREE; /* open and free Locker */ | |
1945 | } else { | |
1946 | /* pktid will be reused, but the locker does not have a valid pkt */ | |
1947 | locker->state = LOCKER_IS_RSVD; | |
1948 | } | |
1949 | ||
1950 | #if defined(DHD_PKTID_AUDIT_MAP) | |
1951 | DHD_PKTID_AUDIT(dhd, map, nkey, DHD_TEST_IS_FREE); | |
1952 | #endif /* DHD_PKTID_AUDIT_MAP */ | |
1953 | ||
1954 | *pa = locker->pa; /* return contents of locker */ | |
1955 | *len = (uint32)locker->len; | |
1956 | *dmah = locker->dmah; | |
1957 | *secdma = locker->secdma; | |
1958 | ||
1959 | pkt = locker->pkt; | |
1960 | locker->pkt = NULL; /* Clear pkt */ | |
1961 | locker->len = 0; | |
1962 | ||
1963 | return pkt; | |
1964 | } | |
1965 | ||
1966 | #else /* ! DHD_PCIE_PKTID */ | |
1967 | ||
1968 | ||
1969 | typedef struct pktlist { | |
1970 | PKT_LIST *tx_pkt_list; /* list for tx packets */ | |
1971 | PKT_LIST *rx_pkt_list; /* list for rx packets */ | |
1972 | PKT_LIST *ctrl_pkt_list; /* list for ioctl/event buf post */ | |
1973 | } pktlists_t; | |
1974 | ||
1975 | /* | |
1976 | * Given that each workitem only uses a 32bit pktid, only 32bit hosts can use | |
1977 | * a one-to-one mapping between a 32bit pktptr and a 32bit pktid. | |
1978 | * | |
1979 | * - When PKTIDMAP is not used, DHD_NATIVE_TO_PKTID variants will never fail. | |
1980 | * - Neither DHD_NATIVE_TO_PKTID nor DHD_PKTID_TO_NATIVE need to be protected by | |
1981 | * a lock. | |
1982 | * - Hence DHD_PKTID_INVALID is not defined when DHD_PCIE_PKTID is undefined. | |
1983 | */ | |
1984 | #define DHD_PKTID32(pktptr32) ((uint32)(pktptr32)) | |
1985 | #define DHD_PKTPTR32(pktid32) ((void *)(pktid32)) | |
1986 | ||
1987 | ||
1988 | static INLINE uint32 dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, | |
1989 | dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, | |
1990 | dhd_pkttype_t pkttype); | |
1991 | static INLINE void * dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, | |
1992 | dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, | |
1993 | dhd_pkttype_t pkttype); | |
1994 | ||
1995 | static dhd_pktid_map_handle_t * | |
1996 | dhd_pktid_map_init(dhd_pub_t *dhd, uint32 num_items) | |
1997 | { | |
1998 | osl_t *osh = dhd->osh; | |
1999 | pktlists_t *handle = NULL; | |
2000 | ||
2001 | if ((handle = (pktlists_t *) MALLOCZ(osh, sizeof(pktlists_t))) == NULL) { | |
2002 | DHD_ERROR(("%s:%d: MALLOC failed for lists allocation, size=%d\n", | |
2003 | __FUNCTION__, __LINE__, sizeof(pktlists_t))); | |
2004 | goto error_done; | |
2005 | } | |
2006 | ||
2007 | if ((handle->tx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { | |
2008 | DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", | |
2009 | __FUNCTION__, __LINE__, (int)sizeof(PKT_LIST))); | |
2010 | goto error; | |
2011 | } | |
2012 | ||
2013 | if ((handle->rx_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { | |
2014 | DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", | |
2015 | __FUNCTION__, __LINE__, (int)sizeof(PKT_LIST))); | |
2016 | goto error; | |
2017 | } | |
2018 | ||
2019 | if ((handle->ctrl_pkt_list = (PKT_LIST *) MALLOC(osh, sizeof(PKT_LIST))) == NULL) { | |
2020 | DHD_ERROR(("%s:%d: MALLOC failed for list allocation, size=%d\n", | |
2021 | __FUNCTION__, __LINE__, (int)sizeof(PKT_LIST))); | |
2022 | goto error; | |
2023 | } | |
2024 | ||
2025 | PKTLIST_INIT(handle->tx_pkt_list); | |
2026 | PKTLIST_INIT(handle->rx_pkt_list); | |
2027 | PKTLIST_INIT(handle->ctrl_pkt_list); | |
2028 | ||
2029 | return (dhd_pktid_map_handle_t *) handle; | |
2030 | ||
2031 | error: | |
2032 | if (handle->ctrl_pkt_list) { | |
2033 | MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); | |
2034 | } | |
2035 | ||
2036 | if (handle->rx_pkt_list) { | |
2037 | MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); | |
2038 | } | |
2039 | ||
2040 | if (handle->tx_pkt_list) { | |
2041 | MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); | |
2042 | } | |
2043 | ||
2044 | if (handle) { | |
2045 | MFREE(osh, handle, sizeof(pktlists_t)); | |
2046 | } | |
2047 | ||
2048 | error_done: | |
2049 | return (dhd_pktid_map_handle_t *)NULL; | |
2050 | } | |
2051 | ||
2052 | static void | |
2053 | dhd_pktid_map_fini(dhd_pub_t *dhd, dhd_pktid_map_handle_t *map) | |
2054 | { | |
2055 | osl_t *osh = dhd->osh; | |
2056 | pktlists_t *handle = (pktlists_t *) map; | |
2057 | ||
2058 | ASSERT(handle != NULL); | |
2059 | if (handle == (pktlists_t *)NULL) | |
2060 | return; | |
2061 | ||
2062 | if (handle->ctrl_pkt_list) { | |
2063 | PKTLIST_FINI(handle->ctrl_pkt_list); | |
2064 | MFREE(osh, handle->ctrl_pkt_list, sizeof(PKT_LIST)); | |
2065 | } | |
2066 | ||
2067 | if (handle->rx_pkt_list) { | |
2068 | PKTLIST_FINI(handle->rx_pkt_list); | |
2069 | MFREE(osh, handle->rx_pkt_list, sizeof(PKT_LIST)); | |
2070 | } | |
2071 | ||
2072 | if (handle->tx_pkt_list) { | |
2073 | PKTLIST_FINI(handle->tx_pkt_list); | |
2074 | MFREE(osh, handle->tx_pkt_list, sizeof(PKT_LIST)); | |
2075 | } | |
2076 | ||
2077 | if (handle) { | |
2078 | MFREE(osh, handle, sizeof(pktlists_t)); | |
2079 | } | |
2080 | } | |
2081 | ||
2082 | /** Save dma parameters into the packet's pkttag and convert a pktptr to pktid */ | |
2083 | static INLINE uint32 | |
2084 | dhd_native_to_pktid(dhd_pktid_map_handle_t *map, void *pktptr32, | |
2085 | dmaaddr_t pa, uint32 dma_len, void *dmah, void *secdma, | |
2086 | dhd_pkttype_t pkttype) | |
2087 | { | |
2088 | pktlists_t *handle = (pktlists_t *) map; | |
2089 | ASSERT(pktptr32 != NULL); | |
2090 | DHD_PKT_SET_DMA_LEN(pktptr32, dma_len); | |
2091 | DHD_PKT_SET_DMAH(pktptr32, dmah); | |
2092 | DHD_PKT_SET_PA(pktptr32, pa); | |
2093 | DHD_PKT_SET_SECDMA(pktptr32, secdma); | |
2094 | ||
2095 | if (pkttype == PKTTYPE_DATA_TX) { | |
2096 | PKTLIST_ENQ(handle->tx_pkt_list, pktptr32); | |
2097 | } else if (pkttype == PKTTYPE_DATA_RX) { | |
2098 | PKTLIST_ENQ(handle->rx_pkt_list, pktptr32); | |
2099 | } else { | |
2100 | PKTLIST_ENQ(handle->ctrl_pkt_list, pktptr32); | |
2101 | } | |
2102 | ||
2103 | return DHD_PKTID32(pktptr32); | |
2104 | } | |
2105 | ||
2106 | /** Convert a pktid to pktptr and retrieve saved dma parameters from packet */ | |
2107 | static INLINE void * | |
2108 | dhd_pktid_to_native(dhd_pktid_map_handle_t *map, uint32 pktid32, | |
2109 | dmaaddr_t *pa, uint32 *dma_len, void **dmah, void **secdma, | |
2110 | dhd_pkttype_t pkttype) | |
2111 | { | |
2112 | pktlists_t *handle = (pktlists_t *) map; | |
2113 | void *pktptr32; | |
2114 | ||
2115 | ASSERT(pktid32 != 0U); | |
2116 | pktptr32 = DHD_PKTPTR32(pktid32); | |
2117 | *dma_len = DHD_PKT_GET_DMA_LEN(pktptr32); | |
2118 | *dmah = DHD_PKT_GET_DMAH(pktptr32); | |
2119 | *pa = DHD_PKT_GET_PA(pktptr32); | |
2120 | *secdma = DHD_PKT_GET_SECDMA(pktptr32); | |
2121 | ||
2122 | if (pkttype == PKTTYPE_DATA_TX) { | |
2123 | PKTLIST_UNLINK(handle->tx_pkt_list, pktptr32); | |
2124 | } else if (pkttype == PKTTYPE_DATA_RX) { | |
2125 | PKTLIST_UNLINK(handle->rx_pkt_list, pktptr32); | |
2126 | } else { | |
2127 | PKTLIST_UNLINK(handle->ctrl_pkt_list, pktptr32); | |
2128 | } | |
2129 | ||
2130 | return pktptr32; | |
2131 | } | |
2132 | ||
2133 | #define DHD_NATIVE_TO_PKTID_RSV(dhd, map, pkt, pkttype) DHD_PKTID32(pkt) | |
2134 | ||
2135 | #define DHD_NATIVE_TO_PKTID_SAVE(dhd, map, pkt, nkey, pa, len, dma_dir, dmah, secdma, pkttype) \ | |
2136 | ({ BCM_REFERENCE(dhd); BCM_REFERENCE(nkey); BCM_REFERENCE(dma_dir); \ | |
2137 | dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ | |
2138 | (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ | |
2139 | }) | |
2140 | ||
2141 | #define DHD_NATIVE_TO_PKTID(dhd, map, pkt, pa, len, dma_dir, dmah, secdma, pkttype) \ | |
2142 | ({ BCM_REFERENCE(dhd); BCM_REFERENCE(dma_dir); \ | |
2143 | dhd_native_to_pktid((dhd_pktid_map_handle_t *) map, (pkt), (pa), (len), \ | |
2144 | (dmah), (secdma), (dhd_pkttype_t)(pkttype)); \ | |
2145 | }) | |
2146 | ||
2147 | #define DHD_PKTID_TO_NATIVE(dhd, map, pktid, pa, len, dmah, secdma, pkttype) \ | |
2148 | ({ BCM_REFERENCE(dhd); BCM_REFERENCE(pkttype); \ | |
2149 | dhd_pktid_to_native((dhd_pktid_map_handle_t *) map, (uint32)(pktid), \ | |
2150 | (dmaaddr_t *)&(pa), (uint32 *)&(len), (void **)&(dmah), \ | |
2151 | (void **)&secdma, (dhd_pkttype_t)(pkttype)); \ | |
2152 | }) | |
2153 | ||
2154 | #define DHD_PKTID_AVAIL(map) (~0) | |
2155 | ||
2156 | #endif /* ! DHD_PCIE_PKTID */ | |
2157 | ||
2158 | /* +------------------ End of PCIE DHD PKTID MAPPER -----------------------+ */ | |
2159 | ||
2160 | ||
2161 | /** | |
2162 | * The PCIE FD protocol layer is constructed in two phases: | |
2163 | * Phase 1. dhd_prot_attach() | |
2164 | * Phase 2. dhd_prot_init() | |
2165 | * | |
2166 | * dhd_prot_attach() - Allocates a dhd_prot_t object and resets all its fields. | |
2167 | * All common rings are also attached (msgbuf_ring_t objects are allocated | |
2168 | * with DMA-able buffers). | |
2169 | * All dhd_dma_buf_t objects are also allocated here. | |
2170 | * | |
2171 | * As dhd_prot_attach is invoked before the pcie_shared object is read, any | |
2172 | * initialization of objects that requires information advertised by the dongle | |
2173 | * may not be performed here. | |
2174 | * E.g. the number of TxPost flowrings is not known at this point, nor do | |
2175 | * we know which form of D2H DMA sync mechanism is advertised by the dongle, or | |
2176 | * whether the dongle supports DMA-ing of WR/RD indices for the H2D and/or D2H | |
2177 | * rings (common + flow). | |
2178 | * | |
2179 | * dhd_prot_init() is invoked after the bus layer has fetched the information | |
2180 | * advertised by the dongle in the pcie_shared_t. | |
2181 | */ | |
2182 | int | |
2183 | dhd_prot_attach(dhd_pub_t *dhd) | |
2184 | { | |
2185 | osl_t *osh = dhd->osh; | |
2186 | dhd_prot_t *prot; | |
2187 | ||
2188 | /* Allocate prot structure */ | |
2189 | if (!(prot = (dhd_prot_t *)DHD_OS_PREALLOC(dhd, DHD_PREALLOC_PROT, | |
2190 | sizeof(dhd_prot_t)))) { | |
2191 | DHD_ERROR(("%s: kmalloc failed\n", __FUNCTION__)); | |
2192 | goto fail; | |
2193 | } | |
2194 | memset(prot, 0, sizeof(*prot)); | |
2195 | ||
2196 | prot->osh = osh; | |
2197 | dhd->prot = prot; | |
2198 | ||
2199 | /* Is DMA'ing of ring WR/RD indices supported? FALSE by default */ | |
2200 | dhd->dma_d2h_ring_upd_support = FALSE; | |
2201 | dhd->dma_h2d_ring_upd_support = FALSE; | |
2202 | dhd->dma_ring_upd_overwrite = FALSE; | |
2203 | ||
2204 | dhd->idma_inited = 0; | |
2205 | dhd->ifrm_inited = 0; | |
2206 | ||
2207 | /* Common Ring Allocations */ | |
2208 | ||
2209 | /* Ring 0: H2D Control Submission */ | |
2210 | if (dhd_prot_ring_attach(dhd, &prot->h2dring_ctrl_subn, "h2dctrl", | |
2211 | H2DRING_CTRL_SUB_MAX_ITEM, H2DRING_CTRL_SUB_ITEMSIZE, | |
2212 | BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) != BCME_OK) { | |
2213 | DHD_ERROR(("%s: dhd_prot_ring_attach H2D Ctrl Submission failed\n", | |
2214 | __FUNCTION__)); | |
2215 | goto fail; | |
2216 | } | |
2217 | ||
2218 | /* Ring 1: H2D Receive Buffer Post */ | |
2219 | if (dhd_prot_ring_attach(dhd, &prot->h2dring_rxp_subn, "h2drxp", | |
2220 | H2DRING_RXPOST_MAX_ITEM, H2DRING_RXPOST_ITEMSIZE, | |
2221 | BCMPCIE_H2D_MSGRING_RXPOST_SUBMIT) != BCME_OK) { | |
2222 | DHD_ERROR(("%s: dhd_prot_ring_attach H2D RxPost failed\n", | |
2223 | __FUNCTION__)); | |
2224 | goto fail; | |
2225 | } | |
2226 | ||
2227 | /* Ring 2: D2H Control Completion */ | |
2228 | if (dhd_prot_ring_attach(dhd, &prot->d2hring_ctrl_cpln, "d2hctrl", | |
2229 | D2HRING_CTRL_CMPLT_MAX_ITEM, D2HRING_CTRL_CMPLT_ITEMSIZE, | |
2230 | BCMPCIE_D2H_MSGRING_CONTROL_COMPLETE) != BCME_OK) { | |
2231 | DHD_ERROR(("%s: dhd_prot_ring_attach D2H Ctrl Completion failed\n", | |
2232 | __FUNCTION__)); | |
2233 | goto fail; | |
2234 | } | |
2235 | ||
2236 | /* Ring 3: D2H Transmit Complete */ | |
2237 | if (dhd_prot_ring_attach(dhd, &prot->d2hring_tx_cpln, "d2htxcpl", | |
2238 | D2HRING_TXCMPLT_MAX_ITEM, D2HRING_TXCMPLT_ITEMSIZE, | |
2239 | BCMPCIE_D2H_MSGRING_TX_COMPLETE) != BCME_OK) { | |
2240 | DHD_ERROR(("%s: dhd_prot_ring_attach D2H Tx Completion failed\n", | |
2241 | __FUNCTION__)); | |
2242 | goto fail; | |
2243 | ||
2244 | } | |
2245 | ||
2246 | /* Ring 4: D2H Receive Complete */ | |
2247 | if (dhd_prot_ring_attach(dhd, &prot->d2hring_rx_cpln, "d2hrxcpl", | |
2248 | D2HRING_RXCMPLT_MAX_ITEM, D2HRING_RXCMPLT_ITEMSIZE, | |
2249 | BCMPCIE_D2H_MSGRING_RX_COMPLETE) != BCME_OK) { | |
2250 | DHD_ERROR(("%s: dhd_prot_ring_attach D2H Rx Completion failed\n", | |
2251 | __FUNCTION__)); | |
2252 | goto fail; | |
2253 | ||
2254 | } | |
2255 | ||
2256 | /* | |
2257 | * Max number of flowrings is not yet known. msgbuf_ring_t with DMA-able | |
2258 | * buffers for flowrings will be instantiated, in dhd_prot_init() . | |
2259 | * See dhd_prot_flowrings_pool_attach() | |
2260 | */ | |
2261 | /* ioctl response buffer */ | |
2262 | if (dhd_dma_buf_alloc(dhd, &prot->retbuf, IOCT_RETBUF_SIZE)) { | |
2263 | goto fail; | |
2264 | } | |
2265 | ||
2266 | /* IOCTL request buffer */ | |
2267 | if (dhd_dma_buf_alloc(dhd, &prot->ioctbuf, IOCT_RETBUF_SIZE)) { | |
2268 | goto fail; | |
2269 | } | |
2270 | ||
2271 | /* Host TS request buffer one buffer for now */ | |
2272 | if (dhd_dma_buf_alloc(dhd, &prot->hostts_req_buf, CTRLSUB_HOSTTS_MEESAGE_SIZE)) { | |
2273 | goto fail; | |
2274 | } | |
2275 | prot->hostts_req_buf_inuse = FALSE; | |
2276 | ||
2277 | /* Scratch buffer for dma rx offset */ | |
2278 | #ifdef BCM_HOST_BUF | |
2279 | if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, | |
2280 | ROUNDUP(DMA_D2H_SCRATCH_BUF_LEN, 16) + DMA_HOST_BUFFER_LEN)) { | |
2281 | #else | |
2282 | if (dhd_dma_buf_alloc(dhd, &prot->d2h_dma_scratch_buf, DMA_D2H_SCRATCH_BUF_LEN)) { | |
2283 | ||
2284 | #endif /* BCM_HOST_BUF */ | |
2285 | goto fail; | |
2286 | } | |
2287 | ||
2288 | /* scratch buffer bus throughput measurement */ | |
2289 | if (dhd_dma_buf_alloc(dhd, &prot->host_bus_throughput_buf, DHD_BUS_TPUT_BUF_LEN)) { | |
2290 | goto fail; | |
2291 | } | |
2292 | ||
2293 | #ifdef DHD_RX_CHAINING | |
2294 | dhd_rxchain_reset(&prot->rxchain); | |
2295 | #endif | |
2296 | ||
2297 | prot->rx_lock = dhd_os_spin_lock_init(dhd->osh); | |
2298 | ||
2299 | prot->pktid_ctrl_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_CTRL_PKTID); | |
2300 | if (prot->pktid_ctrl_map == NULL) { | |
2301 | goto fail; | |
2302 | } | |
2303 | ||
2304 | prot->pktid_rx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_RX_PKTID); | |
2305 | if (prot->pktid_rx_map == NULL) | |
2306 | goto fail; | |
2307 | ||
2308 | prot->pktid_tx_map = DHD_NATIVE_TO_PKTID_INIT(dhd, MAX_TX_PKTID); | |
2309 | if (prot->pktid_tx_map == NULL) | |
2310 | goto fail; | |
2311 | ||
2312 | #ifdef IOCTLRESP_USE_CONSTMEM | |
2313 | prot->pktid_map_handle_ioctl = DHD_NATIVE_TO_PKTID_INIT(dhd, | |
2314 | DHD_FLOWRING_MAX_IOCTLRESPBUF_POST); | |
2315 | if (prot->pktid_map_handle_ioctl == NULL) { | |
2316 | goto fail; | |
2317 | } | |
2318 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
2319 | ||
2320 | /* Initialize the work queues to be used by the Load Balancing logic */ | |
2321 | #if defined(DHD_LB_TXC) | |
2322 | { | |
2323 | void *buffer; | |
2324 | buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ); | |
2325 | bcm_workq_init(&prot->tx_compl_prod, &prot->tx_compl_cons, | |
2326 | buffer, DHD_LB_WORKQ_SZ); | |
2327 | prot->tx_compl_prod_sync = 0; | |
2328 | DHD_INFO(("%s: created tx_compl_workq <%p,%d>\n", | |
2329 | __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); | |
2330 | } | |
2331 | #endif /* DHD_LB_TXC */ | |
2332 | ||
2333 | #if defined(DHD_LB_RXC) | |
2334 | { | |
2335 | void *buffer; | |
2336 | buffer = MALLOC(dhd->osh, sizeof(void*) * DHD_LB_WORKQ_SZ); | |
2337 | bcm_workq_init(&prot->rx_compl_prod, &prot->rx_compl_cons, | |
2338 | buffer, DHD_LB_WORKQ_SZ); | |
2339 | prot->rx_compl_prod_sync = 0; | |
2340 | DHD_INFO(("%s: created rx_compl_workq <%p,%d>\n", | |
2341 | __FUNCTION__, buffer, DHD_LB_WORKQ_SZ)); | |
2342 | } | |
2343 | #endif /* DHD_LB_RXC */ | |
2344 | /* Initialize trap buffer */ | |
2345 | if (dhd_dma_buf_alloc(dhd, &dhd->prot->fw_trap_buf, BCMPCIE_EXT_TRAP_DATA_MAXLEN)) { | |
2346 | DHD_ERROR(("%s: dhd_init_trap_buffer falied\n", __FUNCTION__)); | |
2347 | goto fail; | |
2348 | } | |
2349 | ||
2350 | return BCME_OK; | |
2351 | ||
2352 | fail: | |
2353 | ||
2354 | #ifndef CONFIG_DHD_USE_STATIC_BUF | |
2355 | if (prot != NULL) { | |
2356 | dhd_prot_detach(dhd); | |
2357 | } | |
2358 | #endif /* CONFIG_DHD_USE_STATIC_BUF */ | |
2359 | ||
2360 | return BCME_NOMEM; | |
2361 | } /* dhd_prot_attach */ | |
2362 | ||
2363 | void | |
2364 | dhd_set_host_cap(dhd_pub_t *dhd) | |
2365 | { | |
2366 | uint32 data = 0; | |
2367 | dhd_prot_t *prot = dhd->prot; | |
2368 | ||
2369 | if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { | |
2370 | if (dhd->h2d_phase_supported) { | |
2371 | ||
2372 | data |= HOSTCAP_H2D_VALID_PHASE; | |
2373 | ||
2374 | if (dhd->force_dongletrap_on_bad_h2d_phase) { | |
2375 | data |= HOSTCAP_H2D_ENABLE_TRAP_ON_BADPHASE; | |
2376 | } | |
2377 | } | |
2378 | if (prot->host_ipc_version > prot->device_ipc_version) { | |
2379 | prot->active_ipc_version = prot->device_ipc_version; | |
2380 | } else { | |
2381 | prot->active_ipc_version = prot->host_ipc_version; | |
2382 | } | |
2383 | ||
2384 | data |= prot->active_ipc_version; | |
2385 | ||
2386 | if (dhdpcie_bus_get_pcie_hostready_supported(dhd->bus)) { | |
2387 | ||
2388 | DHD_INFO(("Advertise Hostready Capability\n")); | |
2389 | ||
2390 | data |= HOSTCAP_H2D_ENABLE_HOSTRDY; | |
2391 | } | |
2392 | #ifdef PCIE_INB_DW | |
2393 | if (dhdpcie_bus_get_pcie_inband_dw_supported(dhd->bus)) { | |
2394 | DHD_INFO(("Advertise Inband-DW Capability\n")); | |
2395 | data |= HOSTCAP_DS_INBAND_DW; | |
2396 | data |= HOSTCAP_DS_NO_OOB_DW; | |
2397 | dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_INB); | |
2398 | } else | |
2399 | #endif /* PCIE_INB_DW */ | |
2400 | #ifdef PCIE_OOB | |
2401 | if (dhdpcie_bus_get_pcie_oob_dw_supported(dhd->bus)) { | |
2402 | dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_OOB); | |
2403 | } else | |
2404 | #endif /* PCIE_OOB */ | |
2405 | { | |
2406 | /* Disable DS altogether */ | |
2407 | data |= HOSTCAP_DS_NO_OOB_DW; | |
2408 | dhdpcie_bus_enab_pcie_dw(dhd->bus, DEVICE_WAKE_NONE); | |
2409 | } | |
2410 | ||
2411 | if (dhdpcie_bus_get_pcie_idma_supported(dhd->bus)) { | |
2412 | ||
2413 | DHD_ERROR(("IDMA inited\n")); | |
2414 | data |= HOSTCAP_H2D_IDMA; | |
2415 | dhd->idma_inited = TRUE; | |
2416 | } | |
2417 | ||
2418 | if (dhdpcie_bus_get_pcie_ifrm_supported(dhd->bus)) { | |
2419 | DHD_ERROR(("IFRM Inited\n")); | |
2420 | data |= HOSTCAP_H2D_IFRM; | |
2421 | dhd->ifrm_inited = TRUE; | |
2422 | dhd->dma_h2d_ring_upd_support = FALSE; | |
2423 | dhd_prot_dma_indx_free(dhd); | |
2424 | } | |
2425 | ||
2426 | /* Indicate support for TX status metadata */ | |
2427 | data |= HOSTCAP_TXSTATUS_METADATA; | |
2428 | ||
2429 | /* Indicate support for extended trap data */ | |
2430 | data |= HOSTCAP_EXTENDED_TRAP_DATA; | |
2431 | ||
2432 | DHD_INFO(("%s:Active Ver:%d, Host Ver:%d, FW Ver:%d\n", | |
2433 | __FUNCTION__, | |
2434 | prot->active_ipc_version, prot->host_ipc_version, | |
2435 | prot->device_ipc_version)); | |
2436 | ||
2437 | dhd_bus_cmn_writeshared(dhd->bus, &data, sizeof(uint32), HOST_API_VERSION, 0); | |
2438 | dhd_bus_cmn_writeshared(dhd->bus, &prot->fw_trap_buf.pa, | |
2439 | sizeof(prot->fw_trap_buf.pa), DNGL_TO_HOST_TRAP_ADDR, 0); | |
2440 | } | |
2441 | #ifdef HOFFLOAD_MODULES | |
2442 | dhd_bus_cmn_writeshared(dhd->bus, &dhd->hmem.data_addr, | |
2443 | sizeof(dhd->hmem.data_addr), WRT_HOST_MODULE_ADDR, 0); | |
2444 | #endif | |
2445 | ||
2446 | #ifdef DHD_TIMESYNC | |
2447 | dhd_timesync_notify_ipc_rev(dhd->ts, prot->active_ipc_version); | |
2448 | #endif /* DHD_TIMESYNC */ | |
2449 | } | |
2450 | ||
2451 | /** | |
2452 | * dhd_prot_init - second stage of dhd_prot_attach. Now that the dongle has | |
2453 | * completed its initialization of the pcie_shared structure, we may now fetch | |
2454 | * the dongle's advertised features and adjust the protocol layer accordingly. | |
2455 | * | |
2456 | * dhd_prot_init() may be invoked again after a dhd_prot_reset(). | |
2457 | */ | |
2458 | int | |
2459 | dhd_prot_init(dhd_pub_t *dhd) | |
2460 | { | |
2461 | sh_addr_t base_addr; | |
2462 | dhd_prot_t *prot = dhd->prot; | |
2463 | int ret = 0; | |
2464 | ||
2465 | /** | |
2466 | * A user defined value can be assigned to global variable h2d_max_txpost via | |
2467 | * 1. DHD IOVAR h2d_max_txpost, before firmware download | |
2468 | * 2. module parameter h2d_max_txpost | |
2469 | * prot->h2d_max_txpost defaults to H2DRING_TXPOST_MAX_ITEM | |
2470 | * if the user has not set a value by one of the above methods. | |
2471 | */ | |
2472 | prot->h2d_max_txpost = (uint16)h2d_max_txpost; | |
2473 | ||
2474 | DHD_ERROR(("%s:%d: h2d_max_txpost = %d\n", __FUNCTION__, __LINE__, prot->h2d_max_txpost)); | |
2475 | ||
2476 | /* Read max rx packets supported by dongle */ | |
2477 | dhd_bus_cmn_readshared(dhd->bus, &prot->max_rxbufpost, MAX_HOST_RXBUFS, 0); | |
2478 | if (prot->max_rxbufpost == 0) { | |
2479 | /* This would happen if the dongle firmware is not */ | |
2480 | /* using the latest shared structure template */ | |
2481 | prot->max_rxbufpost = DEFAULT_RX_BUFFERS_TO_POST; | |
2482 | } | |
2483 | DHD_INFO(("%s:%d: MAX_RXBUFPOST = %d\n", __FUNCTION__, __LINE__, prot->max_rxbufpost)); | |
2484 | ||
2485 | /* Initialize. bzero() would blow away the dma pointers. */ | |
2486 | prot->max_eventbufpost = DHD_FLOWRING_MAX_EVENTBUF_POST; | |
2487 | prot->max_ioctlrespbufpost = DHD_FLOWRING_MAX_IOCTLRESPBUF_POST; | |
2488 | prot->max_infobufpost = DHD_H2D_INFORING_MAX_BUF_POST; | |
2489 | prot->max_tsbufpost = DHD_MAX_TSBUF_POST; | |
2490 | ||
2491 | prot->cur_ioctlresp_bufs_posted = 0; | |
2492 | prot->active_tx_count = 0; | |
2493 | prot->data_seq_no = 0; | |
2494 | prot->ioctl_seq_no = 0; | |
2495 | prot->rxbufpost = 0; | |
2496 | prot->cur_event_bufs_posted = 0; | |
2497 | prot->ioctl_state = 0; | |
2498 | prot->curr_ioctl_cmd = 0; | |
2499 | prot->cur_ts_bufs_posted = 0; | |
2500 | prot->infobufpost = 0; | |
2501 | ||
2502 | prot->dmaxfer.srcmem.va = NULL; | |
2503 | prot->dmaxfer.dstmem.va = NULL; | |
2504 | prot->dmaxfer.in_progress = FALSE; | |
2505 | ||
2506 | prot->metadata_dbg = FALSE; | |
2507 | prot->rx_metadata_offset = 0; | |
2508 | prot->tx_metadata_offset = 0; | |
2509 | prot->txp_threshold = TXP_FLUSH_MAX_ITEMS_FLUSH_CNT; | |
2510 | ||
2511 | /* To catch any rollover issues quickly, start with a high ioctl_trans_id */ | |
2512 | prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; | |
2513 | prot->ioctl_state = 0; | |
2514 | prot->ioctl_status = 0; | |
2515 | prot->ioctl_resplen = 0; | |
2516 | prot->ioctl_received = IOCTL_WAIT; | |
2517 | ||
2518 | /* Register the interrupt function upfront */ | |
2519 | /* remove corerev checks in data path */ | |
2520 | prot->mb_ring_fn = dhd_bus_get_mbintr_fn(dhd->bus); | |
2521 | ||
2522 | prot->mb_2_ring_fn = dhd_bus_get_mbintr_2_fn(dhd->bus); | |
2523 | ||
2524 | /* Initialize Common MsgBuf Rings */ | |
2525 | ||
2526 | prot->device_ipc_version = dhd->bus->api.fw_rev; | |
2527 | prot->host_ipc_version = PCIE_SHARED_VERSION; | |
2528 | ||
2529 | /* Init the host API version */ | |
2530 | dhd_set_host_cap(dhd); | |
2531 | ||
2532 | dhd_prot_ring_init(dhd, &prot->h2dring_ctrl_subn); | |
2533 | dhd_prot_ring_init(dhd, &prot->h2dring_rxp_subn); | |
2534 | dhd_prot_ring_init(dhd, &prot->d2hring_ctrl_cpln); | |
2535 | ||
2536 | /* Make it compatible with pre-rev7 firmware */ | |
2537 | if (prot->active_ipc_version < PCIE_SHARED_VERSION_7) { | |
2538 | prot->d2hring_tx_cpln.item_len = | |
2539 | D2HRING_TXCMPLT_ITEMSIZE_PREREV7; | |
2540 | prot->d2hring_rx_cpln.item_len = | |
2541 | D2HRING_RXCMPLT_ITEMSIZE_PREREV7; | |
2542 | } | |
2543 | dhd_prot_ring_init(dhd, &prot->d2hring_tx_cpln); | |
2544 | dhd_prot_ring_init(dhd, &prot->d2hring_rx_cpln); | |
2545 | ||
2546 | dhd_prot_d2h_sync_init(dhd); | |
2547 | ||
2548 | dhd_prot_h2d_sync_init(dhd); | |
2549 | ||
2550 | #ifdef PCIE_INB_DW | |
2551 | /* Set the initial DS state */ | |
2552 | if (INBAND_DW_ENAB(dhd->bus)) { | |
2553 | dhdpcie_bus_set_pcie_inband_dw_state(dhd->bus, | |
2554 | DW_DEVICE_DS_ACTIVE); | |
2555 | } | |
2556 | #endif /* PCIE_INB_DW */ | |
2557 | ||
2558 | /* init the scratch buffer */ | |
2559 | dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_scratch_buf.pa); | |
2560 | dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), | |
2561 | D2H_DMA_SCRATCH_BUF, 0); | |
2562 | dhd_bus_cmn_writeshared(dhd->bus, &prot->d2h_dma_scratch_buf.len, | |
2563 | sizeof(prot->d2h_dma_scratch_buf.len), D2H_DMA_SCRATCH_BUF_LEN, 0); | |
2564 | ||
2565 | /* If supported by the host, indicate the memory block | |
2566 | * for completion writes / submission reads to shared space | |
2567 | */ | |
2568 | if (dhd->dma_d2h_ring_upd_support) { | |
2569 | dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_wr_buf.pa); | |
2570 | dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), | |
2571 | D2H_DMA_INDX_WR_BUF, 0); | |
2572 | dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_rd_buf.pa); | |
2573 | dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), | |
2574 | H2D_DMA_INDX_RD_BUF, 0); | |
2575 | } | |
2576 | ||
2577 | if (dhd->dma_h2d_ring_upd_support || IDMA_ENAB(dhd)) { | |
2578 | dhd_base_addr_htolpa(&base_addr, prot->h2d_dma_indx_wr_buf.pa); | |
2579 | dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), | |
2580 | H2D_DMA_INDX_WR_BUF, 0); | |
2581 | dhd_base_addr_htolpa(&base_addr, prot->d2h_dma_indx_rd_buf.pa); | |
2582 | dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), | |
2583 | D2H_DMA_INDX_RD_BUF, 0); | |
2584 | ||
2585 | } | |
2586 | ||
2587 | /* Signal to the dongle that common ring init is complete */ | |
2588 | dhd_bus_hostready(dhd->bus); | |
2589 | ||
2590 | /* | |
2591 | * If the DMA-able buffers for flowring needs to come from a specific | |
2592 | * contiguous memory region, then setup prot->flowrings_dma_buf here. | |
2593 | * dhd_prot_flowrings_pool_attach() will carve out DMA-able buffers from | |
2594 | * this contiguous memory region, for each of the flowrings. | |
2595 | */ | |
2596 | ||
2597 | /* Pre-allocate pool of msgbuf_ring for flowrings */ | |
2598 | if (dhd_prot_flowrings_pool_attach(dhd) != BCME_OK) { | |
2599 | return BCME_ERROR; | |
2600 | } | |
2601 | ||
2602 | /* If IFRM is enabled, wait for FW to setup the DMA channel */ | |
2603 | if (IFRM_ENAB(dhd)) { | |
2604 | dhd_base_addr_htolpa(&base_addr, prot->h2d_ifrm_indx_wr_buf.pa); | |
2605 | dhd_bus_cmn_writeshared(dhd->bus, &base_addr, sizeof(base_addr), | |
2606 | H2D_IFRM_INDX_WR_BUF, 0); | |
2607 | } | |
2608 | ||
2609 | /* See if info rings could be created */ | |
2610 | if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) { | |
2611 | if ((ret = dhd_prot_init_info_rings(dhd)) != BCME_OK) { | |
2612 | /* For now log and proceed; further cleanup action may be necessary | |
2613 | * when we have more clarity. | |
2614 | */ | |
2615 | DHD_ERROR(("%s: Info rings couldn't be created: Err Code %d\n", | |
2616 | __FUNCTION__, ret)); | |
2617 | } | |
2618 | } | |
2619 | ||
2620 | /* Host should configure soft doorbells if needed ... here */ | |
2621 | ||
2622 | /* Post to dongle host configured soft doorbells */ | |
2623 | dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd); | |
2624 | ||
2625 | /* Post buffers for packet reception and ioctl/event responses */ | |
2626 | dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ | |
2627 | dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); | |
2628 | dhd_msgbuf_rxbuf_post_event_bufs(dhd); | |
2629 | ||
2630 | return BCME_OK; | |
2631 | } /* dhd_prot_init */ | |
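/*
 * Note the ordering above: the scratch buffer and the DMA'd index-block
 * addresses are all written to shared space before dhd_bus_hostready()
 * signals the dongle, so the dongle never reads a half-published config.
 * A minimal sketch of this publish-then-signal pattern; write_shared(),
 * ring_doorbell() and struct dev are illustrative stand-ins, not DHD APIs:
 */
#if 0	/* illustrative sketch only, not compiled */
static void publish_then_signal(struct dev *dev)
{
	write_shared(dev, SCRATCH_BUF_ADDR, dev->scratch_pa); /* 1. publish state */
	write_shared(dev, INDX_WR_BUF_ADDR, dev->indx_wr_pa);
	wmb();                          /* 2. order the writes before the signal */
	ring_doorbell(dev, HOSTREADY);  /* 3. only now let the device read them */
}
#endif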
2632 | ||
2633 | ||
2634 | /** | |
2635 | * dhd_prot_detach - PCIE FD protocol layer destructor. | |
2636 | * Unlinks and frees allocated protocol memory (including dhd_prot) | 
2637 | */ | |
2638 | void dhd_prot_detach(dhd_pub_t *dhd) | |
2639 | { | |
2640 | dhd_prot_t *prot = dhd->prot; | |
2641 | ||
2642 | /* Stop the protocol module */ | |
2643 | if (prot) { | |
2644 | ||
2645 | /* free up all DMA-able buffers allocated during prot attach/init */ | |
2646 | ||
2647 | dhd_dma_buf_free(dhd, &prot->d2h_dma_scratch_buf); | |
2648 | dhd_dma_buf_free(dhd, &prot->retbuf); | |
2649 | dhd_dma_buf_free(dhd, &prot->ioctbuf); | |
2650 | dhd_dma_buf_free(dhd, &prot->host_bus_throughput_buf); | |
2651 | dhd_dma_buf_free(dhd, &prot->hostts_req_buf); | |
2652 | dhd_dma_buf_free(dhd, &prot->fw_trap_buf); | |
2653 | ||
2654 | /* DMA-able buffers for DMAing H2D/D2H WR/RD indices */ | |
2655 | dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf); | |
2656 | dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_rd_buf); | |
2657 | dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_wr_buf); | |
2658 | dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf); | |
2659 | ||
2660 | dhd_dma_buf_free(dhd, &prot->h2d_ifrm_indx_wr_buf); | |
2661 | ||
2662 | /* Common MsgBuf Rings */ | |
2663 | dhd_prot_ring_detach(dhd, &prot->h2dring_ctrl_subn); | |
2664 | dhd_prot_ring_detach(dhd, &prot->h2dring_rxp_subn); | |
2665 | dhd_prot_ring_detach(dhd, &prot->d2hring_ctrl_cpln); | |
2666 | dhd_prot_ring_detach(dhd, &prot->d2hring_tx_cpln); | |
2667 | dhd_prot_ring_detach(dhd, &prot->d2hring_rx_cpln); | |
2668 | ||
2669 | /* Detach each DMA-able buffer and free the pool of msgbuf_ring_t */ | |
2670 | dhd_prot_flowrings_pool_detach(dhd); | |
2671 | ||
2672 | /* detach info rings */ | |
2673 | dhd_prot_detach_info_rings(dhd); | |
2674 | ||
2675 | /* If IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs use the pktid_map_handle_ioctl | 
2676 | * handler and PKT memory is allocated using alloc_ioctl_return_buffer(). Otherwise | 
2677 | * they are part of the pktid_ctrl_map handler and PKT memory is allocated using | 
2678 | * PKTGET_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTGET. | 
2679 | * Similarly, for freeing PKT buffers DHD_NATIVE_TO_PKTID_FINI is used, | 
2680 | * which calls PKTFREE_STATIC (if DHD_USE_STATIC_CTRLBUF is defined) or PKTFREE. | 
2681 | * Else, if IOCTLRESP_USE_CONSTMEM is defined, IOCTL PKTs are freed using | 
2682 | * DHD_NATIVE_TO_PKTID_FINI_IOCTL, which calls free_ioctl_return_buffer. | 
2683 | */ | 
2684 | DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_ctrl_map); | |
2685 | DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_rx_map); | |
2686 | DHD_NATIVE_TO_PKTID_FINI(dhd, prot->pktid_tx_map); | |
2687 | #ifdef IOCTLRESP_USE_CONSTMEM | |
2688 | DHD_NATIVE_TO_PKTID_FINI_IOCTL(dhd, prot->pktid_map_handle_ioctl); | |
2689 | #endif | |
2690 | ||
2691 | dhd_os_spin_lock_deinit(dhd->osh, prot->rx_lock); | |
2692 | ||
2693 | #if defined(DHD_LB_TXC) | 
2694 | if (prot->tx_compl_prod.buffer) | 
2695 | MFREE(dhd->osh, prot->tx_compl_prod.buffer, | 
2696 | sizeof(void*) * DHD_LB_WORKQ_SZ); | 
2697 | #endif /* DHD_LB_TXC */ | 
2698 | #if defined(DHD_LB_RXC) | 
2699 | if (prot->rx_compl_prod.buffer) | 
2700 | MFREE(dhd->osh, prot->rx_compl_prod.buffer, | 
2701 | sizeof(void*) * DHD_LB_WORKQ_SZ); | 
2702 | #endif /* DHD_LB_RXC */ | 
2703 | ||
2704 | /* Free the prot structure itself last: the LB workq buffers above live inside it */ | 
2705 | #ifndef CONFIG_DHD_USE_STATIC_BUF | 
2706 | MFREE(dhd->osh, dhd->prot, sizeof(dhd_prot_t)); | 
2707 | #endif /* CONFIG_DHD_USE_STATIC_BUF */ | 
2707 | ||
2708 | dhd->prot = NULL; | |
2709 | } | |
2710 | } /* dhd_prot_detach */ | |
2711 | ||
2712 | ||
2713 | /** | |
2714 | * dhd_prot_reset - Reset the protocol layer without freeing any objects. | |
2715 | * This may be invoked to soft reboot the dongle, without having to | |
2716 | * detach and attach the entire protocol layer. | |
2717 | * | |
2718 | * After dhd_prot_reset(), dhd_prot_init() may be invoked | |
2719 | * without going through a dhd_prot_attach() phase. | 
2720 | */ | |
2721 | void | |
2722 | dhd_prot_reset(dhd_pub_t *dhd) | |
2723 | { | |
2724 | struct dhd_prot *prot = dhd->prot; | |
2725 | ||
2726 | DHD_TRACE(("%s\n", __FUNCTION__)); | |
2727 | ||
2728 | if (prot == NULL) { | |
2729 | return; | |
2730 | } | |
2731 | ||
2732 | dhd_prot_flowrings_pool_reset(dhd); | |
2733 | ||
2734 | /* Reset Common MsgBuf Rings */ | |
2735 | dhd_prot_ring_reset(dhd, &prot->h2dring_ctrl_subn); | |
2736 | dhd_prot_ring_reset(dhd, &prot->h2dring_rxp_subn); | |
2737 | dhd_prot_ring_reset(dhd, &prot->d2hring_ctrl_cpln); | |
2738 | dhd_prot_ring_reset(dhd, &prot->d2hring_tx_cpln); | |
2739 | dhd_prot_ring_reset(dhd, &prot->d2hring_rx_cpln); | |
2740 | ||
2741 | /* Reset info rings */ | |
2742 | if (prot->h2dring_info_subn) { | |
2743 | dhd_prot_ring_reset(dhd, prot->h2dring_info_subn); | |
2744 | } | |
2745 | ||
2746 | if (prot->d2hring_info_cpln) { | |
2747 | dhd_prot_ring_reset(dhd, prot->d2hring_info_cpln); | |
2748 | } | |
2749 | ||
2750 | /* Reset all DMA-able buffers allocated during prot attach */ | |
2751 | dhd_dma_buf_reset(dhd, &prot->d2h_dma_scratch_buf); | |
2752 | dhd_dma_buf_reset(dhd, &prot->retbuf); | |
2753 | dhd_dma_buf_reset(dhd, &prot->ioctbuf); | |
2754 | dhd_dma_buf_reset(dhd, &prot->host_bus_throughput_buf); | |
2755 | dhd_dma_buf_reset(dhd, &prot->hostts_req_buf); | |
2756 | dhd_dma_buf_reset(dhd, &prot->fw_trap_buf); | |
2757 | ||
2758 | dhd_dma_buf_reset(dhd, &prot->h2d_ifrm_indx_wr_buf); | |
2759 | ||
2760 | /* Reset all DMA-able buffers for DMAing H2D/D2H WR/RD indices */ | |
2761 | dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_rd_buf); | |
2762 | dhd_dma_buf_reset(dhd, &prot->h2d_dma_indx_wr_buf); | |
2763 | dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_rd_buf); | |
2764 | dhd_dma_buf_reset(dhd, &prot->d2h_dma_indx_wr_buf); | |
2765 | ||
2766 | ||
2767 | prot->rx_metadata_offset = 0; | |
2768 | prot->tx_metadata_offset = 0; | |
2769 | ||
2770 | prot->rxbufpost = 0; | |
2771 | prot->cur_event_bufs_posted = 0; | |
2772 | prot->cur_ioctlresp_bufs_posted = 0; | |
2773 | ||
2774 | prot->active_tx_count = 0; | |
2775 | prot->data_seq_no = 0; | |
2776 | prot->ioctl_seq_no = 0; | |
2777 | prot->ioctl_state = 0; | |
2778 | prot->curr_ioctl_cmd = 0; | |
2779 | prot->ioctl_received = IOCTL_WAIT; | |
2780 | /* To catch any rollover issues fast, starting with higher ioctl_trans_id */ | |
2781 | prot->ioctl_trans_id = MAXBITVAL(NBITS(prot->ioctl_trans_id)) - BUFFER_BEFORE_ROLLOVER; | |
2782 | ||
2783 | /* dhd_flow_rings_init is called from dhd_bus_start, | 
2784 | * so flowrings must be deleted when the bus is stopped | 
2785 | */ | |
2786 | if (dhd->flow_rings_inited) { | |
2787 | dhd_flow_rings_deinit(dhd); | |
2788 | } | |
2789 | ||
2790 | /* Reset PKTID map */ | |
2791 | DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_ctrl_map); | |
2792 | DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_rx_map); | |
2793 | DHD_NATIVE_TO_PKTID_RESET(dhd, prot->pktid_tx_map); | |
2794 | #ifdef IOCTLRESP_USE_CONSTMEM | |
2795 | DHD_NATIVE_TO_PKTID_RESET_IOCTL(dhd, prot->pktid_map_handle_ioctl); | |
2796 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
2797 | } /* dhd_prot_reset */ | |
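/*
 * dhd_prot_reset() pairs with dhd_prot_init() for a soft reboot of the
 * dongle: state is rewound in place, nothing is freed or re-attached.
 * A hedged usage sketch; dongle_restart() is a hypothetical stand-in for
 * whatever bus-level firmware reload the caller performs in between:
 */
#if 0	/* illustrative sketch only, not compiled */
	dhd_prot_reset(dhd);          /* rewind rings, reclaim pktids, keep memory */
	dongle_restart(dhd);          /* hypothetical: reload/restart firmware */
	if (dhd_prot_init(dhd) != BCME_OK)
		DHD_ERROR(("re-init after soft reboot failed\n"));
#endif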
2798 | ||
2799 | #if defined(DHD_LB_RXP) | |
2800 | #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) dhd_lb_dispatch_rx_process(dhdp) | |
2801 | #else /* !DHD_LB_RXP */ | |
2802 | #define DHD_LB_DISPATCH_RX_PROCESS(dhdp) do { /* noop */ } while (0) | |
2803 | #endif /* !DHD_LB_RXP */ | |
2804 | ||
2805 | #if defined(DHD_LB_RXC) | |
2806 | #define DHD_LB_DISPATCH_RX_COMPL(dhdp) dhd_lb_dispatch_rx_compl(dhdp) | |
2807 | #else /* !DHD_LB_RXC */ | |
2808 | #define DHD_LB_DISPATCH_RX_COMPL(dhdp) do { /* noop */ } while (0) | |
2809 | #endif /* !DHD_LB_RXC */ | |
2810 | ||
2811 | #if defined(DHD_LB_TXC) | |
2812 | #define DHD_LB_DISPATCH_TX_COMPL(dhdp) dhd_lb_dispatch_tx_compl(dhdp) | |
2813 | #else /* !DHD_LB_TXC */ | |
2814 | #define DHD_LB_DISPATCH_TX_COMPL(dhdp) do { /* noop */ } while (0) | |
2815 | #endif /* !DHD_LB_TXC */ | |
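/*
 * The three DHD_LB_DISPATCH_* pairs above compile down to a no-op when the
 * corresponding feature is off, so the fast path carries no runtime "is LB
 * enabled?" branch. The same pattern in isolation (FEATURE_X and do_work()
 * are placeholders, not DHD names):
 */
#if 0	/* illustrative sketch only, not compiled */
#if defined(FEATURE_X)
#define DISPATCH_WORK(ctx)	do_work(ctx)
#else
#define DISPATCH_WORK(ctx)	do { /* noop */ } while (0)
#endif
/* The do { } while (0) form keeps "DISPATCH_WORK(c);" a single statement,
 * so it stays legal as the body of an un-braced if/else.
 */
#endif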
2816 | ||
2817 | ||
2818 | #if defined(DHD_LB) | |
2819 | /* DHD load balancing: deferral of work to another online CPU */ | |
2820 | /* DHD_LB_TXC DHD_LB_RXC DHD_LB_RXP dispatchers, in dhd_linux.c */ | |
2821 | extern void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp); | |
2822 | extern void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp); | |
2823 | extern void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp); | |
2824 | extern void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx); | |
2825 | ||
2826 | #if defined(DHD_LB_RXP) | |
2827 | /** | |
2828 | * dhd_lb_dispatch_rx_process - load balance by dispatching Rx processing work | 
2829 | * to other CPU cores | |
2830 | */ | |
2831 | static INLINE void | |
2832 | dhd_lb_dispatch_rx_process(dhd_pub_t *dhdp) | |
2833 | { | |
2834 | dhd_lb_rx_napi_dispatch(dhdp); /* dispatch rx_process_napi */ | |
2835 | } | |
2836 | #endif /* DHD_LB_RXP */ | |
2837 | ||
2838 | #if defined(DHD_LB_TXC) | |
2839 | /** | |
2840 | * dhd_lb_dispatch_tx_compl - load balance by dispatching Tx completion work | 
2841 | * to other CPU cores | |
2842 | */ | |
2843 | static INLINE void | |
2844 | dhd_lb_dispatch_tx_compl(dhd_pub_t *dhdp, uint16 ring_idx) | |
2845 | { | |
2846 | bcm_workq_prod_sync(&dhdp->prot->tx_compl_prod); /* flush WR index */ | |
2847 | dhd_lb_tx_compl_dispatch(dhdp); /* dispatch tx_compl_tasklet */ | |
2848 | } | |
2849 | ||
2850 | /** | |
2851 | * DHD load balanced tx completion tasklet handler that performs the | 
2852 | * freeing of packets on the selected CPU. Packet pointers are delivered to | |
2853 | * this tasklet via the tx complete workq. | |
2854 | */ | |
2855 | void | |
2856 | dhd_lb_tx_compl_handler(unsigned long data) | |
2857 | { | |
2858 | int elem_ix; | |
2859 | void *pkt, **elem; | |
2860 | dmaaddr_t pa; | |
2861 | uint32 pa_len; | |
2862 | dhd_pub_t *dhd = (dhd_pub_t *)data; | |
2863 | dhd_prot_t *prot = dhd->prot; | |
2864 | bcm_workq_t *workq = &prot->tx_compl_cons; | |
2865 | uint32 count = 0; | |
2866 | ||
2867 | int curr_cpu; | |
2868 | curr_cpu = get_cpu(); | |
2869 | put_cpu(); | |
2870 | ||
2871 | DHD_LB_STATS_TXC_PERCPU_CNT_INCR(dhd); | |
2872 | ||
2873 | while (1) { | |
2874 | elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); | |
2875 | ||
2876 | if (elem_ix == BCM_RING_EMPTY) { | |
2877 | break; | |
2878 | } | |
2879 | ||
2880 | elem = WORKQ_ELEMENT(void *, workq, elem_ix); | |
2881 | pkt = *elem; | |
2882 | ||
2883 | DHD_INFO(("%s: tx_compl_cons pkt<%p>\n", __FUNCTION__, pkt)); | |
2884 | ||
2885 | OSL_PREFETCH(PKTTAG(pkt)); | |
2886 | OSL_PREFETCH(pkt); | |
2887 | ||
2888 | pa = DHD_PKTTAG_PA((dhd_pkttag_fr_t *)PKTTAG(pkt)); | |
2889 | pa_len = DHD_PKTTAG_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt)); | |
2890 | ||
2891 | DMA_UNMAP(dhd->osh, pa, pa_len, DMA_RX, 0, 0); | |
2892 | #if defined(BCMPCIE) | |
2893 | dhd_txcomplete(dhd, pkt, true); | |
2894 | #endif | |
2895 | ||
2896 | PKTFREE(dhd->osh, pkt, TRUE); | |
2897 | count++; | |
2898 | } | |
2899 | ||
2900 | /* smp_wmb(); */ | |
2901 | bcm_workq_cons_sync(workq); | |
2902 | DHD_LB_STATS_UPDATE_TXC_HISTO(dhd, count); | |
2903 | } | |
2904 | #endif /* DHD_LB_TXC */ | |
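/*
 * dhd_lb_tx_compl_handler() above drains a single-producer/single-consumer
 * workq: the producer publishes its write index via bcm_workq_prod_sync(),
 * the consumer pops until BCM_RING_EMPTY and then publishes its read index
 * via bcm_workq_cons_sync(). The generic consume loop, with ring_cons(),
 * ring_elem() and process() as hypothetical stand-ins for the bcm_ring API:
 */
#if 0	/* illustrative sketch only, not compiled */
	for (;;) {
		int ix = ring_cons(ring, RING_SZ);  /* claim next filled slot */
		if (ix == RING_EMPTY)
			break;
		process(ring_elem(ring, ix));       /* consume the element */
	}
	ring_cons_sync(ring);  /* publish the read index back to the producer */
#endif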
2905 | ||
2906 | #if defined(DHD_LB_RXC) | |
2907 | ||
2908 | /** | |
2909 | * dhd_lb_dispatch_rx_compl - load balance by dispatching rx completion work | 
2910 | * to other CPU cores | |
2911 | */ | |
2912 | static INLINE void | |
2913 | dhd_lb_dispatch_rx_compl(dhd_pub_t *dhdp) | |
2914 | { | |
2915 | dhd_prot_t *prot = dhdp->prot; | |
2916 | /* Schedule the tasklet only if we have to */ | 
2917 | if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) { | |
2918 | /* flush WR index */ | |
2919 | bcm_workq_prod_sync(&dhdp->prot->rx_compl_prod); | |
2920 | dhd_lb_rx_compl_dispatch(dhdp); /* dispatch rx_compl_tasklet */ | |
2921 | } | |
2922 | } | |
2923 | ||
2924 | void | |
2925 | dhd_lb_rx_compl_handler(unsigned long data) | |
2926 | { | |
2927 | dhd_pub_t *dhd = (dhd_pub_t *)data; | |
2928 | bcm_workq_t *workq = &dhd->prot->rx_compl_cons; | |
2929 | ||
2930 | DHD_LB_STATS_RXC_PERCPU_CNT_INCR(dhd); | |
2931 | ||
2932 | dhd_msgbuf_rxbuf_post(dhd, TRUE); /* re-use pktids */ | |
2933 | bcm_workq_cons_sync(workq); | |
2934 | } | |
2935 | #endif /* DHD_LB_RXC */ | |
2936 | #endif /* DHD_LB */ | |
2937 | ||
2938 | void | |
2939 | dhd_prot_rx_dataoffset(dhd_pub_t *dhd, uint32 rx_offset) | |
2940 | { | |
2941 | dhd_prot_t *prot = dhd->prot; | |
2942 | prot->rx_dataoffset = rx_offset; | |
2943 | } | |
2944 | ||
2945 | static int | |
2946 | dhd_check_create_info_rings(dhd_pub_t *dhd) | |
2947 | { | |
2948 | dhd_prot_t *prot = dhd->prot; | |
2949 | int ret = BCME_ERROR; | |
2950 | uint16 ringid = dhd->bus->max_tx_flowrings + BCMPCIE_COMMON_MSGRINGS; | |
2951 | ||
2952 | if (prot->h2dring_info_subn && prot->d2hring_info_cpln) { | |
2953 | return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */ | 
2954 | } | |
2955 | ||
2956 | if (prot->h2dring_info_subn == NULL) { | |
2957 | prot->h2dring_info_subn = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); | |
2958 | ||
2959 | if (prot->h2dring_info_subn == NULL) { | |
2960 | DHD_ERROR(("%s: couldn't alloc memory for h2dring_info_subn\n", | |
2961 | __FUNCTION__)); | |
2962 | return BCME_NOMEM; | |
2963 | } | |
2964 | ||
2965 | DHD_INFO(("%s: about to create debug submit ring\n", __FUNCTION__)); | |
2966 | ret = dhd_prot_ring_attach(dhd, prot->h2dring_info_subn, "h2dinfo", | |
2967 | H2DRING_DYNAMIC_INFO_MAX_ITEM, H2DRING_INFO_BUFPOST_ITEMSIZE, | |
2968 | ringid); | |
2969 | if (ret != BCME_OK) { | |
2970 | DHD_ERROR(("%s: couldn't alloc resources for dbg submit ring\n", | |
2971 | __FUNCTION__)); | |
2972 | goto err; | |
2973 | } | |
2974 | } | |
2975 | ||
2976 | if (prot->d2hring_info_cpln == NULL) { | |
2977 | prot->d2hring_info_cpln = MALLOCZ(prot->osh, sizeof(msgbuf_ring_t)); | |
2978 | ||
2979 | if (prot->d2hring_info_cpln == NULL) { | |
2980 | DHD_ERROR(("%s: couldn't alloc memory for d2hring_info_cpln\n", | 
2981 | __FUNCTION__)); | |
2982 | return BCME_NOMEM; | |
2983 | } | |
2984 | ||
2985 | /* create the debug info completion ring next to debug info submit ring | |
2986 | * ringid = id next to debug info submit ring | |
2987 | */ | |
2988 | ringid = ringid + 1; | |
2989 | ||
2990 | DHD_INFO(("%s: about to create debug cpl ring\n", __FUNCTION__)); | |
2991 | ret = dhd_prot_ring_attach(dhd, prot->d2hring_info_cpln, "d2hinfo", | |
2992 | D2HRING_DYNAMIC_INFO_MAX_ITEM, D2HRING_INFO_BUFCMPLT_ITEMSIZE, | |
2993 | ringid); | |
2994 | if (ret != BCME_OK) { | |
2995 | DHD_ERROR(("%s: couldn't alloc resources for dbg cpl ring\n", | |
2996 | __FUNCTION__)); | |
2997 | dhd_prot_ring_detach(dhd, prot->h2dring_info_subn); | |
2998 | goto err; | |
2999 | } | |
3000 | } | |
3001 | ||
3002 | return ret; | |
3003 | err: | |
3004 | MFREE(prot->osh, prot->h2dring_info_subn, sizeof(msgbuf_ring_t)); | |
3005 | prot->h2dring_info_subn = NULL; | |
3006 | ||
3007 | if (prot->d2hring_info_cpln) { | |
3008 | MFREE(prot->osh, prot->d2hring_info_cpln, sizeof(msgbuf_ring_t)); | |
3009 | prot->d2hring_info_cpln = NULL; | |
3010 | } | |
3011 | return ret; | |
3012 | } /* dhd_check_create_info_rings */ | |
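/*
 * Ring ids for the dynamic info rings are carved out directly after the tx
 * flowrings: submit ring first, completion ring at the next id. A worked
 * example with assumed, illustrative values (not read from any chip):
 *
 *   BCMPCIE_COMMON_MSGRINGS = 5   (common H2D + D2H rings)
 *   max_tx_flowrings        = 40
 *   h2dring_info_subn id    = 40 + 5 = 45
 *   d2hring_info_cpln id    = 45 + 1 = 46
 */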
3013 | ||
3014 | int | |
3015 | dhd_prot_init_info_rings(dhd_pub_t *dhd) | |
3016 | { | |
3017 | dhd_prot_t *prot = dhd->prot; | |
3018 | int ret = BCME_OK; | |
3019 | ||
3020 | if ((ret = dhd_check_create_info_rings(dhd)) != BCME_OK) { | |
3021 | DHD_ERROR(("%s: info rings could not be created!\n", | 
3022 | __FUNCTION__)); | |
3023 | return ret; | |
3024 | } | |
3025 | ||
3026 | if ((prot->d2hring_info_cpln->inited) || (prot->d2hring_info_cpln->create_pending)) { | |
3027 | DHD_INFO(("Info completion ring already created!\n")); | 
3028 | return ret; | |
3029 | } | |
3030 | ||
3031 | DHD_TRACE(("trying to send create d2h info ring: id %d\n", prot->d2hring_info_cpln->idx)); | |
3032 | ret = dhd_send_d2h_ringcreate(dhd, prot->d2hring_info_cpln); | |
3033 | if (ret != BCME_OK) | |
3034 | return ret; | |
3035 | ||
3036 | prot->d2hring_info_cpln->seqnum = D2H_EPOCH_INIT_VAL; | |
3037 | ||
3038 | DHD_TRACE(("trying to send create h2d info ring id %d\n", prot->h2dring_info_subn->idx)); | |
3039 | prot->h2dring_info_subn->n_completion_ids = 1; | |
3040 | prot->h2dring_info_subn->compeltion_ring_ids[0] = prot->d2hring_info_cpln->idx; | |
3041 | ||
3042 | ret = dhd_send_h2d_ringcreate(dhd, prot->h2dring_info_subn); | |
3043 | ||
3044 | /* Note that there is no way to delete a d2h or h2d ring once it is created, so in | 
3045 | * case either fails we cannot clean up the ring that was created while the other failed | 
3046 | */ | |
3047 | return ret; | |
3048 | } /* dhd_prot_init_info_rings */ | |
3049 | ||
3050 | static void | |
3051 | dhd_prot_detach_info_rings(dhd_pub_t *dhd) | |
3052 | { | |
3053 | if (dhd->prot->h2dring_info_subn) { | |
3054 | dhd_prot_ring_detach(dhd, dhd->prot->h2dring_info_subn); | |
3055 | MFREE(dhd->prot->osh, dhd->prot->h2dring_info_subn, sizeof(msgbuf_ring_t)); | |
3056 | dhd->prot->h2dring_info_subn = NULL; | |
3057 | } | |
3058 | if (dhd->prot->d2hring_info_cpln) { | |
3059 | dhd_prot_ring_detach(dhd, dhd->prot->d2hring_info_cpln); | |
3060 | MFREE(dhd->prot->osh, dhd->prot->d2hring_info_cpln, sizeof(msgbuf_ring_t)); | |
3061 | dhd->prot->d2hring_info_cpln = NULL; | |
3062 | } | |
3063 | } | |
3064 | ||
3065 | /** | |
3066 | * Initialize protocol: sync w/dongle state. | |
3067 | * Sets dongle media info (iswl, drv_version, mac address). | |
3068 | */ | |
3069 | int dhd_sync_with_dongle(dhd_pub_t *dhd) | |
3070 | { | |
3071 | int ret = 0; | |
3072 | wlc_rev_info_t revinfo; | |
3073 | ||
3074 | ||
3075 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
3076 | ||
3077 | dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT); | |
3078 | ||
3079 | /* Post ts buffer after shim layer is attached */ | |
3080 | ret = dhd_msgbuf_rxbuf_post_ts_bufs(dhd); | |
3081 | ||
3082 | ||
3083 | #ifdef DHD_FW_COREDUMP | |
3084 | /* Check the memdump capability */ | |
3085 | dhd_get_memdump_info(dhd); | |
3086 | #endif /* DHD_FW_COREDUMP */ | |
3087 | #ifdef BCMASSERT_LOG | |
3088 | dhd_get_assert_info(dhd); | |
3089 | #endif /* BCMASSERT_LOG */ | |
3090 | ||
3091 | /* Get the device rev info */ | |
3092 | memset(&revinfo, 0, sizeof(revinfo)); | |
3093 | ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_REVINFO, &revinfo, sizeof(revinfo), FALSE, 0); | |
3094 | if (ret < 0) { | |
3095 | DHD_ERROR(("%s: GET revinfo FAILED\n", __FUNCTION__)); | |
3096 | goto done; | |
3097 | } | |
3098 | DHD_ERROR(("%s: GET_REVINFO device 0x%x, vendor 0x%x, chipnum 0x%x\n", __FUNCTION__, | |
3099 | revinfo.deviceid, revinfo.vendorid, revinfo.chipnum)); | |
3100 | ||
3101 | DHD_SSSR_DUMP_INIT(dhd); | |
3102 | ||
3103 | dhd_process_cid_mac(dhd, TRUE); | |
3104 | ret = dhd_preinit_ioctls(dhd); | |
3105 | dhd_process_cid_mac(dhd, FALSE); | |
3106 | ||
3107 | /* Always assumes wl for now */ | |
3108 | dhd->iswl = TRUE; | |
3109 | done: | |
3110 | return ret; | |
3111 | } /* dhd_sync_with_dongle */ | |
3112 | ||
3113 | ||
3114 | #define DHD_DBG_SHOW_METADATA 0 | |
3115 | ||
3116 | #if DHD_DBG_SHOW_METADATA | |
3117 | static void BCMFASTPATH | |
3118 | dhd_prot_print_metadata(dhd_pub_t *dhd, void *ptr, int len) | |
3119 | { | |
3120 | uint8 tlv_t; | |
3121 | uint8 tlv_l; | |
3122 | uint8 *tlv_v = (uint8 *)ptr; | |
3123 | ||
3124 | if (len <= BCMPCIE_D2H_METADATA_HDRLEN) | |
3125 | return; | |
3126 | ||
3127 | len -= BCMPCIE_D2H_METADATA_HDRLEN; | |
3128 | tlv_v += BCMPCIE_D2H_METADATA_HDRLEN; | |
3129 | ||
3130 | while (len > TLV_HDR_LEN) { | |
3131 | tlv_t = tlv_v[TLV_TAG_OFF]; | |
3132 | tlv_l = tlv_v[TLV_LEN_OFF]; | |
3133 | ||
3134 | len -= TLV_HDR_LEN; | |
3135 | tlv_v += TLV_HDR_LEN; | |
3136 | if (len < tlv_l) | |
3137 | break; | |
3138 | if ((tlv_t == 0) || (tlv_t == WLFC_CTL_TYPE_FILLER)) | |
3139 | break; | |
3140 | ||
3141 | switch (tlv_t) { | |
3142 | case WLFC_CTL_TYPE_TXSTATUS: { | |
3143 | uint32 txs; | |
3144 | memcpy(&txs, tlv_v, sizeof(uint32)); | |
3145 | if (tlv_l < (sizeof(wl_txstatus_additional_info_t) + sizeof(uint32))) { | |
3146 | printf("METADATA TX_STATUS: %08x\n", txs); | |
3147 | } else { | |
3148 | wl_txstatus_additional_info_t tx_add_info; | |
3149 | memcpy(&tx_add_info, tlv_v + sizeof(uint32), | |
3150 | sizeof(wl_txstatus_additional_info_t)); | |
3151 | printf("METADATA TX_STATUS: %08x WLFCTS[%04x | %08x - %08x - %08x]" | |
3152 | " rate = %08x tries = %d - %d\n", txs, | |
3153 | tx_add_info.seq, tx_add_info.entry_ts, | |
3154 | tx_add_info.enq_ts, tx_add_info.last_ts, | |
3155 | tx_add_info.rspec, tx_add_info.rts_cnt, | |
3156 | tx_add_info.tx_cnt); | |
3157 | } | |
3158 | } break; | |
3159 | ||
3160 | case WLFC_CTL_TYPE_RSSI: { | |
3161 | if (tlv_l == 1) | |
3162 | printf("METADATA RX_RSSI: rssi = %d\n", *tlv_v); | |
3163 | else | |
3164 | printf("METADATA RX_RSSI[%04x]: rssi = %d snr = %d\n", | |
3165 | (*(tlv_v + 3) << 8) | *(tlv_v + 2), | |
3166 | (int8)(*tlv_v), *(tlv_v + 1)); | |
3167 | } break; | |
3168 | ||
3169 | case WLFC_CTL_TYPE_FIFO_CREDITBACK: | |
3170 | bcm_print_bytes("METADATA FIFO_CREDITBACK", tlv_v, tlv_l); | |
3171 | break; | |
3172 | ||
3173 | case WLFC_CTL_TYPE_TX_ENTRY_STAMP: | |
3174 | bcm_print_bytes("METADATA TX_ENTRY", tlv_v, tlv_l); | |
3175 | break; | |
3176 | ||
3177 | case WLFC_CTL_TYPE_RX_STAMP: { | |
3178 | struct { | |
3179 | uint32 rspec; | |
3180 | uint32 bus_time; | |
3181 | uint32 wlan_time; | |
3182 | } rx_tmstamp; | |
3183 | memcpy(&rx_tmstamp, tlv_v, sizeof(rx_tmstamp)); | |
3184 | printf("METADATA RX TIMESTAMP: WLFCTS[%08x - %08x] rate = %08x\n", | 
3185 | rx_tmstamp.wlan_time, rx_tmstamp.bus_time, rx_tmstamp.rspec); | |
3186 | } break; | |
3187 | ||
3188 | case WLFC_CTL_TYPE_TRANS_ID: | |
3189 | bcm_print_bytes("METADATA TRANS_ID", tlv_v, tlv_l); | |
3190 | break; | |
3191 | ||
3192 | case WLFC_CTL_TYPE_COMP_TXSTATUS: | |
3193 | bcm_print_bytes("METADATA COMP_TXSTATUS", tlv_v, tlv_l); | |
3194 | break; | |
3195 | ||
3196 | default: | |
3197 | bcm_print_bytes("METADATA UNKNOWN", tlv_v, tlv_l); | |
3198 | break; | |
3199 | } | |
3200 | ||
3201 | len -= tlv_l; | |
3202 | tlv_v += tlv_l; | |
3203 | } | |
3204 | } | |
3205 | #endif /* DHD_DBG_SHOW_METADATA */ | |
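/*
 * dhd_prot_print_metadata() above is a plain TLV walk: 1-byte tag, 1-byte
 * length, then 'length' value bytes, repeated until the buffer runs out.
 * A self-contained sketch of the same parse loop in portable C;
 * handle_tlv() is a hypothetical consumer, not a DHD function:
 */
#if 0	/* illustrative sketch only, not compiled */
#include <stdint.h>
#include <stddef.h>

extern void handle_tlv(uint8_t tag, const uint8_t *val, uint8_t vlen);

static void tlv_walk(const uint8_t *buf, size_t len)
{
	while (len > 2) {            /* need at least tag + length bytes */
		uint8_t tag  = buf[0];
		uint8_t vlen = buf[1];
		buf += 2;
		len -= 2;
		if (vlen > len)      /* truncated record: stop parsing */
			break;
		handle_tlv(tag, buf, vlen);
		buf += vlen;
		len -= vlen;
	}
}
#endif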
3206 | ||
3207 | static INLINE void BCMFASTPATH | |
3208 | dhd_prot_packet_free(dhd_pub_t *dhd, void *pkt, uint8 pkttype, bool send) | |
3209 | { | |
3210 | if (pkt) { | |
3211 | if (pkttype == PKTTYPE_IOCTL_RX || | |
3212 | pkttype == PKTTYPE_EVENT_RX || | |
3213 | pkttype == PKTTYPE_INFO_RX || | |
3214 | pkttype == PKTTYPE_TSBUF_RX) { | |
3215 | #ifdef DHD_USE_STATIC_CTRLBUF | |
3216 | PKTFREE_STATIC(dhd->osh, pkt, send); | |
3217 | #else | |
3218 | PKTFREE(dhd->osh, pkt, send); | |
3219 | #endif /* DHD_USE_STATIC_CTRLBUF */ | |
3220 | } else { | |
3221 | PKTFREE(dhd->osh, pkt, send); | |
3222 | } | |
3223 | } | |
3224 | } | |
3225 | ||
3226 | /* dhd_prot_packet_get should be called only for items having pktid_ctrl_map handle */ | |
3227 | static INLINE void * BCMFASTPATH | |
3228 | dhd_prot_packet_get(dhd_pub_t *dhd, uint32 pktid, uint8 pkttype, bool free_pktid) | |
3229 | { | |
3230 | void *PKTBUF; | |
3231 | dmaaddr_t pa; | |
3232 | uint32 len; | |
3233 | void *dmah; | |
3234 | void *secdma; | |
3235 | ||
3236 | #ifdef DHD_PCIE_PKTID | |
3237 | if (free_pktid) { | |
3238 | PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, | |
3239 | pktid, pa, len, dmah, secdma, pkttype); | |
3240 | } else { | |
3241 | PKTBUF = DHD_PKTID_TO_NATIVE_RSV(dhd, dhd->prot->pktid_ctrl_map, | |
3242 | pktid, pa, len, dmah, secdma, pkttype); | |
3243 | } | |
3244 | #else | |
3245 | PKTBUF = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_ctrl_map, pktid, pa, | |
3246 | len, dmah, secdma, pkttype); | |
3247 | #endif /* DHD_PCIE_PKTID */ | |
3248 | if (PKTBUF) { | |
3249 | { | |
3250 | if (SECURE_DMA_ENAB(dhd->osh)) | |
3251 | SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah, | |
3252 | secdma, 0); | |
3253 | else | |
3254 | DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); | |
3255 | } | |
3256 | } | |
3257 | ||
3258 | return PKTBUF; | |
3259 | } | |
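/*
 * The pktid maps used above translate a native packet pointer (plus its DMA
 * mapping) into a small 32-bit id that fits in a message's request_id field,
 * and back on completion. A minimal table-based sketch of the idea; the real
 * DHD_PKTID_* machinery additionally stores pa/len/dmah/secdma, audits ids,
 * and type-checks packets:
 */
#if 0	/* illustrative sketch only, not compiled */
#define MAX_IDS 1024
static void *locker[MAX_IDS];     /* id -> native packet pointer */

static uint32 pkt_to_id(void *pkt)
{
	uint32 id;
	for (id = 1; id < MAX_IDS; id++) {   /* id 0 reserved as "invalid" */
		if (locker[id] == NULL) {
			locker[id] = pkt;
			return id;
		}
	}
	return 0;                            /* pool depleted */
}

static void *id_to_pkt(uint32 id)
{
	void *pkt = locker[id];
	locker[id] = NULL;                   /* lookup releases the id */
	return pkt;
}
#endif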
3260 | ||
3261 | #ifdef IOCTLRESP_USE_CONSTMEM | |
3262 | static INLINE void BCMFASTPATH | |
3263 | dhd_prot_ioctl_ret_buffer_get(dhd_pub_t *dhd, uint32 pktid, dhd_dma_buf_t *retbuf) | |
3264 | { | |
3265 | memset(retbuf, 0, sizeof(dhd_dma_buf_t)); | |
3266 | retbuf->va = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, | |
3267 | retbuf->pa, retbuf->len, retbuf->dmah, retbuf->secdma, PKTTYPE_IOCTL_RX); | |
3268 | ||
3269 | return; | |
3270 | } | |
3271 | #endif | |
3272 | ||
3273 | #ifdef PCIE_INB_DW | |
3274 | static int | |
3275 | dhd_prot_inc_hostactive_devwake_assert(dhd_bus_t *bus) | |
3276 | { | |
3277 | unsigned long flags = 0; | |
3278 | ||
3279 | if (INBAND_DW_ENAB(bus)) { | |
3280 | DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); | |
3281 | bus->host_active_cnt++; | |
3282 | DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); | |
3283 | if (dhd_bus_set_device_wake(bus, TRUE) != BCME_OK) { | |
3284 | DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); | |
3285 | bus->host_active_cnt--; | |
3286 | dhd_bus_inb_ack_pending_ds_req(bus); | |
3287 | DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); | |
3288 | return BCME_ERROR; | |
3289 | } | |
3290 | } | |
3291 | ||
3292 | return BCME_OK; | |
3293 | } | |
3294 | ||
3295 | static void | |
3296 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd_bus_t *bus) | |
3297 | { | |
3298 | unsigned long flags = 0; | |
3299 | if (INBAND_DW_ENAB(bus)) { | |
3300 | DHD_BUS_INB_DW_LOCK(bus->inb_lock, flags); | |
3301 | bus->host_active_cnt--; | |
3302 | dhd_bus_inb_ack_pending_ds_req(bus); | |
3303 | DHD_BUS_INB_DW_UNLOCK(bus->inb_lock, flags); | |
3304 | } | |
3305 | } | |
3306 | #endif /* PCIE_INB_DW */ | |
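/*
 * The two helpers above form an acquire/release pair around ring accesses
 * when in-band device wake is enabled: bump host_active_cnt and assert
 * device wake before touching a ring, then drop the count and ack any
 * pending deep-sleep request afterwards. This is exactly how the posting
 * routines below use them:
 */
#if 0	/* usage sketch, mirrors the callers below */
	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
		return BCME_ERROR;   /* could not keep the device awake */

	/* ... submit or reap ring work items here ... */

	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
#endif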
3307 | ||
3308 | static void BCMFASTPATH | |
3309 | dhd_msgbuf_rxbuf_post(dhd_pub_t *dhd, bool use_rsv_pktid) | |
3310 | { | |
3311 | dhd_prot_t *prot = dhd->prot; | |
3312 | int16 fillbufs; | |
3313 | uint16 cnt = 256; | |
3314 | int retcount = 0; | |
3315 | ||
3316 | fillbufs = prot->max_rxbufpost - prot->rxbufpost; | |
3317 | while (fillbufs >= RX_BUF_BURST) { | |
3318 | cnt--; | |
3319 | if (cnt == 0) { | |
3320 | /* find a better way to reschedule rx buf post if space not available */ | |
3321 | DHD_ERROR(("h2d rx post ring not available to post host buffers \n")); | |
3322 | DHD_ERROR(("Current posted host buf count %d \n", prot->rxbufpost)); | |
3323 | break; | |
3324 | } | |
3325 | ||
3326 | /* Post in bursts of up to RX_BUF_BURST buffers at a time */ | 
3327 | fillbufs = MIN(fillbufs, RX_BUF_BURST); | |
3328 | ||
3329 | /* Post buffers */ | |
3330 | retcount = dhd_prot_rxbuf_post(dhd, fillbufs, use_rsv_pktid); | |
3331 | ||
3332 | if (retcount >= 0) { | |
3333 | prot->rxbufpost += (uint16)retcount; | |
3334 | #ifdef DHD_LB_RXC | |
3335 | /* dhd_prot_rxbuf_post returns the number of buffers posted */ | |
3336 | DHD_LB_STATS_UPDATE_RXC_HISTO(dhd, retcount); | |
3337 | #endif /* DHD_LB_RXC */ | |
3338 | /* how many more to post */ | |
3339 | fillbufs = prot->max_rxbufpost - prot->rxbufpost; | |
3340 | } else { | |
3341 | /* Make sure we don't run loop any further */ | |
3342 | fillbufs = 0; | |
3343 | } | |
3344 | } | |
3345 | } | |
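/*
 * The refill loop above keeps the dongle topped up with rx buffers: it posts
 * in bursts until fewer than RX_BUF_BURST buffers are missing, the ring runs
 * out of space, or its retry budget (256 passes) is spent. Worked example
 * with assumed, illustrative values:
 *
 *   max_rxbufpost = 512, rxbufpost = 100  ->  fillbufs = 412
 *   pass 1: post min(412, RX_BUF_BURST) buffers, then recompute fillbufs
 *   ...repeat while fillbufs >= RX_BUF_BURST
 */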
3346 | ||
3347 | /** Post 'count' rx buffers to the dongle */ | 
3348 | static int BCMFASTPATH | |
3349 | dhd_prot_rxbuf_post(dhd_pub_t *dhd, uint16 count, bool use_rsv_pktid) | |
3350 | { | |
3351 | void *p, **pktbuf; | |
3352 | uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; | |
3353 | uint8 *rxbuf_post_tmp; | |
3354 | host_rxbuf_post_t *rxbuf_post; | |
3355 | void *msg_start; | |
3356 | dmaaddr_t pa, *pktbuf_pa; | |
3357 | uint32 *pktlen; | |
3358 | uint16 i = 0, alloced = 0; | |
3359 | unsigned long flags; | |
3360 | uint32 pktid; | |
3361 | dhd_prot_t *prot = dhd->prot; | |
3362 | msgbuf_ring_t *ring = &prot->h2dring_rxp_subn; | |
3363 | void *lcl_buf; | |
3364 | uint16 lcl_buf_size; | |
3365 | ||
3366 | #ifdef PCIE_INB_DW | |
3367 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
3368 | return BCME_ERROR; | |
3369 | #endif /* PCIE_INB_DW */ | |
3370 | ||
3371 | /* allocate a local buffer to store pkt buffer va, pa and length */ | |
3372 | lcl_buf_size = (sizeof(void *) + sizeof(dmaaddr_t) + sizeof(uint32)) * | |
3373 | RX_BUF_BURST; | |
3374 | lcl_buf = MALLOC(dhd->osh, lcl_buf_size); | |
3375 | if (!lcl_buf) { | |
3376 | DHD_ERROR(("%s: local scratch buffer allocation failed\n", __FUNCTION__)); | |
3377 | #ifdef PCIE_INB_DW | |
3378 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
3379 | #endif | |
3380 | return 0; | |
3381 | } | |
3382 | pktbuf = lcl_buf; | |
3383 | pktbuf_pa = (dmaaddr_t *)((uint8 *)pktbuf + sizeof(void *) * RX_BUF_BURST); | |
3384 | pktlen = (uint32 *)((uint8 *)pktbuf_pa + sizeof(dmaaddr_t) * RX_BUF_BURST); | |
3385 | ||
3386 | for (i = 0; i < count; i++) { | |
3387 | if ((p = PKTGET(dhd->osh, pktsz, FALSE)) == NULL) { | |
3388 | DHD_ERROR(("%s:%d: PKTGET for rxbuf failed\n", __FUNCTION__, __LINE__)); | |
3389 | dhd->rx_pktgetfail++; | |
3390 | break; | |
3391 | } | |
3392 | ||
3393 | pktlen[i] = PKTLEN(dhd->osh, p); | |
3394 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
3395 | pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], | |
3396 | DMA_RX, p, 0, ring->dma_buf.secdma, 0); | |
3397 | } | |
3398 | #ifndef BCM_SECURE_DMA | |
3399 | else | |
3400 | pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen[i], DMA_RX, p, 0); | |
3401 | #endif /* #ifndef BCM_SECURE_DMA */ | |
3402 | ||
3403 | if (PHYSADDRISZERO(pa)) { | |
3404 | PKTFREE(dhd->osh, p, FALSE); | |
3405 | DHD_ERROR(("Invalid physaddr 0\n")); | 
3406 | ASSERT(0); | |
3407 | break; | |
3408 | } | |
3409 | ||
3410 | PKTPULL(dhd->osh, p, prot->rx_metadata_offset); | |
3411 | pktlen[i] = PKTLEN(dhd->osh, p); | |
3412 | pktbuf[i] = p; | |
3413 | pktbuf_pa[i] = pa; | |
3414 | } | |
3415 | ||
3416 | /* only post what we have */ | |
3417 | count = i; | |
3418 | ||
3419 | /* grab the rx lock to allocate pktid and post on ring */ | |
3420 | DHD_SPIN_LOCK(prot->rx_lock, flags); | |
3421 | ||
3422 | /* Claim space for exactly 'count' messages, for mitigation purposes */ | 
3423 | msg_start = (void *) | |
3424 | dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, TRUE); | |
3425 | if (msg_start == NULL) { | |
3426 | DHD_INFO(("%s:%d: Rxbufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); | |
3427 | goto cleanup; | |
3428 | } | |
3429 | /* if msg_start != NULL, we should have alloced space for at least 1 item */ | 
3430 | ASSERT(alloced > 0); | |
3431 | ||
3432 | rxbuf_post_tmp = (uint8*)msg_start; | |
3433 | ||
3434 | for (i = 0; i < alloced; i++) { | |
3435 | rxbuf_post = (host_rxbuf_post_t *)rxbuf_post_tmp; | |
3436 | p = pktbuf[i]; | |
3437 | pa = pktbuf_pa[i]; | |
3438 | ||
3439 | #if defined(DHD_LB_RXC) | |
3440 | if (use_rsv_pktid == TRUE) { | |
3441 | bcm_workq_t *workq = &prot->rx_compl_cons; | |
3442 | int elem_ix = bcm_ring_cons(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); | |
3443 | ||
3444 | if (elem_ix == BCM_RING_EMPTY) { | |
3445 | DHD_INFO(("%s rx_compl_cons ring is empty\n", __FUNCTION__)); | |
3446 | pktid = DHD_PKTID_INVALID; | |
3447 | goto alloc_pkt_id; | |
3448 | } else { | |
3449 | uint32 *elem = WORKQ_ELEMENT(uint32, workq, elem_ix); | |
3450 | pktid = *elem; | |
3451 | } | |
3452 | ||
3453 | rxbuf_post->cmn_hdr.request_id = htol32(pktid); | |
3454 | ||
3455 | /* Now populate the previous locker with valid information */ | |
3456 | if (pktid != DHD_PKTID_INVALID) { | |
3457 | DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_rx_map, | |
3458 | p, pktid, pa, pktlen[i], DMA_RX, NULL, NULL, | |
3459 | PKTTYPE_DATA_RX); | |
3460 | } | |
3461 | } else | |
3462 | #endif /* DHD_LB_RXC */ | 
3463 | { | |
3464 | #if defined(DHD_LB_RXC) | |
3465 | alloc_pkt_id: | |
3466 | #endif /* DHD_LB_RXC */ | |
3467 | pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_rx_map, p, pa, | |
3468 | pktlen[i], DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_RX); | |
3469 | #if defined(DHD_PCIE_PKTID) | |
3470 | if (pktid == DHD_PKTID_INVALID) { | |
3471 | break; | |
3472 | } | |
3473 | #endif /* DHD_PCIE_PKTID */ | |
3474 | } | |
3475 | ||
3476 | /* Common msg header */ | |
3477 | rxbuf_post->cmn_hdr.msg_type = MSG_TYPE_RXBUF_POST; | |
3478 | rxbuf_post->cmn_hdr.if_id = 0; | |
3479 | rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
3480 | rxbuf_post->cmn_hdr.flags = ring->current_phase; | |
3481 | ring->seqnum++; | |
3482 | rxbuf_post->data_buf_len = htol16((uint16)pktlen[i]); | |
3483 | rxbuf_post->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); | |
3484 | rxbuf_post->data_buf_addr.low_addr = | |
3485 | htol32(PHYSADDRLO(pa) + prot->rx_metadata_offset); | |
3486 | ||
3487 | if (prot->rx_metadata_offset) { | |
3488 | rxbuf_post->metadata_buf_len = prot->rx_metadata_offset; | |
3489 | rxbuf_post->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); | |
3490 | rxbuf_post->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); | |
3491 | } else { | |
3492 | rxbuf_post->metadata_buf_len = 0; | |
3493 | rxbuf_post->metadata_buf_addr.high_addr = 0; | |
3494 | rxbuf_post->metadata_buf_addr.low_addr = 0; | |
3495 | } | |
3496 | ||
3497 | #ifdef DHD_PKTID_AUDIT_RING | |
3498 | DHD_PKTID_AUDIT(dhd, prot->pktid_rx_map, pktid, DHD_DUPLICATE_ALLOC); | |
3499 | #endif /* DHD_PKTID_AUDIT_RING */ | |
3500 | ||
3501 | rxbuf_post->cmn_hdr.request_id = htol32(pktid); | |
3502 | ||
3503 | /* Move rxbuf_post_tmp to next item */ | |
3504 | rxbuf_post_tmp = rxbuf_post_tmp + ring->item_len; | |
3505 | } | |
3506 | ||
3507 | if (i < alloced) { | |
3508 | if (ring->wr < (alloced - i)) | |
3509 | ring->wr = ring->max_items - (alloced - i); | |
3510 | else | |
3511 | ring->wr -= (alloced - i); | |
3512 | ||
3513 | if (ring->wr == 0) { | |
3514 | DHD_INFO(("%s: flipping the phase now\n", ring->name)); | |
3515 | ring->current_phase = ring->current_phase ? | |
3516 | 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; | |
3517 | } | |
3518 | ||
3519 | alloced = i; | |
3520 | } | |
3521 | ||
3522 | /* update ring's WR index and ring doorbell to dongle */ | |
3523 | if (alloced > 0) { | |
3524 | unsigned long flags1; | |
3525 | DHD_GENERAL_LOCK(dhd, flags1); | |
3526 | dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); | |
3527 | DHD_GENERAL_UNLOCK(dhd, flags1); | |
3528 | } | |
3529 | ||
3530 | DHD_SPIN_UNLOCK(prot->rx_lock, flags); | |
3531 | ||
3532 | cleanup: | |
3533 | for (i = alloced; i < count; i++) { | |
3534 | p = pktbuf[i]; | |
3535 | pa = pktbuf_pa[i]; | |
3536 | ||
3537 | if (SECURE_DMA_ENAB(dhd->osh)) | |
3538 | SECURE_DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, | |
3539 | DHD_DMAH_NULL, ring->dma_buf.secdma, 0); | |
3540 | else | |
3541 | DMA_UNMAP(dhd->osh, pa, pktlen[i], DMA_RX, 0, DHD_DMAH_NULL); | |
3542 | PKTFREE(dhd->osh, p, FALSE); | |
3543 | } | |
3544 | ||
3545 | MFREE(dhd->osh, lcl_buf, lcl_buf_size); | |
3546 | #ifdef PCIE_INB_DW | |
3547 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
3548 | #endif | |
3549 | return alloced; | |
3550 | } /* dhd_prot_rxbuf_post */ | 
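/*
 * The current_phase flip above implements the msgbuf "phase bit" validity
 * scheme: the host tags each work item with the ring's current phase and
 * toggles the phase whenever the WR index wraps to 0, letting the dongle
 * tell freshly written items apart from stale ones left over from the
 * previous lap. The invariant in sketch form (item and PHASE_BIT are
 * illustrative names):
 */
#if 0	/* illustrative sketch only, not compiled */
	item->flags = ring->current_phase;            /* tag item with phase */
	ring->wr = (ring->wr + 1) % ring->max_items;
	if (ring->wr == 0)                            /* wrapped: next lap */
		ring->current_phase ^= PHASE_BIT;     /* uses the flipped tag */
#endif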
3551 | ||
3552 | static int | |
3553 | dhd_prot_infobufpost(dhd_pub_t *dhd) | |
3554 | { | |
3555 | unsigned long flags; | |
3556 | uint32 pktid; | |
3557 | dhd_prot_t *prot = dhd->prot; | |
3558 | msgbuf_ring_t *ring = prot->h2dring_info_subn; | |
3559 | uint16 alloced = 0; | |
3560 | uint16 pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; | |
3561 | uint32 pktlen; | |
3562 | info_buf_post_msg_t *infobuf_post; | |
3563 | uint8 *infobuf_post_tmp; | |
3564 | void *p; | |
3565 | void* msg_start; | |
3566 | uint8 i = 0; | |
3567 | dmaaddr_t pa; | |
3568 | int16 count; | |
3569 | ||
3570 | if (ring == NULL) | |
3571 | return 0; | |
3572 | ||
3573 | if (ring->inited != TRUE) | |
3574 | return 0; | |
3575 | if (prot->max_infobufpost == 0) | |
3576 | return 0; | |
3577 | ||
3578 | count = prot->max_infobufpost - prot->infobufpost; | |
3579 | ||
3580 | if (count <= 0) { | |
3581 | DHD_INFO(("%s: Cannot post more than max info resp buffers\n", | |
3582 | __FUNCTION__)); | |
3583 | return 0; | |
3584 | } | |
3585 | ||
3586 | #ifdef PCIE_INB_DW | |
3587 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
3588 | return BCME_ERROR; | |
3589 | #endif /* PCIE_INB_DW */ | |
3590 | ||
3591 | DHD_GENERAL_LOCK(dhd, flags); | |
3592 | /* Claim space for exactly 'count' messages, for mitigation purposes */ | 
3593 | msg_start = (void *) dhd_prot_alloc_ring_space(dhd, ring, count, &alloced, FALSE); | |
3594 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3595 | ||
3596 | if (msg_start == NULL) { | |
3597 | DHD_INFO(("%s:%d: infobufpost Msgbuf Not available\n", __FUNCTION__, __LINE__)); | |
3598 | #ifdef PCIE_INB_DW | |
3599 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
3600 | #endif | |
3601 | return -1; | |
3602 | } | |
3603 | ||
3604 | /* if msg_start != NULL, we should have alloced space for at least 1 item */ | 
3605 | ASSERT(alloced > 0); | |
3606 | ||
3607 | infobuf_post_tmp = (uint8*) msg_start; | |
3608 | ||
3609 | /* loop through each allocated message in the host ring */ | |
3610 | for (i = 0; i < alloced; i++) { | |
3611 | infobuf_post = (info_buf_post_msg_t *) infobuf_post_tmp; | |
3612 | /* Create a rx buffer */ | |
3613 | #ifdef DHD_USE_STATIC_CTRLBUF | |
3614 | p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); | |
3615 | #else | |
3616 | p = PKTGET(dhd->osh, pktsz, FALSE); | |
3617 | #endif /* DHD_USE_STATIC_CTRLBUF */ | |
3618 | if (p == NULL) { | |
3619 | DHD_ERROR(("%s:%d: PKTGET for infobuf failed\n", __FUNCTION__, __LINE__)); | |
3620 | dhd->rx_pktgetfail++; | |
3621 | break; | |
3622 | } | |
3623 | pktlen = PKTLEN(dhd->osh, p); | |
3624 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
3625 | pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, | |
3626 | DMA_RX, p, 0, ring->dma_buf.secdma, 0); | |
3627 | } | |
3628 | #ifndef BCM_SECURE_DMA | |
3629 | else | |
3630 | pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); | |
3631 | #endif /* #ifndef BCM_SECURE_DMA */ | |
3632 | if (PHYSADDRISZERO(pa)) { | |
3633 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
3634 | SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, | |
3635 | ring->dma_buf.secdma, 0); | |
3636 | } | |
3637 | else | |
3638 | DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); | |
3639 | #ifdef DHD_USE_STATIC_CTRLBUF | |
3640 | PKTFREE_STATIC(dhd->osh, p, FALSE); | |
3641 | #else | |
3642 | PKTFREE(dhd->osh, p, FALSE); | |
3643 | #endif /* DHD_USE_STATIC_CTRLBUF */ | |
3644 | DHD_ERROR(("Invalid physaddr 0\n")); | 
3645 | ASSERT(0); | |
3646 | break; | |
3647 | } | |
3648 | ||
3649 | pktlen = PKTLEN(dhd->osh, p); | |
3650 | ||
3651 | /* Common msg header */ | |
3652 | infobuf_post->cmn_hdr.msg_type = MSG_TYPE_INFO_BUF_POST; | |
3653 | infobuf_post->cmn_hdr.if_id = 0; | |
3654 | infobuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
3655 | infobuf_post->cmn_hdr.flags = ring->current_phase; | |
3656 | ring->seqnum++; | |
3657 | ||
3658 | #if defined(DHD_PCIE_PKTID) | |
3659 | /* get the lock before calling DHD_NATIVE_TO_PKTID */ | |
3660 | DHD_GENERAL_LOCK(dhd, flags); | |
3661 | #endif /* DHD_PCIE_PKTID */ | |
3662 | ||
3663 | pktid = DHD_NATIVE_TO_PKTID(dhd, dhd->prot->pktid_ctrl_map, p, pa, | |
3664 | pktlen, DMA_RX, NULL, ring->dma_buf.secdma, PKTTYPE_INFO_RX); | |
3665 | ||
3666 | ||
3667 | #if defined(DHD_PCIE_PKTID) | |
3668 | /* free lock */ | |
3669 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3670 | ||
3671 | if (pktid == DHD_PKTID_INVALID) { | |
3672 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
3673 | DHD_GENERAL_LOCK(dhd, flags); | |
3674 | SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0, | |
3675 | ring->dma_buf.secdma, 0); | |
3676 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3677 | } else | |
3678 | DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, 0); | |
3679 | ||
3680 | #ifdef DHD_USE_STATIC_CTRLBUF | |
3681 | PKTFREE_STATIC(dhd->osh, p, FALSE); | |
3682 | #else | |
3683 | PKTFREE(dhd->osh, p, FALSE); | |
3684 | #endif /* DHD_USE_STATIC_CTRLBUF */ | |
3685 | DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__)); | |
3686 | break; | |
3687 | } | |
3688 | #endif /* DHD_PCIE_PKTID */ | |
3689 | ||
3690 | infobuf_post->host_buf_len = htol16((uint16)pktlen); | |
3691 | infobuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); | |
3692 | infobuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); | |
3693 | ||
3694 | #ifdef DHD_PKTID_AUDIT_RING | |
3695 | DHD_PKTID_AUDIT(dhd, prot->pktid_ctrl_map, pktid, DHD_DUPLICATE_ALLOC); | |
3696 | #endif /* DHD_PKTID_AUDIT_RING */ | |
3697 | ||
3698 | infobuf_post->cmn_hdr.request_id = htol32(pktid); | 
3699 | ||
3700 | /* Log after request_id is set, so the trace shows the id actually posted */ | 
3701 | DHD_INFO(("ID %d, low_addr 0x%08x, high_addr 0x%08x\n", | 
3702 | infobuf_post->cmn_hdr.request_id, infobuf_post->host_buf_addr.low_addr, | 
3703 | infobuf_post->host_buf_addr.high_addr)); | 
3703 | /* Move infobuf_post_tmp to next item */ | 
3704 | infobuf_post_tmp = infobuf_post_tmp + ring->item_len; | |
3705 | } | |
3706 | ||
3707 | if (i < alloced) { | |
3708 | if (ring->wr < (alloced - i)) | |
3709 | ring->wr = ring->max_items - (alloced - i); | |
3710 | else | |
3711 | ring->wr -= (alloced - i); | |
3712 | ||
3713 | alloced = i; | |
3714 | if (alloced && ring->wr == 0) { | |
3715 | DHD_INFO(("%s: flipping the phase now\n", ring->name)); | |
3716 | ring->current_phase = ring->current_phase ? | |
3717 | 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; | |
3718 | } | |
3719 | } | |
3720 | ||
3721 | /* Update the write pointer in TCM & ring the doorbell */ | 
3722 | if (alloced > 0) { | |
3723 | prot->infobufpost += alloced; | |
3724 | DHD_INFO(("allocated %d buffers for info ring\n", alloced)); | |
3725 | DHD_GENERAL_LOCK(dhd, flags); | |
3726 | dhd_prot_ring_write_complete(dhd, ring, msg_start, alloced); | |
3727 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3728 | } | |
3729 | #ifdef PCIE_INB_DW | |
3730 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
3731 | #endif | |
3732 | return alloced; | |
3733 | } /* dhd_prot_infobufpost */ | |
3734 | ||
3735 | #ifdef IOCTLRESP_USE_CONSTMEM | |
3736 | static int | |
3737 | alloc_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) | |
3738 | { | |
3739 | int err; | |
3740 | memset(retbuf, 0, sizeof(dhd_dma_buf_t)); | |
3741 | ||
3742 | if ((err = dhd_dma_buf_alloc(dhd, retbuf, IOCT_RETBUF_SIZE)) != BCME_OK) { | |
3743 | DHD_ERROR(("%s: dhd_dma_buf_alloc err %d\n", __FUNCTION__, err)); | |
3744 | ASSERT(0); | |
3745 | return BCME_NOMEM; | |
3746 | } | |
3747 | ||
3748 | return BCME_OK; | |
3749 | } | |
3750 | ||
3751 | static void | |
3752 | free_ioctl_return_buffer(dhd_pub_t *dhd, dhd_dma_buf_t *retbuf) | |
3753 | { | |
3754 | /* retbuf (declared on stack) not fully populated ... */ | |
3755 | if (retbuf->va) { | |
3756 | uint32 dma_pad; | |
3757 | dma_pad = (IOCT_RETBUF_SIZE % DHD_DMA_PAD) ? DHD_DMA_PAD : 0; | |
3758 | retbuf->len = IOCT_RETBUF_SIZE; | |
3759 | retbuf->_alloced = retbuf->len + dma_pad; | |
3760 | } | |
3761 | ||
3762 | dhd_dma_buf_free(dhd, retbuf); | |
3763 | return; | |
3764 | } | |
3765 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
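/*
 * alloc_ioctl_return_buffer()/free_ioctl_return_buffer() above give ioctl
 * responses a DMA-coherent buffer outside the normal packet pool. Usage
 * sketch, mirroring dhd_prot_rxbufpost_ctrl() below:
 */
#if 0	/* usage sketch, mirrors the callers in this file */
	dhd_dma_buf_t retbuf;

	if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK)
		return BCME_NOMEM;
	/* post retbuf.pa to the dongle; keep retbuf alive until the
	 * ioctl completion returns its pktid, then:
	 */
	free_ioctl_return_buffer(dhd, &retbuf);
#endif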
3766 | ||
3767 | static int | |
3768 | dhd_prot_rxbufpost_ctrl(dhd_pub_t *dhd, uint8 msg_type) | |
3769 | { | |
3770 | void *p; | |
3771 | uint16 pktsz; | |
3772 | ioctl_resp_evt_buf_post_msg_t *rxbuf_post; | |
3773 | dmaaddr_t pa; | |
3774 | uint32 pktlen; | |
3775 | dhd_prot_t *prot = dhd->prot; | |
3776 | uint16 alloced = 0; | |
3777 | unsigned long flags; | |
3778 | dhd_dma_buf_t retbuf; | |
3779 | void *dmah = NULL; | |
3780 | uint32 pktid; | |
3781 | void *map_handle; | |
3782 | msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; | |
3783 | bool non_ioctl_resp_buf = 0; | |
3784 | dhd_pkttype_t buf_type; | |
3785 | ||
3786 | if (dhd->busstate == DHD_BUS_DOWN) { | |
3787 | DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); | |
3788 | return -1; | |
3789 | } | |
3790 | memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); | |
3791 | ||
3792 | if (msg_type == MSG_TYPE_IOCTLRESP_BUF_POST) | |
3793 | buf_type = PKTTYPE_IOCTL_RX; | |
3794 | else if (msg_type == MSG_TYPE_EVENT_BUF_POST) | |
3795 | buf_type = PKTTYPE_EVENT_RX; | |
3796 | else if (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST) | |
3797 | buf_type = PKTTYPE_TSBUF_RX; | |
3798 | else { | |
3799 | DHD_ERROR(("invalid message type to be posted to Ctrl ring %d\n", msg_type)); | |
3800 | return -1; | |
3801 | } | |
3802 | ||
3803 | ||
3804 | if ((msg_type == MSG_TYPE_EVENT_BUF_POST) || (msg_type == MSG_TYPE_TIMSTAMP_BUFPOST)) | |
3805 | non_ioctl_resp_buf = TRUE; | |
3806 | else | |
3807 | non_ioctl_resp_buf = FALSE; | |
3808 | ||
3809 | if (non_ioctl_resp_buf) { | |
3810 | /* Allocate packet for not ioctl resp buffer post */ | |
3811 | pktsz = DHD_FLOWRING_RX_BUFPOST_PKTSZ; | |
3812 | } else { | |
3813 | /* Allocate packet for ctrl/ioctl buffer post */ | |
3814 | pktsz = DHD_FLOWRING_IOCTL_BUFPOST_PKTSZ; | |
3815 | } | |
3816 | ||
3817 | #ifdef IOCTLRESP_USE_CONSTMEM | |
3818 | if (!non_ioctl_resp_buf) { | |
3819 | if (alloc_ioctl_return_buffer(dhd, &retbuf) != BCME_OK) { | |
3820 | DHD_ERROR(("Could not allocate IOCTL response buffer\n")); | |
3821 | return -1; | |
3822 | } | |
3823 | ASSERT(retbuf.len == IOCT_RETBUF_SIZE); | |
3824 | p = retbuf.va; | |
3825 | pktlen = retbuf.len; | |
3826 | pa = retbuf.pa; | |
3827 | dmah = retbuf.dmah; | |
3828 | } else | |
3829 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
3830 | { | |
3831 | #ifdef DHD_USE_STATIC_CTRLBUF | |
3832 | p = PKTGET_STATIC(dhd->osh, pktsz, FALSE); | |
3833 | #else | |
3834 | p = PKTGET(dhd->osh, pktsz, FALSE); | |
3835 | #endif /* DHD_USE_STATIC_CTRLBUF */ | |
3836 | if (p == NULL) { | |
3837 | DHD_ERROR(("%s:%d: PKTGET for %s buf failed\n", | |
3838 | __FUNCTION__, __LINE__, non_ioctl_resp_buf ? | |
3839 | "EVENT" : "IOCTL RESP")); | |
3840 | dhd->rx_pktgetfail++; | |
3841 | return -1; | |
3842 | } | |
3843 | ||
3844 | pktlen = PKTLEN(dhd->osh, p); | |
3845 | ||
3846 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
3847 | DHD_GENERAL_LOCK(dhd, flags); | |
3848 | pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, | |
3849 | DMA_RX, p, 0, ring->dma_buf.secdma, 0); | |
3850 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3851 | } | |
3852 | #ifndef BCM_SECURE_DMA | |
3853 | else | |
3854 | pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, p), pktlen, DMA_RX, p, 0); | |
3855 | #endif /* #ifndef BCM_SECURE_DMA */ | |
3856 | ||
3857 | if (PHYSADDRISZERO(pa)) { | |
3858 | DHD_ERROR(("Invalid physaddr 0\n")); | |
3859 | ASSERT(0); | |
3860 | goto free_pkt_return; | |
3861 | } | |
3862 | ||
3863 | } | |
3864 | #ifdef PCIE_INB_DW | |
3865 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
3866 | return BCME_ERROR; | |
3867 | #endif /* PCIE_INB_DW */ | |
3868 | ||
3869 | DHD_GENERAL_LOCK(dhd, flags); | |
3870 | ||
3871 | rxbuf_post = (ioctl_resp_evt_buf_post_msg_t *) | |
3872 | dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); | |
3873 | ||
3874 | if (rxbuf_post == NULL) { | |
3875 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3876 | DHD_ERROR(("%s:%d: Ctrl submit Msgbuf Not available to post buffer \n", | |
3877 | __FUNCTION__, __LINE__)); | |
3878 | ||
3879 | #ifdef IOCTLRESP_USE_CONSTMEM | |
3880 | if (non_ioctl_resp_buf) | |
3881 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
3882 | { | |
3883 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
3884 | DHD_GENERAL_LOCK(dhd, flags); | |
3885 | SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, | |
3886 | ring->dma_buf.secdma, 0); | |
3887 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3888 | } else { | |
3889 | DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); | |
3890 | } | |
3891 | } | |
3892 | goto free_pkt_return; | |
3893 | } | |
3894 | ||
3895 | /* CMN msg header */ | |
3896 | rxbuf_post->cmn_hdr.msg_type = msg_type; | |
3897 | ||
3898 | #ifdef IOCTLRESP_USE_CONSTMEM | |
3899 | if (!non_ioctl_resp_buf) { | |
3900 | map_handle = dhd->prot->pktid_map_handle_ioctl; | |
3901 | pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, p, pa, pktlen, DMA_RX, dmah, | |
3902 | ring->dma_buf.secdma, buf_type); | |
3903 | } else | |
3904 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
3905 | { | |
3906 | map_handle = dhd->prot->pktid_ctrl_map; | |
3907 | pktid = DHD_NATIVE_TO_PKTID(dhd, map_handle, | |
3908 | p, pa, pktlen, DMA_RX, dmah, ring->dma_buf.secdma, | |
3909 | buf_type); | |
3910 | } | |
3911 | ||
3912 | if (pktid == DHD_PKTID_INVALID) { | |
3913 | if (ring->wr == 0) { | |
3914 | ring->wr = ring->max_items - 1; | |
3915 | } else { | |
3916 | ring->wr--; | |
3917 | if (ring->wr == 0) { | |
3918 | ring->current_phase = ring->current_phase ? 0 : | |
3919 | BCMPCIE_CMNHDR_PHASE_BIT_INIT; | |
3920 | } | |
3921 | } | |
3922 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3923 | DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); | |
3924 | DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__)); | |
3925 | goto free_pkt_return; | |
3926 | } | |
3927 | ||
3928 | #ifdef DHD_PKTID_AUDIT_RING | |
3929 | DHD_PKTID_AUDIT(dhd, map_handle, pktid, DHD_DUPLICATE_ALLOC); | |
3930 | #endif /* DHD_PKTID_AUDIT_RING */ | |
3931 | ||
3932 | rxbuf_post->cmn_hdr.request_id = htol32(pktid); | |
3933 | rxbuf_post->cmn_hdr.if_id = 0; | |
3934 | rxbuf_post->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
3935 | ring->seqnum++; | |
3936 | rxbuf_post->cmn_hdr.flags = ring->current_phase; | |
3937 | ||
3938 | #if defined(DHD_PCIE_PKTID) | |
3939 | if (rxbuf_post->cmn_hdr.request_id == DHD_PKTID_INVALID) { | |
3940 | if (ring->wr == 0) { | |
3941 | ring->wr = ring->max_items - 1; | |
3942 | } else { | 
3943 | ring->wr--; /* roll back the WR index slot claimed above */ | 
3943 | if (ring->wr == 0) { | 
3944 | ring->current_phase = ring->current_phase ? 0 : | |
3945 | BCMPCIE_CMNHDR_PHASE_BIT_INIT; | |
3946 | } | |
3947 | } | |
3948 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3949 | #ifdef IOCTLRESP_USE_CONSTMEM | |
3950 | if (non_ioctl_resp_buf) | |
3951 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
3952 | { | |
3953 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
3954 | DHD_GENERAL_LOCK(dhd, flags); | |
3955 | SECURE_DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL, | |
3956 | ring->dma_buf.secdma, 0); | |
3957 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3958 | } else | |
3959 | DMA_UNMAP(dhd->osh, pa, pktlen, DMA_RX, 0, DHD_DMAH_NULL); | |
3960 | } | |
3961 | goto free_pkt_return; | |
3962 | } | |
3963 | #endif /* DHD_PCIE_PKTID */ | |
3964 | ||
3965 | #ifndef IOCTLRESP_USE_CONSTMEM | |
3966 | rxbuf_post->host_buf_len = htol16((uint16)PKTLEN(dhd->osh, p)); | |
3967 | #else | |
3968 | rxbuf_post->host_buf_len = htol16((uint16)pktlen); | |
3969 | #endif /* IOCTLRESP_USE_CONSTMEM */ | |
3970 | rxbuf_post->host_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); | |
3971 | rxbuf_post->host_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); | |
3972 | ||
3973 | /* update ring's WR index and ring doorbell to dongle */ | |
3974 | dhd_prot_ring_write_complete(dhd, ring, rxbuf_post, 1); | |
3975 | DHD_GENERAL_UNLOCK(dhd, flags); | |
3976 | ||
3977 | #ifdef PCIE_INB_DW | |
3978 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
3979 | #endif | |
3980 | ||
3981 | return 1; | |
3982 | ||
3983 | free_pkt_return: | |
3984 | #ifdef IOCTLRESP_USE_CONSTMEM | |
3985 | if (!non_ioctl_resp_buf) { | |
3986 | free_ioctl_return_buffer(dhd, &retbuf); | |
3987 | } else | |
3988 | #endif | |
3989 | { | |
3990 | dhd_prot_packet_free(dhd, p, buf_type, FALSE); | |
3991 | } | |
3992 | ||
3993 | #ifdef PCIE_INB_DW | |
3994 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
3995 | #endif | |
3996 | ||
3997 | return -1; | |
3998 | } /* dhd_prot_rxbufpost_ctrl */ | |
3999 | ||
4000 | static uint16 | |
4001 | dhd_msgbuf_rxbuf_post_ctrlpath(dhd_pub_t *dhd, uint8 msg_type, uint32 max_to_post) | |
4002 | { | |
4003 | uint32 i = 0; | |
4004 | int32 ret_val; | |
4005 | ||
4006 | DHD_INFO(("max to post %d, event %d \n", max_to_post, msg_type)); | |
4007 | ||
4008 | if (dhd->busstate == DHD_BUS_DOWN) { | |
4009 | DHD_ERROR(("%s: bus is already down.\n", __FUNCTION__)); | |
4010 | return 0; | |
4011 | } | |
4012 | ||
4013 | while (i < max_to_post) { | |
4014 | ret_val = dhd_prot_rxbufpost_ctrl(dhd, msg_type); | |
4015 | if (ret_val < 0) | |
4016 | break; | |
4017 | i++; | |
4018 | } | |
4019 | DHD_INFO(("posted %d buffers of type %d\n", i, msg_type)); | |
4020 | return (uint16)i; | |
4021 | } | |
4022 | ||
4023 | static void | |
4024 | dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd_pub_t *dhd) | |
4025 | { | |
4026 | dhd_prot_t *prot = dhd->prot; | |
4027 | int max_to_post; | |
4028 | ||
4029 | DHD_INFO(("ioctl resp buf post\n")); | |
4030 | max_to_post = prot->max_ioctlrespbufpost - prot->cur_ioctlresp_bufs_posted; | |
4031 | if (max_to_post <= 0) { | |
4032 | DHD_INFO(("%s: Cannot post more than max IOCTL resp buffers\n", | |
4033 | __FUNCTION__)); | |
4034 | return; | |
4035 | } | |
4036 | prot->cur_ioctlresp_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, | |
4037 | MSG_TYPE_IOCTLRESP_BUF_POST, max_to_post); | |
4038 | } | |
4039 | ||
4040 | static void | |
4041 | dhd_msgbuf_rxbuf_post_event_bufs(dhd_pub_t *dhd) | |
4042 | { | |
4043 | dhd_prot_t *prot = dhd->prot; | |
4044 | int max_to_post; | |
4045 | ||
4046 | max_to_post = prot->max_eventbufpost - prot->cur_event_bufs_posted; | |
4047 | if (max_to_post <= 0) { | |
4048 | DHD_ERROR(("%s: Cannot post more than max event buffers\n", | |
4049 | __FUNCTION__)); | |
4050 | return; | |
4051 | } | |
4052 | prot->cur_event_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, | |
4053 | MSG_TYPE_EVENT_BUF_POST, max_to_post); | |
4054 | } | |
4055 | ||
4056 | static int | |
4057 | dhd_msgbuf_rxbuf_post_ts_bufs(dhd_pub_t *dhd) | |
4058 | { | |
4059 | #ifdef DHD_TIMESYNC | |
4060 | dhd_prot_t *prot = dhd->prot; | |
4061 | int max_to_post; | |
4062 | ||
4063 | if (prot->active_ipc_version < 7) { | |
4064 | DHD_ERROR(("not posting ts buffers: device ipc rev is %d, needs to be at least 7\n", | 
4065 | prot->active_ipc_version)); | |
4066 | return 0; | |
4067 | } | |
4068 | ||
4069 | max_to_post = prot->max_tsbufpost - prot->cur_ts_bufs_posted; | |
4070 | if (max_to_post <= 0) { | |
4071 | DHD_INFO(("%s: Cannot post more than max ts buffers\n", | |
4072 | __FUNCTION__)); | |
4073 | return 0; | |
4074 | } | |
4075 | ||
4076 | prot->cur_ts_bufs_posted += dhd_msgbuf_rxbuf_post_ctrlpath(dhd, | |
4077 | MSG_TYPE_TIMSTAMP_BUFPOST, max_to_post); | |
4078 | #endif /* DHD_TIMESYNC */ | |
4079 | return 0; | |
4080 | } | |
4081 | ||
4082 | bool BCMFASTPATH | |
4083 | dhd_prot_process_msgbuf_infocpl(dhd_pub_t *dhd, uint bound) | |
4084 | { | |
4085 | dhd_prot_t *prot = dhd->prot; | |
4086 | bool more = TRUE; | |
4087 | uint n = 0; | |
4088 | msgbuf_ring_t *ring = prot->d2hring_info_cpln; | |
4089 | ||
4090 | if (ring == NULL) | |
4091 | return FALSE; | |
4092 | if (ring->inited != TRUE) | |
4093 | return FALSE; | |
4094 | ||
4095 | /* Process all the messages - DTOH direction */ | |
4096 | while (!dhd_is_device_removed(dhd)) { | |
4097 | uint8 *msg_addr; | |
4098 | uint32 msg_len; | |
4099 | ||
4100 | if (dhd->hang_was_sent) { | |
4101 | more = FALSE; | |
4102 | break; | |
4103 | } | |
4104 | ||
4105 | /* Get the message from ring */ | |
4106 | msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); | |
4107 | if (msg_addr == NULL) { | |
4108 | more = FALSE; | |
4109 | break; | |
4110 | } | |
4111 | ||
4112 | /* Prefetch data to populate the cache */ | |
4113 | OSL_PREFETCH(msg_addr); | |
4114 | ||
4115 | if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { | |
4116 | DHD_ERROR(("%s: Error processing info cpl msgbuf of len %d\n", | 
4117 | __FUNCTION__, msg_len)); | |
4118 | } | |
4119 | ||
4120 | /* Update read pointer */ | |
4121 | dhd_prot_upd_read_idx(dhd, ring); | |
4122 | ||
4123 | /* After batch processing, check RX bound */ | |
4124 | n += msg_len / ring->item_len; | |
4125 | if (n >= bound) { | |
4126 | break; | |
4127 | } | |
4128 | } | |
4129 | ||
4130 | return more; | |
4131 | } | |
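/*
 * Like a NAPI poll budget, 'bound' caps how many completion items a single
 * call may consume; a TRUE return tells the caller there is more work left.
 * Typical caller shape (sketch; RX_BOUND and reschedule_rx_work() are
 * illustrative placeholders, not DHD names):
 */
#if 0	/* illustrative sketch only, not compiled */
	bool more = dhd_prot_process_msgbuf_infocpl(dhd, RX_BOUND);
	if (more)
		reschedule_rx_work(dhd);  /* budget spent: finish in a later pass */
#endif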
4132 | ||
4133 | /** called when DHD needs to check for 'receive complete' messages from the dongle */ | |
4134 | bool BCMFASTPATH | |
4135 | dhd_prot_process_msgbuf_rxcpl(dhd_pub_t *dhd, uint bound) | |
4136 | { | |
4137 | bool more = FALSE; | |
4138 | uint n = 0; | |
4139 | dhd_prot_t *prot = dhd->prot; | |
4140 | msgbuf_ring_t *ring = &prot->d2hring_rx_cpln; | |
4141 | uint16 item_len = ring->item_len; | |
4142 | host_rxbuf_cmpl_t *msg = NULL; | |
4143 | uint8 *msg_addr; | |
4144 | uint32 msg_len; | |
4145 | uint16 pkt_cnt, pkt_cnt_newidx; | |
4146 | unsigned long flags; | |
4147 | dmaaddr_t pa; | |
4148 | uint32 len; | |
4149 | void *dmah; | |
4150 | void *secdma; | |
4151 | int ifidx = 0, if_newidx = 0; | |
4152 | void *pkt, *pktqhead = NULL, *prevpkt = NULL, *pkt_newidx, *nextpkt; | |
4153 | uint32 pktid; | |
4154 | int i; | |
4155 | uint8 sync; | |
4156 | ||
4157 | #ifdef DHD_WAKE_STATUS | |
4158 | int pkt_wake = bcmpcie_set_get_wake(dhd->bus, 0); | |
4159 | #endif | |
4160 | ||
4161 | while (1) { | |
4162 | if (dhd_is_device_removed(dhd)) | |
4163 | break; | |
4164 | ||
4165 | if (dhd->hang_was_sent) | |
4166 | break; | |
4167 | ||
4168 | pkt_cnt = 0; | |
4169 | pktqhead = pkt_newidx = NULL; | |
4170 | pkt_cnt_newidx = 0; | |
4171 | ||
4172 | DHD_SPIN_LOCK(prot->rx_lock, flags); | |
4173 | ||
4174 | /* Get the address of the next message to be read from ring */ | |
4175 | msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); | |
4176 | if (msg_addr == NULL) { | |
4177 | DHD_SPIN_UNLOCK(prot->rx_lock, flags); | |
4178 | break; | |
4179 | } | |
4180 | ||
4181 | while (msg_len > 0) { | |
4182 | msg = (host_rxbuf_cmpl_t *)msg_addr; | |
4183 | ||
4184 | /* Wait until DMA completes, then fetch msg_type */ | |
4185 | sync = prot->d2h_sync_cb(dhd, ring, &msg->cmn_hdr, item_len); | |
4186 | /* | |
4187 | * Update the curr_rd to the current index in the ring, from where | |
4188 | * the work item is fetched. That way, if the fetched work item | |
4189 | * fails the LIVELOCK check, we can print the exact read index in | |
4190 | * the ring that points to the corrupted work item. | |
4191 | */ | |
4192 | if ((ring->curr_rd + 1) >= ring->max_items) { | |
4193 | ring->curr_rd = 0; | |
4194 | } else { | |
4195 | ring->curr_rd += 1; | |
4196 | } | |
4197 | ||
4198 | if (!sync) { | |
4199 | msg_len -= item_len; | |
4200 | msg_addr += item_len; | |
4201 | continue; | |
4202 | } | |
4203 | ||
4204 | pktid = ltoh32(msg->cmn_hdr.request_id); | |
4205 | ||
4206 | #ifdef DHD_PKTID_AUDIT_RING | |
4207 | DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_rx_map, pktid, | |
4208 | DHD_DUPLICATE_FREE, msg, D2HRING_RXCMPLT_ITEMSIZE); | |
4209 | #endif /* DHD_PKTID_AUDIT_RING */ | |
4210 | ||
4211 | pkt = DHD_PKTID_TO_NATIVE(dhd, prot->pktid_rx_map, pktid, pa, | |
4212 | len, dmah, secdma, PKTTYPE_DATA_RX); | |
4213 | if (!pkt) { | |
4214 | msg_len -= item_len; | |
4215 | msg_addr += item_len; | |
4216 | continue; | |
4217 | } | |
4218 | ||
4219 | if (SECURE_DMA_ENAB(dhd->osh)) | |
4220 | SECURE_DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, | |
4221 | dmah, secdma, 0); | |
4222 | else | |
4223 | DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); | |
4224 | ||
4225 | DHD_INFO(("id 0x%04x, offset %d, len %d, idx %d, phase 0x%02x, " | |
4226 | "pktdata %p, metalen %d\n", | |
4227 | ltoh32(msg->cmn_hdr.request_id), | |
4228 | ltoh16(msg->data_offset), | |
4229 | ltoh16(msg->data_len), msg->cmn_hdr.if_id, | |
4230 | msg->cmn_hdr.flags, PKTDATA(dhd->osh, pkt), | |
4231 | ltoh16(msg->metadata_len))); | |
4232 | ||
4233 | pkt_cnt++; | |
4234 | msg_len -= item_len; | |
4235 | msg_addr += item_len; | |
4236 | ||
4237 | #if DHD_DBG_SHOW_METADATA | |
4238 | if (prot->metadata_dbg && prot->rx_metadata_offset && | |
4239 | msg->metadata_len) { | |
4240 | uchar *ptr; | |
4241 | ptr = PKTDATA(dhd->osh, pkt) - (prot->rx_metadata_offset); | |
4242 | /* header followed by data */ | |
4243 | bcm_print_bytes("rxmetadata", ptr, msg->metadata_len); | |
4244 | dhd_prot_print_metadata(dhd, ptr, msg->metadata_len); | |
4245 | } | |
4246 | #endif /* DHD_DBG_SHOW_METADATA */ | |
4247 | ||
4248 | /* data_offset from buf start */ | |
4249 | if (ltoh16(msg->data_offset)) { | |
4250 | /* data offset given from dongle after split rx */ | |
4251 | PKTPULL(dhd->osh, pkt, ltoh16(msg->data_offset)); | |
4252 | } | |
4253 | else if (prot->rx_dataoffset) { | |
4254 | /* DMA RX offset updated through shared area */ | |
4255 | PKTPULL(dhd->osh, pkt, prot->rx_dataoffset); | |
4256 | } | |
4257 | /* Actual length of the packet */ | |
4258 | PKTSETLEN(dhd->osh, pkt, ltoh16(msg->data_len)); | |
4259 | #if defined(WL_MONITOR) | |
4260 | if (dhd_monitor_enabled(dhd, ifidx) && | |
4261 | (msg->flags & BCMPCIE_PKT_FLAGS_FRAME_802_11)) { | |
4262 | dhd_rx_mon_pkt(dhd, msg, pkt, ifidx); | |
4263 | continue; | |
4264 | } | |
4265 | #endif | |
4266 | ||
4267 | if (!pktqhead) { | |
4268 | pktqhead = prevpkt = pkt; | |
4269 | ifidx = msg->cmn_hdr.if_id; | |
4270 | } else { | |
4271 | if (ifidx != msg->cmn_hdr.if_id) { | |
4272 | pkt_newidx = pkt; | |
4273 | if_newidx = msg->cmn_hdr.if_id; | |
4274 | pkt_cnt--; | |
4275 | pkt_cnt_newidx = 1; | |
4276 | break; | |
4277 | } else { | |
4278 | PKTSETNEXT(dhd->osh, prevpkt, pkt); | |
4279 | prevpkt = pkt; | |
4280 | } | |
4281 | } | |
4282 | ||
4283 | #ifdef DHD_TIMESYNC | |
4284 | if (dhd->prot->rx_ts_log_enabled) { | |
4285 | ts_timestamp_t *ts = (ts_timestamp_t *)&msg->ts; | |
4286 | dhd_timesync_log_rx_timestamp(dhd->ts, ifidx, ts->low, ts->high); | |
4287 | } | |
4288 | #endif /* DHD_TIMESYNC */ | |
4289 | } | |
4290 | ||
4291 | /* roll back read pointer for unprocessed message */ | |
4292 | if (msg_len > 0) { | |
4293 | if (ring->rd < msg_len / item_len) | |
4294 | ring->rd = ring->max_items - msg_len / item_len; | |
4295 | else | |
4296 | ring->rd -= msg_len / item_len; | |
4297 | } | |
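| /* Example of the rollback arithmetic above (hypothetical numbers): with | |
| * item_len 32, two unprocessed items leave msg_len == 64, so rd moves back | |
| * by 64/32 == 2 slots, wrapping to max_items - 2 if it would go negative. | |
| */ | |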
4298 | ||
4299 | /* Update read pointer */ | |
4300 | dhd_prot_upd_read_idx(dhd, ring); | |
4301 | ||
4302 | DHD_SPIN_UNLOCK(prot->rx_lock, flags); | |
4303 | ||
4304 | pkt = pktqhead; | |
4305 | for (i = 0; pkt && i < pkt_cnt; i++, pkt = nextpkt) { | |
4306 | nextpkt = PKTNEXT(dhd->osh, pkt); | |
4307 | PKTSETNEXT(dhd->osh, pkt, NULL); | |
4308 | #ifdef DHD_LB_RXP | |
4309 | dhd_lb_rx_pkt_enqueue(dhd, pkt, ifidx); | |
4310 | #elif defined(DHD_RX_CHAINING) | |
4311 | dhd_rxchain_frame(dhd, pkt, ifidx); | |
4312 | #else | |
4313 | #ifdef DHD_WAKE_STATUS | |
4314 | dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1, pkt_wake); | |
4315 | #else | |
4316 | dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); | |
4317 | #endif /* DHD_WAKE_STATUS */ | |
4318 | #endif /* DHD_LB_RXP */ | |
4319 | } | |
4320 | ||
4321 | if (pkt_newidx) { | |
4322 | #ifdef DHD_LB_RXP | |
4323 | dhd_lb_rx_pkt_enqueue(dhd, pkt_newidx, if_newidx); | |
4324 | #elif defined(DHD_RX_CHAINING) | |
4325 | dhd_rxchain_frame(dhd, pkt_newidx, if_newidx); | |
4326 | #else | |
4327 | #ifdef DHD_WAKE_STATUS | |
4328 | dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1, pkt_wake); | |
4329 | #else | |
4330 | dhd_bus_rx_frame(dhd->bus, pkt_newidx, if_newidx, 1); | |
4331 | #endif /* DHD_WAKE_STATUS */ | |
4332 | #endif /* DHD_LB_RXP */ | |
4333 | } | |
4334 | ||
4335 | pkt_cnt += pkt_cnt_newidx; | |
4336 | ||
4337 | /* Post another set of rxbufs to the device */ | |
4338 | dhd_prot_return_rxbuf(dhd, 0, pkt_cnt); | |
4339 | ||
4340 | /* After batch processing, check RX bound */ | |
4341 | n += pkt_cnt; | |
4342 | if (n >= bound) { | |
4343 | more = TRUE; | |
4344 | break; | |
4345 | } | |
4346 | } | |
4347 | ||
4348 | /* Call lb_dispatch only if packets are queued */ | |
4349 | if (n) { | |
4350 | DHD_LB_DISPATCH_RX_COMPL(dhd); | |
4351 | DHD_LB_DISPATCH_RX_PROCESS(dhd); | |
4352 | } | |
4353 | ||
4354 | return more; | |
4355 | } | |
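| /* Illustrative only: a bus-layer dispatcher could drain the three D2H | |
| * completion rings with a shared per-pass budget along the lines of the | |
| * sketch below; 'budget' and example_reschedule() are hypothetical and | |
| * not part of this file. | |
| */ | |
| #if 0 | |
| static void | |
| example_d2h_dispatch(dhd_pub_t *dhd, uint budget) | |
| { | |
| bool more = FALSE; | |
| more |= dhd_prot_process_msgbuf_txcpl(dhd, budget); /* tx completions */ | |
| more |= dhd_prot_process_msgbuf_rxcpl(dhd, budget); /* rx completions */ | |
| more |= dhd_prot_process_msgbuf_infocpl(dhd, budget); /* info completions */ | |
| if (more) | |
| example_reschedule(dhd); /* hypothetical: schedule another pass */ | |
| } | |
| #endif | |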
4356 | ||
4357 | /** | |
4358 | * Hands transmit packets (with a caller provided flow_id) over to dongle territory (the flow ring) | |
4359 | */ | |
4360 | void | |
4361 | dhd_prot_update_txflowring(dhd_pub_t *dhd, uint16 flowid, void *msgring) | |
4362 | { | |
4363 | msgbuf_ring_t *ring = (msgbuf_ring_t *)msgring; | |
4364 | ||
4365 | if (ring == NULL) { | |
4366 | DHD_ERROR(("%s: NULL txflowring. exiting...\n", __FUNCTION__)); | |
4367 | return; | |
4368 | } | |
4369 | /* Update read pointer */ | |
4370 | if (dhd->dma_d2h_ring_upd_support) { | |
4371 | ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); | |
4372 | } | |
4373 | ||
4374 | DHD_TRACE(("ringid %d flowid %d write %d read %d \n\n", | |
4375 | ring->idx, flowid, ring->wr, ring->rd)); | |
4376 | ||
4377 | /* Need more logic here, but for now use it directly */ | |
4378 | dhd_bus_schedule_queue(dhd->bus, flowid, TRUE); /* from queue to flowring */ | |
4379 | } | |
4380 | ||
4381 | /** called when DHD needs to check for 'transmit complete' messages from the dongle */ | |
4382 | bool BCMFASTPATH | |
4383 | dhd_prot_process_msgbuf_txcpl(dhd_pub_t *dhd, uint bound) | |
4384 | { | |
4385 | bool more = TRUE; | |
4386 | uint n = 0; | |
4387 | msgbuf_ring_t *ring = &dhd->prot->d2hring_tx_cpln; | |
4388 | ||
4389 | /* Process all the messages - DTOH direction */ | |
4390 | while (!dhd_is_device_removed(dhd)) { | |
4391 | uint8 *msg_addr; | |
4392 | uint32 msg_len; | |
4393 | ||
4394 | if (dhd->hang_was_sent) { | |
4395 | more = FALSE; | |
4396 | break; | |
4397 | } | |
4398 | ||
4399 | /* Get the address of the next message to be read from ring */ | |
4400 | msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); | |
4401 | if (msg_addr == NULL) { | |
4402 | more = FALSE; | |
4403 | break; | |
4404 | } | |
4405 | ||
4406 | /* Prefetch data to populate the cache */ | |
4407 | OSL_PREFETCH(msg_addr); | |
4408 | ||
4409 | if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { | |
4410 | DHD_ERROR(("%s: process %s msg addr %p len %d\n", | |
4411 | __FUNCTION__, ring->name, msg_addr, msg_len)); | |
4412 | } | |
4413 | ||
4414 | /* Write to dngl rd ptr */ | |
4415 | dhd_prot_upd_read_idx(dhd, ring); | |
4416 | ||
4417 | /* After batch processing, check bound */ | |
4418 | n += msg_len / ring->item_len; | |
4419 | if (n >= bound) { | |
4420 | break; | |
4421 | } | |
4422 | } | |
4423 | ||
4424 | DHD_LB_DISPATCH_TX_COMPL(dhd); | |
4425 | ||
4426 | return more; | |
4427 | } | |
4428 | ||
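| /** Checks the host trap buffer for a firmware halt. Returns the raw trap | |
| * word when D2H_DEV_FWHALT is set (copying out extended trap data first, | |
| * when present and enabled), 0 otherwise. | |
| */ | |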
4429 | int BCMFASTPATH | |
4430 | dhd_prot_process_trapbuf(dhd_pub_t *dhd) | |
4431 | { | |
4432 | uint32 data; | |
4433 | dhd_dma_buf_t *trap_addr = &dhd->prot->fw_trap_buf; | |
4434 | ||
4435 | /* Interrupts can come in before this struct | |
4436 | * has been initialized. | |
4437 | */ | |
4438 | if (trap_addr->va == NULL) { | |
4439 | DHD_ERROR(("%s: trap_addr->va is NULL\n", __FUNCTION__)); | |
4440 | return 0; | |
4441 | } | |
4442 | ||
4443 | OSL_CACHE_INV((void *)trap_addr->va, sizeof(uint32)); | |
4444 | data = *(uint32 *)(trap_addr->va); | |
4445 | ||
4446 | if (data & D2H_DEV_FWHALT) { | |
4447 | DHD_ERROR(("Firmware trapped and trap_data is 0x%04x\n", data)); | |
4448 | if (data & D2H_DEV_EXT_TRAP_DATA) | |
4449 | { | |
4450 | if (dhd->extended_trap_data) { | |
4451 | OSL_CACHE_INV((void *)trap_addr->va, | |
4452 | BCMPCIE_EXT_TRAP_DATA_MAXLEN); | |
4453 | memcpy(dhd->extended_trap_data, (uint32 *)trap_addr->va, | |
4454 | BCMPCIE_EXT_TRAP_DATA_MAXLEN); | |
4455 | } | |
4456 | DHD_ERROR(("Extended trap data available\n")); | |
4457 | } | |
4458 | return data; | |
4459 | } | |
4460 | return 0; | |
4461 | } | |
4462 | ||
4463 | /** called when DHD needs to check for 'ioctl complete' messages from the dongle */ | |
4464 | int BCMFASTPATH | |
4465 | dhd_prot_process_ctrlbuf(dhd_pub_t *dhd) | |
4466 | { | |
4467 | dhd_prot_t *prot = dhd->prot; | |
4468 | msgbuf_ring_t *ring = &prot->d2hring_ctrl_cpln; | |
4469 | ||
4470 | /* Process all the messages - DTOH direction */ | |
4471 | while (!dhd_is_device_removed(dhd)) { | |
4472 | uint8 *msg_addr; | |
4473 | uint32 msg_len; | |
4474 | ||
4475 | if (dhd->hang_was_sent) { | |
4476 | break; | |
4477 | } | |
4478 | ||
4479 | /* Get the address of the next message to be read from ring */ | |
4480 | msg_addr = dhd_prot_get_read_addr(dhd, ring, &msg_len); | |
4481 | if (msg_addr == NULL) { | |
4482 | break; | |
4483 | } | |
4484 | ||
4485 | /* Prefetch data to populate the cache */ | |
4486 | OSL_PREFETCH(msg_addr); | |
4487 | if (dhd_prot_process_msgtype(dhd, ring, msg_addr, msg_len) != BCME_OK) { | |
4488 | DHD_ERROR(("%s: process %s msg addr %p len %d\n", | |
4489 | __FUNCTION__, ring->name, msg_addr, msg_len)); | |
4490 | } | |
4491 | ||
4492 | /* Write to dngl rd ptr */ | |
4493 | dhd_prot_upd_read_idx(dhd, ring); | |
4494 | } | |
4495 | ||
4496 | return 0; | |
4497 | } | |
4498 | ||
4499 | /** | |
4500 | * Consume messages out of the D2H ring. Ensure that the message's DMA to host | |
4501 | * memory has completed, before invoking the message handler via a table lookup | |
4502 | * of the cmn_msg_hdr::msg_type. | |
4503 | */ | |
4504 | static int BCMFASTPATH | |
4505 | dhd_prot_process_msgtype(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint8 *buf, uint32 len) | |
4506 | { | |
4507 | uint32 buf_len = len; | |
4508 | uint16 item_len; | |
4509 | uint8 msg_type; | |
4510 | cmn_msg_hdr_t *msg = NULL; | |
4511 | int ret = BCME_OK; | |
4512 | ||
4513 | ASSERT(ring); | |
4514 | item_len = ring->item_len; | |
4515 | if (item_len == 0) { | |
4516 | DHD_ERROR(("%s: ringidx %d, item_len %d buf_len %d \n", | |
4517 | __FUNCTION__, ring->idx, item_len, buf_len)); | |
4518 | return BCME_ERROR; | |
4519 | } | |
4520 | ||
4521 | while (buf_len > 0) { | |
4522 | if (dhd->hang_was_sent) { | |
4523 | ret = BCME_ERROR; | |
4524 | goto done; | |
4525 | } | |
4526 | ||
4527 | msg = (cmn_msg_hdr_t *)buf; | |
4528 | ||
4529 | /* Wait until DMA completes, then fetch msg_type */ | |
4530 | msg_type = dhd->prot->d2h_sync_cb(dhd, ring, msg, item_len); | |
4531 | ||
4532 | /* | |
4533 | * Update the curr_rd to the current index in the ring, from where | |
4534 | * the work item is fetched. That way, if the fetched work item | |
4535 | * fails the LIVELOCK check, we can print the exact read index in | |
4536 | * the ring that points to the corrupted work item. | |
4537 | */ | |
4538 | if ((ring->curr_rd + 1) >= ring->max_items) { | |
4539 | ring->curr_rd = 0; | |
4540 | } else { | |
4541 | ring->curr_rd += 1; | |
4542 | } | |
4543 | ||
4544 | /* Prefetch data to populate the cache */ | |
4545 | OSL_PREFETCH(buf + item_len); | |
4546 | ||
4547 | DHD_INFO(("msg_type %d item_len %d buf_len %d\n", | |
4548 | msg_type, item_len, buf_len)); | |
4549 | ||
4550 | if (msg_type == MSG_TYPE_LOOPBACK) { | |
4551 | bcm_print_bytes("LPBK RESP: ", (uint8 *)msg, item_len); | |
4552 | DHD_ERROR((" MSG_TYPE_LOOPBACK, len %d\n", item_len)); | |
4553 | } | |
4554 | ||
4555 | ASSERT(msg_type < DHD_PROT_FUNCS); | |
4556 | if (msg_type >= DHD_PROT_FUNCS) { | |
4557 | DHD_ERROR(("%s: msg_type %d, item_len %d buf_len %d\n", | |
4558 | __FUNCTION__, msg_type, item_len, buf_len)); | |
4559 | ret = BCME_ERROR; | |
4560 | goto done; | |
4561 | } | |
4562 | ||
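| /* Dispatch to the per-message-type handler table defined earlier in this | |
| * file (e.g. MSG_TYPE_TX_STATUS maps to dhd_prot_txstatus_process). | |
| */ | |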
4563 | if (table_lookup[msg_type]) { | |
4564 | table_lookup[msg_type](dhd, buf); | |
4565 | } | |
4566 | ||
4567 | if (buf_len < item_len) { | |
4568 | ret = BCME_ERROR; | |
4569 | goto done; | |
4570 | } | |
4571 | buf_len = buf_len - item_len; | |
4572 | buf = buf + item_len; | |
4573 | } | |
4574 | ||
4575 | done: | |
4576 | ||
4577 | #ifdef DHD_RX_CHAINING | |
4578 | dhd_rxchain_commit(dhd); | |
4579 | #endif | |
4580 | ||
4581 | return ret; | |
4582 | } /* dhd_prot_process_msgtype */ | |
4583 | ||
4584 | static void | |
4585 | dhd_prot_noop(dhd_pub_t *dhd, void *msg) | |
4586 | { | |
4587 | return; | |
4588 | } | |
4589 | ||
4590 | /** called on MSG_TYPE_RING_STATUS message received from dongle */ | |
4591 | static void | |
4592 | dhd_prot_ringstatus_process(dhd_pub_t *dhd, void *msg) | |
4593 | { | |
4594 | pcie_ring_status_t *ring_status = (pcie_ring_status_t *) msg; | |
4595 | uint32 request_id = ltoh32(ring_status->cmn_hdr.request_id); | |
4596 | uint16 status = ltoh16(ring_status->compl_hdr.status); | |
4597 | uint16 ring_id = ltoh16(ring_status->compl_hdr.flow_ring_id); | |
4598 | ||
4599 | DHD_ERROR(("ring status: request_id %d, status 0x%04x, flow ring %d, write_idx %d \n", | |
4600 | request_id, status, ring_id, ltoh16(ring_status->write_idx))); | |
4601 | ||
4602 | if (ltoh16(ring_status->compl_hdr.ring_id) != BCMPCIE_H2D_MSGRING_CONTROL_SUBMIT) | |
4603 | return; | |
4604 | if (status == BCMPCIE_BAD_PHASE) { | |
4605 | /* bad phase reported by the dongle */ | |
4606 | DHD_ERROR(("Bad phase\n")); | |
4607 | } | |
4608 | if (status != BCMPCIE_BADOPTION) | |
4609 | return; | |
4610 | ||
4611 | if (request_id == DHD_H2D_DBGRING_REQ_PKTID) { | |
4612 | if (dhd->prot->h2dring_info_subn != NULL) { | |
4613 | if (dhd->prot->h2dring_info_subn->create_pending == TRUE) { | |
4614 | DHD_ERROR(("H2D ring create failed for info ring\n")); | |
4615 | dhd->prot->h2dring_info_subn->create_pending = FALSE; | |
4616 | } | |
4617 | else | |
4618 | DHD_ERROR(("ring create ID for a ring, create not pending\n")); | |
4619 | } else { | |
4620 | DHD_ERROR(("%s info submit ring doesn't exist\n", __FUNCTION__)); | |
4621 | } | |
4622 | } | |
4623 | else if (request_id == DHD_D2H_DBGRING_REQ_PKTID) { | |
4624 | if (dhd->prot->d2hring_info_cpln != NULL) { | |
4625 | if (dhd->prot->d2hring_info_cpln->create_pending == TRUE) { | |
4626 | DHD_ERROR(("D2H ring create failed for info ring\n")); | |
4627 | dhd->prot->d2hring_info_cpln->create_pending = FALSE; | |
4628 | } | |
4629 | else | |
4630 | DHD_ERROR(("ring create ID for info ring, create not pending\n")); | |
4631 | } else { | |
4632 | DHD_ERROR(("%s info cpl ring doesn't exist\n", __FUNCTION__)); | |
4633 | } | |
4634 | } | |
4635 | else { | |
4636 | DHD_ERROR(("don;t know how to pair with original request\n")); | |
4637 | } | |
4638 | /* How do we track this to pair it with ??? */ | |
4639 | return; | |
4640 | } | |
4641 | ||
4642 | /** called on MSG_TYPE_GEN_STATUS ('general status') message received from dongle */ | |
4643 | static void | |
4644 | dhd_prot_genstatus_process(dhd_pub_t *dhd, void *msg) | |
4645 | { | |
4646 | pcie_gen_status_t *gen_status = (pcie_gen_status_t *)msg; | |
4647 | DHD_ERROR(("ERROR: gen status: request_id %d, STATUS 0x%04x, flow ring %d \n", | |
4648 | gen_status->cmn_hdr.request_id, gen_status->compl_hdr.status, | |
4649 | gen_status->compl_hdr.flow_ring_id)); | |
4650 | ||
4651 | /* How do we track this to pair it with ??? */ | |
4652 | return; | |
4653 | } | |
4654 | ||
4655 | /** | |
4656 | * Called on MSG_TYPE_IOCTLPTR_REQ_ACK ('ioctl ack') message received from dongle, meaning that the | |
4657 | * dongle received the ioctl message in dongle memory. | |
4658 | */ | |
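| /* The ioctl handshake is two-phase: this ACK handler clears | |
| * MSGBUF_IOCTL_ACK_PENDING, while MSGBUF_IOCTL_RESP_PENDING remains set | |
| * until the matching MSG_TYPE_IOCTL_CMPLT arrives (dhd_prot_ioctcmplt_process). | |
| */ | |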
4659 | static void | |
4660 | dhd_prot_ioctack_process(dhd_pub_t *dhd, void *msg) | |
4661 | { | |
4662 | ioctl_req_ack_msg_t *ioct_ack = (ioctl_req_ack_msg_t *)msg; | |
4663 | unsigned long flags; | |
4664 | #ifdef DHD_PKTID_AUDIT_RING | |
4665 | uint32 pktid = ltoh32(ioct_ack->cmn_hdr.request_id); | |
4666 | ||
4667 | /* Skip audit for DHD_IOCTL_REQ_PKTID = 0xFFFE */ | |
4668 | if (pktid != DHD_IOCTL_REQ_PKTID) { | |
4669 | #ifndef IOCTLRESP_USE_CONSTMEM | |
4670 | DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, | |
4671 | DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); | |
4672 | #else | |
4673 | DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_map_handle_ioctl, pktid, | |
4674 | DHD_TEST_IS_ALLOC, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); | |
4675 | #endif /* !IOCTLRESP_USE_CONSTMEM */ | |
4676 | } | |
4677 | #endif /* DHD_PKTID_AUDIT_RING */ | |
4678 | ||
4679 | DHD_GENERAL_LOCK(dhd, flags); | |
4680 | if ((dhd->prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) && | |
4681 | (dhd->prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { | |
4682 | dhd->prot->ioctl_state &= ~MSGBUF_IOCTL_ACK_PENDING; | |
4683 | } else { | |
4684 | DHD_ERROR(("%s: received ioctl ACK with state %02x trans_id = %d\n", | |
4685 | __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); | |
4686 | prhex("dhd_prot_ioctack_process:", | |
4687 | (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); | |
4688 | } | |
4689 | DHD_GENERAL_UNLOCK(dhd, flags); | |
4690 | ||
4691 | DHD_CTL(("ioctl req ack: request_id %d, status 0x%04x, flow ring %d \n", | |
4692 | ioct_ack->cmn_hdr.request_id, ioct_ack->compl_hdr.status, | |
4693 | ioct_ack->compl_hdr.flow_ring_id)); | |
4694 | if (ioct_ack->compl_hdr.status != 0) { | |
4695 | DHD_ERROR(("got an error status for the ioctl request...need to handle that\n")); | |
4696 | } | |
4697 | #ifdef REPORT_FATAL_TIMEOUTS | |
4698 | else { | |
4699 | dhd_stop_bus_timer(dhd); | |
4700 | } | |
4701 | #endif /* REPORT_FATAL_TIMEOUTS */ | |
4702 | } | |
4703 | ||
4704 | /** called on MSG_TYPE_IOCTL_CMPLT message received from dongle */ | |
4705 | static void | |
4706 | dhd_prot_ioctcmplt_process(dhd_pub_t *dhd, void *msg) | |
4707 | { | |
4708 | dhd_prot_t *prot = dhd->prot; | |
4709 | uint32 pkt_id, xt_id; | |
4710 | ioctl_comp_resp_msg_t *ioct_resp = (ioctl_comp_resp_msg_t *)msg; | |
4711 | void *pkt; | |
4712 | unsigned long flags; | |
4713 | dhd_dma_buf_t retbuf; | |
4714 | #ifdef REPORT_FATAL_TIMEOUTS | |
4715 | uint16 dhd_xt_id; | |
4716 | #endif | |
4717 | ||
4718 | memset(&retbuf, 0, sizeof(dhd_dma_buf_t)); | |
4719 | ||
4720 | pkt_id = ltoh32(ioct_resp->cmn_hdr.request_id); | |
4721 | ||
4722 | #ifdef DHD_PKTID_AUDIT_RING | |
4723 | #ifndef IOCTLRESP_USE_CONSTMEM | |
4724 | DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_ctrl_map, pkt_id, | |
4725 | DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); | |
4726 | #else | |
4727 | DHD_PKTID_AUDIT_RING_DEBUG(dhd, prot->pktid_map_handle_ioctl, pkt_id, | |
4728 | DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); | |
4729 | #endif /* !IOCTLRESP_USE_CONSTMEM */ | |
4730 | #endif /* DHD_PKTID_AUDIT_RING */ | |
4731 | ||
4732 | DHD_GENERAL_LOCK(dhd, flags); | |
4733 | if ((prot->ioctl_state & MSGBUF_IOCTL_ACK_PENDING) || | |
4734 | !(prot->ioctl_state & MSGBUF_IOCTL_RESP_PENDING)) { | |
4735 | DHD_ERROR(("%s: received ioctl response with state %02x trans_id = %d\n", | |
4736 | __FUNCTION__, dhd->prot->ioctl_state, dhd->prot->ioctl_trans_id)); | |
4737 | prhex("dhd_prot_ioctcmplt_process:", | |
4738 | (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); | |
4739 | DHD_GENERAL_UNLOCK(dhd, flags); | |
4740 | return; | |
4741 | } | |
4742 | ||
4743 | /* Clear Response pending bit */ | |
4744 | prot->ioctl_state &= ~MSGBUF_IOCTL_RESP_PENDING; | |
4745 | ||
4746 | #ifndef IOCTLRESP_USE_CONSTMEM | |
4747 | pkt = dhd_prot_packet_get(dhd, pkt_id, PKTTYPE_IOCTL_RX, TRUE); | |
4748 | #else | |
4749 | dhd_prot_ioctl_ret_buffer_get(dhd, pkt_id, &retbuf); | |
4750 | pkt = retbuf.va; | |
4751 | #endif /* !IOCTLRESP_USE_CONSTMEM */ | |
4752 | if (!pkt) { | |
4753 | DHD_GENERAL_UNLOCK(dhd, flags); | |
4754 | DHD_ERROR(("%s: received ioctl response with NULL pkt\n", __FUNCTION__)); | |
4755 | prhex("dhd_prot_ioctcmplt_process:", | |
4756 | (uchar *)msg, D2HRING_CTRL_CMPLT_ITEMSIZE); | |
4757 | return; | |
4758 | } | |
4759 | DHD_GENERAL_UNLOCK(dhd, flags); | |
4760 | ||
4761 | prot->ioctl_resplen = ltoh16(ioct_resp->resp_len); | |
4762 | prot->ioctl_status = ltoh16(ioct_resp->compl_hdr.status); | |
4763 | xt_id = ltoh16(ioct_resp->trans_id); | |
4764 | ||
4765 | if (xt_id != prot->ioctl_trans_id || prot->curr_ioctl_cmd != ioct_resp->cmd) { | |
4766 | DHD_ERROR(("%s: transaction id(%d %d) or cmd(%d %d) mismatch\n", | |
4767 | __FUNCTION__, xt_id, prot->ioctl_trans_id, | |
4768 | prot->curr_ioctl_cmd, ioct_resp->cmd)); | |
4769 | #ifdef REPORT_FATAL_TIMEOUTS | |
4770 | dhd_stop_cmd_timer(dhd); | |
4771 | #endif /* REPORT_FATAL_TIMEOUTS */ | |
4772 | dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_ERROR); | |
4773 | dhd_prot_debug_info_print(dhd); | |
4774 | #ifdef DHD_FW_COREDUMP | |
4775 | if (dhd->memdump_enabled) { | |
4776 | /* collect core dump */ | |
4777 | dhd->memdump_type = DUMP_TYPE_TRANS_ID_MISMATCH; | |
4778 | dhd_bus_mem_dump(dhd); | |
4779 | } | |
4780 | #else | |
4781 | ASSERT(0); | |
4782 | #endif /* DHD_FW_COREDUMP */ | |
4783 | dhd_schedule_reset(dhd); | |
4784 | goto exit; | |
4785 | } | |
4786 | #ifdef REPORT_FATAL_TIMEOUTS | |
4787 | dhd_xt_id = dhd_get_request_id(dhd); | |
4788 | if (xt_id == dhd_xt_id) { | |
4789 | dhd_stop_cmd_timer(dhd); | |
4790 | } else { | |
4791 | DHD_ERROR(("%s: Cmd timer not stopped received xt_id %d stored xt_id %d", | |
4792 | __FUNCTION__, xt_id, dhd_xt_id)); | |
4793 | } | |
4794 | #endif /* REPORT_FATAL_TIMEOUTS */ | |
4795 | DHD_CTL(("IOCTL_COMPLETE: req_id %x transid %d status %x resplen %d\n", | |
4796 | pkt_id, xt_id, prot->ioctl_status, prot->ioctl_resplen)); | |
4797 | ||
4798 | if (prot->ioctl_resplen > 0) { | |
4799 | #ifndef IOCTLRESP_USE_CONSTMEM | |
4800 | bcopy(PKTDATA(dhd->osh, pkt), prot->retbuf.va, prot->ioctl_resplen); | |
4801 | #else | |
4802 | bcopy(pkt, prot->retbuf.va, prot->ioctl_resplen); | |
4803 | #endif /* !IOCTLRESP_USE_CONSTMEM */ | |
4804 | } | |
4805 | ||
4806 | /* wake up any dhd_os_ioctl_resp_wait() */ | |
4807 | dhd_wakeup_ioctl_event(dhd, IOCTL_RETURN_ON_SUCCESS); | |
4808 | ||
4809 | exit: | |
4810 | #ifndef IOCTLRESP_USE_CONSTMEM | |
4811 | dhd_prot_packet_free(dhd, pkt, | |
4812 | PKTTYPE_IOCTL_RX, FALSE); | |
4813 | #else | |
4814 | free_ioctl_return_buffer(dhd, &retbuf); | |
4815 | #endif /* !IOCTLRESP_USE_CONSTMEM */ | |
4816 | ||
4817 | /* Post another ioctl buf to the device */ | |
4818 | if (prot->cur_ioctlresp_bufs_posted > 0) { | |
4819 | prot->cur_ioctlresp_bufs_posted--; | |
4820 | } | |
4821 | ||
4822 | dhd_msgbuf_rxbuf_post_ioctlresp_bufs(dhd); | |
4823 | } | |
4824 | ||
4825 | /** called on MSG_TYPE_TX_STATUS message received from dongle */ | |
4826 | static void BCMFASTPATH | |
4827 | dhd_prot_txstatus_process(dhd_pub_t *dhd, void *msg) | |
4828 | { | |
4829 | dhd_prot_t *prot = dhd->prot; | |
4830 | host_txbuf_cmpl_t * txstatus; | |
4831 | unsigned long flags; | |
4832 | uint32 pktid; | |
4833 | void *pkt; | |
4834 | dmaaddr_t pa; | |
4835 | uint32 len; | |
4836 | void *dmah; | |
4837 | void *secdma; | |
4838 | bool pkt_fate; | |
4839 | #ifdef DEVICE_TX_STUCK_DETECT | |
4840 | flow_ring_node_t *flow_ring_node; | |
4841 | uint16 flowid; | |
4842 | #endif /* DEVICE_TX_STUCK_DETECT */ | |
4843 | ||
4844 | ||
4845 | txstatus = (host_txbuf_cmpl_t *)msg; | |
4846 | #ifdef DEVICE_TX_STUCK_DETECT | |
4847 | flowid = txstatus->compl_hdr.flow_ring_id; | |
4848 | flow_ring_node = DHD_FLOW_RING(dhd, flowid); | |
4849 | /** | |
4850 | * Since we got a completion message on this flowid, | |
4851 | * update tx_cmpl time stamp | |
4852 | */ | |
4853 | flow_ring_node->tx_cmpl = OSL_SYSUPTIME(); | |
4854 | #endif /* DEVICE_TX_STUCK_DETECT */ | |
4855 | ||
4856 | /* locks required to protect circular buffer accesses */ | |
4857 | DHD_GENERAL_LOCK(dhd, flags); | |
4858 | pktid = ltoh32(txstatus->cmn_hdr.request_id); | |
4859 | pkt_fate = TRUE; | |
4860 | ||
4861 | #ifdef DHD_PKTID_AUDIT_RING | |
4862 | DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_tx_map, pktid, | |
4863 | DHD_DUPLICATE_FREE, msg, D2HRING_TXCMPLT_ITEMSIZE); | |
4864 | #endif /* DHD_PKTID_AUDIT_RING */ | |
4865 | ||
4866 | DHD_INFO(("txstatus for pktid 0x%04x\n", pktid)); | |
4867 | if (prot->active_tx_count) { | |
4868 | prot->active_tx_count--; | |
4869 | ||
4870 | /* Release the wake lock when no more tx packets are pending */ | |
4871 | if (prot->active_tx_count == 0) | |
4872 | DHD_TXFL_WAKE_UNLOCK(dhd); | |
4873 | } else { | |
4874 | DHD_ERROR(("Extra packets are freed\n")); | |
4875 | } | |
4876 | ||
4877 | ASSERT(pktid != 0); | |
4878 | #if defined(DHD_LB_TXC) && !defined(BCM_SECURE_DMA) | |
4879 | { | |
4880 | int elem_ix; | |
4881 | void **elem; | |
4882 | bcm_workq_t *workq; | |
4883 | dmaaddr_t pa; | |
4884 | uint32 pa_len; | |
4885 | ||
4886 | pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, | |
4887 | pktid, pa, pa_len, dmah, secdma, PKTTYPE_DATA_TX); | |
4888 | ||
4889 | workq = &prot->tx_compl_prod; | |
4890 | /* | |
4891 | * Produce the packet into the tx_compl workq for the tx compl tasklet | |
4892 | * to consume. | |
4893 | */ | |
4894 | OSL_PREFETCH(PKTTAG(pkt)); | |
4895 | ||
4896 | /* fetch next available slot in workq */ | |
4897 | elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); | |
4898 | ||
4899 | DHD_PKTTAG_SET_PA((dhd_pkttag_fr_t *)PKTTAG(pkt), pa); | |
4900 | DHD_PKTTAG_SET_PA_LEN((dhd_pkttag_fr_t *)PKTTAG(pkt), pa_len); | |
4901 | ||
4902 | if (elem_ix == BCM_RING_FULL) { | |
4903 | DHD_ERROR(("tx_compl_prod BCM_RING_FULL\n")); | |
4904 | goto workq_ring_full; | |
4905 | } | |
4906 | ||
4907 | elem = WORKQ_ELEMENT(void *, &prot->tx_compl_prod, elem_ix); | |
4908 | *elem = pkt; | |
4909 | ||
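| /* Make the element write above visible before the producer index is | |
| * published to the consumer. | |
| */ | |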
4910 | smp_wmb(); | |
4911 | ||
4912 | /* Sync WR index to consumer if the SYNC threshold has been reached */ | |
4913 | if (++prot->tx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { | |
4914 | bcm_workq_prod_sync(workq); | |
4915 | prot->tx_compl_prod_sync = 0; | |
4916 | } | |
4917 | ||
4918 | DHD_INFO(("%s: tx_compl_prod pkt<%p> sync<%d>\n", | |
4919 | __FUNCTION__, pkt, prot->tx_compl_prod_sync)); | |
4920 | ||
4921 | DHD_GENERAL_UNLOCK(dhd, flags); | |
4922 | ||
4923 | return; | |
4924 | } | |
4925 | ||
4926 | workq_ring_full: | |
4927 | ||
4928 | #endif /* !DHD_LB_TXC */ | |
4929 | ||
4930 | pkt = DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, | |
4931 | pa, len, dmah, secdma, PKTTYPE_DATA_TX); | |
4932 | ||
4933 | if (pkt) { | |
4934 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
4935 | int offset = 0; | |
4936 | BCM_REFERENCE(offset); | |
4937 | ||
4938 | if (dhd->prot->tx_metadata_offset) | |
4939 | offset = dhd->prot->tx_metadata_offset + ETHER_HDR_LEN; | |
4940 | SECURE_DMA_UNMAP(dhd->osh, (uint) pa, | |
4941 | (uint) dhd->prot->tx_metadata_offset, DMA_RX, 0, dmah, | |
4942 | secdma, offset); | |
4943 | } else | |
4944 | DMA_UNMAP(dhd->osh, pa, (uint) len, DMA_RX, 0, dmah); | |
4945 | #ifdef DBG_PKT_MON | |
4946 | if (dhd->d11_tx_status) { | |
4947 | uint16 tx_status; | |
4948 | ||
4949 | tx_status = ltoh16(txstatus->compl_hdr.status) & | |
4950 | WLFC_CTL_PKTFLAG_MASK; | |
4951 | pkt_fate = (tx_status == WLFC_CTL_PKTFLAG_DISCARD) ? TRUE : FALSE; | |
4952 | ||
4953 | DHD_DBG_PKT_MON_TX_STATUS(dhd, pkt, pktid, tx_status); | |
4954 | } | |
4955 | #endif /* DBG_PKT_MON */ | |
4956 | ||
4957 | #if defined(BCMPCIE) | |
4958 | dhd_txcomplete(dhd, pkt, pkt_fate); | |
4959 | #endif | |
4960 | ||
4961 | #if DHD_DBG_SHOW_METADATA | |
4962 | if (dhd->prot->metadata_dbg && | |
4963 | dhd->prot->tx_metadata_offset && txstatus->metadata_len) { | |
4964 | uchar *ptr; | |
4965 | /* The Ethernet header of TX frame was copied and removed. | |
4966 | * Here, move the data pointer forward by Ethernet header size. | |
4967 | */ | |
4968 | PKTPULL(dhd->osh, pkt, ETHER_HDR_LEN); | |
4969 | ptr = PKTDATA(dhd->osh, pkt) - (dhd->prot->tx_metadata_offset); | |
4970 | bcm_print_bytes("txmetadata", ptr, txstatus->metadata_len); | |
4971 | dhd_prot_print_metadata(dhd, ptr, txstatus->metadata_len); | |
4972 | } | |
4973 | #endif /* DHD_DBG_SHOW_METADATA */ | |
4974 | DHD_GENERAL_UNLOCK(dhd, flags); | |
4975 | PKTFREE(dhd->osh, pkt, TRUE); | |
4976 | DHD_GENERAL_LOCK(dhd, flags); | |
4977 | DHD_FLOWRING_TXSTATUS_CNT_UPDATE(dhd->bus, txstatus->compl_hdr.flow_ring_id, | |
4978 | txstatus->tx_status); | |
4979 | ||
4980 | #ifdef DHD_TIMESYNC | |
4981 | if (dhd->prot->tx_ts_log_enabled) { | |
4982 | ts_timestamp_t *ts = (ts_timestamp_t *)&(txstatus->ts); | |
4983 | dhd_timesync_log_tx_timestamp(dhd->ts, | |
4984 | txstatus->compl_hdr.flow_ring_id, | |
4985 | txstatus->cmn_hdr.if_id, | |
4986 | ts->low, ts->high); | |
4987 | } | |
4988 | #endif /* DHD_TIMESYNC */ | |
4989 | } | |
4990 | ||
4991 | DHD_GENERAL_UNLOCK(dhd, flags); | |
4992 | ||
4993 | return; | |
4994 | } /* dhd_prot_txstatus_process */ | |
4995 | ||
4996 | /** called on MSG_TYPE_WL_EVENT message received from dongle */ | |
4997 | static void | |
4998 | dhd_prot_event_process(dhd_pub_t *dhd, void *msg) | |
4999 | { | |
5000 | wlevent_req_msg_t *evnt; | |
5001 | uint32 bufid; | |
5002 | uint16 buflen; | |
5003 | int ifidx = 0; | |
5004 | void* pkt; | |
5005 | unsigned long flags; | |
5006 | dhd_prot_t *prot = dhd->prot; | |
5007 | ||
5008 | #ifdef DHD_WAKE_STATUS | |
5009 | int pkt_wake = bcmpcie_set_get_wake(dhd->bus, 0); | |
5010 | #endif | |
5011 | ||
5012 | /* Event complete header */ | |
5013 | evnt = (wlevent_req_msg_t *)msg; | |
5014 | bufid = ltoh32(evnt->cmn_hdr.request_id); | |
5015 | ||
5016 | #ifdef DHD_PKTID_AUDIT_RING | |
5017 | DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, bufid, | |
5018 | DHD_DUPLICATE_FREE, msg, D2HRING_CTRL_CMPLT_ITEMSIZE); | |
5019 | #endif /* DHD_PKTID_AUDIT_RING */ | |
5020 | ||
5021 | buflen = ltoh16(evnt->event_data_len); | |
5022 | ||
5023 | ifidx = BCMMSGBUF_API_IFIDX(&evnt->cmn_hdr); | |
5024 | ||
5025 | /* Post another rxbuf to the device */ | |
5026 | if (prot->cur_event_bufs_posted) | |
5027 | prot->cur_event_bufs_posted--; | |
5028 | dhd_msgbuf_rxbuf_post_event_bufs(dhd); | |
5029 | ||
5030 | /* locks required to protect pktid_map */ | |
5031 | DHD_GENERAL_LOCK(dhd, flags); | |
5032 | pkt = dhd_prot_packet_get(dhd, bufid, PKTTYPE_EVENT_RX, TRUE); | |
5033 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5034 | ||
5035 | if (!pkt) { | |
5036 | DHD_ERROR(("%s: pkt is NULL for pktid %d\n", __FUNCTION__, bufid)); | |
5037 | return; | |
5038 | } | |
5039 | ||
5040 | /* DMA RX offset updated through shared area */ | |
5041 | if (dhd->prot->rx_dataoffset) | |
5042 | PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); | |
5043 | ||
5044 | PKTSETLEN(dhd->osh, pkt, buflen); | |
5045 | ||
5046 | #ifdef DHD_WAKE_STATUS | |
5047 | dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1, pkt_wake); | |
5048 | #else | |
5049 | dhd_bus_rx_frame(dhd->bus, pkt, ifidx, 1); | |
5050 | #endif /* DHD_WAKE_STATUS */ | |
5051 | } | |
5052 | ||
5053 | /** called on MSG_TYPE_INFO_BUF_CMPLT message received from dongle */ | |
5054 | static void BCMFASTPATH | |
5055 | dhd_prot_process_infobuf_complete(dhd_pub_t *dhd, void* buf) | |
5056 | { | |
5057 | info_buf_resp_t *resp; | |
5058 | uint32 pktid; | |
5059 | uint16 buflen; | |
5060 | void * pkt; | |
5061 | unsigned long flags; | |
5062 | ||
5063 | #ifdef DHD_WAKE_STATUS | |
5064 | int pkt_wake = bcmpcie_set_get_wake(dhd->bus, 0); | |
5065 | #endif | |
5066 | resp = (info_buf_resp_t *)buf; | |
5067 | pktid = ltoh32(resp->cmn_hdr.request_id); | |
5068 | buflen = ltoh16(resp->info_data_len); | |
5069 | ||
5070 | #ifdef DHD_PKTID_AUDIT_RING | |
5071 | DHD_PKTID_AUDIT_RING_DEBUG(dhd, dhd->prot->pktid_ctrl_map, pktid, | |
5072 | DHD_DUPLICATE_FREE, buf, D2HRING_INFO_BUFCMPLT_ITEMSIZE); | |
5073 | #endif /* DHD_PKTID_AUDIT_RING */ | |
5074 | ||
5075 | DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d, rx_dataoffset %d\n", | |
5076 | pktid, buflen, resp->cmn_hdr.flags, ltoh16(resp->seqnum), | |
5077 | dhd->prot->rx_dataoffset)); | |
5078 | ||
5079 | if (!dhd->prot->infobufpost) { | |
5080 | DHD_ERROR(("infobuf posted are zero, but there is a completion\n")); | |
5081 | return; | |
5082 | } | |
5083 | ||
5084 | dhd->prot->infobufpost--; | |
5085 | dhd_prot_infobufpost(dhd); | |
5086 | ||
5087 | DHD_GENERAL_LOCK(dhd, flags); | |
5088 | pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_INFO_RX, TRUE); | |
5089 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5090 | ||
5091 | if (!pkt) | |
5092 | return; | |
5093 | ||
5094 | /* DMA RX offset updated through shared area */ | |
5095 | if (dhd->prot->rx_dataoffset) | |
5096 | PKTPULL(dhd->osh, pkt, dhd->prot->rx_dataoffset); | |
5097 | ||
5098 | PKTSETLEN(dhd->osh, pkt, buflen); | |
5099 | ||
5100 | /* info ring "debug" data, which is not a 802.3 frame, is sent/hacked with a | |
5101 | * special ifidx of -1. This is just internal to dhd to get the data to | |
5102 | * dhd_linux.c:dhd_rx_frame() from here (dhd_prot_infobuf_cmplt_process). | |
5103 | */ | |
5104 | #ifdef DHD_WAKE_STATUS | |
5105 | dhd_bus_rx_frame(dhd->bus, pkt, DHD_EVENT_IF /* ifidx HACK */, 1, pkt_wake); | |
5106 | #else | |
5107 | dhd_bus_rx_frame(dhd->bus, pkt, DHD_EVENT_IF /* ifidx HACK */, 1); | |
5108 | #endif /* DHD_WAKE_STATUS */ | |
5109 | } | |
5110 | ||
5111 | /** Stop protocol: sync w/dongle state. */ | |
5112 | void dhd_prot_stop(dhd_pub_t *dhd) | |
5113 | { | |
5114 | ASSERT(dhd); | |
5115 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
5116 | ||
5117 | } | |
5118 | ||
5119 | /* Add any protocol-specific data header. | |
5120 | * Caller must reserve prot_hdrlen prepend space. | |
5121 | */ | |
5122 | void BCMFASTPATH | |
5123 | dhd_prot_hdrpush(dhd_pub_t *dhd, int ifidx, void *PKTBUF) | |
5124 | { | |
5125 | return; | |
5126 | } | |
5127 | ||
5128 | uint | |
5129 | dhd_prot_hdrlen(dhd_pub_t *dhd, void *PKTBUF) | |
5130 | { | |
5131 | return 0; | |
5132 | } | |
5133 | ||
5134 | ||
5135 | #define PKTBUF pktbuf | |
5136 | ||
5137 | /** | |
5138 | * Called when a tx ethernet packet has been dequeued from a flow queue, and has to be inserted in | |
5139 | * the corresponding flow ring. | |
5140 | */ | |
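| /* Overview of the txpost sequence implemented below: (1) reserve a unique | |
| * pktid for the packet, (2) claim a work-item slot in the flow ring, | |
| * (3) copy the Ethernet header into the descriptor and DMA-map the payload, | |
| * (4) fill in the tx descriptor (and optional metadata buffer), and | |
| * (5) update the WR index and ring the doorbell (batched under | |
| * TXP_FLUSH_NITEMS). | |
| */ | |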
5141 | int BCMFASTPATH | |
5142 | dhd_prot_txdata(dhd_pub_t *dhd, void *PKTBUF, uint8 ifidx) | |
5143 | { | |
5144 | unsigned long flags; | |
5145 | dhd_prot_t *prot = dhd->prot; | |
5146 | host_txbuf_post_t *txdesc = NULL; | |
5147 | dmaaddr_t pa, meta_pa; | |
5148 | uint8 *pktdata; | |
5149 | uint32 pktlen; | |
5150 | uint32 pktid; | |
5151 | uint8 prio; | |
5152 | uint16 flowid = 0; | |
5153 | uint16 alloced = 0; | |
5154 | uint16 headroom; | |
5155 | msgbuf_ring_t *ring; | |
5156 | flow_ring_table_t *flow_ring_table; | |
5157 | flow_ring_node_t *flow_ring_node; | |
5158 | ||
5159 | if (dhd->flow_ring_table == NULL) { | |
5160 | return BCME_NORESOURCE; | |
5161 | } | |
5162 | ||
5163 | flowid = DHD_PKT_GET_FLOWID(PKTBUF); | |
5164 | flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; | |
5165 | flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; | |
5166 | ||
5167 | ring = (msgbuf_ring_t *)flow_ring_node->prot_info; | |
5168 | ||
5169 | #ifdef PCIE_INB_DW | |
5170 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
5171 | return BCME_ERROR; | |
5172 | #endif /* PCIE_INB_DW */ | |
5173 | ||
5174 | DHD_GENERAL_LOCK(dhd, flags); | |
5175 | ||
5176 | /* Create a unique 32-bit packet id */ | |
5177 | pktid = DHD_NATIVE_TO_PKTID_RSV(dhd, dhd->prot->pktid_tx_map, | |
5178 | PKTBUF, PKTTYPE_DATA_TX); | |
5179 | #if defined(DHD_PCIE_PKTID) | |
5180 | if (pktid == DHD_PKTID_INVALID) { | |
5181 | DHD_ERROR(("%s: Pktid pool depleted.\n", __FUNCTION__)); | |
5182 | /* | |
5183 | * If we return an error here, the caller will requeue the packet. | |
5184 | * Since the original SKB has not been freed yet, the caller can | |
5185 | * safely requeue the same packet. | |
5187 | */ | |
5188 | goto err_no_res_pktfree; | |
5189 | } | |
5190 | #endif /* DHD_PCIE_PKTID */ | |
5191 | ||
5192 | /* Reserve space in the circular buffer */ | |
5193 | txdesc = (host_txbuf_post_t *) | |
5194 | dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); | |
5195 | if (txdesc == NULL) { | |
5196 | #if defined(DHD_PCIE_PKTID) | |
5197 | void *dmah; | |
5198 | void *secdma; | |
5199 | /* Free up the PKTID. physaddr and pktlen will be garbage. */ | |
5200 | DHD_PKTID_TO_NATIVE(dhd, dhd->prot->pktid_tx_map, pktid, | |
5201 | pa, pktlen, dmah, secdma, PKTTYPE_NO_CHECK); | |
5202 | #endif /* DHD_PCIE_PKTID */ | |
5203 | DHD_INFO(("%s:%d: HTOD Msgbuf Not available TxCount = %d\n", | |
5204 | __FUNCTION__, __LINE__, prot->active_tx_count)); | |
5205 | goto err_no_res_pktfree; | |
5206 | } | |
5207 | ||
5208 | #ifdef DBG_PKT_MON | |
5209 | DHD_DBG_PKT_MON_TX(dhd, PKTBUF, pktid); | |
5210 | #endif /* DBG_PKT_MON */ | |
5211 | ||
5212 | ||
5213 | /* Extract the data pointer and length information */ | |
5214 | pktdata = PKTDATA(dhd->osh, PKTBUF); | |
5215 | pktlen = PKTLEN(dhd->osh, PKTBUF); | |
5216 | ||
5217 | /* Ethernet header: Copy before we cache flush packet using DMA_MAP */ | |
5218 | bcopy(pktdata, txdesc->txhdr, ETHER_HDR_LEN); | |
5219 | ||
5220 | /* Extract the ethernet header and adjust the data pointer and length */ | |
5221 | pktdata = PKTPULL(dhd->osh, PKTBUF, ETHER_HDR_LEN); | |
5222 | pktlen -= ETHER_HDR_LEN; | |
5223 | ||
5224 | /* Map the data pointer to a DMA-able address */ | |
5225 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
5226 | int offset = 0; | |
5227 | BCM_REFERENCE(offset); | |
5228 | ||
5229 | if (prot->tx_metadata_offset) | |
5230 | offset = prot->tx_metadata_offset + ETHER_HDR_LEN; | |
5231 | ||
5232 | pa = SECURE_DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, | |
5233 | DMA_TX, PKTBUF, 0, ring->dma_buf.secdma, offset); | |
5234 | } | |
5235 | #ifndef BCM_SECURE_DMA | |
5236 | else | |
5237 | pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), pktlen, DMA_TX, PKTBUF, 0); | |
5238 | #endif /* #ifndef BCM_SECURE_DMA */ | |
5239 | ||
5240 | if (PHYSADDRISZERO(pa)) { | |
5241 | DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); | |
5242 | ASSERT(0); | |
5243 | } | |
5244 | ||
5245 | /* No need to lock. Save the rest of the packet's metadata */ | |
5246 | DHD_NATIVE_TO_PKTID_SAVE(dhd, dhd->prot->pktid_tx_map, PKTBUF, pktid, | |
5247 | pa, pktlen, DMA_TX, NULL, ring->dma_buf.secdma, PKTTYPE_DATA_TX); | |
5248 | ||
5249 | #ifdef TXP_FLUSH_NITEMS | |
5250 | if (ring->pend_items_count == 0) | |
5251 | ring->start_addr = (void *)txdesc; | |
5252 | ring->pend_items_count++; | |
5253 | #endif | |
5254 | ||
5255 | /* Form the Tx descriptor message buffer */ | |
5256 | ||
5257 | /* Common message hdr */ | |
5258 | txdesc->cmn_hdr.msg_type = MSG_TYPE_TX_POST; | |
5259 | txdesc->cmn_hdr.if_id = ifidx; | |
5260 | txdesc->cmn_hdr.flags = ring->current_phase; | |
5261 | ||
5262 | txdesc->flags = BCMPCIE_PKT_FLAGS_FRAME_802_3; | |
5263 | prio = (uint8)PKTPRIO(PKTBUF); | |
5264 | ||
5265 | ||
5266 | txdesc->flags |= (prio & 0x7) << BCMPCIE_PKT_FLAGS_PRIO_SHIFT; | |
5267 | txdesc->seg_cnt = 1; | |
5268 | ||
5269 | txdesc->data_len = htol16((uint16) pktlen); | |
5270 | txdesc->data_buf_addr.high_addr = htol32(PHYSADDRHI(pa)); | |
5271 | txdesc->data_buf_addr.low_addr = htol32(PHYSADDRLO(pa)); | |
5272 | ||
5273 | /* Move data pointer to keep ether header in local PKTBUF for later reference */ | |
5274 | PKTPUSH(dhd->osh, PKTBUF, ETHER_HDR_LEN); | |
5275 | ||
5276 | /* Handle Tx metadata */ | |
5277 | headroom = (uint16)PKTHEADROOM(dhd->osh, PKTBUF); | |
5278 | if (prot->tx_metadata_offset && (headroom < prot->tx_metadata_offset)) | |
5279 | DHD_ERROR(("No headroom for Metadata tx %d %d\n", | |
5280 | prot->tx_metadata_offset, headroom)); | |
5281 | ||
5282 | if (prot->tx_metadata_offset && (headroom >= prot->tx_metadata_offset)) { | |
5283 | DHD_TRACE(("Metadata in tx %d\n", prot->tx_metadata_offset)); | |
5284 | ||
5285 | /* Adjust the data pointer to account for meta data in DMA_MAP */ | |
5286 | PKTPUSH(dhd->osh, PKTBUF, prot->tx_metadata_offset); | |
5287 | ||
5288 | if (SECURE_DMA_ENAB(dhd->osh)) { | |
5289 | meta_pa = SECURE_DMA_MAP_TXMETA(dhd->osh, PKTDATA(dhd->osh, PKTBUF), | |
5290 | prot->tx_metadata_offset + ETHER_HDR_LEN, DMA_RX, PKTBUF, | |
5291 | 0, ring->dma_buf.secdma); | |
5292 | } | |
5293 | #ifndef BCM_SECURE_DMA | |
5294 | else | |
5295 | meta_pa = DMA_MAP(dhd->osh, PKTDATA(dhd->osh, PKTBUF), | |
5296 | prot->tx_metadata_offset, DMA_RX, PKTBUF, 0); | |
5297 | #endif /* #ifndef BCM_SECURE_DMA */ | |
5298 | ||
5299 | if (PHYSADDRISZERO(meta_pa)) { | |
5300 | DHD_ERROR(("Something really bad, unless 0 is a valid phyaddr\n")); | |
5301 | ASSERT(0); | |
5302 | } | |
5303 | ||
5304 | /* Adjust the data pointer back to original value */ | |
5305 | PKTPULL(dhd->osh, PKTBUF, prot->tx_metadata_offset); | |
5306 | ||
5307 | txdesc->metadata_buf_len = prot->tx_metadata_offset; | |
5308 | txdesc->metadata_buf_addr.high_addr = htol32(PHYSADDRHI(meta_pa)); | |
5309 | txdesc->metadata_buf_addr.low_addr = htol32(PHYSADDRLO(meta_pa)); | |
5310 | } else { | |
5311 | txdesc->metadata_buf_len = htol16(0); | |
5312 | txdesc->metadata_buf_addr.high_addr = 0; | |
5313 | txdesc->metadata_buf_addr.low_addr = 0; | |
5314 | } | |
5315 | ||
5316 | #ifdef DHD_PKTID_AUDIT_RING | |
5317 | DHD_PKTID_AUDIT(dhd, prot->pktid_tx_map, pktid, DHD_DUPLICATE_ALLOC); | |
5318 | #endif /* DHD_PKTID_AUDIT_RING */ | |
5319 | ||
5320 | txdesc->cmn_hdr.request_id = htol32(pktid); | |
5321 | ||
5322 | DHD_TRACE(("txpost: data_len %d, pktid 0x%04x\n", txdesc->data_len, | |
5323 | txdesc->cmn_hdr.request_id)); | |
5324 | ||
5325 | /* Update the write pointer in TCM & ring bell */ | |
5326 | #ifdef TXP_FLUSH_NITEMS | |
5327 | /* Flush if we have either hit the txp_threshold or if this msg is */ | |
5328 | /* occupying the last slot in the flow_ring - before wrap around. */ | |
5329 | if ((ring->pend_items_count == prot->txp_threshold) || | |
5330 | ((uint8 *) txdesc == (uint8 *) DHD_RING_END_VA(ring))) { | |
5331 | dhd_prot_txdata_write_flush(dhd, flowid, TRUE); | |
5332 | } | |
5333 | #else | |
5334 | /* update ring's WR index and ring doorbell to dongle */ | |
5335 | dhd_prot_ring_write_complete(dhd, ring, txdesc, 1); | |
5336 | #endif | |
5337 | ||
5338 | prot->active_tx_count++; | |
5339 | ||
5340 | /* | |
5341 | * Take a wake lock and do not sleep while at least one packet | |
5342 | * remains in flight. | |
5343 | */ | |
5344 | if (prot->active_tx_count >= 1) | |
5345 | DHD_TXFL_WAKE_LOCK_TIMEOUT(dhd, MAX_TX_TIMEOUT); | |
5346 | ||
5347 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5348 | ||
5349 | #ifdef PCIE_INB_DW | |
5350 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
5351 | #endif | |
5352 | ||
5353 | return BCME_OK; | |
5354 | ||
5355 | err_no_res_pktfree: | |
5356 | ||
5357 | ||
5358 | ||
5359 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5360 | #ifdef PCIE_INB_DW | |
5361 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
5362 | #endif | |
5363 | return BCME_NORESOURCE; | |
5364 | } /* dhd_prot_txdata */ | |
5365 | ||
5366 | /* called with a lock */ | |
5367 | /** optimization to write "n" tx items at a time to ring */ | |
5368 | void BCMFASTPATH | |
5369 | dhd_prot_txdata_write_flush(dhd_pub_t *dhd, uint16 flowid, bool in_lock) | |
5370 | { | |
5371 | #ifdef TXP_FLUSH_NITEMS | |
5372 | unsigned long flags = 0; | |
5373 | flow_ring_table_t *flow_ring_table; | |
5374 | flow_ring_node_t *flow_ring_node; | |
5375 | msgbuf_ring_t *ring; | |
5376 | ||
5377 | if (dhd->flow_ring_table == NULL) { | |
5378 | return; | |
5379 | } | |
5380 | ||
5381 | if (!in_lock) { | |
5382 | DHD_GENERAL_LOCK(dhd, flags); | |
5383 | } | |
5384 | ||
5385 | flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table; | |
5386 | flow_ring_node = (flow_ring_node_t *)&flow_ring_table[flowid]; | |
5387 | ring = (msgbuf_ring_t *)flow_ring_node->prot_info; | |
5388 | ||
5389 | if (ring->pend_items_count) { | |
5390 | /* update ring's WR index and ring doorbell to dongle */ | |
5391 | dhd_prot_ring_write_complete(dhd, ring, ring->start_addr, | |
5392 | ring->pend_items_count); | |
5393 | ring->pend_items_count = 0; | |
5394 | ring->start_addr = NULL; | |
5395 | } | |
5396 | ||
5397 | if (!in_lock) { | |
5398 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5399 | } | |
5400 | #endif /* TXP_FLUSH_NITEMS */ | |
5401 | } | |
5402 | ||
5403 | #undef PKTBUF /* Only defined in the above routine */ | |
5404 | ||
5405 | int BCMFASTPATH | |
5406 | dhd_prot_hdrpull(dhd_pub_t *dhd, int *ifidx, void *pkt, uchar *buf, uint *len) | |
5407 | { | |
5408 | return 0; | |
5409 | } | |
5410 | ||
5411 | /** post a set of receive buffers to the dongle */ | |
5412 | static void BCMFASTPATH | |
5413 | dhd_prot_return_rxbuf(dhd_pub_t *dhd, uint32 pktid, uint32 rxcnt) | |
5414 | { | |
5415 | dhd_prot_t *prot = dhd->prot; | |
5416 | #if defined(DHD_LB_RXC) | |
5417 | int elem_ix; | |
5418 | uint32 *elem; | |
5419 | bcm_workq_t *workq; | |
5420 | ||
5421 | workq = &prot->rx_compl_prod; | |
5422 | ||
5423 | /* Produce the work item */ | |
5424 | elem_ix = bcm_ring_prod(WORKQ_RING(workq), DHD_LB_WORKQ_SZ); | |
5425 | if (elem_ix == BCM_RING_FULL) { | |
5426 | DHD_ERROR(("%s LB RxCompl workQ is full\n", __FUNCTION__)); | |
5427 | ASSERT(0); | |
5428 | return; | |
5429 | } | |
5430 | ||
5431 | elem = WORKQ_ELEMENT(uint32, workq, elem_ix); | |
5432 | *elem = pktid; | |
5433 | ||
5434 | smp_wmb(); | |
5435 | ||
5436 | /* Sync WR index to consumer if the SYNC threshold has been reached */ | |
5437 | if (++prot->rx_compl_prod_sync >= DHD_LB_WORKQ_SYNC) { | |
5438 | bcm_workq_prod_sync(workq); | |
5439 | prot->rx_compl_prod_sync = 0; | |
5440 | } | |
5441 | ||
5442 | DHD_INFO(("%s: rx_compl_prod pktid<%u> sync<%d>\n", | |
5443 | __FUNCTION__, pktid, prot->rx_compl_prod_sync)); | |
5444 | ||
5445 | #endif /* DHD_LB_RXC */ | |
5446 | ||
5447 | if (prot->rxbufpost >= rxcnt) { | |
5448 | prot->rxbufpost -= (uint16)rxcnt; | |
5449 | } else { | |
5450 | /* ASSERT(0); */ | |
5451 | prot->rxbufpost = 0; | |
5452 | } | |
5453 | ||
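| /* Refill hysteresis: only repost once the posted count has dropped | |
| * RXBUFPOST_THRESHOLD below the max, rather than one buffer at a time. | |
| */ | |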
5454 | #if !defined(DHD_LB_RXC) | |
5455 | if (prot->rxbufpost <= (prot->max_rxbufpost - RXBUFPOST_THRESHOLD)) | |
5456 | dhd_msgbuf_rxbuf_post(dhd, FALSE); /* alloc pkt ids */ | |
5457 | #endif /* !DHD_LB_RXC */ | |
5458 | return; | |
5459 | } | |
5460 | ||
5461 | /* called before an ioctl is sent to the dongle */ | |
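| /* For the "pcie_bus_tput" iovar, the dongle needs a host DMA buffer to | |
| * measure bus throughput against; patch its address and length into the | |
| * request before it is sent. | |
| */ | |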
5462 | static void | |
5463 | dhd_prot_wlioctl_intercept(dhd_pub_t *dhd, wl_ioctl_t * ioc, void * buf) | |
5464 | { | |
5465 | dhd_prot_t *prot = dhd->prot; | |
5466 | ||
5467 | if (ioc->cmd == WLC_SET_VAR && buf != NULL && !strcmp(buf, "pcie_bus_tput")) { | |
5468 | int slen = 0; | |
5469 | pcie_bus_tput_params_t *tput_params; | |
5470 | ||
5471 | slen = strlen("pcie_bus_tput") + 1; | |
5472 | tput_params = (pcie_bus_tput_params_t*)((char *)buf + slen); | |
5473 | bcopy(&prot->host_bus_throughput_buf.pa, &tput_params->host_buf_addr, | |
5474 | sizeof(tput_params->host_buf_addr)); | |
5475 | tput_params->host_buf_len = DHD_BUS_TPUT_BUF_LEN; | |
5476 | } | |
5477 | } | |
5478 | ||
5479 | #ifdef DHD_PM_CONTROL_FROM_FILE | |
5480 | extern bool g_pm_control; | |
5481 | #endif /* DHD_PM_CONTROL_FROM_FILE */ | |
5482 | ||
5483 | /** Use protocol to issue ioctl to dongle. Only one ioctl may be in transit. */ | |
5484 | int dhd_prot_ioctl(dhd_pub_t *dhd, int ifidx, wl_ioctl_t * ioc, void * buf, int len) | |
5485 | { | |
5486 | int ret = -1; | |
5487 | uint8 action; | |
5488 | ||
5489 | if ((dhd->busstate == DHD_BUS_DOWN) || dhd->hang_was_sent) { | |
5490 | DHD_ERROR(("%s : bus is down. we have nothing to do - bs: %d, has: %d\n", | |
5491 | __FUNCTION__, dhd->busstate, dhd->hang_was_sent)); | |
5492 | goto done; | |
5493 | } | |
5494 | ||
5495 | if (dhd->busstate == DHD_BUS_SUSPEND) { | |
5496 | DHD_ERROR(("%s : bus is suspended\n", __FUNCTION__)); | |
5497 | goto done; | |
5498 | } | |
5499 | ||
5500 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
5501 | ||
5502 | if (ioc->cmd == WLC_SET_PM) { | |
5503 | #ifdef DHD_PM_CONTROL_FROM_FILE | |
5504 | if (g_pm_control == TRUE) { | |
5505 | DHD_ERROR(("%s: SET PM ignored!(Requested:%d)\n", | |
5506 | __FUNCTION__, buf ? *(char *)buf : 0)); | |
5507 | goto done; | |
5508 | } | |
5509 | #endif /* DHD_PM_CONTROL_FROM_FILE */ | |
5510 | DHD_TRACE_HW4(("%s: SET PM to %d\n", __FUNCTION__, buf ? *(char *)buf : 0)); | |
5511 | } | |
5512 | ||
5513 | ASSERT(len <= WLC_IOCTL_MAXLEN); | |
5514 | ||
5515 | if (len > WLC_IOCTL_MAXLEN) | |
5516 | goto done; | |
5517 | ||
5518 | action = ioc->set; | |
5519 | ||
5520 | dhd_prot_wlioctl_intercept(dhd, ioc, buf); | |
5521 | ||
5522 | if (action & WL_IOCTL_ACTION_SET) { | |
5523 | ret = dhd_msgbuf_set_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); | |
5524 | } else { | |
5525 | ret = dhd_msgbuf_query_ioctl(dhd, ifidx, ioc->cmd, buf, len, action); | |
5526 | if (ret > 0) | |
5527 | ioc->used = ret; | |
5528 | } | |
5529 | ||
5530 | /* Too many programs assume ioctl() returns 0 on success */ | |
5531 | if (ret >= 0) { | |
5532 | ret = 0; | |
5533 | } else { | |
5534 | dhd->dongle_error = ret; | |
5535 | } | |
5536 | ||
5537 | if (!ret && ioc->cmd == WLC_SET_VAR && buf != NULL) { | |
5538 | /* Intercept the wme_dp ioctl here */ | |
5539 | if (!strcmp(buf, "wme_dp")) { | |
5540 | int slen, val = 0; | |
5541 | ||
5542 | slen = strlen("wme_dp") + 1; | |
5543 | if (len >= (int)(slen + sizeof(int))) | |
5544 | bcopy(((char *)buf + slen), &val, sizeof(int)); | |
5545 | dhd->wme_dp = (uint8) ltoh32(val); | |
5546 | } | |
5547 | ||
5548 | } | |
5549 | ||
5550 | done: | |
5551 | return ret; | |
5552 | ||
5553 | } /* dhd_prot_ioctl */ | |
5554 | ||
5555 | /** test / loopback */ | |
5556 | ||
5557 | int | |
5558 | dhdmsgbuf_lpbk_req(dhd_pub_t *dhd, uint len) | |
5559 | { | |
5560 | unsigned long flags; | |
5561 | dhd_prot_t *prot = dhd->prot; | |
5562 | uint16 alloced = 0; | |
5563 | ||
5564 | ioct_reqst_hdr_t *ioct_rqst; | |
5565 | ||
5566 | uint16 hdrlen = sizeof(ioct_reqst_hdr_t); | |
5567 | uint16 msglen = len + hdrlen; | |
5568 | msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; | |
5569 | ||
5570 | msglen = ALIGN_SIZE(msglen, DMA_ALIGN_LEN); | |
5571 | msglen = LIMIT_TO_MAX(msglen, MSGBUF_MAX_MSG_SIZE); | |
5572 | ||
5573 | #ifdef PCIE_INB_DW | |
5574 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
5575 | return BCME_ERROR; | |
5576 | #endif /* PCIE_INB_DW */ | |
5577 | ||
5578 | DHD_GENERAL_LOCK(dhd, flags); | |
5579 | ||
5580 | ioct_rqst = (ioct_reqst_hdr_t *) | |
5581 | dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); | |
5582 | ||
5583 | if (ioct_rqst == NULL) { | |
5584 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5585 | #ifdef PCIE_INB_DW | |
5586 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
5587 | #endif | |
5588 | return 0; | |
5589 | } | |
5590 | ||
5591 | { | |
5592 | uint8 *ptr; | |
5593 | uint16 i; | |
5594 | ||
5595 | ptr = (uint8 *)ioct_rqst; | |
5596 | for (i = 0; i < msglen; i++) { | |
5597 | ptr[i] = i % 256; | |
5598 | } | |
5599 | } | |
5600 | ||
5601 | /* Common msg buf hdr */ | |
5602 | ioct_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
5603 | ring->seqnum++; | |
5604 | ||
5605 | ioct_rqst->msg.msg_type = MSG_TYPE_LOOPBACK; | |
5606 | ioct_rqst->msg.if_id = 0; | |
5607 | ioct_rqst->msg.flags = ring->current_phase; | |
5608 | ||
5609 | bcm_print_bytes("LPBK REQ: ", (uint8 *)ioct_rqst, msglen); | |
5610 | ||
5611 | /* update ring's WR index and ring doorbell to dongle */ | |
5612 | dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); | |
5613 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5614 | #ifdef PCIE_INB_DW | |
5615 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
5616 | #endif | |
5617 | ||
5618 | return 0; | |
5619 | } | |
5620 | ||
5621 | /** test / loopback */ | |
5622 | void dmaxfer_free_dmaaddr(dhd_pub_t *dhd, dhd_dmaxfer_t *dmaxfer) | |
5623 | { | |
5624 | if (dmaxfer == NULL) | |
5625 | return; | |
5626 | ||
5627 | dhd_dma_buf_free(dhd, &dmaxfer->srcmem); | |
5628 | dhd_dma_buf_free(dhd, &dmaxfer->dstmem); | |
5629 | } | |
5630 | ||
5631 | /** test / loopback */ | |
5632 | int | |
5633 | dhd_prepare_schedule_dmaxfer_free(dhd_pub_t *dhdp) | |
5634 | { | |
5635 | dhd_prot_t *prot = dhdp->prot; | |
5636 | dhd_dmaxfer_t *dmaxfer = &prot->dmaxfer; | |
5637 | dmaxref_mem_map_t *dmap = NULL; | |
5638 | ||
5639 | dmap = MALLOCZ(dhdp->osh, sizeof(dmaxref_mem_map_t)); | |
5640 | if (!dmap) { | |
5641 | DHD_ERROR(("%s: dmap alloc failed\n", __FUNCTION__)); | |
5642 | goto mem_alloc_fail; | |
5643 | } | |
5644 | dmap->srcmem = &(dmaxfer->srcmem); | |
5645 | dmap->dstmem = &(dmaxfer->dstmem); | |
5646 | ||
5647 | DMAXFER_FREE(dhdp, dmap); | |
5648 | return BCME_OK; | |
5649 | ||
5650 | mem_alloc_fail: | |
5651 | if (dmap) { | |
5652 | MFREE(dhdp->osh, dmap, sizeof(dmaxref_mem_map_t)); | |
5653 | dmap = NULL; | |
5654 | } | |
5655 | return BCME_NOMEM; | |
5656 | } /* dhd_prepare_schedule_dmaxfer_free */ | |
5657 | ||
5658 | ||
5659 | /** test / loopback */ | |
5660 | void | |
5661 | dmaxfer_free_prev_dmaaddr(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap) | |
5662 | { | |
5663 | ||
5664 | dhd_dma_buf_free(dhdp, dmmap->srcmem); | |
5665 | dhd_dma_buf_free(dhdp, dmmap->dstmem); | |
5666 | ||
5667 | MFREE(dhdp->osh, dmmap, sizeof(dmaxref_mem_map_t)); | |
5668 | dmmap = NULL; | |
5669 | ||
5670 | } /* dmaxfer_free_prev_dmaaddr */ | |
5671 | ||
5672 | ||
5673 | /** test / loopback */ | |
5674 | int dmaxfer_prepare_dmaaddr(dhd_pub_t *dhd, uint len, | |
5675 | uint srcdelay, uint destdelay, dhd_dmaxfer_t *dmaxfer) | |
5676 | { | |
5677 | uint i; | |
5678 | if (!dmaxfer) | |
5679 | return BCME_ERROR; | |
5680 | ||
5681 | /* First free up existing buffers */ | |
5682 | dmaxfer_free_dmaaddr(dhd, dmaxfer); | |
5683 | ||
5684 | if (dhd_dma_buf_alloc(dhd, &dmaxfer->srcmem, len)) { | |
5685 | return BCME_NOMEM; | |
5686 | } | |
5687 | ||
5688 | if (dhd_dma_buf_alloc(dhd, &dmaxfer->dstmem, len + 8)) { | |
5689 | dhd_dma_buf_free(dhd, &dmaxfer->srcmem); | |
5690 | return BCME_NOMEM; | |
5691 | } | |
5692 | ||
5693 | dmaxfer->len = len; | |
5694 | ||
5695 | /* Populate source with a pattern */ | |
5696 | for (i = 0; i < dmaxfer->len; i++) { | |
5697 | ((uint8*)dmaxfer->srcmem.va)[i] = i % 256; | |
5698 | } | |
5699 | OSL_CACHE_FLUSH(dmaxfer->srcmem.va, dmaxfer->len); | |
5700 | ||
5701 | dmaxfer->srcdelay = srcdelay; | |
5702 | dmaxfer->destdelay = destdelay; | |
5703 | ||
5704 | return BCME_OK; | |
5705 | } /* dmaxfer_prepare_dmaaddr */ | |
5706 | ||
5707 | static void | |
5708 | dhd_msgbuf_dmaxfer_process(dhd_pub_t *dhd, void *msg) | |
5709 | { | |
5710 | dhd_prot_t *prot = dhd->prot; | |
5711 | uint64 end_usec; | |
5712 | pcie_dmaxfer_cmplt_t *cmplt = (pcie_dmaxfer_cmplt_t *)msg; | |
5713 | ||
5714 | BCM_REFERENCE(cmplt); | |
5715 | DHD_INFO(("DMA status: %d\n", cmplt->compl_hdr.status)); | |
5716 | OSL_CACHE_INV(prot->dmaxfer.dstmem.va, prot->dmaxfer.len); | |
5717 | if (prot->dmaxfer.srcmem.va && prot->dmaxfer.dstmem.va) { | |
5718 | if (memcmp(prot->dmaxfer.srcmem.va, | |
5719 | prot->dmaxfer.dstmem.va, prot->dmaxfer.len)) { | |
5720 | prhex("XFER SRC: ", | |
5721 | prot->dmaxfer.srcmem.va, prot->dmaxfer.len); | |
5722 | prhex("XFER DST: ", | |
5723 | prot->dmaxfer.dstmem.va, prot->dmaxfer.len); | |
5724 | DHD_ERROR(("DMA failed\n")); | |
5725 | } | |
5726 | else { | |
5727 | if (prot->dmaxfer.d11_lpbk) { | |
5728 | DHD_ERROR(("DMA successful with d11 loopback\n")); | |
5729 | } else { | |
5730 | DHD_ERROR(("DMA successful without d11 loopback\n")); | |
5731 | } | |
5732 | } | |
5733 | } | |
5734 | end_usec = OSL_SYSUPTIME_US(); | |
5735 | dhd_prepare_schedule_dmaxfer_free(dhd); | |
5736 | end_usec -= prot->dmaxfer.start_usec; | |
5737 | DHD_ERROR(("DMA loopback %d bytes in %llu usec, %u kBps\n", | |
5738 | prot->dmaxfer.len, end_usec, | |
5739 | (prot->dmaxfer.len * (1000 * 1000 / 1024) / (uint32)(end_usec + 1)))); | |
5740 | dhd->prot->dmaxfer.in_progress = FALSE; | |
5741 | } | |
5742 | ||
5743 | /** Test functionality. | |
5744 | * Transfers bytes from the host to the dongle and back to the host using DMA. | |
5745 | * This function is not reentrant, as prot->dmaxfer.in_progress is not protected | |
5746 | * by a spinlock. | |
5747 | */ | |
5748 | int | |
5749 | dhdmsgbuf_dmaxfer_req(dhd_pub_t *dhd, uint len, uint srcdelay, uint destdelay, uint d11_lpbk) | |
5750 | { | |
5751 | unsigned long flags; | |
5752 | int ret = BCME_OK; | |
5753 | dhd_prot_t *prot = dhd->prot; | |
5754 | pcie_dma_xfer_params_t *dmap; | |
5755 | uint32 xferlen = LIMIT_TO_MAX(len, DMA_XFER_LEN_LIMIT); | |
5756 | uint16 alloced = 0; | |
5757 | msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; | |
5758 | ||
5759 | if (prot->dmaxfer.in_progress) { | |
5760 | DHD_ERROR(("DMA is in progress...\n")); | |
5761 | return ret; | |
5762 | } | |
5763 | ||
5764 | prot->dmaxfer.in_progress = TRUE; | |
5765 | if ((ret = dmaxfer_prepare_dmaaddr(dhd, xferlen, srcdelay, destdelay, | |
5766 | &prot->dmaxfer)) != BCME_OK) { | |
5767 | prot->dmaxfer.in_progress = FALSE; | |
5768 | return ret; | |
5769 | } | |
5770 | ||
5771 | #ifdef PCIE_INB_DW | |
5772 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) { | |
 | /* undo the setup above so a later request can retry */ | |
 | dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); | |
 | prot->dmaxfer.in_progress = FALSE; | |
5773 | return BCME_ERROR; | |
 | } | |
5774 | #endif /* PCIE_INB_DW */ | |
5775 | ||
5776 | DHD_GENERAL_LOCK(dhd, flags); | |
5777 | ||
5778 | dmap = (pcie_dma_xfer_params_t *) | |
5779 | dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); | |
5780 | ||
5781 | if (dmap == NULL) { | |
5782 | dmaxfer_free_dmaaddr(dhd, &prot->dmaxfer); | |
5783 | prot->dmaxfer.in_progress = FALSE; | |
5784 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5785 | #ifdef PCIE_INB_DW | |
5786 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
5787 | #endif | |
5788 | return BCME_NOMEM; | |
5789 | } | |
5790 | ||
5791 | /* Common msg buf hdr */ | |
5792 | dmap->cmn_hdr.msg_type = MSG_TYPE_LPBK_DMAXFER; | |
5793 | dmap->cmn_hdr.request_id = htol32(DHD_FAKE_PKTID); | |
5794 | dmap->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
5795 | dmap->cmn_hdr.flags = ring->current_phase; | |
5796 | ring->seqnum++; | |
5797 | ||
5798 | dmap->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.srcmem.pa)); | |
5799 | dmap->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.srcmem.pa)); | |
5800 | dmap->host_ouput_buf_addr.high = htol32(PHYSADDRHI(prot->dmaxfer.dstmem.pa)); | |
5801 | dmap->host_ouput_buf_addr.low = htol32(PHYSADDRLO(prot->dmaxfer.dstmem.pa)); | |
5802 | dmap->xfer_len = htol32(prot->dmaxfer.len); | |
5803 | dmap->srcdelay = htol32(prot->dmaxfer.srcdelay); | |
5804 | dmap->destdelay = htol32(prot->dmaxfer.destdelay); | |
5805 | prot->dmaxfer.d11_lpbk = d11_lpbk ? 1 : 0; | |
5806 | dmap->flags = (prot->dmaxfer.d11_lpbk << PCIE_DMA_XFER_FLG_D11_LPBK_SHIFT) | |
5807 | & PCIE_DMA_XFER_FLG_D11_LPBK_MASK; | |
5808 | ||
5809 | /* update the ring's WR index and ring the doorbell to the dongle */ | |
5810 | prot->dmaxfer.start_usec = OSL_SYSUPTIME_US(); | |
5811 | dhd_prot_ring_write_complete(dhd, ring, dmap, 1); | |
5812 | DHD_GENERAL_UNLOCK(dhd, flags); | |
5813 | #ifdef PCIE_INB_DW | |
5814 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
5815 | #endif | |
5816 | ||
5817 | DHD_INFO(("DMA Started...\n")); | |
5818 | ||
5819 | return BCME_OK; | |
5820 | } /* dhdmsgbuf_dmaxfer_req */ | |
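 | ||
 | /* Illustrative sketch (not part of the original source): one way a caller | |
 | * could drive the loopback test above. The wrapper name and the parameter | |
 | * values are assumptions for illustration only. | |
 | */ | |
 | static int | |
 | dhd_dmaxfer_test_sketch(dhd_pub_t *dhd) | |
 | { | |
 | /* 4KB transfer, no src/dst delays, no d11 loopback */ | |
 | int err = dhdmsgbuf_dmaxfer_req(dhd, 4096, 0, 0, 0); | |
 | if (err != BCME_OK) | |
 | return err; | |
 | /* The completion arrives asynchronously: dhd_msgbuf_dmaxfer_process() | |
 | * compares srcmem against dstmem, logs the throughput, and schedules | |
 | * the buffers for release via dhd_prepare_schedule_dmaxfer_free(). | |
 | */ | |
 | return BCME_OK; | |
 | } | |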
5821 | ||
5822 | /** Called in the process of submitting an ioctl to the dongle */ | |
5823 | static int | |
5824 | dhd_msgbuf_query_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) | |
5825 | { | |
5826 | int ret = 0; | |
5827 | uint copylen = 0; | |
5828 | ||
5829 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
5830 | ||
5831 | if (cmd == WLC_GET_VAR && buf) | |
5832 | { | |
5833 | if (!len || !*(uint8 *)buf) { | |
5834 | DHD_ERROR(("%s(): Zero length bailing\n", __FUNCTION__)); | |
5835 | ret = BCME_BADARG; | |
5836 | goto done; | |
5837 | } | |
5838 | ||
5839 | /* Respond "bcmerror" and "bcmerrorstr" with local cache */ | |
5840 | copylen = MIN(len, BCME_STRLEN); | |
5841 | ||
5842 | if ((len >= strlen("bcmerrorstr")) && | |
5843 | (!strcmp((char *)buf, "bcmerrorstr"))) { | |
5844 | ||
5845 | strncpy((char *)buf, bcmerrorstr(dhd->dongle_error), copylen); | |
5846 | ((uint8 *)buf)[copylen - 1] = '\0'; | |
5847 | ||
5848 | goto done; | |
5849 | } else if ((len >= strlen("bcmerror")) && | |
5850 | !strcmp((char *)buf, "bcmerror")) { | |
5851 | ||
5852 | *(uint32 *)buf = dhd->dongle_error; | |
5853 | ||
5854 | goto done; | |
5855 | } | |
5856 | } | |
5857 | ||
5858 | ||
5859 | DHD_CTL(("query_ioctl: ACTION %d ifidx %d cmd %d len %d\n", | |
5860 | action, ifidx, cmd, len)); | |
5861 | #ifdef REPORT_FATAL_TIMEOUTS | |
5862 | /* | |
5863 | * These timers must be started before sending the H2D interrupt. | |
5864 | * Consider the scenario where the H2D interrupt is fired and the dongle | |
5865 | * responds immediately: the DPC would then stop the cmd and bus timers. | |
5866 | * If the process context had been switched out in the meantime, the | |
5867 | * timers could end up being stopped before they were ever started. | |
5868 | * | |
5869 | * Disable preemption from the time we start the timers until we are | |
5870 | * done sending the H2D interrupt. | |
5871 | */ | |
5872 | OSL_DISABLE_PREEMPTION(dhd->osh); | |
5873 | dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd); | |
5874 | dhd_start_cmd_timer(dhd); | |
5875 | dhd_start_bus_timer(dhd); | |
5876 | #endif /* REPORT_FATAL_TIMEOUTS */ | |
5877 | ||
5878 | ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); | |
5879 | ||
5880 | #ifdef REPORT_FATAL_TIMEOUTS | |
5881 | /* If we failed to ring the doorbell for any reason, stop the timers */ | |
5882 | if (ret < 0) { | |
5883 | DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); | |
5884 | dhd_stop_cmd_timer(dhd); | |
5885 | dhd_stop_bus_timer(dhd); | |
5886 | OSL_ENABLE_PREEMPTION(dhd->osh); | |
5887 | goto done; | |
5888 | } | |
5889 | OSL_ENABLE_PREEMPTION(dhd->osh); | |
5890 | #else | |
5891 | if (ret < 0) { | |
5892 | DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); | |
5893 | goto done; | |
5894 | } | |
5895 | #endif /* REPORT_FATAL_TIMEOUTS */ | |
5896 | ||
5897 | /* wait for IOCTL completion message from dongle and get first fragment */ | |
5898 | ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); | |
5899 | ||
5900 | done: | |
5901 | return ret; | |
5902 | } | |
5903 | ||
5904 | /** | |
5905 | * Waits for IOCTL completion message from the dongle, copies this into caller | |
5906 | * provided parameter 'buf'. | |
5907 | */ | |
5908 | static int | |
5909 | dhd_msgbuf_wait_ioctl_cmplt(dhd_pub_t *dhd, uint32 len, void *buf) | |
5910 | { | |
5911 | dhd_prot_t *prot = dhd->prot; | |
5912 | int timeleft; | |
5913 | unsigned long flags; | |
5914 | int ret = 0; | |
5915 | ||
5916 | DHD_TRACE(("%s: Enter\n", __FUNCTION__)); | |
5917 | ||
5918 | if (dhd_query_bus_erros(dhd)) { | |
5919 | ret = -EIO; | |
5920 | goto out; | |
5921 | } | |
5922 | ||
5923 | timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received); | |
5924 | ||
5925 | #ifdef DHD_RECOVER_TIMEOUT | |
5926 | if (prot->ioctl_received == 0) { | |
5927 | uint32 intstatus = 0; | |
5928 | uint32 intmask = 0; | |
5929 | intstatus = si_corereg(dhd->bus->sih, | |
5930 | dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); | |
5931 | intmask = si_corereg(dhd->bus->sih, | |
5932 | dhd->bus->sih->buscoreidx, PCIMailBoxMask, 0, 0); | |
5933 | if ((intstatus) && (!intmask) && (timeleft == 0) && (!dhd_query_bus_erros(dhd))) | |
5934 | { | |
5935 | DHD_ERROR(("%s: iovar timeout trying again intstatus=%x intmask=%x\n", | |
5936 | __FUNCTION__, intstatus, intmask)); | |
5937 | DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters\r\n")); | |
5938 | DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" | |
5939 | "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" | |
5940 | "dpc_return_busdown_count=%lu\n", | |
5941 | dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count, | |
5942 | dhd->bus->isr_intr_disable_count, | |
5943 | dhd->bus->suspend_intr_disable_count, | |
5944 | dhd->bus->dpc_return_busdown_count)); | |
5945 | ||
5946 | dhd_prot_process_ctrlbuf(dhd); | |
5947 | ||
5948 | timeleft = dhd_os_ioctl_resp_wait(dhd, (uint *)&prot->ioctl_received); | |
5949 | /* Enable Back Interrupts using IntMask */ | |
5950 | dhdpcie_bus_intr_enable(dhd->bus); | |
5951 | } | |
5952 | } | |
5953 | #endif /* DHD_RECOVER_TIMEOUT */ | |
5954 | ||
5955 | if (timeleft == 0 && (!dhd_query_bus_erros(dhd))) { | |
5956 | uint32 intstatus; | |
5957 | ||
5958 | dhd->rxcnt_timeout++; | |
5959 | dhd->rx_ctlerrs++; | |
5960 | dhd->iovar_timeout_occured = TRUE; | |
5961 | DHD_ERROR(("%s: resumed on timeout rxcnt_timeout %d ioctl_cmd %d " | |
5962 | "trans_id %d state %d busstate=%d ioctl_received=%d\n", | |
5963 | __FUNCTION__, dhd->rxcnt_timeout, prot->curr_ioctl_cmd, | |
5964 | prot->ioctl_trans_id, prot->ioctl_state, | |
5965 | dhd->busstate, prot->ioctl_received)); | |
5966 | if (prot->curr_ioctl_cmd == WLC_SET_VAR || | |
5967 | prot->curr_ioctl_cmd == WLC_GET_VAR) { | |
5968 | char iovbuf[32]; | |
5969 | int i; | |
5970 | int dump_size = 128; | |
5971 | uint8 *ioctl_buf = (uint8 *)prot->ioctbuf.va; | |
5972 | memset(iovbuf, 0, sizeof(iovbuf)); | |
5973 | strncpy(iovbuf, ioctl_buf, sizeof(iovbuf) - 1); | |
5974 | iovbuf[sizeof(iovbuf) - 1] = '\0'; | |
5975 | DHD_ERROR(("Current IOVAR (%s): %s\n", | |
5976 | prot->curr_ioctl_cmd == WLC_SET_VAR ? | |
5977 | "WLC_SET_VAR" : "WLC_GET_VAR", iovbuf)); | |
5978 | DHD_ERROR(("========== START IOCTL REQBUF DUMP ==========\n")); | |
5979 | for (i = 0; i < dump_size; i++) { | |
5980 | DHD_ERROR(("%02X ", ioctl_buf[i])); | |
5981 | if ((i % 32) == 31) { | |
5982 | DHD_ERROR(("\n")); | |
5983 | } | |
5984 | } | |
5985 | DHD_ERROR(("\n========== END IOCTL REQBUF DUMP ==========\n")); | |
5986 | } | |
5987 | ||
5988 | /* Check the PCIe link status by reading intstatus register */ | |
5989 | intstatus = si_corereg(dhd->bus->sih, | |
5990 | dhd->bus->sih->buscoreidx, PCIMailBoxInt, 0, 0); | |
5991 | if (intstatus == (uint32)-1) { | |
5992 | DHD_ERROR(("%s : PCIe link might be down\n", __FUNCTION__)); | |
5993 | dhd->bus->is_linkdown = TRUE; | |
5994 | } | |
5995 | ||
5996 | dhd_bus_dump_console_buffer(dhd->bus); | |
5997 | dhd_prot_debug_info_print(dhd); | |
5998 | ||
5999 | #ifdef DHD_FW_COREDUMP | |
6000 | /* Collect socram dump */ | |
6001 | if (dhd->memdump_enabled) { | |
6002 | /* collect core dump */ | |
6003 | dhd->memdump_type = DUMP_TYPE_RESUMED_ON_TIMEOUT; | |
6004 | dhd_bus_mem_dump(dhd); | |
6005 | } | |
6006 | #endif /* DHD_FW_COREDUMP */ | |
6007 | #ifdef SUPPORT_LINKDOWN_RECOVERY | |
6008 | #ifdef CONFIG_ARCH_MSM | |
6009 | dhd->bus->no_cfg_restore = 1; | |
6010 | #endif /* CONFIG_ARCH_MSM */ | |
6011 | #endif /* SUPPORT_LINKDOWN_RECOVERY */ | |
6012 | ret = -ETIMEDOUT; | |
6013 | goto out; | |
6014 | } else { | |
6015 | if (prot->ioctl_received != IOCTL_RETURN_ON_SUCCESS) { | |
6016 | DHD_ERROR(("%s: IOCTL failure due to ioctl_received = %d\n", | |
6017 | __FUNCTION__, prot->ioctl_received)); | |
6018 | ret = -EINVAL; | |
6019 | goto out; | |
6020 | } | |
6021 | dhd->rxcnt_timeout = 0; | |
6022 | dhd->rx_ctlpkts++; | |
6023 | DHD_CTL(("%s: ioctl resp resumed, got %d\n", | |
6024 | __FUNCTION__, prot->ioctl_resplen)); | |
6025 | } | |
6026 | ||
6027 | if (dhd->prot->ioctl_resplen > len) | |
6028 | dhd->prot->ioctl_resplen = (uint16)len; | |
6029 | if (buf) | |
6030 | bcopy(dhd->prot->retbuf.va, buf, dhd->prot->ioctl_resplen); | |
6031 | ||
6032 | ret = (int)(dhd->prot->ioctl_status); | |
6033 | ||
6034 | out: | |
6035 | DHD_GENERAL_LOCK(dhd, flags); | |
6036 | dhd->prot->ioctl_state = 0; | |
6037 | dhd->prot->ioctl_resplen = 0; | |
6038 | dhd->prot->ioctl_received = IOCTL_WAIT; | |
6039 | dhd->prot->curr_ioctl_cmd = 0; | |
6040 | DHD_GENERAL_UNLOCK(dhd, flags); | |
6041 | ||
6042 | return ret; | |
6043 | } /* dhd_msgbuf_wait_ioctl_cmplt */ | |
6044 | ||
6045 | static int | |
6046 | dhd_msgbuf_set_ioctl(dhd_pub_t *dhd, int ifidx, uint cmd, void *buf, uint len, uint8 action) | |
6047 | { | |
6048 | int ret = 0; | |
6049 | ||
6050 | DHD_TRACE(("%s: Enter \n", __FUNCTION__)); | |
6051 | ||
6052 | if (dhd->busstate == DHD_BUS_DOWN) { | |
6053 | DHD_ERROR(("%s: bus is down, nothing to do\n", __FUNCTION__)); | |
6054 | return -EIO; | |
6055 | } | |
6056 | ||
6057 | /* don't talk to the dongle if fw is about to be reloaded */ | |
6058 | if (dhd->hang_was_sent) { | |
6059 | DHD_ERROR(("%s: HANG was sent up earlier. Not talking to the chip\n", | |
6060 | __FUNCTION__)); | |
6061 | return -EIO; | |
6062 | } | |
6063 | ||
6064 | DHD_CTL(("ACTION %d ifidx %d cmd %d len %d\n", | |
6065 | action, ifidx, cmd, len)); | |
6066 | ||
6067 | #ifdef REPORT_FATAL_TIMEOUTS | |
6068 | /* | |
6069 | * These timers must be started before sending the H2D interrupt. | |
6070 | * Consider the scenario where the H2D interrupt is fired and the dongle | |
6071 | * responds immediately: the DPC would then stop the cmd and bus timers. | |
6072 | * If the process context had been switched out in the meantime, the | |
6073 | * timers could end up being stopped before they were ever started. | |
6074 | * | |
6075 | * Disable preemption from the time we start the timers until we are | |
6076 | * done sending the H2D interrupt. | |
6077 | */ | |
6078 | OSL_DISABLE_PREEMPTION(dhd->osh); | |
6079 | dhd_set_request_id(dhd, dhd->prot->ioctl_trans_id+1, cmd); | |
6080 | dhd_start_cmd_timer(dhd); | |
6081 | dhd_start_bus_timer(dhd); | |
6082 | #endif /* REPORT_FATAL_TIMEOUTS */ | |
6083 | ||
6084 | /* Fill up msgbuf for ioctl req */ | |
6085 | ret = dhd_fillup_ioct_reqst(dhd, (uint16)len, cmd, buf, ifidx); | |
6086 | ||
6087 | #ifdef REPORT_FATAL_TIMEOUTS | |
6088 | /* If we failed to ring the doorbell for any reason, stop the timers */ | |
6089 | if (ret < 0) { | |
6090 | DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); | |
6091 | dhd_stop_cmd_timer(dhd); | |
6092 | dhd_stop_bus_timer(dhd); | |
6093 | OSL_ENABLE_PREEMPTION(dhd->osh); | |
6094 | goto done; | |
6095 | } | |
6096 | ||
6097 | OSL_ENABLE_PREEMPTION(dhd->osh); | |
6098 | #else | |
6099 | if (ret < 0) { | |
6100 | DHD_ERROR(("%s(): dhd_fillup_ioct_reqst failed \r\n", __FUNCTION__)); | |
6101 | goto done; | |
6102 | } | |
6103 | #endif /* REPORT_FATAL_TIMEOUTS */ | |
6104 | ||
6105 | ret = dhd_msgbuf_wait_ioctl_cmplt(dhd, len, buf); | |
6106 | ||
6107 | done: | |
6108 | return ret; | |
6109 | } | |
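 | ||
 | /* Illustrative call flow (sketch, not part of the original source): the query | |
 | * and set paths above share the same submission/completion plumbing: | |
 | * | |
 | *   dhd_msgbuf_query_ioctl() / dhd_msgbuf_set_ioctl() | |
 | *     -> dhd_fillup_ioct_reqst()        post the request on the ctrl submit ring | |
 | *     -> dhd_msgbuf_wait_ioctl_cmplt()  block for the D2H completion, copy resp | |
 | */ | |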
6110 | ||
6111 | /** Called by upper DHD layer. Handles a protocol control response asynchronously. */ | |
6112 | int dhd_prot_ctl_complete(dhd_pub_t *dhd) | |
6113 | { | |
6114 | return 0; | |
6115 | } | |
6116 | ||
6117 | /** Called by upper DHD layer. Check for and handle local prot-specific iovar commands */ | |
6118 | int dhd_prot_iovar_op(dhd_pub_t *dhd, const char *name, | |
6119 | void *params, int plen, void *arg, int len, bool set) | |
6120 | { | |
6121 | return BCME_UNSUPPORTED; | |
6122 | } | |
6123 | ||
6124 | /** Add prot dump output to a buffer */ | |
6125 | void dhd_prot_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) | |
6126 | { | |
6127 | ||
6128 | if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_SEQNUM) | |
6129 | bcm_bprintf(b, "\nd2h_sync: SEQNUM:"); | |
6130 | else if (dhd->d2h_sync_mode & PCIE_SHARED_D2H_SYNC_XORCSUM) | |
6131 | bcm_bprintf(b, "\nd2h_sync: XORCSUM:"); | |
6132 | else | |
6133 | bcm_bprintf(b, "\nd2h_sync: NONE:"); | |
6134 | bcm_bprintf(b, " d2h_sync_wait max<%lu> tot<%lu>\n", | |
6135 | dhd->prot->d2h_sync_wait_max, dhd->prot->d2h_sync_wait_tot); | |
6136 | ||
6137 | bcm_bprintf(b, "\nDongle DMA Indices: h2d %d d2h %d index size %d bytes\n", | |
6138 | dhd->dma_h2d_ring_upd_support, | |
6139 | dhd->dma_d2h_ring_upd_support, | |
6140 | dhd->prot->rw_index_sz); | |
6141 | bcm_bprintf(b, "h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", | |
6142 | h2d_max_txpost, dhd->prot->h2d_max_txpost); | |
6143 | } | |
6144 | ||
6145 | /* Update local copy of dongle statistics */ | |
6146 | void dhd_prot_dstats(dhd_pub_t *dhd) | |
6147 | { | |
6148 | return; | |
6149 | } | |
6150 | ||
6151 | /** Called by upper DHD layer */ | |
6152 | int dhd_process_pkt_reorder_info(dhd_pub_t *dhd, uchar *reorder_info_buf, | |
6153 | uint reorder_info_len, void **pkt, uint32 *free_buf_count) | |
6154 | { | |
6155 | return 0; | |
6156 | } | |
6157 | ||
6158 | /** Debug related, post a dummy message to interrupt dongle. Used to process cons commands. */ | |
6159 | int | |
6160 | dhd_post_dummy_msg(dhd_pub_t *dhd) | |
6161 | { | |
6162 | unsigned long flags; | |
6163 | hostevent_hdr_t *hevent = NULL; | |
6164 | uint16 alloced = 0; | |
6165 | ||
6166 | dhd_prot_t *prot = dhd->prot; | |
6167 | msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; | |
6168 | ||
6169 | #ifdef PCIE_INB_DW | |
6170 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
6171 | return BCME_ERROR; | |
6172 | #endif /* PCIE_INB_DW */ | |
6173 | ||
6174 | DHD_GENERAL_LOCK(dhd, flags); | |
6175 | ||
6176 | hevent = (hostevent_hdr_t *) | |
6177 | dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); | |
6178 | ||
6179 | if (hevent == NULL) { | |
6180 | DHD_GENERAL_UNLOCK(dhd, flags); | |
6181 | #ifdef PCIE_INB_DW | |
6182 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
6183 | #endif | |
6184 | return -1; | |
6185 | } | |
6186 | ||
6187 | /* CMN msg header */ | |
6188 | hevent->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
6189 | ring->seqnum++; | |
6190 | hevent->msg.msg_type = MSG_TYPE_HOST_EVNT; | |
6191 | hevent->msg.if_id = 0; | |
6192 | hevent->msg.flags = ring->current_phase; | |
6193 | ||
6194 | /* Event payload */ | |
6195 | hevent->evnt_pyld = htol32(HOST_EVENT_CONS_CMD); | |
6196 | ||
6197 | /* Since we are filling the data directly into the buffer pointer obtained | |
6198 | * from the msgbuf, we can call write_complete directly. | |
6199 | */ | |
6200 | dhd_prot_ring_write_complete(dhd, ring, hevent, 1); | |
6201 | DHD_GENERAL_UNLOCK(dhd, flags); | |
6202 | #ifdef PCIE_INB_DW | |
6203 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
6204 | #endif | |
6205 | ||
6206 | return 0; | |
6207 | } | |
6208 | ||
6209 | /** | |
6210 | * If exactly_nitems is true, this function will allocate space for nitems or fail | |
6211 | * If exactly_nitems is false, this function will allocate space for nitems or less | |
6212 | */ | |
6213 | static void * BCMFASTPATH | |
6214 | dhd_prot_alloc_ring_space(dhd_pub_t *dhd, msgbuf_ring_t *ring, | |
6215 | uint16 nitems, uint16 * alloced, bool exactly_nitems) | |
6216 | { | |
6217 | void * ret_buf; | |
6218 | ||
6219 | /* Alloc space for nitems in the ring */ | |
6220 | ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); | |
6221 | ||
6222 | if (ret_buf == NULL) { | |
6223 | /* if alloc failed, invalidate the cached read ptr */ | |
6224 | if (dhd->dma_d2h_ring_upd_support) { | |
6225 | ring->rd = dhd_prot_dma_indx_get(dhd, H2D_DMA_INDX_RD_UPD, ring->idx); | |
6226 | } else { | |
6227 | dhd_bus_cmn_readshared(dhd->bus, &(ring->rd), RING_RD_UPD, ring->idx); | |
6228 | #ifdef SUPPORT_LINKDOWN_RECOVERY | |
6229 | /* Check if ring->rd is valid */ | |
6230 | if (ring->rd >= ring->max_items) { | |
6231 | dhd->bus->read_shm_fail = TRUE; | |
6232 | DHD_ERROR(("%s: Invalid rd idx=%d\n", ring->name, ring->rd)); | |
6233 | return NULL; | |
6234 | } | |
6235 | #endif /* SUPPORT_LINKDOWN_RECOVERY */ | |
6236 | } | |
6237 | ||
6238 | /* Try allocating once more */ | |
6239 | ret_buf = dhd_prot_get_ring_space(ring, nitems, alloced, exactly_nitems); | |
6240 | ||
6241 | if (ret_buf == NULL) { | |
6242 | DHD_INFO(("%s: Ring space not available \n", ring->name)); | |
6243 | return NULL; | |
6244 | } | |
6245 | } | |
6246 | ||
6247 | if (ret_buf == HOST_RING_BASE(ring)) { | |
6248 | DHD_INFO(("%s: setting the phase now\n", ring->name)); | |
6249 | ring->current_phase = ring->current_phase ? 0 : BCMPCIE_CMNHDR_PHASE_BIT_INIT; | |
6250 | } | |
6251 | ||
6252 | /* Return alloced space */ | |
6253 | return ret_buf; | |
6254 | } | |
6255 | ||
6256 | /** | |
6257 | * Non-inline ioctl request. | |
6258 | * First forms an ioctl request in the circular buffer, per the ioctptr_reqst_hdr_t header. | |
6259 | * Then forms a separate request buffer, with a 4-byte common header added in front; the | |
6260 | * buf contents from the parent function are copied into the remaining section of this buffer. | |
6261 | */ | |
6262 | static int | |
6263 | dhd_fillup_ioct_reqst(dhd_pub_t *dhd, uint16 len, uint cmd, void* buf, int ifidx) | |
6264 | { | |
6265 | dhd_prot_t *prot = dhd->prot; | |
6266 | ioctl_req_msg_t *ioct_rqst; | |
6267 | void * ioct_buf; /* For ioctl payload */ | |
6268 | uint16 rqstlen, resplen; | |
6269 | unsigned long flags; | |
6270 | uint16 alloced = 0; | |
6271 | msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; | |
6272 | ||
6273 | if (dhd_query_bus_erros(dhd)) { | |
6274 | return -EIO; | |
6275 | } | |
6276 | ||
6277 | rqstlen = len; | |
6278 | resplen = len; | |
6279 | ||
6280 | /* Limit the ioctl request to MSGBUF_MAX_MSG_SIZE bytes including headers: | |
6281 | * an 8K dongle buffer allocation fails, and since DHD doesn't provide | |
6282 | * separate input and output buffer lengths, assume the input length can | |
6283 | * never exceed 1.5K. */ | |
6284 | rqstlen = MIN(rqstlen, MSGBUF_MAX_MSG_SIZE); | |
6285 | ||
6286 | #ifdef PCIE_INB_DW | |
6287 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
6288 | return BCME_ERROR; | |
6289 | #endif /* PCIE_INB_DW */ | |
6290 | ||
6291 | DHD_GENERAL_LOCK(dhd, flags); | |
6292 | ||
6293 | if (prot->ioctl_state) { | |
6294 | DHD_ERROR(("%s: pending ioctl %02x\n", __FUNCTION__, prot->ioctl_state)); | |
6295 | DHD_GENERAL_UNLOCK(dhd, flags); | |
6296 | #ifdef PCIE_INB_DW | |
6297 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
6298 | #endif | |
6299 | return BCME_BUSY; | |
6300 | } else { | |
6301 | prot->ioctl_state = MSGBUF_IOCTL_ACK_PENDING | MSGBUF_IOCTL_RESP_PENDING; | |
6302 | } | |
6303 | ||
6304 | /* Request for cbuf space */ | |
6305 | ioct_rqst = (ioctl_req_msg_t*) | |
6306 | dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); | |
6307 | if (ioct_rqst == NULL) { | |
6308 | DHD_ERROR(("couldn't allocate space on msgring to send ioctl request\n")); | |
6309 | prot->ioctl_state = 0; | |
6310 | prot->curr_ioctl_cmd = 0; | |
6311 | prot->ioctl_received = IOCTL_WAIT; | |
6312 | DHD_GENERAL_UNLOCK(dhd, flags); | |
6313 | #ifdef PCIE_INB_DW | |
6314 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
6315 | #endif | |
6316 | return -1; | |
6317 | } | |
6318 | ||
6319 | /* Common msg buf hdr */ | |
6320 | ioct_rqst->cmn_hdr.msg_type = MSG_TYPE_IOCTLPTR_REQ; | |
6321 | ioct_rqst->cmn_hdr.if_id = (uint8)ifidx; | |
6322 | ioct_rqst->cmn_hdr.flags = ring->current_phase; | |
6323 | ioct_rqst->cmn_hdr.request_id = htol32(DHD_IOCTL_REQ_PKTID); | |
6324 | ioct_rqst->cmn_hdr.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
6325 | ring->seqnum++; | |
6326 | ||
6327 | ioct_rqst->cmd = htol32(cmd); | |
6328 | prot->curr_ioctl_cmd = cmd; | |
6329 | ioct_rqst->output_buf_len = htol16(resplen); | |
6330 | prot->ioctl_trans_id++; | |
6331 | ioct_rqst->trans_id = prot->ioctl_trans_id; | |
6332 | ||
6333 | /* populate ioctl buffer info */ | |
6334 | ioct_rqst->input_buf_len = htol16(rqstlen); | |
6335 | ioct_rqst->host_input_buf_addr.high = htol32(PHYSADDRHI(prot->ioctbuf.pa)); | |
6336 | ioct_rqst->host_input_buf_addr.low = htol32(PHYSADDRLO(prot->ioctbuf.pa)); | |
6337 | /* copy ioct payload */ | |
6338 | ioct_buf = (void *) prot->ioctbuf.va; | |
6339 | ||
6340 | if (buf) | |
6341 | memcpy(ioct_buf, buf, len); | |
6342 | ||
6343 | OSL_CACHE_FLUSH((void *) prot->ioctbuf.va, len); | |
6344 | ||
6345 | if (!ISALIGNED(ioct_buf, DMA_ALIGN_LEN)) | |
6346 | DHD_ERROR(("host ioctl address is not DMA aligned\n")); | |
6347 | ||
6348 | DHD_CTL(("submitted IOCTL request: request_id %d, cmd %d, output_buf_len %d, tx_id %d\n", | |
6349 | ioct_rqst->cmn_hdr.request_id, cmd, ioct_rqst->output_buf_len, | |
6350 | ioct_rqst->trans_id)); | |
6351 | ||
6352 | /* update the ring's WR index and ring the doorbell to the dongle */ | |
6353 | dhd_prot_ring_write_complete(dhd, ring, ioct_rqst, 1); | |
6354 | DHD_GENERAL_UNLOCK(dhd, flags); | |
6355 | #ifdef PCIE_INB_DW | |
6356 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
6357 | #endif | |
6358 | ||
6359 | return 0; | |
6360 | } /* dhd_fillup_ioct_reqst */ | |
6361 | ||
6362 | ||
6363 | /** | |
6364 | * dhd_prot_ring_attach - Initialize the msgbuf_ring object and attach a | |
6365 | * DMA-able buffer to it. The ring is NOT tagged as inited until all the ring | |
6366 | * information is posted to the dongle. | |
6367 | * | |
6368 | * Invoked in dhd_prot_attach for the common rings, and in dhd_prot_init for | |
6369 | * each flowring in pool of flowrings. | |
6370 | * | |
6371 | * returns BCME_OK=0 on success | |
6372 | * returns non-zero negative error value on failure. | |
6373 | */ | |
6374 | static int | |
6375 | dhd_prot_ring_attach(dhd_pub_t *dhd, msgbuf_ring_t *ring, const char *name, | |
6376 | uint16 max_items, uint16 item_len, uint16 ringid) | |
6377 | { | |
6378 | int dma_buf_alloced = BCME_NOMEM; | |
6379 | uint32 dma_buf_len = max_items * item_len; | |
6380 | dhd_prot_t *prot = dhd->prot; | |
6381 | uint16 max_flowrings = dhd->bus->max_tx_flowrings; | |
6382 | ||
6383 | ASSERT(ring); | |
6384 | ASSERT(name); | |
6385 | ASSERT((max_items < 0xFFFF) && (item_len < 0xFFFF) && (ringid < 0xFFFF)); | |
6386 | ||
6387 | /* Init name */ | |
6388 | strncpy(ring->name, name, RING_NAME_MAX_LENGTH); | |
6389 | ring->name[RING_NAME_MAX_LENGTH - 1] = '\0'; | |
6390 | ||
6391 | ring->idx = ringid; | |
6392 | ||
6393 | ring->max_items = max_items; | |
6394 | ring->item_len = item_len; | |
6395 | ||
6396 | /* A contiguous space may be reserved for all flowrings */ | |
6397 | if (DHD_IS_FLOWRING(ringid, max_flowrings) && (prot->flowrings_dma_buf.va)) { | |
6398 | /* Carve out from the contiguous DMA-able flowring buffer */ | |
6399 | uint16 flowid; | |
6400 | uint32 base_offset; | |
6401 | ||
6402 | dhd_dma_buf_t *dma_buf = &ring->dma_buf; | |
6403 | dhd_dma_buf_t *rsv_buf = &prot->flowrings_dma_buf; | |
6404 | ||
6405 | flowid = DHD_RINGID_TO_FLOWID(ringid); | |
6406 | base_offset = (flowid - BCMPCIE_H2D_COMMON_MSGRINGS) * dma_buf_len; | |
6407 | ||
6408 | ASSERT(base_offset + dma_buf_len <= rsv_buf->len); | |
6409 | ||
6410 | dma_buf->len = dma_buf_len; | |
6411 | dma_buf->va = (void *)((uintptr)rsv_buf->va + base_offset); | |
6412 | PHYSADDRHISET(dma_buf->pa, PHYSADDRHI(rsv_buf->pa)); | |
6413 | PHYSADDRLOSET(dma_buf->pa, PHYSADDRLO(rsv_buf->pa) + base_offset); | |
6414 | ||
6415 | /* On 64bit, contiguous space may not span across 0x00000000FFFFFFFF */ | |
6416 | ASSERT(PHYSADDRLO(dma_buf->pa) >= PHYSADDRLO(rsv_buf->pa)); | |
6417 | ||
6418 | dma_buf->dmah = rsv_buf->dmah; | |
6419 | dma_buf->secdma = rsv_buf->secdma; | |
6420 | ||
6421 | (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); | |
6422 | } else { | |
6423 | /* Allocate a dhd_dma_buf */ | |
6424 | dma_buf_alloced = dhd_dma_buf_alloc(dhd, &ring->dma_buf, dma_buf_len); | |
6425 | if (dma_buf_alloced != BCME_OK) { | |
6426 | return BCME_NOMEM; | |
6427 | } | |
6428 | } | |
6429 | ||
6430 | /* CAUTION: Save ring::base_addr in little endian format! */ | |
6431 | dhd_base_addr_htolpa(&ring->base_addr, ring->dma_buf.pa); | |
6432 | ||
6433 | #ifdef BCM_SECURE_DMA | |
6434 | if (SECURE_DMA_ENAB(prot->osh)) { | |
6435 | ring->dma_buf.secdma = MALLOCZ(prot->osh, sizeof(sec_cma_info_t)); | |
6436 | if (ring->dma_buf.secdma == NULL) { | |
6437 | goto free_dma_buf; | |
6438 | } | |
6439 | } | |
6440 | #endif /* BCM_SECURE_DMA */ | |
6441 | ||
6442 | DHD_INFO(("RING_ATTACH : %s Max item %d len item %d total size %d " | |
6443 | "ring start %p buf phys addr %x:%x \n", | |
6444 | ring->name, ring->max_items, ring->item_len, | |
6445 | dma_buf_len, ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), | |
6446 | ltoh32(ring->base_addr.low_addr))); | |
6447 | ||
6448 | return BCME_OK; | |
6449 | ||
6450 | #ifdef BCM_SECURE_DMA | |
6451 | free_dma_buf: | |
6452 | if (dma_buf_alloced == BCME_OK) { | |
6453 | dhd_dma_buf_free(dhd, &ring->dma_buf); | |
6454 | } | |
6455 | #endif /* BCM_SECURE_DMA */ | |
6456 | ||
6457 | return BCME_NOMEM; | |
6458 | ||
6459 | } /* dhd_prot_ring_attach */ | |
6460 | ||
6461 | ||
6462 | /** | |
6463 | * dhd_prot_ring_init - Post the common ring information to dongle. | |
6464 | * | |
6465 | * Used only for common rings. | |
6466 | * | |
6467 | * The flowrings information is passed via the create flowring control message | |
6468 | * (tx_flowring_create_request_t) sent over the H2D control submission common | |
6469 | * ring. | |
6470 | */ | |
6471 | static void | |
6472 | dhd_prot_ring_init(dhd_pub_t *dhd, msgbuf_ring_t *ring) | |
6473 | { | |
6474 | ring->wr = 0; | |
6475 | ring->rd = 0; | |
6476 | ring->curr_rd = 0; | |
6477 | ||
6478 | /* CAUTION: ring::base_addr already in Little Endian */ | |
6479 | dhd_bus_cmn_writeshared(dhd->bus, &ring->base_addr, | |
6480 | sizeof(sh_addr_t), RING_BUF_ADDR, ring->idx); | |
6481 | dhd_bus_cmn_writeshared(dhd->bus, &ring->max_items, | |
6482 | sizeof(uint16), RING_MAX_ITEMS, ring->idx); | |
6483 | dhd_bus_cmn_writeshared(dhd->bus, &ring->item_len, | |
6484 | sizeof(uint16), RING_ITEM_LEN, ring->idx); | |
6485 | ||
6486 | dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), | |
6487 | sizeof(uint16), RING_WR_UPD, ring->idx); | |
6488 | dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), | |
6489 | sizeof(uint16), RING_RD_UPD, ring->idx); | |
6490 | ||
6491 | /* ring inited */ | |
6492 | ring->inited = TRUE; | |
6493 | ||
6494 | } /* dhd_prot_ring_init */ | |
6495 | ||
6496 | ||
6497 | /** | |
6498 | * dhd_prot_ring_reset - bzero a ring's DMA-able buffer, flush the cache, | |
6499 | * and reset the WR and RD indices to 0. | |
6500 | */ | |
6501 | static void | |
6502 | dhd_prot_ring_reset(dhd_pub_t *dhd, msgbuf_ring_t *ring) | |
6503 | { | |
6504 | DHD_TRACE(("%s\n", __FUNCTION__)); | |
6505 | ||
6506 | dhd_dma_buf_reset(dhd, &ring->dma_buf); | |
6507 | ||
6508 | ring->rd = ring->wr = 0; | |
6509 | ring->curr_rd = 0; | |
6510 | ring->inited = FALSE; | |
6511 | ring->create_pending = FALSE; | |
6512 | } | |
6513 | ||
6514 | ||
6515 | /** | |
6516 | * dhd_prot_ring_detach - Detach the DMA-able buffer and any other objects | |
6517 | * hanging off the msgbuf_ring. | |
6518 | */ | |
6519 | static void | |
6520 | dhd_prot_ring_detach(dhd_pub_t *dhd, msgbuf_ring_t *ring) | |
6521 | { | |
6522 | dhd_prot_t *prot = dhd->prot; | |
6523 | uint16 max_flowrings = dhd->bus->max_tx_flowrings; | |
6524 | ASSERT(ring); | |
6525 | ||
6526 | ring->inited = FALSE; | |
6527 | /* rd = ~0, wr = ring->rd - 1, max_items = 0, len_item = ~0 */ | |
6528 | ||
6529 | #ifdef BCM_SECURE_DMA | |
6530 | if (SECURE_DMA_ENAB(prot->osh)) { | |
6531 | if (ring->dma_buf.secdma) { | |
6532 | SECURE_DMA_UNMAP_ALL(prot->osh, ring->dma_buf.secdma); | |
6533 | MFREE(prot->osh, ring->dma_buf.secdma, sizeof(sec_cma_info_t)); | |
6534 | ring->dma_buf.secdma = NULL; | |
6535 | } | |
6536 | } | |
6537 | #endif /* BCM_SECURE_DMA */ | |
6538 | ||
6539 | /* If the DMA-able buffer was carved out of a pre-reserved contiguous | |
6540 | * memory, then simply stop using it. | |
6541 | */ | |
6542 | if (DHD_IS_FLOWRING(ring->idx, max_flowrings) && (prot->flowrings_dma_buf.va)) { | |
6543 | (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); | |
6544 | memset(&ring->dma_buf, 0, sizeof(dhd_dma_buf_t)); | |
6545 | } else { | |
6546 | dhd_dma_buf_free(dhd, &ring->dma_buf); | |
6547 | } | |
6548 | ||
6549 | } /* dhd_prot_ring_detach */ | |
6550 | ||
6551 | ||
6552 | /* | |
6553 | * +---------------------------------------------------------------------------- | |
6554 | * Flowring Pool | |
6555 | * | |
6556 | * Unlike common rings, which are attached very early on (dhd_prot_attach), | |
6557 | * flowrings are dynamically instantiated. Moreover, flowrings may require a | |
6558 | * larger DMA-able buffer. To avoid issues with fragmented cache coherent | |
6559 | * DMA-able memory, a pre-allocated pool of msgbuf_ring_t is allocated once. | |
6560 | * The DMA-able buffers are attached to these pre-allocated msgbuf_ring. | |
6561 | * | |
6562 | * Each DMA-able buffer may be allocated independently, or may be carved out | |
6563 | * of a single large contiguous region that is registered with the protocol | |
6564 | * layer into flowrings_dma_buf. On a 64bit platform, this contiguous region | |
6565 | * may not span 0x00000000FFFFFFFF (avoid dongle side 64bit ptr arithmetic). | |
6566 | * | |
6567 | * No flowring pool action is performed in dhd_prot_attach(), as the number | |
6568 | * of h2d rings is not yet known. | |
6569 | * | |
6570 | * In dhd_prot_init(), the dongle advertized number of h2d rings is used to | |
6571 | * determine the number of flowrings required, and a pool of msgbuf_rings are | |
6572 | * allocated and a DMA-able buffer (carved or allocated) is attached. | |
6573 | * See: dhd_prot_flowrings_pool_attach() | |
6574 | * | |
6575 | * A flowring msgbuf_ring object may be fetched from this pool during flowring | |
6576 | * creation, using the flowid. Likewise, flowrings may be freed back into the | |
6577 | * pool on flowring deletion. | |
6578 | * See: dhd_prot_flowrings_pool_fetch(), dhd_prot_flowrings_pool_release() | |
6579 | * | |
6580 | * In dhd_prot_detach(), the flowring pool is detached. The DMA-able buffers | |
6581 | * are detached (returned back to the carved region or freed), and the pool of | |
6582 | * msgbuf_ring and any objects allocated against it are freed. | |
6583 | * See: dhd_prot_flowrings_pool_detach() | |
6584 | * | |
6585 | * In dhd_prot_reset(), the flowring pool is simply reset by returning it to a | |
6586 | * state as-if upon an attach. All DMA-able buffers are retained. | |
6587 | * Following a dhd_prot_reset(), in a subsequent dhd_prot_init(), the flowring | |
6588 | * pool attach will notice that the pool persists and continue to use it. This | |
6589 | * will avoid the case of a fragmented DMA-able region. | |
6590 | * | |
6591 | * +---------------------------------------------------------------------------- | |
6592 | */ | |
6593 | ||
6594 | /* Conversion of a flowid to a flowring pool index */ | |
6595 | #define DHD_FLOWRINGS_POOL_OFFSET(flowid) \ | |
6596 | ((flowid) - BCMPCIE_H2D_COMMON_MSGRINGS) | |
6597 | ||
6598 | /* Fetch the msgbuf_ring_t from the flowring pool given a flowid */ | |
6599 | #define DHD_RING_IN_FLOWRINGS_POOL(prot, flowid) \ | |
6600 | (msgbuf_ring_t*)((prot)->h2d_flowrings_pool) + \ | |
6601 | DHD_FLOWRINGS_POOL_OFFSET(flowid) | |
6602 | ||
6603 | /* Traverse each flowring in the flowring pool, assigning ring and flowid */ | |
6604 | #define FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, total_flowrings) \ | |
6605 | for ((flowid) = DHD_FLOWRING_START_FLOWID, \ | |
6606 | (ring) = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); \ | |
6607 | (flowid) < ((total_flowrings) + DHD_FLOWRING_START_FLOWID); \ | |
6608 | (ring)++, (flowid)++) | |
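 | ||
 | /* Worked example (illustrative, assuming BCMPCIE_H2D_COMMON_MSGRINGS == 2, | |
 | * i.e. the ctrl and rxbuf submission rings): flowid 5 maps to pool slot 3, | |
 | * since DHD_FLOWRINGS_POOL_OFFSET(5) == 5 - 2 == 3, and therefore | |
 | * DHD_RING_IN_FLOWRINGS_POOL(prot, 5) == &prot->h2d_flowrings_pool[3]. | |
 | * The pool array is thus indexed by (flowid - number of common rings). | |
 | */ | |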
6609 | ||
6610 | /* Fetch number of H2D flowrings given the total number of h2d rings */ | |
6611 | static uint16 | |
6612 | dhd_get_max_flow_rings(dhd_pub_t *dhd) | |
6613 | { | |
6614 | if (dhd->bus->api.fw_rev >= PCIE_SHARED_VERSION_6) | |
6615 | return dhd->bus->max_tx_flowrings; | |
6616 | else | |
6617 | return (dhd->bus->max_tx_flowrings - BCMPCIE_H2D_COMMON_MSGRINGS); | |
6618 | } | |
6619 | ||
6620 | /** | |
6621 | * dhd_prot_flowrings_pool_attach - Initialize a pool of flowring msgbuf_ring_t. | |
6622 | * | |
6623 | * Allocate a pool of msgbuf_ring along with DMA-able buffers for flowrings. | |
6624 | * Dongle includes common rings when it advertises the number of H2D rings. | |
6625 | * Allocates a pool of msgbuf_ring_t and invokes dhd_prot_ring_attach to | |
6626 | * allocate the DMA-able buffer and initialize each msgbuf_ring_t object. | |
6627 | * | |
6628 | * dhd_prot_ring_attach is invoked to perform the actual initialization and | |
6629 | * attaching the DMA-able buffer. | |
6630 | * | |
6631 | * Later dhd_prot_flowrings_pool_fetch() may be used to fetch a preallocated and | |
6632 | * initialized msgbuf_ring_t object. | |
6633 | * | |
6634 | * returns BCME_OK=0 on success | |
6635 | * returns non-zero negative error value on failure. | |
6636 | */ | |
6637 | static int | |
6638 | dhd_prot_flowrings_pool_attach(dhd_pub_t *dhd) | |
6639 | { | |
6640 | uint16 flowid; | |
6641 | msgbuf_ring_t *ring; | |
6642 | uint16 h2d_flowrings_total; /* exclude H2D common rings */ | |
6643 | dhd_prot_t *prot = dhd->prot; | |
6644 | char ring_name[RING_NAME_MAX_LENGTH]; | |
6645 | ||
6646 | if (prot->h2d_flowrings_pool != NULL) | |
6647 | return BCME_OK; /* dhd_prot_init re-entry after a dhd_prot_reset */ | |
6648 | ||
6649 | ASSERT(prot->h2d_rings_total == 0); | |
6650 | ||
6651 | /* h2d_rings_total includes H2D common rings: ctrl and rxbuf subn */ | |
6652 | prot->h2d_rings_total = (uint16)dhd_bus_max_h2d_queues(dhd->bus); | |
6653 | ||
6654 | if (prot->h2d_rings_total < BCMPCIE_H2D_COMMON_MSGRINGS) { | |
6655 | DHD_ERROR(("%s: h2d_rings_total advertised as %u\n", | |
6656 | __FUNCTION__, prot->h2d_rings_total)); | |
6657 | return BCME_ERROR; | |
6658 | } | |
6659 | ||
6660 | /* Subtract number of H2D common rings, to determine number of flowrings */ | |
6661 | h2d_flowrings_total = dhd_get_max_flow_rings(dhd); | |
6662 | ||
6663 | DHD_ERROR(("Attach flowrings pool for %d rings\n", h2d_flowrings_total)); | |
6664 | ||
6665 | /* Allocate pool of msgbuf_ring_t objects for all flowrings */ | |
6666 | prot->h2d_flowrings_pool = (msgbuf_ring_t *)MALLOCZ(prot->osh, | |
6667 | (h2d_flowrings_total * sizeof(msgbuf_ring_t))); | |
6668 | ||
6669 | if (prot->h2d_flowrings_pool == NULL) { | |
6670 | DHD_ERROR(("%s: flowrings pool for %d flowrings, alloc failure\n", | |
6671 | __FUNCTION__, h2d_flowrings_total)); | |
6672 | goto fail; | |
6673 | } | |
6674 | ||
6675 | /* Setup & Attach a DMA-able buffer to each flowring in the flowring pool */ | |
6676 | FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { | |
6677 | snprintf(ring_name, sizeof(ring_name), "h2dflr_%03u", flowid); | |
6678 | if (dhd_prot_ring_attach(dhd, ring, ring_name, | |
6679 | prot->h2d_max_txpost, H2DRING_TXPOST_ITEMSIZE, | |
6680 | DHD_FLOWID_TO_RINGID(flowid)) != BCME_OK) { | |
6681 | goto attach_fail; | |
6682 | } | |
6683 | } | |
6684 | ||
6685 | return BCME_OK; | |
6686 | ||
6687 | attach_fail: | |
6688 | dhd_prot_flowrings_pool_detach(dhd); /* Free entire pool of flowrings */ | |
6689 | ||
6690 | fail: | |
6691 | prot->h2d_rings_total = 0; | |
6692 | return BCME_NOMEM; | |
6693 | ||
6694 | } /* dhd_prot_flowrings_pool_attach */ | |
6695 | ||
6696 | ||
6697 | /** | |
6698 | * dhd_prot_flowrings_pool_reset - Reset all msgbuf_ring_t objects in the pool. | |
6699 | * Invokes dhd_prot_ring_reset to perform the actual reset. | |
6700 | * | |
6701 | * The DMA-able buffer is not freed during reset and neither is the flowring | |
6702 | * pool freed. | |
6703 | * | |
6704 | * dhd_prot_flowrings_pool_reset will be invoked in dhd_prot_reset. Following | |
6705 | * the dhd_prot_reset, dhd_prot_init will be re-invoked, and the flowring pool | |
6706 | * from a previous flowring pool instantiation will be reused. | |
6707 | * | |
6708 | * This will avoid a fragmented DMA-able memory condition, if multiple | |
6709 | * dhd_prot_reset were invoked to reboot the dongle without a full detach/attach | |
6710 | * cycle. | |
6711 | */ | |
6712 | static void | |
6713 | dhd_prot_flowrings_pool_reset(dhd_pub_t *dhd) | |
6714 | { | |
6715 | uint16 flowid, h2d_flowrings_total; | |
6716 | msgbuf_ring_t *ring; | |
6717 | dhd_prot_t *prot = dhd->prot; | |
6718 | ||
6719 | if (prot->h2d_flowrings_pool == NULL) { | |
6720 | ASSERT(prot->h2d_rings_total == 0); | |
6721 | return; | |
6722 | } | |
6723 | h2d_flowrings_total = dhd_get_max_flow_rings(dhd); | |
6724 | /* Reset each flowring in the flowring pool */ | |
6725 | FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { | |
6726 | dhd_prot_ring_reset(dhd, ring); | |
6727 | ring->inited = FALSE; | |
6728 | } | |
6729 | ||
6730 | /* Flowring pool state must be as-if dhd_prot_flowrings_pool_attach */ | |
6731 | } | |
6732 | ||
6733 | ||
6734 | /** | |
6735 | * dhd_prot_flowrings_pool_detach - Free pool of msgbuf_ring along with | |
6736 | * DMA-able buffers for flowrings. | |
6737 | * dhd_prot_ring_detach is invoked to free the DMA-able buffer and perform any | |
6738 | * de-initialization of each msgbuf_ring_t. | |
6739 | */ | |
6740 | static void | |
6741 | dhd_prot_flowrings_pool_detach(dhd_pub_t *dhd) | |
6742 | { | |
6743 | int flowid; | |
6744 | msgbuf_ring_t *ring; | |
6745 | uint16 h2d_flowrings_total; /* exclude H2D common rings */ | |
6746 | dhd_prot_t *prot = dhd->prot; | |
6747 | ||
6748 | if (prot->h2d_flowrings_pool == NULL) { | |
6749 | ASSERT(prot->h2d_rings_total == 0); | |
6750 | return; | |
6751 | } | |
6752 | ||
6753 | h2d_flowrings_total = dhd_get_max_flow_rings(dhd); | |
6754 | /* Detach the DMA-able buffer for each flowring in the flowring pool */ | |
6755 | FOREACH_RING_IN_FLOWRINGS_POOL(prot, ring, flowid, h2d_flowrings_total) { | |
6756 | dhd_prot_ring_detach(dhd, ring); | |
6757 | } | |
6758 | ||
6759 | ||
6760 | MFREE(prot->osh, prot->h2d_flowrings_pool, | |
6761 | (h2d_flowrings_total * sizeof(msgbuf_ring_t))); | |
6762 | ||
6763 | prot->h2d_flowrings_pool = (msgbuf_ring_t*)NULL; | |
6764 | prot->h2d_rings_total = 0; | |
6765 | ||
6766 | } /* dhd_prot_flowrings_pool_detach */ | |
6767 | ||
6768 | ||
6769 | /** | |
6770 | * dhd_prot_flowrings_pool_fetch - Fetch a preallocated and initialized | |
6771 | * msgbuf_ring from the flowring pool, and assign it. | |
6772 | * | |
6773 | * Unlike common rings, which uses a dhd_prot_ring_init() to pass the common | |
6774 | * ring information to the dongle, a flowring's information is passed via a | |
6775 | * flowring create control message. | |
6776 | * | |
6777 | * Only the ring state (WR, RD) index are initialized. | |
6778 | */ | |
6779 | static msgbuf_ring_t * | |
6780 | dhd_prot_flowrings_pool_fetch(dhd_pub_t *dhd, uint16 flowid) | |
6781 | { | |
6782 | msgbuf_ring_t *ring; | |
6783 | dhd_prot_t *prot = dhd->prot; | |
6784 | ||
6785 | ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); | |
6786 | ASSERT(flowid < prot->h2d_rings_total); | |
6787 | ASSERT(prot->h2d_flowrings_pool != NULL); | |
6788 | ||
6789 | ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); | |
6790 | ||
6791 | /* ASSERT flow_ring->inited == FALSE */ | |
6792 | ||
6793 | ring->wr = 0; | |
6794 | ring->rd = 0; | |
6795 | ring->curr_rd = 0; | |
6796 | ring->inited = TRUE; | |
6797 | /** | |
6798 | * Every time a flowring starts dynamically, initialize current_phase with 0 | |
6799 | * then flip to BCMPCIE_CMNHDR_PHASE_BIT_INIT | |
6800 | */ | |
6801 | ring->current_phase = 0; | |
6802 | return ring; | |
6803 | } | |
6804 | ||
6805 | ||
6806 | /** | |
6807 | * dhd_prot_flowrings_pool_release - release a previously fetched flowring's | |
6808 | * msgbuf_ring back to the flow_ring pool. | |
6809 | */ | |
6810 | void | |
6811 | dhd_prot_flowrings_pool_release(dhd_pub_t *dhd, uint16 flowid, void *flow_ring) | |
6812 | { | |
6813 | msgbuf_ring_t *ring; | |
6814 | dhd_prot_t *prot = dhd->prot; | |
6815 | ||
6816 | ASSERT(flowid >= DHD_FLOWRING_START_FLOWID); | |
6817 | ASSERT(flowid < prot->h2d_rings_total); | |
6818 | ASSERT(prot->h2d_flowrings_pool != NULL); | |
6819 | ||
6820 | ring = DHD_RING_IN_FLOWRINGS_POOL(prot, flowid); | |
6821 | ||
6822 | ASSERT(ring == (msgbuf_ring_t*)flow_ring); | |
6823 | /* ASSERT flow_ring->inited == TRUE */ | |
6824 | ||
6825 | (void)dhd_dma_buf_audit(dhd, &ring->dma_buf); | |
6826 | ||
6827 | ring->wr = 0; | |
6828 | ring->rd = 0; | |
6829 | ring->inited = FALSE; | |
6830 | ||
6831 | ring->curr_rd = 0; | |
6832 | } | |
6833 | ||
6834 | ||
6835 | /* Assumes only one index is updated at a time. | |
6836 | * If exactly_nitems is true, this function allocates space for nitems or fails, | |
6837 | * except when a wrap-around is encountered (the last nitems of the ring), to prevent a hangup. | |
6838 | * If exactly_nitems is false, this function allocates space for nitems or fewer. */ | |
6839 | static void *BCMFASTPATH | |
6840 | dhd_prot_get_ring_space(msgbuf_ring_t *ring, uint16 nitems, uint16 * alloced, | |
6841 | bool exactly_nitems) | |
6842 | { | |
6843 | void *ret_ptr = NULL; | |
6844 | uint16 ring_avail_cnt; | |
6845 | ||
6846 | ASSERT(nitems <= ring->max_items); | |
6847 | ||
6848 | ring_avail_cnt = CHECK_WRITE_SPACE(ring->rd, ring->wr, ring->max_items); | |
6849 | ||
6850 | if ((ring_avail_cnt == 0) || | |
6851 | (exactly_nitems && (ring_avail_cnt < nitems) && | |
6852 | ((ring->max_items - ring->wr) >= nitems))) { | |
6853 | DHD_INFO(("Space not available: ring %s items %d write %d read %d\n", | |
6854 | ring->name, nitems, ring->wr, ring->rd)); | |
6855 | return NULL; | |
6856 | } | |
6857 | *alloced = MIN(nitems, ring_avail_cnt); | |
6858 | ||
6859 | /* Return next available space */ | |
6860 | ret_ptr = (char *)DHD_RING_BGN_VA(ring) + (ring->wr * ring->item_len); | |
6861 | ||
6862 | /* Update write index */ | |
6863 | if ((ring->wr + *alloced) == ring->max_items) | |
6864 | ring->wr = 0; | |
6865 | else if ((ring->wr + *alloced) < ring->max_items) | |
6866 | ring->wr += *alloced; | |
6867 | else { | |
6868 | /* Should never hit this */ | |
6869 | ASSERT(0); | |
6870 | return NULL; | |
6871 | } | |
6872 | ||
6873 | return ret_ptr; | |
6874 | } /* dhd_prot_get_ring_space */ | |
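 | ||
 | /* Worked example of the write-index update above (illustrative values): with | |
 | * max_items == 256 and wr == 250, an allocation of *alloced == 6 lands exactly | |
 | * on the ring end, so wr wraps to 0; an allocation of 4 would instead advance | |
 | * wr to 254. wr + *alloced can never exceed max_items, since *alloced is | |
 | * clamped to the available count, hence the ASSERT(0) in the final branch. | |
 | */ | |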
6875 | ||
6876 | ||
6877 | /** | |
6878 | * dhd_prot_ring_write_complete - Host updates the new WR index on producing | |
6879 | * new messages in a H2D ring. The messages are flushed from cache prior to | |
6880 | * posting the new WR index. The new WR index will be updated in the DMA index | |
6881 | * array or directly in the dongle's ring state memory. | |
6882 | * A PCIE doorbell will be generated to wake up the dongle. | |
6883 | * This is a non-atomic function, make sure the callers | |
6884 | * always hold appropriate locks. | |
6885 | */ | |
6886 | static void BCMFASTPATH | |
6887 | dhd_prot_ring_write_complete(dhd_pub_t *dhd, msgbuf_ring_t * ring, void* p, | |
6888 | uint16 nitems) | |
6889 | { | |
6890 | dhd_prot_t *prot = dhd->prot; | |
6891 | uint8 db_index; | |
6892 | uint16 max_flowrings = dhd->bus->max_tx_flowrings; | |
6893 | ||
6894 | /* cache flush */ | |
6895 | OSL_CACHE_FLUSH(p, ring->item_len * nitems); | |
6896 | ||
6897 | if (IDMA_DS_ACTIVE(dhd) && IDMA_ACTIVE(dhd)) { | |
6898 | dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), | |
6899 | sizeof(uint16), RING_WR_UPD, ring->idx); | |
6900 | } else if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { | |
6901 | dhd_prot_dma_indx_set(dhd, ring->wr, | |
6902 | H2D_DMA_INDX_WR_UPD, ring->idx); | |
6903 | } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings)) { | |
6904 | dhd_prot_dma_indx_set(dhd, ring->wr, | |
6905 | H2D_IFRM_INDX_WR_UPD, ring->idx); | |
6906 | } else { | |
6907 | dhd_bus_cmn_writeshared(dhd->bus, &(ring->wr), | |
6908 | sizeof(uint16), RING_WR_UPD, ring->idx); | |
6909 | } | |
6910 | ||
6911 | /* raise h2d interrupt */ | |
6912 | if (IDMA_ACTIVE(dhd) || | |
6913 | (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(ring->idx, max_flowrings))) { | |
6914 | if (IDMA_DS_ACTIVE(dhd)) { | |
6915 | prot->mb_ring_fn(dhd->bus, ring->wr); | |
6916 | } else { | |
6917 | db_index = IDMA_IDX0; | |
6918 | prot->mb_2_ring_fn(dhd->bus, db_index, TRUE); | |
6919 | } | |
6920 | } else { | |
6921 | prot->mb_ring_fn(dhd->bus, ring->wr); | |
6922 | } | |
6923 | } | |
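 | ||
 | /* Summary of the WR-index publication above (sketch, derived from the code): | |
 | *   IDMA + IDMA_DS          -> WR written to dongle ring state (TCM) | |
 | *   IDMA or host DMA index  -> WR written to the host DMA index array | |
 | *   IFRM flowrings          -> WR written to the IFRM index array | |
 | *   otherwise               -> WR written to dongle ring state (TCM) | |
 | * A doorbell follows: the IDMA/IFRM paths ring the indexed doorbell | |
 | * (mb_2_ring_fn with IDMA_IDX0), except under IDMA_DS, where the classic | |
 | * WR-value doorbell (mb_ring_fn) is used, as in the default path. | |
 | */ | |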
6924 | ||
6925 | /** | |
6926 | * dhd_prot_upd_read_idx - Host updates the new RD index on consuming messages | |
6927 | * from a D2H ring. The new RD index will be updated in the DMA Index array or | |
6928 | * directly in dongle's ring state memory. | |
6929 | */ | |
6930 | static void | |
6931 | dhd_prot_upd_read_idx(dhd_pub_t *dhd, msgbuf_ring_t * ring) | |
6932 | { | |
6933 | dhd_prot_t *prot = dhd->prot; | |
6934 | uint8 db_index; | |
6935 | ||
6936 | /* Update the read index: | |
6937 | * if DMA'ing of the indices is supported, | |
6938 | * update the RD index in the host memory array; | |
6939 | * otherwise write it directly to the dongle's TCM. | |
6940 | */ | |
6941 | if (IDMA_ACTIVE(dhd)) { | |
6942 | dhd_prot_dma_indx_set(dhd, ring->rd, | |
6943 | D2H_DMA_INDX_RD_UPD, ring->idx); | |
6944 | if (IDMA_DS_ACTIVE(dhd)) { | |
6945 | dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), | |
6946 | sizeof(uint16), RING_RD_UPD, ring->idx); | |
6947 | } else { | |
6948 | db_index = IDMA_IDX1; | |
6949 | prot->mb_2_ring_fn(dhd->bus, db_index, FALSE); | |
6950 | } | |
6951 | } else if (dhd->dma_h2d_ring_upd_support) { | |
6952 | dhd_prot_dma_indx_set(dhd, ring->rd, | |
6953 | D2H_DMA_INDX_RD_UPD, ring->idx); | |
6954 | } else { | |
6955 | dhd_bus_cmn_writeshared(dhd->bus, &(ring->rd), | |
6956 | sizeof(uint16), RING_RD_UPD, ring->idx); | |
6957 | } | |
6958 | } | |
6959 | ||
6960 | static int | |
6961 | dhd_send_d2h_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create) | |
6962 | { | |
6963 | unsigned long flags; | |
6964 | d2h_ring_create_req_t *d2h_ring; | |
6965 | uint16 alloced = 0; | |
6966 | int ret = BCME_OK; | |
6967 | uint16 max_h2d_rings = dhd->bus->max_submission_rings; | |
6968 | ||
6969 | #ifdef PCIE_INB_DW | |
6970 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
6971 | return BCME_ERROR; | |
6972 | #endif /* PCIE_INB_DW */ | |
6973 | DHD_GENERAL_LOCK(dhd, flags); | |
6974 | ||
6975 | DHD_TRACE(("%s trying to send D2H ring create Req\n", __FUNCTION__)); | |
6976 | ||
6977 | if (ring_to_create == NULL) { | |
6978 | DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__)); | |
6979 | ret = BCME_ERROR; | |
6980 | goto err; | |
6981 | } | |
6982 | ||
6983 | /* Request for ring buffer space */ | |
6984 | d2h_ring = (d2h_ring_create_req_t *) dhd_prot_alloc_ring_space(dhd, | |
6985 | &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, | |
6986 | &alloced, FALSE); | |
6987 | ||
6988 | if (d2h_ring == NULL) { | |
6989 | DHD_ERROR(("%s: FATAL: No space in control ring to send D2H ring create\n", | |
6990 | __FUNCTION__)); | |
6991 | ret = BCME_NOMEM; | |
6992 | goto err; | |
6993 | } | |
6994 | ring_to_create->create_req_id = DHD_D2H_DBGRING_REQ_PKTID; | |
6995 | ring_to_create->create_pending = TRUE; | |
6996 | ||
6997 | /* Common msg buf hdr */ | |
6998 | d2h_ring->msg.msg_type = MSG_TYPE_D2H_RING_CREATE; | |
6999 | d2h_ring->msg.if_id = 0; | |
7000 | d2h_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase; | |
7001 | d2h_ring->msg.request_id = htol32(ring_to_create->create_req_id); | |
7002 | d2h_ring->ring_id = htol16(DHD_D2H_RING_OFFSET(ring_to_create->idx, max_h2d_rings)); | |
7003 | d2h_ring->ring_type = BCMPCIE_D2H_RING_TYPE_DBGBUF_CPL; | |
7004 | d2h_ring->max_items = htol16(D2HRING_DYNAMIC_INFO_MAX_ITEM); | |
7005 | d2h_ring->len_item = htol16(D2HRING_INFO_BUFCMPLT_ITEMSIZE); | |
7006 | d2h_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr; | |
7007 | d2h_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr; | |
7008 | ||
7009 | d2h_ring->flags = 0; | |
7010 | d2h_ring->msg.epoch = | |
7011 | dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO; | |
7012 | dhd->prot->h2dring_ctrl_subn.seqnum++; | |
7013 | ||
7014 | /* Update the ctrl submission ring's WRITE index and ring the doorbell */ | |
7015 | dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, d2h_ring, | |
7016 | DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); | |
7017 | ||
7018 | err: | |
7019 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7020 | #ifdef PCIE_INB_DW | |
7021 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7022 | #endif | |
7023 | return ret; | |
7024 | } | |
7025 | ||
7026 | static int | |
7027 | dhd_send_h2d_ringcreate(dhd_pub_t *dhd, msgbuf_ring_t *ring_to_create) | |
7028 | { | |
7029 | unsigned long flags; | |
7030 | h2d_ring_create_req_t *h2d_ring; | |
7031 | uint16 alloced = 0; | |
7032 | uint8 i = 0; | |
7033 | int ret = BCME_OK; | |
7034 | ||
7035 | ||
7036 | #ifdef PCIE_INB_DW | |
7037 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
7038 | return BCME_ERROR; | |
7039 | #endif /* PCIE_INB_DW */ | |
7040 | DHD_GENERAL_LOCK(dhd, flags); | |
7041 | ||
7042 | DHD_TRACE(("%s trying to send H2D ring create Req\n", __FUNCTION__)); | |
7043 | ||
7044 | if (ring_to_create == NULL) { | |
7045 | DHD_ERROR(("%s: FATAL: ring_to_create is NULL\n", __FUNCTION__)); | |
7046 | ret = BCME_ERROR; | |
7047 | goto err; | |
7048 | } | |
7049 | ||
7050 | /* Request for ring buffer space */ | |
7051 | h2d_ring = (h2d_ring_create_req_t *)dhd_prot_alloc_ring_space(dhd, | |
7052 | &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, | |
7053 | &alloced, FALSE); | |
7054 | ||
7055 | if (h2d_ring == NULL) { | |
7056 | DHD_ERROR(("%s: FATAL: No space in control ring to send H2D ring create\n", | |
7057 | __FUNCTION__)); | |
7058 | ret = BCME_NOMEM; | |
7059 | goto err; | |
7060 | } | |
7061 | ring_to_create->create_req_id = DHD_H2D_DBGRING_REQ_PKTID; | |
7062 | ring_to_create->create_pending = TRUE; | |
7063 | ||
7064 | /* Common msg buf hdr */ | |
7065 | h2d_ring->msg.msg_type = MSG_TYPE_H2D_RING_CREATE; | |
7066 | h2d_ring->msg.if_id = 0; | |
7067 | h2d_ring->msg.request_id = htol32(ring_to_create->create_req_id); | |
7068 | h2d_ring->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase; | |
7069 | h2d_ring->ring_id = htol16(DHD_H2D_RING_OFFSET(ring_to_create->idx)); | |
7070 | h2d_ring->ring_type = BCMPCIE_H2D_RING_TYPE_DBGBUF_SUBMIT; | |
7071 | h2d_ring->max_items = htol16(H2DRING_DYNAMIC_INFO_MAX_ITEM); | |
7072 | h2d_ring->n_completion_ids = ring_to_create->n_completion_ids; | |
7073 | h2d_ring->len_item = htol16(H2DRING_INFO_BUFPOST_ITEMSIZE); | |
7074 | h2d_ring->ring_ptr.low_addr = ring_to_create->base_addr.low_addr; | |
7075 | h2d_ring->ring_ptr.high_addr = ring_to_create->base_addr.high_addr; | |
7076 | ||
7077 | for (i = 0; i < ring_to_create->n_completion_ids; i++) { | |
7078 | h2d_ring->completion_ring_ids[i] = htol16(ring_to_create->compeltion_ring_ids[i]); | |
7079 | } | |
7080 | ||
7081 | h2d_ring->flags = 0; | |
7082 | h2d_ring->msg.epoch = | |
7083 | dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO; | |
7084 | dhd->prot->h2dring_ctrl_subn.seqnum++; | |
7085 | ||
7086 | /* Update the ctrl submission ring's WRITE index and ring the doorbell */ | |
7087 | dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_ring, | |
7088 | DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); | |
7089 | ||
7090 | err: | |
7091 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7092 | #ifdef PCIE_INB_DW | |
7093 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7094 | #endif | |
7095 | return ret; | |
7096 | } | |
7097 | ||
7098 | /** | |
7099 | * dhd_prot_dma_indx_set - set a new WR or RD index in the DMA index array. | |
7100 | * Dongle will DMA the entire array (if DMA_INDX feature is enabled). | |
7101 | * See dhd_prot_dma_indx_init() | |
7102 | */ | |
7103 | void | |
7104 | dhd_prot_dma_indx_set(dhd_pub_t *dhd, uint16 new_index, uint8 type, uint16 ringid) | |
7105 | { | |
7106 | uint8 *ptr; | |
7107 | uint16 offset; | |
7108 | dhd_prot_t *prot = dhd->prot; | |
7109 | uint16 max_h2d_rings = dhd->bus->max_submission_rings; | |
7110 | ||
7111 | switch (type) { | |
7112 | case H2D_DMA_INDX_WR_UPD: | |
7113 | ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); | |
7114 | offset = DHD_H2D_RING_OFFSET(ringid); | |
7115 | break; | |
7116 | ||
7117 | case D2H_DMA_INDX_RD_UPD: | |
7118 | ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); | |
7119 | offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); | |
7120 | break; | |
7121 | ||
7122 | case H2D_IFRM_INDX_WR_UPD: | |
7123 | ptr = (uint8 *)(prot->h2d_ifrm_indx_wr_buf.va); | |
7124 | offset = DHD_H2D_FRM_FLOW_RING_OFFSET(ringid); | |
7125 | break; | |
7126 | ||
7127 | default: | |
7128 | DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", | |
7129 | __FUNCTION__)); | |
7130 | return; | |
7131 | } | |
7132 | ||
7133 | ASSERT(prot->rw_index_sz != 0); | |
7134 | ptr += offset * prot->rw_index_sz; | |
7135 | ||
7136 | *(uint16*)ptr = htol16(new_index); | |
7137 | ||
7138 | OSL_CACHE_FLUSH((void *)ptr, prot->rw_index_sz); | |
7139 | ||
7140 | DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", | |
7141 | __FUNCTION__, new_index, type, ringid, ptr, offset)); | |
7142 | ||
7143 | } /* dhd_prot_dma_indx_set */ | |
7144 | ||
7145 | ||
7146 | /** | |
7147 | * dhd_prot_dma_indx_get - Fetch a WR or RD index from the dongle DMA-ed index | |
7148 | * array. | |
7149 | * Dongle DMAes an entire array to host memory (if the feature is enabled). | |
7150 | * See dhd_prot_dma_indx_init() | |
7151 | */ | |
7152 | static uint16 | |
7153 | dhd_prot_dma_indx_get(dhd_pub_t *dhd, uint8 type, uint16 ringid) | |
7154 | { | |
7155 | uint8 *ptr; | |
7156 | uint16 data; | |
7157 | uint16 offset; | |
7158 | dhd_prot_t *prot = dhd->prot; | |
7159 | uint16 max_h2d_rings = dhd->bus->max_submission_rings; | |
7160 | ||
7161 | switch (type) { | |
7162 | case H2D_DMA_INDX_WR_UPD: | |
7163 | ptr = (uint8 *)(prot->h2d_dma_indx_wr_buf.va); | |
7164 | offset = DHD_H2D_RING_OFFSET(ringid); | |
7165 | break; | |
7166 | ||
7167 | case H2D_DMA_INDX_RD_UPD: | |
7168 | ptr = (uint8 *)(prot->h2d_dma_indx_rd_buf.va); | |
7169 | offset = DHD_H2D_RING_OFFSET(ringid); | |
7170 | break; | |
7171 | ||
7172 | case D2H_DMA_INDX_WR_UPD: | |
7173 | ptr = (uint8 *)(prot->d2h_dma_indx_wr_buf.va); | |
7174 | offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); | |
7175 | break; | |
7176 | ||
7177 | case D2H_DMA_INDX_RD_UPD: | |
7178 | ptr = (uint8 *)(prot->d2h_dma_indx_rd_buf.va); | |
7179 | offset = DHD_D2H_RING_OFFSET(ringid, max_h2d_rings); | |
7180 | break; | |
7181 | ||
7182 | default: | |
7183 | DHD_ERROR(("%s: Invalid option for DMAing read/write index\n", | |
7184 | __FUNCTION__)); | |
7185 | return 0; | |
7186 | } | |
7187 | ||
7188 | ASSERT(prot->rw_index_sz != 0); | |
7189 | ptr += offset * prot->rw_index_sz; | |
7190 | ||
7191 | OSL_CACHE_INV((void *)ptr, prot->rw_index_sz); | |
7192 | ||
7193 | data = LTOH16(*((uint16*)ptr)); | |
7194 | ||
7195 | DHD_TRACE(("%s: data %d type %d ringid %d ptr 0x%p offset %d\n", | |
7196 | __FUNCTION__, data, type, ringid, ptr, offset)); | |
7197 | ||
7198 | return (data); | |
7199 | ||
7200 | } /* dhd_prot_dma_indx_get */ | |
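||
| /* |
|  * Layout sketch (illustrative, not compiled): the helpers above treat each |
|  * index buffer as a packed vector with one entry per ring, each entry |
|  * prot->rw_index_sz bytes wide, stored little-endian. With rw_index_sz == 2: |
|  * |
|  *   uint16 *h2d_wr = (uint16 *)prot->h2d_dma_indx_wr_buf.va; |
|  *   h2d_wr[DHD_H2D_RING_OFFSET(ringid)] = htol16(new_index);   // as in dhd_prot_dma_indx_set() |
|  *   value = LTOH16(h2d_wr[DHD_H2D_RING_OFFSET(ringid)]);       // as in dhd_prot_dma_indx_get() |
|  * |
|  * The OSL_CACHE_FLUSH/OSL_CACHE_INV calls keep these host-memory vectors |
|  * coherent with the dongle's DMA view of the same arrays. |
|  */ |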
7201 | ||
7202 | /** | |
7203 | * An array of DMA read/write indices, describing the state of the host rings, can be maintained |
7204 | * either in host memory or in device memory, depending on preprocessor options. When host memory |
7205 | * is used, this function is called during driver initialization to reserve and initialize blocks |
7206 | * of DMA'able host memory, each holding an array of DMA read or DMA write indices. The physical |
7207 | * addresses of these blocks are communicated to the dongle later on; by reading this host |
7208 | * memory, the dongle learns the state of the host rings. |
7209 | */ | |
7210 | ||
7211 | static INLINE int | |
7212 | dhd_prot_dma_indx_alloc(dhd_pub_t *dhd, uint8 type, | |
7213 | dhd_dma_buf_t *dma_buf, uint32 bufsz) | |
7214 | { | |
7215 | int rc; | |
7216 | ||
7217 | if ((dma_buf->len == bufsz) || (dma_buf->va != NULL)) | |
7218 | return BCME_OK; | |
7219 | ||
7220 | rc = dhd_dma_buf_alloc(dhd, dma_buf, bufsz); | |
7221 | ||
7222 | return rc; | |
7223 | } | |
7224 | ||
7225 | int | |
7226 | dhd_prot_dma_indx_init(dhd_pub_t *dhd, uint32 rw_index_sz, uint8 type, uint32 length) | |
7227 | { | |
7228 | uint32 bufsz; | |
7229 | dhd_prot_t *prot = dhd->prot; | |
7230 | dhd_dma_buf_t *dma_buf; | |
7231 | ||
7232 | if (prot == NULL) { | |
7233 | DHD_ERROR(("prot is not inited\n")); | |
7234 | return BCME_ERROR; | |
7235 | } | |
7236 | ||
7237 | /* Dongle advertises 2B or 4B RW index size */ |
7238 | ASSERT(rw_index_sz != 0); | |
7239 | prot->rw_index_sz = rw_index_sz; | |
7240 | ||
7241 | bufsz = rw_index_sz * length; | |
7242 | ||
7243 | switch (type) { | |
7244 | case H2D_DMA_INDX_WR_BUF: | |
7245 | dma_buf = &prot->h2d_dma_indx_wr_buf; | |
7246 | if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) | |
7247 | goto ret_no_mem; | |
7248 | DHD_ERROR(("H2D DMA WR INDX : array size %d = %d * %d\n", | |
7249 | dma_buf->len, rw_index_sz, length)); | |
7250 | break; | |
7251 | ||
7252 | case H2D_DMA_INDX_RD_BUF: | |
7253 | dma_buf = &prot->h2d_dma_indx_rd_buf; | |
7254 | if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) | |
7255 | goto ret_no_mem; | |
7256 | DHD_ERROR(("H2D DMA RD INDX : array size %d = %d * %d\n", | |
7257 | dma_buf->len, rw_index_sz, length)); | |
7258 | break; | |
7259 | ||
7260 | case D2H_DMA_INDX_WR_BUF: | |
7261 | dma_buf = &prot->d2h_dma_indx_wr_buf; | |
7262 | if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) | |
7263 | goto ret_no_mem; | |
7264 | DHD_ERROR(("D2H DMA WR INDX : array size %d = %d * %d\n", | |
7265 | dma_buf->len, rw_index_sz, length)); | |
7266 | break; | |
7267 | ||
7268 | case D2H_DMA_INDX_RD_BUF: | |
7269 | dma_buf = &prot->d2h_dma_indx_rd_buf; | |
7270 | if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) | |
7271 | goto ret_no_mem; | |
7272 | DHD_ERROR(("D2H DMA RD INDX : array size %d = %d * %d\n", | |
7273 | dma_buf->len, rw_index_sz, length)); | |
7274 | break; | |
7275 | ||
7276 | case H2D_IFRM_INDX_WR_BUF: | |
7277 | dma_buf = &prot->h2d_ifrm_indx_wr_buf; | |
7278 | if (dhd_prot_dma_indx_alloc(dhd, type, dma_buf, bufsz)) | |
7279 | goto ret_no_mem; | |
7280 | DHD_ERROR(("H2D IFRM WR INDX : array size %d = %d * %d\n", | |
7281 | dma_buf->len, rw_index_sz, length)); | |
7282 | break; | |
7283 | ||
7284 | default: | |
7285 | DHD_ERROR(("%s: Unexpected option\n", __FUNCTION__)); | |
7286 | return BCME_BADOPTION; | |
7287 | } | |
7288 | ||
7289 | return BCME_OK; | |
7290 | ||
7291 | ret_no_mem: | |
7292 | DHD_ERROR(("%s: dhd_prot_dma_indx_alloc type %d buf_sz %d failure\n", | |
7293 | __FUNCTION__, type, bufsz)); | |
7294 | return BCME_NOMEM; | |
7295 | ||
7296 | } /* dhd_prot_dma_indx_init */ | |
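||
| /* |
|  * Sizing example (illustrative): a dongle advertising rw_index_sz = 2 with |
|  * length = 40 rings gives bufsz = 2 * 40 = 80 bytes for the corresponding |
|  * index array -- one little-endian uint16 per ring, as read and written by |
|  * dhd_prot_dma_indx_get()/dhd_prot_dma_indx_set() above. |
|  */ |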
7297 | ||
7298 | ||
7299 | /** | |
7300 | * Called on checking for 'completion' messages from the dongle. Returns next host buffer to read | |
7301 | * from, or NULL if there are no more messages to read. | |
7302 | */ | |
7303 | static uint8* | |
7304 | dhd_prot_get_read_addr(dhd_pub_t *dhd, msgbuf_ring_t *ring, uint32 *available_len) | |
7305 | { | |
7306 | uint16 wr; | |
7307 | uint16 rd; | |
7308 | uint16 depth; | |
7309 | uint16 items; | |
7310 | void *read_addr = NULL; /* address of next msg to be read in ring */ | |
7311 | uint16 d2h_wr = 0; | |
7312 | ||
7313 | DHD_TRACE(("%s: d2h_dma_indx_rd_buf %p, d2h_dma_indx_wr_buf %p\n", | |
7314 | __FUNCTION__, (uint32 *)(dhd->prot->d2h_dma_indx_rd_buf.va), | |
7315 | (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va))); | |
7316 | ||
7317 | /* Remember the read index in a separate variable: |
7318 | * ring->rd gets updated at the end of this function, |
7319 | * so without this copy it would be impossible to log the |
7320 | * exact read index from which the message was read. |
7321 | */ |
7322 | ring->curr_rd = ring->rd; | |
7323 | ||
7324 | /* update write pointer */ | |
7325 | if (dhd->dma_d2h_ring_upd_support) { | |
7326 | /* DMAing write/read indices supported */ | |
7327 | d2h_wr = dhd_prot_dma_indx_get(dhd, D2H_DMA_INDX_WR_UPD, ring->idx); | |
7328 | ring->wr = d2h_wr; | |
7329 | } else { | |
7330 | dhd_bus_cmn_readshared(dhd->bus, &(ring->wr), RING_WR_UPD, ring->idx); | |
7331 | } | |
7332 | ||
7333 | wr = ring->wr; | |
7334 | rd = ring->rd; | |
7335 | depth = ring->max_items; | |
7336 | ||
7337 | /* check for avail space, in number of ring items */ | |
7338 | items = READ_AVAIL_SPACE(wr, rd, depth); | |
7339 | if (items == 0) | |
7340 | return NULL; | |
7341 | ||
7342 | /* |
7343 | * Note that in some builds ASSERT translates to just a printk, |
7344 | * so even if this condition were hit we would never halt, and |
7345 | * dhd_prot_process_msgtype could then get into a long loop. |
7346 | * Handle the corruption explicitly here instead. |
7347 | */ |
7348 | if (items > ring->max_items) { | |
7349 | DHD_ERROR(("\r\n======================= \r\n")); | |
7350 | DHD_ERROR(("%s(): ring %p, ring->name %s, ring->max_items %d, items %d \r\n", | |
7351 | __FUNCTION__, ring, ring->name, ring->max_items, items)); | |
7352 | DHD_ERROR(("wr: %d, rd: %d, depth: %d \r\n", wr, rd, depth)); | |
7353 | DHD_ERROR(("dhd->busstate %d bus->wait_for_d3_ack %d \r\n", | |
7354 | dhd->busstate, dhd->bus->wait_for_d3_ack)); | |
7355 | DHD_ERROR(("\r\n======================= \r\n")); | |
7356 | #ifdef SUPPORT_LINKDOWN_RECOVERY | |
7357 | if (wr >= ring->max_items) { | |
7358 | dhd->bus->read_shm_fail = TRUE; | |
7359 | } | |
7360 | #else | |
7361 | #ifdef DHD_FW_COREDUMP | |
7362 | if (dhd->memdump_enabled) { | |
7363 | /* collect core dump */ | |
7364 | dhd->memdump_type = DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR; | |
7365 | dhd_bus_mem_dump(dhd); | |
7366 | ||
7367 | } | |
7368 | #endif /* DHD_FW_COREDUMP */ | |
7369 | #endif /* SUPPORT_LINKDOWN_RECOVERY */ | |
7370 | ||
7371 | *available_len = 0; | |
7372 | dhd_schedule_reset(dhd); | |
7373 | ||
7374 | return NULL; | |
7375 | } | |
7376 | ||
7377 | /* if space is available, calculate address to be read */ | |
7378 | read_addr = (char*)ring->dma_buf.va + (rd * ring->item_len); | |
7379 | ||
7380 | /* update read pointer */ | |
7381 | if ((ring->rd + items) >= ring->max_items) | |
7382 | ring->rd = 0; | |
7383 | else | |
7384 | ring->rd += items; | |
7385 | ||
7386 | ASSERT(ring->rd < ring->max_items); | |
7387 | ||
7388 | /* convert items to bytes : available_len must be 32bits */ | |
7389 | *available_len = (uint32)(items * ring->item_len); | |
7390 | ||
7391 | OSL_CACHE_INV(read_addr, *available_len); | |
7392 | ||
7393 | /* return read address */ | |
7394 | return read_addr; | |
7395 | ||
7396 | } /* dhd_prot_get_read_addr */ | |
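||
| /* |
|  * Worked example (illustrative; assumes READ_AVAIL_SPACE() returns the |
|  * contiguous item count up to the wrap point, which is what the rd update |
|  * above implies): with depth = 256, rd = 250, wr = 10, a first call returns |
|  * 6 items at base + 250 * item_len and wraps rd to 0; the next call returns |
|  * 10 items starting at base, leaving rd = 10. |
|  */ |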
7397 | ||
7398 | /** | |
7399 | * dhd_prot_h2d_mbdata_send_ctrlmsg is a non-atomic function, | |
7400 | * make sure the callers always hold appropriate locks. | |
7401 | */ | |
7402 | int dhd_prot_h2d_mbdata_send_ctrlmsg(dhd_pub_t *dhd, uint32 mb_data) | |
7403 | { | |
7404 | h2d_mailbox_data_t *h2d_mb_data; | |
7405 | uint16 alloced = 0; | |
7406 | int num_post = 1; | |
7407 | int i; | |
7408 | ||
7409 | DHD_INFO(("%s Sending H2D MB data Req data 0x%04x\n", | |
7410 | __FUNCTION__, mb_data)); | |
7411 | if (!dhd->prot->h2dring_ctrl_subn.inited) { | |
7412 | DHD_ERROR(("%s: Ctrl Submit Ring: not inited\n", __FUNCTION__)); | |
7413 | return BCME_ERROR; | |
7414 | } | |
7415 | #ifdef PCIE_INB_DW | |
7416 | if ((INBAND_DW_ENAB(dhd->bus)) && | |
7417 | (dhdpcie_bus_get_pcie_inband_dw_state(dhd->bus) == | |
7418 | DW_DEVICE_DS_DEV_SLEEP)) { | |
7419 | if (mb_data == H2D_HOST_CONS_INT) { | |
7420 | /* One additional device_wake post needed */ | |
7421 | num_post = 2; | |
7422 | } | |
7423 | } | |
7424 | #endif /* PCIE_INB_DW */ | |
7425 | ||
7426 | for (i = 0; i < num_post; i++) { |
7427 | /* Request for ring buffer space */ | |
7428 | h2d_mb_data = (h2d_mailbox_data_t *)dhd_prot_alloc_ring_space(dhd, | |
7429 | &dhd->prot->h2dring_ctrl_subn, DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, | |
7430 | &alloced, FALSE); | |
7431 | ||
7432 | if (h2d_mb_data == NULL) { | |
7433 | DHD_ERROR(("%s: FATAL: No space in control ring to send H2D Mb data\n", | |
7434 | __FUNCTION__)); | |
7435 | return BCME_NOMEM; | |
7436 | } | |
7437 | ||
7438 | memset(h2d_mb_data, 0, sizeof(h2d_mailbox_data_t)); | |
7439 | /* Common msg buf hdr */ | |
7440 | h2d_mb_data->msg.msg_type = MSG_TYPE_H2D_MAILBOX_DATA; | |
7441 | h2d_mb_data->msg.flags = dhd->prot->h2dring_ctrl_subn.current_phase; | |
7442 | ||
7443 | h2d_mb_data->msg.epoch = | |
7444 | dhd->prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO; | |
7445 | dhd->prot->h2dring_ctrl_subn.seqnum++; | |
7446 | ||
7447 | #ifdef PCIE_INB_DW | |
7448 | /* post device_wake first */ | |
7449 | if ((num_post == 2) && (i == 0)) { | |
7450 | h2d_mb_data->mail_box_data = htol32(H2DMB_DS_DEVICE_WAKE); | |
7451 | } else | |
7452 | #endif /* PCIE_INB_DW */ | |
7453 | { | |
7454 | h2d_mb_data->mail_box_data = htol32(mb_data); | |
7455 | } | |
7456 | ||
7457 | DHD_INFO(("%s Send H2D MB data Req data 0x%04x\n", __FUNCTION__, mb_data)); | |
7458 | ||
7459 | /* upd wrt ptr and raise interrupt */ | |
7460 | /* caller of dhd_prot_h2d_mbdata_send_ctrlmsg already holding general lock */ | |
7461 | dhd_prot_ring_write_complete(dhd, &dhd->prot->h2dring_ctrl_subn, h2d_mb_data, | |
7462 | DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D); | |
7463 | #ifdef PCIE_INB_DW | |
7464 | /* Add a delay if device_wake is posted */ | |
7465 | if ((num_post == 2) && (i == 0)) { | |
7466 | OSL_DELAY(1000); | |
7467 | } | |
7468 | #endif /* PCIE_INB_DW */ | |
7469 | } | |
7470 | ||
7471 | return 0; | |
7472 | } | |
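||
| /* |
|  * Caller sketch (illustrative): dhd_prot_h2d_mbdata_send_ctrlmsg() is |
|  * non-atomic, so a caller serializes it itself, e.g.: |
|  * |
|  *   unsigned long flags; |
|  *   int ret; |
|  *   DHD_GENERAL_LOCK(dhd, flags); |
|  *   ret = dhd_prot_h2d_mbdata_send_ctrlmsg(dhd, H2D_HOST_D3_INFORM); |
|  *   DHD_GENERAL_UNLOCK(dhd, flags); |
|  * |
|  * (H2D_HOST_D3_INFORM is one possible mailbox value; any 32-bit mb_data the |
|  * dongle understands may be passed.) |
|  */ |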
7473 | ||
7474 | /** Creates a flow ring and informs dongle of this event */ | |
7475 | int | |
7476 | dhd_prot_flow_ring_create(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) | |
7477 | { | |
7478 | tx_flowring_create_request_t *flow_create_rqst; | |
7479 | msgbuf_ring_t *flow_ring; | |
7480 | dhd_prot_t *prot = dhd->prot; | |
7481 | unsigned long flags; | |
7482 | uint16 alloced = 0; | |
7483 | msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; | |
7484 | uint16 max_flowrings = dhd->bus->max_tx_flowrings; | |
7485 | ||
7486 | /* Fetch a pre-initialized msgbuf_ring from the flowring pool */ | |
7487 | flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid); | |
7488 | if (flow_ring == NULL) { | |
7489 | DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n", | |
7490 | __FUNCTION__, flow_ring_node->flowid)); | |
7491 | return BCME_NOMEM; | |
7492 | } | |
7493 | ||
7494 | #ifdef PCIE_INB_DW | |
7495 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
7496 | return BCME_ERROR; | |
7497 | #endif /* PCIE_INB_DW */ | |
7498 | DHD_GENERAL_LOCK(dhd, flags); | |
7499 | ||
7500 | /* Request for ctrl_ring buffer space */ | |
7501 | flow_create_rqst = (tx_flowring_create_request_t *) | |
7502 | dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE); | |
7503 | ||
7504 | if (flow_create_rqst == NULL) { | |
7505 | dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring); | |
7506 | DHD_ERROR(("%s: Flow Create Req flowid %d - failure ring space\n", | |
7507 | __FUNCTION__, flow_ring_node->flowid)); | |
7508 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7509 | #ifdef PCIE_INB_DW | |
7510 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7511 | #endif | |
7512 | return BCME_NOMEM; | |
7513 | } | |
7514 | ||
7515 | flow_ring_node->prot_info = (void *)flow_ring; | |
7516 | ||
7517 | /* Common msg buf hdr */ | |
7518 | flow_create_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_CREATE; | |
7519 | flow_create_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; | |
7520 | flow_create_rqst->msg.request_id = htol32(0); /* TBD */ | |
7521 | flow_create_rqst->msg.flags = ctrl_ring->current_phase; | |
7522 | ||
7523 | flow_create_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; | |
7524 | ctrl_ring->seqnum++; | |
7525 | ||
7526 | /* Update flow create message */ | |
7527 | flow_create_rqst->tid = flow_ring_node->flow_info.tid; | |
7528 | flow_create_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); | |
7529 | memcpy(flow_create_rqst->sa, flow_ring_node->flow_info.sa, sizeof(flow_create_rqst->sa)); | |
7530 | memcpy(flow_create_rqst->da, flow_ring_node->flow_info.da, sizeof(flow_create_rqst->da)); | |
7531 | /* CAUTION: ring::base_addr already in Little Endian */ | |
7532 | flow_create_rqst->flow_ring_ptr.low_addr = flow_ring->base_addr.low_addr; | |
7533 | flow_create_rqst->flow_ring_ptr.high_addr = flow_ring->base_addr.high_addr; | |
7534 | flow_create_rqst->max_items = htol16(prot->h2d_max_txpost); | |
7535 | flow_create_rqst->len_item = htol16(H2DRING_TXPOST_ITEMSIZE); | |
7536 | ||
7537 | /* ifrm mask definition: bit0 = d11ac core, bit1 = d11ad core. |
7538 | * The field is currently not used for priority, so it carries the ifrm mask only. |
7539 | */ |
7540 | if (IFRM_ACTIVE(dhd)) | |
7541 | flow_create_rqst->priority_ifrmmask = (1 << IFRM_DEV_0); | |
7542 | ||
7543 | DHD_ERROR(("%s: Send Flow Create Req flow ID %d for peer " MACDBG | |
7544 | " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, | |
7545 | MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, | |
7546 | flow_ring_node->flow_info.ifindex)); | |
7547 | ||
7548 | /* Update the flow_ring's WRITE index */ | |
7549 | if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) { | |
7550 | dhd_prot_dma_indx_set(dhd, flow_ring->wr, | |
7551 | H2D_DMA_INDX_WR_UPD, flow_ring->idx); | |
7552 | } else if (IFRM_ACTIVE(dhd) && DHD_IS_FLOWRING(flow_ring->idx, max_flowrings)) { | |
7553 | dhd_prot_dma_indx_set(dhd, flow_ring->wr, | |
7554 | H2D_IFRM_INDX_WR_UPD, flow_ring->idx); | |
7555 | } else { | |
7556 | dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr), | |
7557 | sizeof(uint16), RING_WR_UPD, flow_ring->idx); | |
7558 | } | |
7559 | ||
7560 | /* update control subn ring's WR index and ring doorbell to dongle */ | |
7561 | dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_create_rqst, 1); | |
7562 | ||
7563 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7564 | #ifdef PCIE_INB_DW | |
7565 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7566 | #endif | |
7567 | ||
7568 | return BCME_OK; | |
7569 | } /* dhd_prot_flow_ring_create */ | |
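||
| /* |
|  * Message flow (summary): the function above posts a MSG_TYPE_FLOW_RING_CREATE |
|  * work item on the H2D control submission ring and rings the doorbell; the |
|  * dongle replies with MSG_TYPE_FLOW_RING_CREATE_CMPLT on the D2H control |
|  * completion ring, handled by dhd_prot_flow_ring_create_response_process() |
|  * below. |
|  */ |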
7570 | ||
7571 | /** called on receiving MSG_TYPE_FLOW_RING_CREATE_CMPLT message from dongle */ | |
7572 | static void | |
7573 | dhd_prot_flow_ring_create_response_process(dhd_pub_t *dhd, void *msg) | |
7574 | { | |
7575 | tx_flowring_create_response_t *flow_create_resp = (tx_flowring_create_response_t *)msg; | |
7576 | ||
7577 | DHD_ERROR(("%s: Flow Create Response status = %d Flow %d\n", __FUNCTION__, | |
7578 | ltoh16(flow_create_resp->cmplt.status), | |
7579 | ltoh16(flow_create_resp->cmplt.flow_ring_id))); | |
7580 | ||
7581 | dhd_bus_flow_ring_create_response(dhd->bus, | |
7582 | ltoh16(flow_create_resp->cmplt.flow_ring_id), | |
7583 | ltoh16(flow_create_resp->cmplt.status)); | |
7584 | } | |
7585 | ||
7586 | static void | |
7587 | dhd_prot_process_h2d_ring_create_complete(dhd_pub_t *dhd, void *buf) | |
7588 | { | |
7589 | h2d_ring_create_response_t *resp = (h2d_ring_create_response_t *)buf; | |
7590 | DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__, | |
7591 | ltoh16(resp->cmplt.status), | |
7592 | ltoh16(resp->cmplt.ring_id), | |
7593 | ltoh32(resp->cmn_hdr.request_id))); | |
7594 | if (ltoh32(resp->cmn_hdr.request_id) != DHD_H2D_DBGRING_REQ_PKTID) { | |
7595 | DHD_ERROR(("invalid request ID with h2d ring create complete\n")); | |
7596 | return; | |
7597 | } | |
7598 | if (!dhd->prot->h2dring_info_subn->create_pending) { | |
7599 | DHD_ERROR(("info ring create status for not pending submit ring\n")); | |
7600 | } | |
7601 | ||
7602 | if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { | |
7603 | DHD_ERROR(("info ring create failed with status %d\n", | |
7604 | ltoh16(resp->cmplt.status))); | |
7605 | return; | |
7606 | } | |
7607 | dhd->prot->h2dring_info_subn->create_pending = FALSE; | |
7608 | dhd->prot->h2dring_info_subn->inited = TRUE; | |
7609 | dhd_prot_infobufpost(dhd); | |
7610 | } | |
7611 | ||
7612 | static void | |
7613 | dhd_prot_process_d2h_ring_create_complete(dhd_pub_t *dhd, void *buf) | |
7614 | { | |
7615 | d2h_ring_create_response_t *resp = (d2h_ring_create_response_t *)buf; | |
7616 | DHD_INFO(("%s ring create Response status = %d ring %d, id 0x%04x\n", __FUNCTION__, | |
7617 | ltoh16(resp->cmplt.status), | |
7618 | ltoh16(resp->cmplt.ring_id), | |
7619 | ltoh32(resp->cmn_hdr.request_id))); | |
7620 | if (ltoh32(resp->cmn_hdr.request_id) != DHD_D2H_DBGRING_REQ_PKTID) { | |
7621 | DHD_ERROR(("invalid request ID with d2h ring create complete\n")); | |
7622 | return; | |
7623 | } | |
7624 | if (!dhd->prot->d2hring_info_cpln->create_pending) { | |
7625 | DHD_ERROR(("info ring create status for not pending cpl ring\n")); | |
7626 | return; | |
7627 | } | |
7628 | ||
7629 | if (ltoh16(resp->cmplt.status) != BCMPCIE_SUCCESS) { | |
7630 | DHD_ERROR(("info cpl ring create failed with status %d\n", | |
7631 | ltoh16(resp->cmplt.status))); | |
7632 | return; | |
7633 | } | |
7634 | dhd->prot->d2hring_info_cpln->create_pending = FALSE; | |
7635 | dhd->prot->d2hring_info_cpln->inited = TRUE; | |
7636 | } | |
7637 | ||
7638 | static void | |
7639 | dhd_prot_process_d2h_mb_data(dhd_pub_t *dhd, void* buf) | |
7640 | { | |
7641 | d2h_mailbox_data_t *d2h_data; | |
7642 | ||
7643 | d2h_data = (d2h_mailbox_data_t *)buf; | |
7644 | DHD_INFO(("%s dhd_prot_process_d2h_mb_data, 0x%04x\n", __FUNCTION__, | |
7645 | d2h_data->d2h_mailbox_data)); | |
7646 | dhd_bus_handle_mb_data(dhd->bus, d2h_data->d2h_mailbox_data); | |
7647 | } | |
7648 | ||
7649 | static void | |
7650 | dhd_prot_process_d2h_host_ts_complete(dhd_pub_t *dhd, void* buf) | |
7651 | { | |
7652 | #ifdef DHD_TIMESYNC | |
7653 | host_timestamp_msg_cpl_t *host_ts_cpl; | |
7654 | uint32 pktid; | |
7655 | dhd_prot_t *prot = dhd->prot; | |
7656 | ||
7657 | host_ts_cpl = (host_timestamp_msg_cpl_t *)buf; | |
7658 | DHD_INFO(("%s host TS cpl: status %d, req_ID: 0x%04x, xt_id %d \n", __FUNCTION__, | |
7659 | host_ts_cpl->cmplt.status, host_ts_cpl->msg.request_id, host_ts_cpl->xt_id)); | |
7660 | ||
7661 | pktid = ltoh32(host_ts_cpl->msg.request_id); | |
7662 | if (prot->hostts_req_buf_inuse == FALSE) { | |
7663 | DHD_ERROR(("No Pending Host TS req, but completion\n")); | |
7664 | return; | |
7665 | } | |
7666 | prot->hostts_req_buf_inuse = FALSE; | |
7667 | if (pktid != DHD_H2D_HOSTTS_REQ_PKTID) { | |
7668 | DHD_ERROR(("Host TS req CPL, but req ID different 0x%04x, exp 0x%04x\n", | |
7669 | pktid, DHD_H2D_HOSTTS_REQ_PKTID)); | |
7670 | return; | |
7671 | } | |
7672 | dhd_timesync_handle_host_ts_complete(dhd->ts, host_ts_cpl->xt_id, | |
7673 | host_ts_cpl->cmplt.status); | |
7674 | #else /* DHD_TIMESYNC */ | |
7675 | DHD_ERROR(("Timesunc feature not compiled in but GOT HOST_TS_COMPLETE\n")); | |
7676 | #endif /* DHD_TIMESYNC */ | |
7677 | ||
7678 | } | |
7679 | ||
7680 | /** called on e.g. flow ring delete */ | |
7681 | void dhd_prot_clean_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info) | |
7682 | { | |
7683 | msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; | |
7684 | dhd_prot_ring_detach(dhd, flow_ring); | |
7685 | DHD_INFO(("%s Cleaning up Flow \n", __FUNCTION__)); | |
7686 | } | |
7687 | ||
7688 | void dhd_prot_print_flow_ring(dhd_pub_t *dhd, void *msgbuf_flow_info, | |
7689 | struct bcmstrbuf *strbuf, const char * fmt) | |
7690 | { | |
7691 | const char *default_fmt = "RD %d WR %d BASE(VA) %p BASE(PA) %x:%x" | |
7692 | " WORK ITEM SIZE %d MAX WORK ITEMS %d SIZE %d\n"; | |
7693 | msgbuf_ring_t *flow_ring = (msgbuf_ring_t *)msgbuf_flow_info; | |
7694 | uint16 rd, wr; | |
7695 | uint32 dma_buf_len = flow_ring->max_items * flow_ring->item_len; | |
7696 | ||
7697 | if (fmt == NULL) { | |
7698 | fmt = default_fmt; | |
7699 | } | |
7700 | dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, flow_ring->idx); | |
7701 | dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, flow_ring->idx); | |
7702 | bcm_bprintf(strbuf, fmt, rd, wr, flow_ring->dma_buf.va, | |
7703 | ltoh32(flow_ring->base_addr.high_addr), | |
7704 | ltoh32(flow_ring->base_addr.low_addr), | |
7705 | flow_ring->item_len, flow_ring->max_items, dma_buf_len); | |
7706 | } | |
7707 | ||
7708 | void dhd_prot_print_info(dhd_pub_t *dhd, struct bcmstrbuf *strbuf) | |
7709 | { | |
7710 | dhd_prot_t *prot = dhd->prot; | |
7711 | bcm_bprintf(strbuf, "IPCrevs: Dev %d, \t Host %d, \tactive %d\n", | |
7712 | dhd->prot->device_ipc_version, | |
7713 | dhd->prot->host_ipc_version, | |
7714 | dhd->prot->active_ipc_version); | |
7715 | ||
7716 | bcm_bprintf(strbuf, "max Host TS bufs to post: %d, \t posted %d \n", | |
7717 | dhd->prot->max_tsbufpost, dhd->prot->cur_ts_bufs_posted); | |
7718 | bcm_bprintf(strbuf, "max INFO bufs to post: %d, \t posted %d \n", | |
7719 | dhd->prot->max_infobufpost, dhd->prot->infobufpost); | |
7720 | bcm_bprintf(strbuf, "max event bufs to post: %d, \t posted %d \n", | |
7721 | dhd->prot->max_eventbufpost, dhd->prot->cur_event_bufs_posted); | |
7722 | bcm_bprintf(strbuf, "max ioctlresp bufs to post: %d, \t posted %d \n", | |
7723 | dhd->prot->max_ioctlrespbufpost, dhd->prot->cur_ioctlresp_bufs_posted); | |
7724 | bcm_bprintf(strbuf, "max RX bufs to post: %d, \t posted %d \n", | |
7725 | dhd->prot->max_rxbufpost, dhd->prot->rxbufpost); | |
7726 | ||
7727 | bcm_bprintf(strbuf, | |
7728 | "%14s %5s %5s %17s %17s %14s %14s %10s\n", | |
7729 | "Type", "RD", "WR", "BASE(VA)", "BASE(PA)", | |
7730 | "WORK_ITEM_SIZE", "MAX_WORK_ITEMS", "TOTAL_SIZE"); | |
7731 | bcm_bprintf(strbuf, "%14s", "H2DCtrlPost"); | |
7732 | dhd_prot_print_flow_ring(dhd, &prot->h2dring_ctrl_subn, strbuf, | |
7733 | " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); | |
7734 | bcm_bprintf(strbuf, "%14s", "D2HCtrlCpl"); | |
7735 | dhd_prot_print_flow_ring(dhd, &prot->d2hring_ctrl_cpln, strbuf, | |
7736 | " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); | |
7737 | bcm_bprintf(strbuf, "%14s", "H2DRxPost", prot->rxbufpost); | |
7738 | dhd_prot_print_flow_ring(dhd, &prot->h2dring_rxp_subn, strbuf, | |
7739 | " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); | |
7740 | bcm_bprintf(strbuf, "%14s", "D2HRxCpl"); | |
7741 | dhd_prot_print_flow_ring(dhd, &prot->d2hring_rx_cpln, strbuf, | |
7742 | " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); | |
7743 | bcm_bprintf(strbuf, "%14s", "D2HTxCpl"); | |
7744 | dhd_prot_print_flow_ring(dhd, &prot->d2hring_tx_cpln, strbuf, | |
7745 | " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); | |
7746 | if (dhd->prot->h2dring_info_subn != NULL && dhd->prot->d2hring_info_cpln != NULL) { | |
7747 | bcm_bprintf(strbuf, "%14s", "H2DRingInfoSub"); | |
7748 | dhd_prot_print_flow_ring(dhd, prot->h2dring_info_subn, strbuf, | |
7749 | " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); | |
7750 | bcm_bprintf(strbuf, "%14s", "D2HRingInfoCpl"); | |
7751 | dhd_prot_print_flow_ring(dhd, prot->d2hring_info_cpln, strbuf, | |
7752 | " %5d %5d %17p %8x:%8x %14d %14d %10d\n"); | |
7753 | } | |
7754 | ||
7755 | bcm_bprintf(strbuf, "active_tx_count %d pktidmap_avail(ctrl/rx/tx) %d %d %d\n", | |
7756 | dhd->prot->active_tx_count, | |
7757 | DHD_PKTID_AVAIL(dhd->prot->pktid_ctrl_map), | |
7758 | DHD_PKTID_AVAIL(dhd->prot->pktid_rx_map), | |
7759 | DHD_PKTID_AVAIL(dhd->prot->pktid_tx_map)); | |
7760 | ||
7761 | } | |
7762 | ||
7763 | int | |
7764 | dhd_prot_flow_ring_delete(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) | |
7765 | { | |
7766 | tx_flowring_delete_request_t *flow_delete_rqst; | |
7767 | dhd_prot_t *prot = dhd->prot; | |
7768 | unsigned long flags; | |
7769 | uint16 alloced = 0; | |
7770 | msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; | |
7771 | ||
7772 | #ifdef PCIE_INB_DW | |
7773 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
7774 | return BCME_ERROR; | |
7775 | #endif /* PCIE_INB_DW */ | |
7776 | ||
7777 | DHD_GENERAL_LOCK(dhd, flags); | |
7778 | ||
7779 | /* Request for ring buffer space */ | |
7780 | flow_delete_rqst = (tx_flowring_delete_request_t *) | |
7781 | dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); | |
7782 | ||
7783 | if (flow_delete_rqst == NULL) { | |
7784 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7785 | DHD_ERROR(("%s: Flow Delete Req - failure ring space\n", __FUNCTION__)); | |
7786 | #ifdef PCIE_INB_DW | |
7787 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7788 | #endif | |
7789 | return BCME_NOMEM; | |
7790 | } | |
7791 | ||
7792 | /* Common msg buf hdr */ | |
7793 | flow_delete_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_DELETE; | |
7794 | flow_delete_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; | |
7795 | flow_delete_rqst->msg.request_id = htol32(0); /* TBD */ | |
7796 | flow_delete_rqst->msg.flags = ring->current_phase; | |
7797 | ||
7798 | flow_delete_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
7799 | ring->seqnum++; | |
7800 | ||
7801 | /* Update Delete info */ | |
7802 | flow_delete_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); | |
7803 | flow_delete_rqst->reason = htol16(BCME_OK); | |
7804 | ||
7805 | DHD_ERROR(("%s: Send Flow Delete Req RING ID %d for peer " MACDBG | |
7806 | " prio %d ifindex %d\n", __FUNCTION__, flow_ring_node->flowid, | |
7807 | MAC2STRDBG(flow_ring_node->flow_info.da), flow_ring_node->flow_info.tid, | |
7808 | flow_ring_node->flow_info.ifindex)); | |
7809 | ||
7810 | /* update ring's WR index and ring doorbell to dongle */ | |
7811 | dhd_prot_ring_write_complete(dhd, ring, flow_delete_rqst, 1); | |
7812 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7813 | #ifdef PCIE_INB_DW | |
7814 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7815 | #endif | |
7816 | ||
7817 | return BCME_OK; | |
7818 | } | |
7819 | ||
7820 | static void | |
7821 | dhd_prot_flow_ring_delete_response_process(dhd_pub_t *dhd, void *msg) | |
7822 | { | |
7823 | tx_flowring_delete_response_t *flow_delete_resp = (tx_flowring_delete_response_t *)msg; | |
7824 | ||
7825 | DHD_ERROR(("%s: Flow Delete Response status = %d Flow %d\n", __FUNCTION__, | |
7826 | flow_delete_resp->cmplt.status, flow_delete_resp->cmplt.flow_ring_id)); | |
7827 | ||
7828 | dhd_bus_flow_ring_delete_response(dhd->bus, flow_delete_resp->cmplt.flow_ring_id, | |
7829 | flow_delete_resp->cmplt.status); | |
7830 | } | |
7831 | ||
7832 | static void | |
7833 | dhd_prot_process_flow_ring_resume_response(dhd_pub_t *dhd, void* msg) | |
7834 | { | |
7835 | #ifdef IDLE_TX_FLOW_MGMT | |
7836 | tx_idle_flowring_resume_response_t *flow_resume_resp = | |
7837 | (tx_idle_flowring_resume_response_t *)msg; | |
7838 | ||
7839 | DHD_ERROR(("%s Flow resume Response status = %d Flow %d\n", __FUNCTION__, | |
7840 | flow_resume_resp->cmplt.status, flow_resume_resp->cmplt.flow_ring_id)); | |
7841 | ||
7842 | dhd_bus_flow_ring_resume_response(dhd->bus, flow_resume_resp->cmplt.flow_ring_id, | |
7843 | flow_resume_resp->cmplt.status); | |
7844 | #endif /* IDLE_TX_FLOW_MGMT */ | |
7845 | } | |
7846 | ||
7847 | static void | |
7848 | dhd_prot_process_flow_ring_suspend_response(dhd_pub_t *dhd, void* msg) | |
7849 | { | |
7850 | #ifdef IDLE_TX_FLOW_MGMT | |
7851 | int16 status; | |
7852 | tx_idle_flowring_suspend_response_t *flow_suspend_resp = | |
7853 | (tx_idle_flowring_suspend_response_t *)msg; | |
7854 | status = flow_suspend_resp->cmplt.status; | |
7855 | ||
7856 | DHD_ERROR(("%s Flow id %d suspend Response status = %d\n", | |
7857 | __FUNCTION__, flow_suspend_resp->cmplt.flow_ring_id, | |
7858 | status)); | |
7859 | ||
7860 | if (status != BCME_OK) { | |
7861 | ||
7862 | DHD_ERROR(("%s Error in Suspending Flow rings!!" | |
7863 | "Dongle will still be polling idle rings!!Status = %d \n", | |
7864 | __FUNCTION__, status)); | |
7865 | } | |
7866 | #endif /* IDLE_TX_FLOW_MGMT */ | |
7867 | } | |
7868 | ||
7869 | int | |
7870 | dhd_prot_flow_ring_flush(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node) | |
7871 | { | |
7872 | tx_flowring_flush_request_t *flow_flush_rqst; | |
7873 | dhd_prot_t *prot = dhd->prot; | |
7874 | unsigned long flags; | |
7875 | uint16 alloced = 0; | |
7876 | msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn; | |
7877 | ||
7878 | #ifdef PCIE_INB_DW | |
7879 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
7880 | return BCME_ERROR; | |
7881 | #endif /* PCIE_INB_DW */ | |
7882 | ||
7883 | DHD_GENERAL_LOCK(dhd, flags); | |
7884 | ||
7885 | /* Request for ring buffer space */ | |
7886 | flow_flush_rqst = (tx_flowring_flush_request_t *) | |
7887 | dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE); | |
7888 | if (flow_flush_rqst == NULL) { | |
7889 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7890 | DHD_ERROR(("%s: Flow Flush Req - failure ring space\n", __FUNCTION__)); | |
7891 | #ifdef PCIE_INB_DW | |
7892 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7893 | #endif | |
7894 | return BCME_NOMEM; | |
7895 | } | |
7896 | ||
7897 | /* Common msg buf hdr */ | |
7898 | flow_flush_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_FLUSH; | |
7899 | flow_flush_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; | |
7900 | flow_flush_rqst->msg.request_id = htol32(0); /* TBD */ | |
7901 | flow_flush_rqst->msg.flags = ring->current_phase; | |
7902 | flow_flush_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO; | |
7903 | ring->seqnum++; | |
7904 | ||
7905 | flow_flush_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid); | |
7906 | flow_flush_rqst->reason = htol16(BCME_OK); | |
7907 | ||
7908 | DHD_INFO(("%s: Send Flow Flush Req\n", __FUNCTION__)); | |
7909 | ||
7910 | /* update ring's WR index and ring doorbell to dongle */ | |
7911 | dhd_prot_ring_write_complete(dhd, ring, flow_flush_rqst, 1); | |
7912 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7913 | #ifdef PCIE_INB_DW | |
7914 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7915 | #endif | |
7916 | ||
7917 | return BCME_OK; | |
7918 | } /* dhd_prot_flow_ring_flush */ | |
7919 | ||
7920 | static void | |
7921 | dhd_prot_flow_ring_flush_response_process(dhd_pub_t *dhd, void *msg) | |
7922 | { | |
7923 | tx_flowring_flush_response_t *flow_flush_resp = (tx_flowring_flush_response_t *)msg; | |
7924 | ||
7925 | DHD_INFO(("%s: Flow Flush Response status = %d\n", __FUNCTION__, | |
7926 | flow_flush_resp->cmplt.status)); | |
7927 | ||
7928 | dhd_bus_flow_ring_flush_response(dhd->bus, flow_flush_resp->cmplt.flow_ring_id, | |
7929 | flow_flush_resp->cmplt.status); | |
7930 | } | |
7931 | ||
7932 | /** | |
7933 | * Request dongle to configure soft doorbells for D2H rings. Host-populated soft |
7934 | * doorbell information is transferred to dongle via the d2h ring config control | |
7935 | * message. | |
7936 | */ | |
7937 | void | |
7938 | dhd_msgbuf_ring_config_d2h_soft_doorbell(dhd_pub_t *dhd) | |
7939 | { | |
7940 | #if defined(DHD_D2H_SOFT_DOORBELL_SUPPORT) | |
7941 | uint16 ring_idx; | |
7942 | uint8 *msg_next; | |
7943 | void *msg_start; | |
7944 | uint16 alloced = 0; | |
7945 | unsigned long flags; | |
7946 | dhd_prot_t *prot = dhd->prot; | |
7947 | ring_config_req_t *ring_config_req; | |
7948 | bcmpcie_soft_doorbell_t *soft_doorbell; | |
7949 | msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn; | |
7950 | const uint16 d2h_rings = BCMPCIE_D2H_COMMON_MSGRINGS; | |
7951 | ||
7952 | #ifdef PCIE_INB_DW | |
7953 | if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) | |
7954 | return; /* void function: the error cannot be propagated */ |
7955 | #endif /* PCIE_INB_DW */ | |
7956 | /* Claim space for d2h_ring number of d2h_ring_config_req_t messages */ | |
7957 | DHD_GENERAL_LOCK(dhd, flags); | |
7958 | ||
7959 | msg_start = dhd_prot_alloc_ring_space(dhd, ctrl_ring, d2h_rings, &alloced, TRUE); | |
7960 | ||
7961 | if (msg_start == NULL) { | |
7962 | DHD_ERROR(("%s Msgbuf no space for %d D2H ring config soft doorbells\n", | |
7963 | __FUNCTION__, d2h_rings)); | |
7964 | DHD_GENERAL_UNLOCK(dhd, flags); | |
7965 | #ifdef PCIE_INB_DW | |
7966 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
7967 | #endif | |
7968 | return; | |
7969 | } | |
7970 | ||
7971 | msg_next = (uint8*)msg_start; | |
7972 | ||
7973 | for (ring_idx = 0; ring_idx < d2h_rings; ring_idx++) { | |
7974 | ||
7975 | /* position the ring_config_req into the ctrl subm ring */ | |
7976 | ring_config_req = (ring_config_req_t *)msg_next; | |
7977 | ||
7978 | /* Common msg header */ | |
7979 | ring_config_req->msg.msg_type = MSG_TYPE_D2H_RING_CONFIG; | |
7980 | ring_config_req->msg.if_id = 0; | |
7981 | ring_config_req->msg.flags = 0; | |
7982 | ||
7983 | ring_config_req->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO; | |
7984 | ctrl_ring->seqnum++; | |
7985 | ||
7986 | ring_config_req->msg.request_id = htol32(DHD_FAKE_PKTID); /* unused */ | |
7987 | ||
7988 | /* Ring Config subtype and d2h ring_id */ | |
7989 | ring_config_req->subtype = htol16(D2H_RING_CONFIG_SUBTYPE_SOFT_DOORBELL); | |
7990 | ring_config_req->ring_id = htol16(DHD_D2H_RINGID(ring_idx)); | |
7991 | ||
7992 | /* Host soft doorbell configuration */ | |
7993 | soft_doorbell = &prot->soft_doorbell[ring_idx]; | |
7994 | ||
7995 | ring_config_req->soft_doorbell.value = htol32(soft_doorbell->value); | |
7996 | ring_config_req->soft_doorbell.haddr.high = | |
7997 | htol32(soft_doorbell->haddr.high); | |
7998 | ring_config_req->soft_doorbell.haddr.low = | |
7999 | htol32(soft_doorbell->haddr.low); | |
8000 | ring_config_req->soft_doorbell.items = htol16(soft_doorbell->items); | |
8001 | ring_config_req->soft_doorbell.msecs = htol16(soft_doorbell->msecs); | |
8002 | ||
8003 | DHD_INFO(("%s: Soft doorbell haddr 0x%08x 0x%08x value 0x%08x\n", | |
8004 | __FUNCTION__, ring_config_req->soft_doorbell.haddr.high, | |
8005 | ring_config_req->soft_doorbell.haddr.low, | |
8006 | ring_config_req->soft_doorbell.value)); | |
8007 | ||
8008 | msg_next = msg_next + ctrl_ring->item_len; | |
8009 | } | |
8010 | ||
8011 | /* update control subn ring's WR index and ring doorbell to dongle */ | |
8012 | dhd_prot_ring_write_complete(dhd, ctrl_ring, msg_start, d2h_rings); | |
8013 | DHD_GENERAL_UNLOCK(dhd, flags); | |
8014 | #ifdef PCIE_INB_DW | |
8015 | dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus); | |
8016 | #endif | |
8017 | #endif /* DHD_D2H_SOFT_DOORBELL_SUPPORT */ | |
8018 | } | |
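||
| /* |
|  * Setup sketch (illustrative; the numeric values are hypothetical): the host |
|  * fills prot->soft_doorbell[] before the function above runs, e.g. for ring 0: |
|  * |
|  *   bcmpcie_soft_doorbell_t *db = &prot->soft_doorbell[0]; |
|  *   db->value      = 0x1;         // value the dongle writes to the host address |
|  *   db->haddr.high = 0x0;         // 64-bit host address of the wakeup register |
|  *   db->haddr.low  = 0x00c0ffee;  // (hypothetical address) |
|  *   db->items      = 8;           // coalescing: ring items per doorbell write |
|  *   db->msecs      = 2;           // coalescing: max delay in milliseconds |
|  */ |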
8019 | ||
8020 | static void | |
8021 | dhd_prot_process_d2h_ring_config_complete(dhd_pub_t *dhd, void *msg) | |
8022 | { | |
8023 | DHD_INFO(("%s: Ring Config Response - status %d ringid %d\n", | |
8024 | __FUNCTION__, ltoh16(((ring_config_resp_t *)msg)->compl_hdr.status), | |
8025 | ltoh16(((ring_config_resp_t *)msg)->compl_hdr.flow_ring_id))); | |
8026 | } | |
8027 | ||
8028 | int | |
8029 | dhd_prot_debug_dma_info_print(dhd_pub_t *dhd) | |
8030 | { | |
8031 | if (dhd->bus->is_linkdown) { | |
8032 | DHD_ERROR(("\n ------- SKIP DUMPING DMA Registers " | |
8033 | "due to PCIe link down ------- \r\n")); | |
8034 | return 0; | |
8035 | } | |
8036 | ||
8037 | DHD_ERROR(("\n ------- DUMPING DMA Registers ------- \r\n")); | |
8038 | ||
8039 | //HostToDev | |
8040 | DHD_ERROR(("HostToDev TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n", | |
8041 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x200, 0, 0), | |
8042 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x204, 0, 0))); | |
8043 | DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n", | |
8044 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x208, 0, 0), | |
8045 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x20C, 0, 0))); | |
8046 | DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n", | |
8047 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x210, 0, 0), | |
8048 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x214, 0, 0))); | |
8049 | ||
8050 | DHD_ERROR(("HostToDev RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n", | |
8051 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x220, 0, 0), | |
8052 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x224, 0, 0))); | |
8053 | DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n", | |
8054 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x228, 0, 0), | |
8055 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x22C, 0, 0))); | |
8056 | DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", | |
8057 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x230, 0, 0), | |
8058 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x234, 0, 0))); | |
8059 | ||
8060 | //DevToHost | |
8061 | DHD_ERROR(("DevToHost TX: XmtCtrl=0x%08x XmtPtr=0x%08x\n", | |
8062 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x240, 0, 0), | |
8063 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x244, 0, 0))); | |
8064 | DHD_ERROR((" : XmtAddrLow=0x%08x XmtAddrHigh=0x%08x\n", | |
8065 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x248, 0, 0), | |
8066 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x24C, 0, 0))); | |
8067 | DHD_ERROR((" : XmtStatus0=0x%08x XmtStatus1=0x%08x\n", | |
8068 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x250, 0, 0), | |
8069 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x254, 0, 0))); | |
8070 | ||
8071 | DHD_ERROR(("DevToHost RX: RcvCtrl=0x%08x RcvPtr=0x%08x\n", | |
8072 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x260, 0, 0), | |
8073 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x264, 0, 0))); | |
8074 | DHD_ERROR((" : RcvAddrLow=0x%08x RcvAddrHigh=0x%08x\n", | |
8075 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x268, 0, 0), | |
8076 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x26C, 0, 0))); | |
8077 | DHD_ERROR((" : RcvStatus0=0x%08x RcvStatus1=0x%08x\n", | |
8078 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x270, 0, 0), | |
8079 | si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, 0x274, 0, 0))); | |
8080 | ||
8081 | return 0; | |
8082 | } | |
8083 | ||
8084 | int | |
8085 | dhd_prot_debug_info_print(dhd_pub_t *dhd) | |
8086 | { | |
8087 | dhd_prot_t *prot = dhd->prot; | |
8088 | msgbuf_ring_t *ring; | |
8089 | uint16 rd, wr; | |
8090 | uint32 intstatus = 0; | |
8091 | uint32 intmask = 0; | |
8092 | uint32 mbintstatus = 0; | |
8093 | uint32 d2h_mb_data = 0; | |
8094 | uint32 dma_buf_len; | |
8095 | ||
8096 | DHD_ERROR(("\n ------- DUMPING VERSION INFORMATION ------- \r\n")); | |
8097 | DHD_ERROR(("DHD: %s\n", dhd_version)); | |
8098 | DHD_ERROR(("Firmware: %s\n", fw_version)); | |
8099 | ||
8100 | DHD_ERROR(("\n ------- DUMPING PROTOCOL INFORMATION ------- \r\n")); | |
8101 | DHD_ERROR(("ICPrevs: Dev %d, Host %d, active %d\n", | |
8102 | prot->device_ipc_version, | |
8103 | prot->host_ipc_version, | |
8104 | prot->active_ipc_version)); | |
8105 | DHD_ERROR(("max Host TS bufs to post: %d, posted %d\n", | |
8106 | prot->max_tsbufpost, prot->cur_ts_bufs_posted)); | |
8107 | DHD_ERROR(("max INFO bufs to post: %d, posted %d\n", | |
8108 | prot->max_infobufpost, prot->infobufpost)); | |
8109 | DHD_ERROR(("max event bufs to post: %d, posted %d\n", | |
8110 | prot->max_eventbufpost, prot->cur_event_bufs_posted)); | |
8111 | DHD_ERROR(("max ioctlresp bufs to post: %d, posted %d\n", | |
8112 | prot->max_ioctlrespbufpost, prot->cur_ioctlresp_bufs_posted)); | |
8113 | DHD_ERROR(("max RX bufs to post: %d, posted %d\n", | |
8114 | prot->max_rxbufpost, prot->rxbufpost)); | |
8115 | DHD_ERROR(("h2d_max_txpost: %d, prot->h2d_max_txpost: %d\n", | |
8116 | h2d_max_txpost, prot->h2d_max_txpost)); | |
8117 | ||
8118 | DHD_ERROR(("\n ------- DUMPING IOCTL RING RD WR Pointers ------- \r\n")); | |
8119 | ||
8120 | ring = &prot->h2dring_ctrl_subn; | |
8121 | dma_buf_len = ring->max_items * ring->item_len; | |
8122 | DHD_ERROR(("CtrlPost: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", | |
8123 | ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), | |
8124 | ltoh32(ring->base_addr.low_addr), dma_buf_len)); | |
8125 | DHD_ERROR(("CtrlPost: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); | |
8126 | dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); | |
8127 | dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); | |
8128 | DHD_ERROR(("CtrlPost: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); | |
8129 | DHD_ERROR(("CtrlPost: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); | |
8130 | ||
8131 | ring = &prot->d2hring_ctrl_cpln; | |
8132 | dma_buf_len = ring->max_items * ring->item_len; | |
8133 | DHD_ERROR(("CtrlCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", | |
8134 | ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), | |
8135 | ltoh32(ring->base_addr.low_addr), dma_buf_len)); | |
8136 | DHD_ERROR(("CtrlCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); | |
8137 | dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); | |
8138 | dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); | |
8139 | DHD_ERROR(("CtrlCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); | |
8140 | DHD_ERROR(("CtrlCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); | |
8141 | ||
8142 | ring = prot->h2dring_info_subn; | |
8143 | if (ring) { | |
8144 | dma_buf_len = ring->max_items * ring->item_len; | |
8145 | DHD_ERROR(("InfoSub: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", | |
8146 | ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), | |
8147 | ltoh32(ring->base_addr.low_addr), dma_buf_len)); | |
8148 | DHD_ERROR(("InfoSub: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); | |
8149 | dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); | |
8150 | dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); | |
8151 | DHD_ERROR(("InfoSub: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); | |
8152 | DHD_ERROR(("InfoSub: seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); | |
8153 | } | |
8154 | ring = prot->d2hring_info_cpln; | |
8155 | if (ring) { | |
8156 | dma_buf_len = ring->max_items * ring->item_len; | |
8157 | DHD_ERROR(("InfoCpl: Mem Info: BASE(VA) %p BASE(PA) %x:%x SIZE %d \r\n", | |
8158 | ring->dma_buf.va, ltoh32(ring->base_addr.high_addr), | |
8159 | ltoh32(ring->base_addr.low_addr), dma_buf_len)); | |
8160 | DHD_ERROR(("InfoCpl: From Host mem: RD: %d WR %d \r\n", ring->rd, ring->wr)); | |
8161 | dhd_bus_cmn_readshared(dhd->bus, &rd, RING_RD_UPD, ring->idx); | |
8162 | dhd_bus_cmn_readshared(dhd->bus, &wr, RING_WR_UPD, ring->idx); | |
8163 | DHD_ERROR(("InfoCpl: From Shared Mem: RD: %d WR %d \r\n", rd, wr)); | |
8164 | DHD_ERROR(("InfoCpl: Expected seq num: %d \r\n", ring->seqnum % H2D_EPOCH_MODULO)); | |
8165 | } | |
8166 | ||
8167 | DHD_ERROR(("%s: cur_ioctlresp_bufs_posted %d cur_event_bufs_posted %d\n", | |
8168 | __FUNCTION__, prot->cur_ioctlresp_bufs_posted, prot->cur_event_bufs_posted)); | |
8169 | ||
8170 | if (!dhd->bus->is_linkdown && dhd->bus->intstatus != (uint32)-1) { | |
8171 | DHD_ERROR(("\n ------- DUMPING INTR Status and Masks ------- \r\n")); | |
8172 | intstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
8173 | PCIMailBoxInt, 0, 0); | |
8174 | intmask = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
8175 | PCIMailBoxMask, 0, 0); | |
8176 | mbintstatus = si_corereg(dhd->bus->sih, dhd->bus->sih->buscoreidx, | |
8177 | PCID2H_MailBox, 0, 0); | |
8178 | dhd_bus_cmn_readshared(dhd->bus, &d2h_mb_data, D2H_MB_DATA, 0); | |
8179 | ||
8180 | DHD_ERROR(("intstatus=0x%x intmask=0x%x mbintstatus=0x%x\n", | |
8181 | intstatus, intmask, mbintstatus)); | |
8182 | DHD_ERROR(("d2h_mb_data=0x%x def_intmask=0x%x \r\n", d2h_mb_data, | |
8183 | dhd->bus->def_intmask)); | |
8184 | ||
8185 | DHD_ERROR(("host pcie_irq enabled = %d\n", dhdpcie_irq_enabled(dhd->bus))); | |
8186 | ||
8187 | DHD_ERROR(("\n ------- DUMPING PCIE Registers ------- \r\n")); | |
8188 | /* hwnbu-twiki.sj.broadcom.com/bin/view/Mwgroup/CurrentPcieGen2ProgramGuide */ | |
8189 | DHD_ERROR(("Status Command(0x%x)=0x%x, BaseAddress0(0x%x)=0x%x\n", | |
8190 | PCIECFGREG_STATUS_CMD, | |
8191 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_STATUS_CMD, sizeof(uint32)), | |
8192 | PCIECFGREG_BASEADDR0, | |
8193 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_BASEADDR0, sizeof(uint32)))); | |
8194 | DHD_ERROR(("LinkCtl(0x%x)=0x%x DeviceStatusControl2(0x%x)=0x%x " | |
8195 | "L1SSControl(0x%x)=0x%x\n", PCIECFGREG_LINK_STATUS_CTRL, | |
8196 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_LINK_STATUS_CTRL, | |
8197 | sizeof(uint32)), PCIECFGGEN_DEV_STATUS_CTRL2, | |
8198 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGGEN_DEV_STATUS_CTRL2, | |
8199 | sizeof(uint32)), PCIECFGREG_PML1_SUB_CTRL1, | |
8200 | dhd_pcie_config_read(dhd->bus->osh, PCIECFGREG_PML1_SUB_CTRL1, | |
8201 | sizeof(uint32)))); | |
8202 | ||
8203 | /* hwnbu-twiki.sj.broadcom.com/twiki/pub/Mwgroup/ | |
8204 | * CurrentPcieGen2ProgramGuide/pcie_ep.htm | |
8205 | */ | |
8206 | DHD_ERROR(("ClkReq0(0x%x)=0x%x ClkReq1(0x%x)=0x%x ClkReq2(0x%x)=0x%x " | |
8207 | "ClkReq3(0x%x)=0x%x\n", PCIECFGREG_PHY_DBG_CLKREQ0, | |
8208 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ0), | |
8209 | PCIECFGREG_PHY_DBG_CLKREQ1, | |
8210 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ1), | |
8211 | PCIECFGREG_PHY_DBG_CLKREQ2, | |
8212 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ2), | |
8213 | PCIECFGREG_PHY_DBG_CLKREQ3, | |
8214 | dhd_pcie_corereg_read(dhd->bus->sih, PCIECFGREG_PHY_DBG_CLKREQ3))); | |
8215 | ||
8216 | DHD_ERROR(("Pcie RC Error Status Val=0x%x\n", | |
8217 | dhdpcie_rc_access_cap(dhd->bus, PCIE_EXTCAP_ID_ERR, | |
8218 | PCIE_EXTCAP_AER_UCERR_OFFSET, TRUE, FALSE, 0))); | |
8219 | ||
8220 | DHD_ERROR(("RootPort PCIe linkcap=0x%08x\n", | |
8221 | dhd_debug_get_rc_linkcap(dhd->bus))); | |
8222 | ||
8223 | DHD_ERROR(("\n ------- DUMPING INTR enable/disable counters ------- \r\n")); | |
8224 | DHD_ERROR(("resume_intr_enable_count=%lu dpc_intr_enable_count=%lu\n" | |
8225 | "isr_intr_disable_count=%lu suspend_intr_disable_count=%lu\n" | |
8226 | "dpc_return_busdown_count=%lu\n", | |
8227 | dhd->bus->resume_intr_enable_count, dhd->bus->dpc_intr_enable_count, | |
8228 | dhd->bus->isr_intr_disable_count, dhd->bus->suspend_intr_disable_count, | |
8229 | dhd->bus->dpc_return_busdown_count)); | |
8230 | ||
8231 | } | |
8232 | dhd_prot_debug_dma_info_print(dhd); | |
8233 | #ifdef DHD_FW_COREDUMP | |
8234 | if (dhd->memdump_enabled) { | |
8235 | #ifdef DHD_SSSR_DUMP | |
8236 | if (dhd->sssr_inited) { | |
8237 | dhdpcie_sssr_dump(dhd); | |
8238 | } | |
8239 | #endif /* DHD_SSSR_DUMP */ | |
8240 | } | |
8241 | #endif /* DHD_FW_COREDUMP */ | |
8242 | return 0; | |
8243 | } | |
8244 | ||
8245 | int | |
8246 | dhd_prot_ringupd_dump(dhd_pub_t *dhd, struct bcmstrbuf *b) | |
8247 | { | |
8248 | uint32 *ptr; | |
8249 | uint32 value; | |
8250 | uint32 i; | |
8251 | uint32 max_h2d_queues = dhd_bus_max_h2d_queues(dhd->bus); | |
8252 | ||
8253 | OSL_CACHE_INV((void *)dhd->prot->d2h_dma_indx_wr_buf.va, | |
8254 | dhd->prot->d2h_dma_indx_wr_buf.len); | |
8255 | ||
8256 | ptr = (uint32 *)(dhd->prot->d2h_dma_indx_wr_buf.va); | |
8257 | ||
8258 | bcm_bprintf(b, "\n max_tx_queues %d\n", max_h2d_queues); | |
8259 | ||
8260 | bcm_bprintf(b, "\nRPTR block H2D common rings, 0x%04x\n", ptr); | |
8261 | value = ltoh32(*ptr); | |
8262 | bcm_bprintf(b, "\tH2D CTRL: value 0x%04x\n", value); | |
8263 | ptr++; | |
8264 | value = ltoh32(*ptr); | |
8265 | bcm_bprintf(b, "\tH2D RXPOST: value 0x%04x\n", value); | |
8266 | ||
8267 | ptr++; | |
8268 | bcm_bprintf(b, "RPTR block Flow rings , 0x%04x\n", ptr); | |
8269 | for (i = BCMPCIE_H2D_COMMON_MSGRINGS; i < max_h2d_queues; i++) { | |
8270 | value = ltoh32(*ptr); | |
8271 | bcm_bprintf(b, "\tflowring ID %d: value 0x%04x\n", i, value); | |
8272 | ptr++; | |
8273 | } | |
8274 | ||
8275 | OSL_CACHE_INV((void *)dhd->prot->h2d_dma_indx_rd_buf.va, | |
8276 | dhd->prot->h2d_dma_indx_rd_buf.len); | |
8277 | ||
8278 | ptr = (uint32 *)(dhd->prot->h2d_dma_indx_rd_buf.va); | |
8279 | ||
8280 | bcm_bprintf(b, "\nWPTR block D2H common rings, 0x%04x\n", ptr); | |
8281 | value = ltoh32(*ptr); | |
8282 | bcm_bprintf(b, "\tD2H CTRLCPLT: value 0x%04x\n", value); | |
8283 | ptr++; | |
8284 | value = ltoh32(*ptr); | |
8285 | bcm_bprintf(b, "\tD2H TXCPLT: value 0x%04x\n", value); | |
8286 | ptr++; | |
8287 | value = ltoh32(*ptr); | |
8288 | bcm_bprintf(b, "\tD2H RXCPLT: value 0x%04x\n", value); | |
8289 | ||
8290 | return 0; | |
8291 | } | |
8292 | ||
8293 | uint32 | |
8294 | dhd_prot_metadata_dbg_set(dhd_pub_t *dhd, bool val) | |
8295 | { | |
8296 | dhd_prot_t *prot = dhd->prot; | |
8297 | #if DHD_DBG_SHOW_METADATA | |
8298 | prot->metadata_dbg = val; | |
8299 | #endif | |
8300 | return (uint32)prot->metadata_dbg; | |
8301 | } | |
8302 | ||
8303 | uint32 | |
8304 | dhd_prot_metadata_dbg_get(dhd_pub_t *dhd) | |
8305 | { | |
8306 | dhd_prot_t *prot = dhd->prot; | |
8307 | return (uint32)prot->metadata_dbg; | |
8308 | } | |
8309 | ||
8310 | uint32 | |
8311 | dhd_prot_metadatalen_set(dhd_pub_t *dhd, uint32 val, bool rx) | |
8312 | { | |
8313 | dhd_prot_t *prot = dhd->prot; | |
8314 | if (rx) | |
8315 | prot->rx_metadata_offset = (uint16)val; | |
8316 | else | |
8317 | prot->tx_metadata_offset = (uint16)val; | |
8318 | return dhd_prot_metadatalen_get(dhd, rx); | |
8319 | } | |
8320 | ||
8321 | uint32 | |
8322 | dhd_prot_metadatalen_get(dhd_pub_t *dhd, bool rx) | |
8323 | { | |
8324 | dhd_prot_t *prot = dhd->prot; | |
8325 | if (rx) | |
8326 | return prot->rx_metadata_offset; | |
8327 | else | |
8328 | return prot->tx_metadata_offset; | |
8329 | } | |
8330 | ||
8331 | /** optimization to write "n" tx items at a time to ring */ | |
8332 | uint32 | |
8333 | dhd_prot_txp_threshold(dhd_pub_t *dhd, bool set, uint32 val) | |
8334 | { | |
8335 | dhd_prot_t *prot = dhd->prot; | |
8336 | if (set) | |
8337 | prot->txp_threshold = (uint16)val; | |
8338 | val = prot->txp_threshold; | |
8339 | return val; | |
8340 | } | |
8341 | ||
8342 | #ifdef DHD_RX_CHAINING | |
8343 | ||
8344 | static INLINE void BCMFASTPATH | |
8345 | dhd_rxchain_reset(rxchain_info_t *rxchain) | |
8346 | { | |
8347 | rxchain->pkt_count = 0; | |
8348 | } | |
8349 | ||
8350 | static void BCMFASTPATH | |
8351 | dhd_rxchain_frame(dhd_pub_t *dhd, void *pkt, uint ifidx) | |
8352 | { | |
8353 | uint8 *eh; | |
8354 | uint8 prio; | |
8355 | dhd_prot_t *prot = dhd->prot; | |
8356 | rxchain_info_t *rxchain = &prot->rxchain; | |
8357 | ||
8358 | ASSERT(!PKTISCHAINED(pkt)); | |
8359 | ASSERT(PKTCLINK(pkt) == NULL); | |
8360 | ASSERT(PKTCGETATTR(pkt) == 0); | |
8361 | ||
8362 | eh = PKTDATA(dhd->osh, pkt); | |
8363 | prio = IP_TOS46(eh + ETHER_HDR_LEN) >> IPV4_TOS_PREC_SHIFT; | |
8364 | ||
8365 | if (rxchain->pkt_count && !(PKT_CTF_CHAINABLE(dhd, ifidx, eh, prio, rxchain->h_sa, | |
8366 | rxchain->h_da, rxchain->h_prio))) { | |
8367 | /* Different flow - First release the existing chain */ | |
8368 | dhd_rxchain_commit(dhd); | |
8369 | } | |
8370 | ||
8371 | /* For routers, with HNDCTF, link the packets using PKTSETCLINK, */ | |
8372 | /* so that the chain can be handed off to CTF bridge as is. */ | |
8373 | if (rxchain->pkt_count == 0) { | |
8374 | /* First packet in chain */ | |
8375 | rxchain->pkthead = rxchain->pkttail = pkt; | |
8376 | ||
8377 | /* Keep a copy of ptr to ether_da, ether_sa and prio */ | |
8378 | rxchain->h_da = ((struct ether_header *)eh)->ether_dhost; | |
8379 | rxchain->h_sa = ((struct ether_header *)eh)->ether_shost; | |
8380 | rxchain->h_prio = prio; | |
8381 | rxchain->ifidx = ifidx; | |
8382 | rxchain->pkt_count++; | |
8383 | } else { | |
8384 | /* Same flow - keep chaining */ | |
8385 | PKTSETCLINK(rxchain->pkttail, pkt); | |
8386 | rxchain->pkttail = pkt; | |
8387 | rxchain->pkt_count++; | |
8388 | } | |
8389 | ||
8390 | if ((dhd_rx_pkt_chainable(dhd, ifidx)) && (!ETHER_ISMULTI(rxchain->h_da)) && | |
8391 | ((((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IP)) || | |
8392 | (((struct ether_header *)eh)->ether_type == HTON16(ETHER_TYPE_IPV6)))) { | |
8393 | PKTSETCHAINED(dhd->osh, pkt); | |
8394 | PKTCINCRCNT(rxchain->pkthead); | |
8395 | PKTCADDLEN(rxchain->pkthead, PKTLEN(dhd->osh, pkt)); | |
8396 | } else { | |
8397 | dhd_rxchain_commit(dhd); | |
8398 | return; | |
8399 | } | |
8400 | ||
8401 | /* If we have hit the max chain length, dispatch the chain and reset */ | |
8402 | if (rxchain->pkt_count >= DHD_PKT_CTF_MAX_CHAIN_LEN) { | |
8403 | dhd_rxchain_commit(dhd); | |
8404 | } | |
8405 | } | |
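||
| /* |
|  * Chaining walkthrough (summary of the logic above): packets P1..P3 of one |
|  * IPv4 flow arrive on the same ifidx. P1 opens the chain and records the |
|  * flow's da/sa/prio; P2 and P3 pass PKT_CTF_CHAINABLE() and are linked via |
|  * PKTSETCLINK(). The chain is committed when a packet from a different flow |
|  * arrives, when a multicast or non-IP packet is seen, or when pkt_count |
|  * reaches DHD_PKT_CTF_MAX_CHAIN_LEN. |
|  */ |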
8406 | ||
8407 | static void BCMFASTPATH | |
8408 | dhd_rxchain_commit(dhd_pub_t *dhd) | |
8409 | { | |
8410 | dhd_prot_t *prot = dhd->prot; | |
8411 | rxchain_info_t *rxchain = &prot->rxchain; | |
8412 | ||
8413 | #ifdef DHD_WAKE_STATUS | |
8414 | int pkt_wake = bcmpcie_set_get_wake(dhd->bus, 0); | |
8415 | #endif /* DHD_WAKE_STATUS */ | |
8416 | ||
8417 | if (rxchain->pkt_count == 0) | |
8418 | return; | |
8419 | ||
8420 | /* Release the packets to dhd_linux */ | |
8421 | #ifdef DHD_WAKE_STATUS | |
8422 | dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count, pkt_wake); | |
8423 | #else | |
8424 | dhd_bus_rx_frame(dhd->bus, rxchain->pkthead, rxchain->ifidx, rxchain->pkt_count); | |
8425 | #endif /* DHD_WAKE_STATUS */ | |
8426 | ||
8427 | /* Reset the chain */ | |
8428 | dhd_rxchain_reset(rxchain); | |
8429 | } | |
8430 | ||
8431 | #endif /* DHD_RX_CHAINING */ | |
8432 | ||
8433 | ||
8434 | #ifdef IDLE_TX_FLOW_MGMT | |
int
dhd_prot_flow_ring_resume(dhd_pub_t *dhd, flow_ring_node_t *flow_ring_node)
{
	tx_idle_flowring_resume_request_t *flow_resume_rqst;
	msgbuf_ring_t *flow_ring;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 alloced = 0;
	msgbuf_ring_t *ctrl_ring = &prot->h2dring_ctrl_subn;

	/* Fetch a pre-initialized msgbuf_ring from the flowring pool */
	flow_ring = dhd_prot_flowrings_pool_fetch(dhd, flow_ring_node->flowid);
	if (flow_ring == NULL) {
		DHD_ERROR(("%s: dhd_prot_flowrings_pool_fetch TX Flowid %d failed\n",
			__FUNCTION__, flow_ring_node->flowid));
		return BCME_NOMEM;
	}
#ifdef PCIE_INB_DW
	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK) {
		/* Release the ring fetched above so it is not leaked on this
		 * error path.
		 */
		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
		return BCME_ERROR;
	}
#endif /* PCIE_INB_DW */

	DHD_GENERAL_LOCK(dhd, flags);

	/* Request for ctrl_ring buffer space */
	flow_resume_rqst = (tx_idle_flowring_resume_request_t *)
		dhd_prot_alloc_ring_space(dhd, ctrl_ring, 1, &alloced, FALSE);

	if (flow_resume_rqst == NULL) {
		dhd_prot_flowrings_pool_release(dhd, flow_ring_node->flowid, flow_ring);
		DHD_ERROR(("%s: Flow resume Req flowid %d - failure ring space\n",
			__FUNCTION__, flow_ring_node->flowid));
		DHD_GENERAL_UNLOCK(dhd, flags);
#ifdef PCIE_INB_DW
		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
#endif
		return BCME_NOMEM;
	}

	flow_ring_node->prot_info = (void *)flow_ring;

	/* Common msg buf hdr */
	flow_resume_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_RESUME;
	flow_resume_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex;
	flow_resume_rqst->msg.request_id = htol32(0); /* TBD */

	flow_resume_rqst->msg.epoch = ctrl_ring->seqnum % H2D_EPOCH_MODULO;
	ctrl_ring->seqnum++;

	flow_resume_rqst->flow_ring_id = htol16((uint16)flow_ring_node->flowid);
	DHD_ERROR(("%s Send Flow resume Req flow ID %d\n",
		__FUNCTION__, flow_ring_node->flowid));

	/* Update the flow_ring's WRITE index */
	if (IDMA_ACTIVE(dhd) || dhd->dma_h2d_ring_upd_support) {
		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
			H2D_DMA_INDX_WR_UPD, flow_ring->idx);
	} else if (IFRM_ACTIVE(dhd) && (flow_ring->idx >= BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START)) {
		dhd_prot_dma_indx_set(dhd, flow_ring->wr,
			H2D_IFRM_INDX_WR_UPD,
			(flow_ring->idx - BCMPCIE_H2D_MSGRING_TXFLOW_IDX_START));
	} else {
		dhd_bus_cmn_writeshared(dhd->bus, &(flow_ring->wr),
			sizeof(uint16), RING_WR_UPD, flow_ring->idx);
	}

	/* update control subn ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ctrl_ring, flow_resume_rqst, 1);

	DHD_GENERAL_UNLOCK(dhd, flags);
#ifdef PCIE_INB_DW
	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
#endif

	return BCME_OK;
} /* dhd_prot_flow_ring_resume */

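/**
 * Request suspension of a batch of idle tx flowrings with a single
 * MSG_TYPE_FLOW_RING_SUSPEND message carrying 'count' ring ids.
 */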
int
dhd_prot_flow_ring_batch_suspend_request(dhd_pub_t *dhd, uint16 *ringid, uint16 count)
{
	tx_idle_flowring_suspend_request_t *flow_suspend_rqst;
	dhd_prot_t *prot = dhd->prot;
	unsigned long flags;
	uint16 index;
	uint16 alloced = 0;
	msgbuf_ring_t *ring = &prot->h2dring_ctrl_subn;

#ifdef PCIE_INB_DW
	if (dhd_prot_inc_hostactive_devwake_assert(dhd->bus) != BCME_OK)
		return BCME_ERROR;
#endif /* PCIE_INB_DW */

	DHD_GENERAL_LOCK(dhd, flags);

	/* Request for ring buffer space */
	flow_suspend_rqst = (tx_idle_flowring_suspend_request_t *)
		dhd_prot_alloc_ring_space(dhd, ring, 1, &alloced, FALSE);

	if (flow_suspend_rqst == NULL) {
		DHD_GENERAL_UNLOCK(dhd, flags);
		DHD_ERROR(("%s: Flow suspend Req - failure ring space\n", __FUNCTION__));
#ifdef PCIE_INB_DW
		dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
#endif
		return BCME_NOMEM;
	}

	/* Common msg buf hdr */
	flow_suspend_rqst->msg.msg_type = MSG_TYPE_FLOW_RING_SUSPEND;
	/* flow_suspend_rqst->msg.if_id = (uint8)flow_ring_node->flow_info.ifindex; */
	flow_suspend_rqst->msg.request_id = htol32(0); /* TBD */

	flow_suspend_rqst->msg.epoch = ring->seqnum % H2D_EPOCH_MODULO;
	ring->seqnum++;

	/* Update flow id info */
	for (index = 0; index < count; index++) {
		flow_suspend_rqst->ring_id[index] = ringid[index];
	}
	flow_suspend_rqst->num = count;

	DHD_ERROR(("%s: sending batch suspend, count %d\n", __FUNCTION__, count));

	/* update ring's WR index and ring doorbell to dongle */
	dhd_prot_ring_write_complete(dhd, ring, flow_suspend_rqst, 1);
	DHD_GENERAL_UNLOCK(dhd, flags);
#ifdef PCIE_INB_DW
	dhd_prot_dec_hostactive_ack_pending_dsreq(dhd->bus);
#endif

	return BCME_OK;
}
#endif /* IDLE_TX_FLOW_MGMT */


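/**
 * Format the dongle's extended trap data into 'b'. When 'raw' is set, dumps
 * the raw 32-bit words; otherwise decodes the supported TLVs (trap signature,
 * stack dump, backplane error registers).
 */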
int dhd_prot_dump_extended_trap(dhd_pub_t *dhdp, struct bcmstrbuf *b, bool raw)
{
	uint32 i;
	uint32 *ext_data;
	hnd_ext_trap_hdr_t *hdr;
	bcm_tlv_t *tlv;
	trap_t *tr;
	uint32 *stack;
	hnd_ext_trap_bp_err_t *bpe;
	uint32 raw_len;

	ext_data = dhdp->extended_trap_data;

	/* return if there is no extended trap data */
	if (!ext_data || !(dhdp->dongle_trap_data & D2H_DEV_EXT_TRAP_DATA)) {
		bcm_bprintf(b, "%d (0x%x)", dhdp->dongle_trap_data, dhdp->dongle_trap_data);
		return BCME_OK;
	}

	bcm_bprintf(b, "Extended trap data\n");

	/* First word is original trap_data */
	bcm_bprintf(b, "trap_data = 0x%08x\n", *ext_data);
	ext_data++;

	/* Followed by the extended trap data header */
	hdr = (hnd_ext_trap_hdr_t *)ext_data;
	bcm_bprintf(b, "version: %d, len: %d\n", hdr->version, hdr->len);

	if (raw) {
		/* Length in 32-bit words: header words (hnd_ext_trap_hdr_t is
		 * assumed to be word-aligned) plus the TLV payload rounded up.
		 */
		raw_len = (sizeof(hnd_ext_trap_hdr_t) / sizeof(uint32)) +
			(hdr->len / 4) + (hdr->len % 4 ? 1 : 0);
		for (i = 0; i < raw_len; i++) {
			bcm_bprintf(b, "0x%08x ", ext_data[i]);
			if (i % 4 == 3)
				bcm_bprintf(b, "\n");
		}
		return BCME_OK;
	}

	/* Extract the various supported TLVs from the extended trap data */
	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_SIGNATURE);
	if (tlv) {
		bcm_bprintf(b, "\nTAG_TRAP_SIGNATURE len: %d\n", tlv->len);
		tr = (trap_t *)tlv->data;

		bcm_bprintf(b, "TRAP %x: pc %x, lr %x, sp %x, cpsr %x, spsr %x\n",
			tr->type, tr->pc, tr->r14, tr->r13, tr->cpsr, tr->spsr);
		bcm_bprintf(b, " r0 %x, r1 %x, r2 %x, r3 %x, r4 %x, r5 %x, r6 %x\n",
			tr->r0, tr->r1, tr->r2, tr->r3, tr->r4, tr->r5, tr->r6);
		bcm_bprintf(b, " r7 %x, r8 %x, r9 %x, r10 %x, r11 %x, r12 %x\n",
			tr->r7, tr->r8, tr->r9, tr->r10, tr->r11, tr->r12);
	}

	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_STACK);
	if (tlv) {
		bcm_bprintf(b, "\nTAG_TRAP_STACK len: %d\n", tlv->len);
		stack = (uint32 *)tlv->data;
		for (i = 0; i < (uint32)(tlv->len / 4); i++) {
			bcm_bprintf(b, " 0x%08x\n", *stack);
			stack++;
		}
	}

	tlv = bcm_parse_tlvs(hdr->data, hdr->len, TAG_TRAP_BACKPLANE);
	if (tlv) {
		bcm_bprintf(b, "\nTAG_TRAP_BACKPLANE len: %d\n", tlv->len);
		bpe = (hnd_ext_trap_bp_err_t *)tlv->data;
		bcm_bprintf(b, " error: %x\n", bpe->error);
		bcm_bprintf(b, " coreid: %x\n", bpe->coreid);
		bcm_bprintf(b, " baseaddr: %x\n", bpe->baseaddr);
		bcm_bprintf(b, " ioctrl: %x\n", bpe->ioctrl);
		bcm_bprintf(b, " iostatus: %x\n", bpe->iostatus);
		bcm_bprintf(b, " resetctrl: %x\n", bpe->resetctrl);
		bcm_bprintf(b, " resetstatus: %x\n", bpe->resetstatus);
		bcm_bprintf(b, " errlogctrl: %x\n", bpe->errlogctrl);
		bcm_bprintf(b, " errlogdone: %x\n", bpe->errlogdone);
		bcm_bprintf(b, " errlogstatus: %x\n", bpe->errlogstatus);
		bcm_bprintf(b, " errlogaddrlo: %x\n", bpe->errlogaddrlo);
		bcm_bprintf(b, " errlogaddrhi: %x\n", bpe->errlogaddrhi);
		bcm_bprintf(b, " errlogid: %x\n", bpe->errlogid);
		bcm_bprintf(b, " errloguser: %x\n", bpe->errloguser);
		bcm_bprintf(b, " errlogflags: %x\n", bpe->errlogflags);
	}

	return BCME_OK;
}


#ifdef BCMPCIE
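/**
 * Send a host timestamp request (MSG_TYPE_HOSTTIMSTAMP) to the dongle. The
 * TLV payload is copied into the dedicated hostts_req_buf DMA buffer; only
 * one request may be outstanding at a time.
 */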
int
dhd_prot_send_host_timestamp(dhd_pub_t *dhdp, uchar *tlvs, uint16 tlv_len,
	uint16 seqnum, uint16 xt_id)
{
	dhd_prot_t *prot = dhdp->prot;
	host_timestamp_msg_t *ts_req;
	unsigned long flags;
	uint16 alloced = 0;
	uchar *ts_tlv_buf;

	if ((tlvs == NULL) || (tlv_len == 0)) {
		DHD_ERROR(("%s: argument error tlv: %p, tlv_len %d\n",
			__FUNCTION__, tlvs, tlv_len));
		return -1;
	}
#ifdef PCIE_INB_DW
	if (dhd_prot_inc_hostactive_devwake_assert(dhdp->bus) != BCME_OK)
		return BCME_ERROR;
#endif /* PCIE_INB_DW */

	DHD_GENERAL_LOCK(dhdp, flags);

	/* if a host TS request is already pending, bail out */
	if (prot->hostts_req_buf_inuse == TRUE) {
		DHD_ERROR(("one host TS request already pending at device\n"));
		DHD_GENERAL_UNLOCK(dhdp, flags);
#ifdef PCIE_INB_DW
		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
#endif
		return -1;
	}

	/* Request for ctrl ring buffer space */
	ts_req = (host_timestamp_msg_t *)dhd_prot_alloc_ring_space(dhdp, &prot->h2dring_ctrl_subn,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D, &alloced, FALSE);
	if (ts_req == NULL) {
		DHD_ERROR(("couldn't allocate space on msgring to send host TS request\n"));
		DHD_GENERAL_UNLOCK(dhdp, flags);
#ifdef PCIE_INB_DW
		dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
#endif
		return -1;
	}

	/* Common msg buf hdr */
	ts_req->msg.msg_type = MSG_TYPE_HOSTTIMSTAMP;
	ts_req->msg.if_id = 0;
	ts_req->msg.flags = prot->h2dring_ctrl_subn.current_phase;
	ts_req->msg.request_id = DHD_H2D_HOSTTS_REQ_PKTID;

	ts_req->msg.epoch = prot->h2dring_ctrl_subn.seqnum % H2D_EPOCH_MODULO;
	prot->h2dring_ctrl_subn.seqnum++;

	ts_req->xt_id = xt_id;
	ts_req->seqnum = seqnum;
	/* populate TS req buffer info */
	ts_req->input_data_len = htol16(tlv_len);
	ts_req->host_buf_addr.high = htol32(PHYSADDRHI(prot->hostts_req_buf.pa));
	ts_req->host_buf_addr.low = htol32(PHYSADDRLO(prot->hostts_req_buf.pa));
	/* copy the TS TLV payload into the host buffer */
	ts_tlv_buf = (void *)prot->hostts_req_buf.va;
	prot->hostts_req_buf_inuse = TRUE;
	memcpy(ts_tlv_buf, tlvs, tlv_len);

	OSL_CACHE_FLUSH((void *)prot->hostts_req_buf.va, tlv_len);

	if (ISALIGNED(ts_tlv_buf, DMA_ALIGN_LEN) == FALSE) {
		DHD_ERROR(("host TS req buffer address unaligned\n"));
	}

	DHD_CTL(("submitted host TS request: request_id %d, data_len %d, xt_id %d, seq %d\n",
		ts_req->msg.request_id, ts_req->input_data_len,
		ts_req->xt_id, ts_req->seqnum));

	/* update write ptr and raise the h2d doorbell interrupt */
	dhd_prot_ring_write_complete(dhdp, &prot->h2dring_ctrl_subn, ts_req,
		DHD_FLOWRING_DEFAULT_NITEMS_POSTED_H2D);
	DHD_GENERAL_UNLOCK(dhdp, flags);
#ifdef PCIE_INB_DW
	dhd_prot_dec_hostactive_ack_pending_dsreq(dhdp->bus);
#endif

	return 0;
} /* dhd_prot_send_host_timestamp */

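/**
 * Get/set datapath tx timestamp logging. When 'set' is TRUE the flag is
 * updated to 'enable'; the current value is always returned.
 */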
bool
dhd_prot_data_path_tx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->tx_ts_log_enabled = enable;

	return dhd->prot->tx_ts_log_enabled;
}

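/** Get/set datapath rx timestamp logging; same semantics as the tx variant. */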
bool
dhd_prot_data_path_rx_timestamp_logging(dhd_pub_t *dhd, bool enable, bool set)
{
	if (set)
		dhd->prot->rx_ts_log_enabled = enable;

	return dhd->prot->rx_ts_log_enabled;
}
#endif /* BCMPCIE */

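/** Free the DMA buffers used for the h2d write and d2h read index arrays. */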
void
dhd_prot_dma_indx_free(dhd_pub_t *dhd)
{
	dhd_prot_t *prot = dhd->prot;

	dhd_dma_buf_free(dhd, &prot->h2d_dma_indx_wr_buf);
	dhd_dma_buf_free(dhd, &prot->d2h_dma_indx_rd_buf);
}

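/**
 * Handle a firmware timestamp event from the D2H completion path: audit and
 * free the pktid, repost a TS rx buffer if reposting is enabled, and pass the
 * payload to dhd_timesync_handle_fw_timestamp(). Logs an error if the message
 * arrives while DHD_TIMESYNC is not compiled in.
 */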
static void BCMFASTPATH
dhd_prot_process_fw_timestamp(dhd_pub_t *dhd, void *buf)
{
#ifdef DHD_TIMESYNC
	fw_timestamp_event_msg_t *resp;
	uint32 pktid;
	uint16 buflen, seqnum;
	void *pkt;
	unsigned long flags;

	resp = (fw_timestamp_event_msg_t *)buf;
	pktid = ltoh32(resp->msg.request_id);
	buflen = ltoh16(resp->buf_len);
	seqnum = ltoh16(resp->seqnum);

#if defined(DHD_PKTID_AUDIT_RING)
	DHD_PKTID_AUDIT(dhd, dhd->prot->pktid_ctrl_map, pktid,
		DHD_DUPLICATE_FREE);
#endif /* DHD_PKTID_AUDIT_RING */

	DHD_INFO(("id 0x%04x, len %d, phase 0x%02x, seqnum %d\n",
		pktid, buflen, resp->msg.flags, seqnum));

	if (!dhd->prot->cur_ts_bufs_posted) {
		DHD_ERROR(("no ts buffers posted, but got a completion\n"));
		return;
	}

	dhd->prot->cur_ts_bufs_posted--;
	if (dhd->prot->max_tsbufpost > 0)
		dhd_msgbuf_rxbuf_post_ts_bufs(dhd);

	DHD_GENERAL_LOCK(dhd, flags);
	pkt = dhd_prot_packet_get(dhd, pktid, PKTTYPE_TSBUF_RX, TRUE);
	DHD_GENERAL_UNLOCK(dhd, flags);

	if (!pkt) {
		DHD_ERROR(("no ts buffer associated with pktid 0x%04x\n", pktid));
		return;
	}

	PKTSETLEN(dhd->osh, pkt, buflen);
	dhd_timesync_handle_fw_timestamp(dhd->ts, PKTDATA(dhd->osh, pkt), buflen, seqnum);
#ifdef DHD_USE_STATIC_CTRLBUF
	PKTFREE_STATIC(dhd->osh, pkt, TRUE);
#else
	PKTFREE(dhd->osh, pkt, TRUE);
#endif /* DHD_USE_STATIC_CTRLBUF */
#else /* DHD_TIMESYNC */
	DHD_ERROR(("Timesync feature not compiled in, but got a FW TS message\n"));
#endif /* DHD_TIMESYNC */
}