Merge branch 'wl12xx-next' into for-linville
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / net / wireless / ti / wlcore / main.c
1
2 /*
3 * This file is part of wl1271
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
53
/* Number of times to retry booting the firmware before giving up.
 * (The original file defined this macro twice; the duplicate is removed.)
 */
#define WL1271_BOOT_RETRIES 3

/* Module parameters; folded into wl->conf by wlcore_adjust_conf(). */
static char *fwlog_param;
static int bug_on_recovery = -1;
static int no_recovery     = -1;

static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
70 {
71 int ret;
72
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 return -EINVAL;
75
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 return 0;
78
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 return 0;
81
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
83 if (ret < 0)
84 return ret;
85
86 wl1271_info("Association completed.");
87 return 0;
88 }
89
90 static void wl1271_reg_notify(struct wiphy *wiphy,
91 struct regulatory_request *request)
92 {
93 struct ieee80211_supported_band *band;
94 struct ieee80211_channel *ch;
95 int i;
96 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 struct wl1271 *wl = hw->priv;
98
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
103 continue;
104
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
108
109 }
110
111 wlcore_regdomain_config(wl);
112 }
113
114 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
115 bool enable)
116 {
117 int ret = 0;
118
119 /* we should hold wl->mutex */
120 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
121 if (ret < 0)
122 goto out;
123
124 if (enable)
125 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
126 else
127 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
128 out:
129 return ret;
130 }
131
/*
 * This function is called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled.
 * Caller must hold wl->mutex (see wl1271_set_rx_streaming()).
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret = 0;
	int period = wl->conf.rx_streaming.interval;

	/* don't reconfigure if rx_streaming is disabled */
	if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
		goto out;

	/* reconfigure/disable according to new streaming_period */
	if (period &&
	    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		ret = wl1271_set_rx_streaming(wl, wlvif, true);
	else {
		ret = wl1271_set_rx_streaming(wl, wlvif, false);
		/* don't cancel_work_sync since we might deadlock */
		del_timer_sync(&wlvif->rx_streaming_timer);
	}
out:
	return ret;
}
159
/*
 * Deferred work: enable RX streaming on a vif.  All preconditions are
 * re-checked under wl->mutex (they may have changed between queueing and
 * execution); on success the inactivity timer that will disable
 * streaming again is armed.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
						rx_streaming_enable_work);
	struct wl1271 *wl = wlvif->wl;

	mutex_lock(&wl->mutex);

	/* bail out if already streaming, or if streaming is not wanted */
	if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* an interval of 0 means rx streaming is disabled by config */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, wlvif, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wlvif->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
195
196 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
197 {
198 int ret;
199 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
200 rx_streaming_disable_work);
201 struct wl1271 *wl = wlvif->wl;
202
203 mutex_lock(&wl->mutex);
204
205 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
206 goto out;
207
208 ret = wl1271_ps_elp_wakeup(wl);
209 if (ret < 0)
210 goto out;
211
212 ret = wl1271_set_rx_streaming(wl, wlvif, false);
213 if (ret)
214 goto out_sleep;
215
216 out_sleep:
217 wl1271_ps_elp_sleep(wl);
218 out:
219 mutex_unlock(&wl->mutex);
220 }
221
222 static void wl1271_rx_streaming_timer(unsigned long data)
223 {
224 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
225 struct wl1271 *wl = wlvif->wl;
226 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
227 }
228
229 /* wl->mutex must be taken */
230 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
231 {
232 /* if the watchdog is not armed, don't do anything */
233 if (wl->tx_allocated_blocks == 0)
234 return;
235
236 cancel_delayed_work(&wl->tx_watchdog_work);
237 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
238 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
239 }
240
/*
 * TX watchdog: runs when no TX blocks have been freed by the FW for
 * tx_watchdog_timeout ms.  Benign causes (remain-on-channel, a running
 * scan, an AP buffering for sleeping stations) just re-arm the watchdog;
 * otherwise TX is considered genuinely stuck and FW recovery is started.
 */
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;

	dwork = container_of(work, struct delayed_work, work);
	wl = container_of(dwork, struct wl1271, tx_watchdog_work);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* Tx went out in the meantime - everything is ok */
	if (unlikely(wl->tx_allocated_blocks == 0))
		goto out;

	/*
	 * if a ROC is in progress, we might not have any Tx for a long
	 * time (e.g. pending Tx on the non-ROC channels)
	 */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * if a scan is in progress, we might not have any Tx for a long
	 * time
	 */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
			     wl->conf.tx.tx_watchdog_timeout);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	/*
	 * AP might cache a frame for a long time for a sleeping station,
	 * so rearm the timer if there's an AP interface with stations. If
	 * Tx is genuinely stuck we will most hopefully discover it when all
	 * stations are removed due to inactivity.
	 */
	if (wl->active_sta_count) {
		wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
			     " %d stations",
			      wl->conf.tx.tx_watchdog_timeout,
			      wl->active_sta_count);
		wl12xx_rearm_tx_watchdog_locked(wl);
		goto out;
	}

	wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
		     wl->conf.tx.tx_watchdog_timeout);
	wl12xx_queue_recovery_work(wl);

out:
	mutex_unlock(&wl->mutex);
}
302
303 static void wlcore_adjust_conf(struct wl1271 *wl)
304 {
305 /* Adjust settings according to optional module parameters */
306
307 if (fwlog_param) {
308 if (!strcmp(fwlog_param, "continuous")) {
309 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
310 } else if (!strcmp(fwlog_param, "ondemand")) {
311 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
312 } else if (!strcmp(fwlog_param, "dbgpins")) {
313 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
314 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
315 } else if (!strcmp(fwlog_param, "disable")) {
316 wl->conf.fwlog.mem_blocks = 0;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
318 } else {
319 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
320 }
321 }
322
323 if (bug_on_recovery != -1)
324 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
325
326 if (no_recovery != -1)
327 wl->conf.recovery.no_recovery = (u8) no_recovery;
328 }
329
330 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
331 struct wl12xx_vif *wlvif,
332 u8 hlid, u8 tx_pkts)
333 {
334 bool fw_ps;
335
336 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
337
338 /*
339 * Wake up from high level PS if the STA is asleep with too little
340 * packets in FW or if the STA is awake.
341 */
342 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
343 wl12xx_ps_link_end(wl, wlvif, hlid);
344
345 /*
346 * Start high-level PS if the STA is asleep with enough blocks in FW.
347 * Make an exception if this is the only connected link. In this
348 * case FW-memory congestion is less of a problem.
349 * Note that a single connected STA means 3 active links, since we must
350 * account for the global and broadcast AP links. The "fw_ps" check
351 * assures us the third link is a STA connected to the AP. Otherwise
352 * the FW would not set the PSM bit.
353 */
354 else if (wl->active_link_count > 3 && fw_ps &&
355 tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
356 wl12xx_ps_link_start(wl, wlvif, hlid, true);
357 }
358
359 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
360 struct wl12xx_vif *wlvif,
361 struct wl_fw_status_2 *status)
362 {
363 u32 cur_fw_ps_map;
364 u8 hlid;
365
366 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
367 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
368 wl1271_debug(DEBUG_PSM,
369 "link ps prev 0x%x cur 0x%x changed 0x%x",
370 wl->ap_fw_ps_map, cur_fw_ps_map,
371 wl->ap_fw_ps_map ^ cur_fw_ps_map);
372
373 wl->ap_fw_ps_map = cur_fw_ps_map;
374 }
375
376 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
377 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
378 wl->links[hlid].allocated_pkts);
379 }
380
/*
 * Read the FW status area over the bus and fold it into driver state:
 *  - per-TX-queue and per-link freed-packet counters (8-bit wrap safe)
 *  - total freed TX blocks (32-bit wrap safe) and the TX watchdog
 *  - available TX blocks (never allowed to decrease here, see below)
 *  - AP links' power-save state and the host-chipset time offset
 *
 * Called with wl->mutex held.  Returns 0, or the negative error from
 * the raw bus read.
 */
static int wlcore_fw_status(struct wl1271 *wl,
			    struct wl_fw_status_1 *status_1,
			    struct wl_fw_status_2 *status_2)
{
	struct wl12xx_vif *wlvif;
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;
	size_t status_len;
	int ret;
	struct wl1271_link *lnk;

	/* status_1 is variable-length: it depends on the rx desc count */
	status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
		sizeof(*status_2) + wl->fw_status_priv_len;

	ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
				   status_len, false);
	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status_1->intr,
		     status_1->fw_rx_counter,
		     status_1->drv_rx_counter,
		     status_1->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status_2->counters.tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
	}


	for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
		u8 diff;
		lnk = &wl->links[i];

		/* prevent wrap-around in freed-packets counter */
		diff = (status_2->counters.tx_lnk_free_pkts[i] -
		       lnk->prev_freed_pkts) & 0xff;

		if (diff == 0)
			continue;

		lnk->allocated_pkts -= diff;
		lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];

		/* accumulate the prev_freed_pkts counter */
		lnk->total_freed_pkts += diff;
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <=
		   le32_to_cpu(status_2->total_released_blks)))
		freed_blocks = le32_to_cpu(status_2->total_released_blks) -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       le32_to_cpu(status_2->total_released_blks);

	wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);

	wl->tx_allocated_blocks -= freed_blocks;

	/*
	 * If the FW freed some blocks:
	 * If we still have allocated blocks - re-arm the timer, Tx is
	 * not stuck. Otherwise, cancel the timer (no Tx currently).
	 */
	if (freed_blocks) {
		if (wl->tx_allocated_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);
		else
			cancel_delayed_work(&wl->tx_watchdog_work);
	}

	avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	wl12xx_for_each_wlvif_ap(wl, wlvif) {
		wl12xx_irq_update_links_status(wl, wlvif, status_2);
	}

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)le32_to_cpu(status_2->fw_localtime);

	wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);

	return 0;
}
493
494 static void wl1271_flush_deferred_work(struct wl1271 *wl)
495 {
496 struct sk_buff *skb;
497
498 /* Pass all received frames to the network stack */
499 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
500 ieee80211_rx_ni(wl->hw, skb);
501
502 /* Return sent skbs to the network stack */
503 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
504 ieee80211_tx_status_ni(wl->hw, skb);
505 }
506
507 static void wl1271_netstack_work(struct work_struct *work)
508 {
509 struct wl1271 *wl =
510 container_of(work, struct wl1271, netstack_work);
511
512 do {
513 wl1271_flush_deferred_work(wl);
514 } while (skb_queue_len(&wl->deferred_rx_queue));
515 }
516
#define WL1271_IRQ_MAX_LOOPS 256

/*
 * Main interrupt processing loop, called with wl->mutex held.
 * Wakes the chip, then repeatedly reads the FW status and services the
 * pending interrupt causes (data RX/TX, events A/B, watchdogs) until no
 * interrupt bits remain or the loop budget is exhausted.
 *
 * Returns 0, or a negative error (including -EIO on a FW watchdog) —
 * the caller is expected to start recovery on error.
 */
static int wlcore_irq_locked(struct wl1271 *wl)
{
	int ret = 0;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
		if (ret < 0)
			goto out;

		wlcore_hw_tx_immediate_compl(wl);

		intr = le32_to_cpu(wl->fw_status_1->intr);
		intr &= WLCORE_ALL_INTR_MASK;
		if (!intr) {
			/* no cause bits set: we are done */
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("HW watchdog interrupt received! starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
			wl1271_error("SW watchdog interrupt received! "
				     "starting recovery.");
			wl->watchdog_recovery = true;
			ret = -EIO;

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			ret = wlcore_rx(wl, wl->fw_status_1);
			if (ret < 0)
				goto out;

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				ret = wlcore_tx_work_locked(wl);
				if (ret < 0)
					goto out;
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			ret = wlcore_hw_tx_delayed_compl(wl);
			if (ret < 0)
				goto out;

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			ret = wl1271_event_handle(wl, 0);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			ret = wl1271_event_handle(wl, 1);
			if (ret < 0)
				goto out;
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	return ret;
}
647
/*
 * Threaded IRQ handler.  Completes a pending ELP wakeup, defers the work
 * while suspended (marking it pending and disabling the irq), otherwise
 * runs the interrupt loop under wl->mutex and queues recovery on error.
 * TX work is folded into this handler when possible to avoid a redundant
 * work-queue round trip.
 */
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
	int ret;
	unsigned long flags;
	struct wl1271 *wl = cookie;

	/* complete the ELP completion */
	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
	if (wl->elp_compl) {
		complete(wl->elp_compl);
		wl->elp_compl = NULL;
	}

	if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
		/* don't enqueue a work right now. mark it as pending */
		set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
		wl1271_debug(DEBUG_IRQ, "should not enqueue work");
		disable_irq_nosync(wl->irq);
		pm_wakeup_event(wl->dev, 0);
		spin_unlock_irqrestore(&wl->wl_lock, flags);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	mutex_lock(&wl->mutex);

	ret = wlcore_irq_locked(wl);
	if (ret)
		wl12xx_queue_recovery_work(wl);

	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
695
/*
 * Accumulator for wl12xx_vif_count_iter(): counts active interfaces and
 * records whether cur_vif was among the iterated interfaces.
 */
struct vif_counter_data {
	u8 counter;		/* number of active vifs seen so far */

	struct ieee80211_vif *cur_vif;	/* the vif being looked for */
	bool cur_vif_running;	/* set if cur_vif was seen while iterating */
};
702
703 static void wl12xx_vif_count_iter(void *data, u8 *mac,
704 struct ieee80211_vif *vif)
705 {
706 struct vif_counter_data *counter = data;
707
708 counter->counter++;
709 if (counter->cur_vif == vif)
710 counter->cur_vif_running = true;
711 }
712
713 /* caller must not hold wl->mutex, as it might deadlock */
714 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
715 struct ieee80211_vif *cur_vif,
716 struct vif_counter_data *data)
717 {
718 memset(data, 0, sizeof(*data));
719 data->cur_vif = cur_vif;
720
721 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
722 wl12xx_vif_count_iter, data);
723 }
724
/*
 * Select and load the FW image into wl->fw: PLT firmware when @plt is
 * set, otherwise the multi-role image if more than one vif is (cached
 * as) active and an MR image exists, else the single-role image.
 * A no-op if the wanted image type is already loaded.
 *
 * Returns 0 on success, -EILSEQ for a malformed image, -ENOMEM or the
 * request_firmware() error otherwise.  On failure wl->fw_type is left
 * as WL12XX_FW_TYPE_NONE (after the old image was freed).
 */
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
	const struct firmware *fw;
	const char *fw_name;
	enum wl12xx_fw_type fw_type;
	int ret;

	if (plt) {
		fw_type = WL12XX_FW_TYPE_PLT;
		fw_name = wl->plt_fw_name;
	} else {
		/*
		 * we can't call wl12xx_get_vif_count() here because
		 * wl->mutex is taken, so use the cached last_vif_count value
		 */
		if (wl->last_vif_count > 1 && wl->mr_fw_name) {
			fw_type = WL12XX_FW_TYPE_MULTI;
			fw_name = wl->mr_fw_name;
		} else {
			fw_type = WL12XX_FW_TYPE_NORMAL;
			fw_name = wl->sr_fw_name;
		}
	}

	/* the right image is already loaded */
	if (wl->fw_type == fw_type)
		return 0;

	wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);

	ret = request_firmware(&fw, fw_name, wl->dev);

	if (ret < 0) {
		wl1271_error("could not get firmware %s: %d", fw_name, ret);
		return ret;
	}

	/* the image is downloaded in 32-bit words; reject odd sizes */
	if (fw->size % 4) {
		wl1271_error("firmware size is not multiple of 32 bits: %zu",
			     fw->size);
		ret = -EILSEQ;
		goto out;
	}

	vfree(wl->fw);
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	wl->fw_len = fw->size;
	wl->fw = vmalloc(wl->fw_len);

	if (!wl->fw) {
		wl1271_error("could not allocate memory for the firmware");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(wl->fw, fw->data, wl->fw_len);
	ret = 0;
	wl->fw_type = fw_type;
out:
	release_firmware(fw);

	return ret;
}
787
788 void wl12xx_queue_recovery_work(struct wl1271 *wl)
789 {
790 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
791
792 /* Avoid a recursive recovery */
793 if (wl->state == WLCORE_STATE_ON) {
794 wl->state = WLCORE_STATE_RESTARTING;
795 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
796 wlcore_disable_interrupts_nosync(wl);
797 ieee80211_queue_work(wl->hw, &wl->recovery_work);
798 }
799 }
800
801 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
802 {
803 size_t len = 0;
804
805 /* The FW log is a length-value list, find where the log end */
806 while (len < maxlen) {
807 if (memblock[len] == 0)
808 break;
809 if (len + memblock[len] + 1 > maxlen)
810 break;
811 len += memblock[len] + 1;
812 }
813
814 /* Make sure we have enough room */
815 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
816
817 /* Fill the FW log file, consumed by the sysfs fwlog entry */
818 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
819 wl->fwlog_size += len;
820
821 return len;
822 }
823
/* Sentinel "next block" address terminating a continuous-mode log chain. */
#define WLCORE_FW_LOG_END 0x2000000

/*
 * After a FW panic, walk the FW log's linked list of memory blocks and
 * copy its contents to the host buffer, then wake any reader blocked on
 * the fwlog wait queue.  A no-op when fwlog is unsupported or disabled.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	u32 addr;
	u32 offset;
	u32 end_of_log;
	u8 *block;
	int ret;

	if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * Do not send a stop fwlog command if the fw is hanged or if
	 * dbgpins are used (due to some fw bug).
	 */
	if (wl1271_ps_elp_wakeup(wl))
		goto out;
	if (!wl->watchdog_recovery &&
	    wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
	if (ret < 0)
		goto out;

	addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
	if (!addr)
		goto out;

	if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
		offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
		end_of_log = WLCORE_FW_LOG_END;
	} else {
		offset = sizeof(addr);
		end_of_log = addr;
	}

	/* Traverse the memory blocks linked list */
	do {
		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
		ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
					 false);
		if (ret < 0)
			goto out;

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one in
		 * on demand mode and is equal to 0x2000000 in continuous mode.
		 */
		addr = le32_to_cpup((__le32 *)block);
		if (!wl12xx_copy_fwlog(wl, block + offset,
				       WL12XX_HW_BLOCK_SIZE - offset))
			break;
	} while (addr && (addr != end_of_log));

	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
}
897
898 static void wlcore_print_recovery(struct wl1271 *wl)
899 {
900 u32 pc = 0;
901 u32 hint_sts = 0;
902 int ret;
903
904 wl1271_info("Hardware recovery in progress. FW ver: %s",
905 wl->chip.fw_ver_str);
906
907 /* change partitions momentarily so we can read the FW pc */
908 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
909 if (ret < 0)
910 return;
911
912 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
913 if (ret < 0)
914 return;
915
916 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
917 if (ret < 0)
918 return;
919
920 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
921 pc, hint_sts, ++wl->recovery_count);
922
923 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
924 }
925
926
/*
 * FW recovery work: dump the panic log and diagnostics (unless the
 * recovery was intentional), tear down all interfaces and the low-level
 * state, and ask mac80211 to restart the hardware.  Honors the
 * no_recovery / bug_on_recovery module parameters.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);
	struct wl12xx_vif *wlvif;
	struct ieee80211_vif *vif;

	mutex_lock(&wl->mutex);

	/* nothing to recover when already off or in PLT mode */
	if (wl->state == WLCORE_STATE_OFF || wl->plt)
		goto out_unlock;

	if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
		wl12xx_read_fwlog_panic(wl);
		wlcore_print_recovery(wl);
	}

	BUG_ON(wl->conf.recovery.bug_on_recovery &&
	       !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));

	if (wl->conf.recovery.no_recovery) {
		wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
		goto out_unlock;
	}

	/* Prevent spurious TX during FW restart */
	wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

	/* reboot the chipset */
	while (!list_empty(&wl->wlvif_list)) {
		wlvif = list_first_entry(&wl->wlvif_list,
				       struct wl12xx_vif, list);
		vif = wl12xx_wlvif_to_vif(wlvif);
		__wl1271_op_remove_interface(wl, vif, false);
	}

	wlcore_op_stop_locked(wl);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);

out_unlock:
	wl->watchdog_recovery = false;
	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
	mutex_unlock(&wl->mutex);
}
978
/* Ask the ELP (extremely low power) control register to wake the FW. */
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
	return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
983
984 static int wl1271_setup(struct wl1271 *wl)
985 {
986 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
987 sizeof(*wl->fw_status_2) +
988 wl->fw_status_priv_len, GFP_KERNEL);
989 if (!wl->fw_status_1)
990 return -ENOMEM;
991
992 wl->fw_status_2 = (struct wl_fw_status_2 *)
993 (((u8 *) wl->fw_status_1) +
994 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
995
996 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
997 if (!wl->tx_res_if) {
998 kfree(wl->fw_status_1);
999 return -ENOMEM;
1000 }
1001
1002 return 0;
1003 }
1004
/*
 * Power the chip on, reset and initialize the bus, select the boot
 * partition and wake the FW via ELP.  On a failure after power-on the
 * chip is powered off again.  Returns 0 or a negative error.
 */
static int wl12xx_set_power_on(struct wl1271 *wl)
{
	int ret;

	/* settle time before toggling power, per chip requirements */
	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
	if (ret < 0)
		goto fail;

	/* ELP module wake up */
	ret = wlcore_fw_wakeup(wl);
	if (ret < 0)
		goto fail;

out:
	return ret;

fail:
	wl1271_power_off(wl);
	return ret;
}
1033
/*
 * Bring the chip to a state where the FW can be booted: power on,
 * configure the bus block size, allocate status buffers and fetch the
 * appropriate firmware image (@plt selects the PLT image).
 * Returns 0 or a negative error.
 */
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
	int ret = 0;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		goto out;

	/*
	 * For wl127x based devices we could use the default block
	 * size (512 bytes), but due to a bug in the sdio driver, we
	 * need to set it explicitly after the chip is powered on. To
	 * simplify the code and since the performance impact is
	 * negligible, we use the same block size for all different
	 * chip types.
	 *
	 * Check if the bus supports blocksize alignment and, if it
	 * doesn't, make sure we don't have the quirk.
	 */
	if (!wl1271_set_block_size(wl))
		wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;

	/* TODO: make sure the lower driver has set things up correctly */

	ret = wl1271_setup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_fetch_firmware(wl, plt);
	if (ret < 0)
		goto out;

out:
	return ret;
}
1069
1070 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1071 {
1072 int retries = WL1271_BOOT_RETRIES;
1073 struct wiphy *wiphy = wl->hw->wiphy;
1074
1075 static const char* const PLT_MODE[] = {
1076 "PLT_OFF",
1077 "PLT_ON",
1078 "PLT_FEM_DETECT"
1079 };
1080
1081 int ret;
1082
1083 mutex_lock(&wl->mutex);
1084
1085 wl1271_notice("power up");
1086
1087 if (wl->state != WLCORE_STATE_OFF) {
1088 wl1271_error("cannot go into PLT state because not "
1089 "in off state: %d", wl->state);
1090 ret = -EBUSY;
1091 goto out;
1092 }
1093
1094 /* Indicate to lower levels that we are now in PLT mode */
1095 wl->plt = true;
1096 wl->plt_mode = plt_mode;
1097
1098 while (retries) {
1099 retries--;
1100 ret = wl12xx_chip_wakeup(wl, true);
1101 if (ret < 0)
1102 goto power_off;
1103
1104 ret = wl->ops->plt_init(wl);
1105 if (ret < 0)
1106 goto power_off;
1107
1108 wl->state = WLCORE_STATE_ON;
1109 wl1271_notice("firmware booted in PLT mode %s (%s)",
1110 PLT_MODE[plt_mode],
1111 wl->chip.fw_ver_str);
1112
1113 /* update hw/fw version info in wiphy struct */
1114 wiphy->hw_version = wl->chip.id;
1115 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1116 sizeof(wiphy->fw_version));
1117
1118 goto out;
1119
1120 power_off:
1121 wl1271_power_off(wl);
1122 }
1123
1124 wl->plt = false;
1125 wl->plt_mode = PLT_OFF;
1126
1127 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1128 WL1271_BOOT_RETRIES);
1129 out:
1130 mutex_unlock(&wl->mutex);
1131
1132 return ret;
1133 }
1134
/*
 * Leave PLT mode and power the chip down.  Interrupts are disabled
 * first, all pending work is flushed, then the device state is reset
 * under wl->mutex.  Returns 0, or -EBUSY when not in PLT mode.
 */
int wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	/*
	 * Interrupts must be disabled before setting the state to OFF.
	 * Otherwise, the interrupt handler might be called and exit without
	 * reading the interrupt status.
	 */
	wlcore_disable_interrupts(wl);
	mutex_lock(&wl->mutex);
	if (!wl->plt) {
		mutex_unlock(&wl->mutex);

		/*
		 * This will not necessarily enable interrupts as interrupts
		 * may have been disabled when op_stop was called. It will,
		 * however, balance the above call to disable_interrupts().
		 */
		wlcore_enable_interrupts(wl);

		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	mutex_unlock(&wl->mutex);

	/* flush/cancel outside the mutex; some works take it themselves */
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	mutex_lock(&wl->mutex);
	wl1271_power_off(wl);
	wl->flags = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->state = WLCORE_STATE_OFF;
	wl->plt = false;
	wl->plt_mode = PLT_OFF;
	wl->rx_counter = 0;
	mutex_unlock(&wl->mutex);

out:
	return ret;
}
1185
/*
 * mac80211 TX entry point: queue the skb on the proper per-link/per-AC
 * queue under wl_lock and kick the TX work if the FW can accept frames.
 * Frames with no vif, an invalid/unmapped link, or destined to a
 * hard-stopped queue are dropped.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw,
			 struct ieee80211_tx_control *control,
			 struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_vif *vif = info->control.vif;
	struct wl12xx_vif *wlvif = NULL;
	unsigned long flags;
	int q, mapping;
	u8 hlid;

	/* mac80211 may hand us frames without a vif; nothing to map them to */
	if (!vif) {
		wl1271_debug(DEBUG_TX, "DROP skb with no vif");
		ieee80211_free_txskb(hw, skb);
		return;
	}

	wlvif = wl12xx_vif_to_data(vif);
	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/*
	 * drop the packet if the link is invalid or the queue is stopped
	 * for any reason but watermark. Watermark is a "soft"-stop so we
	 * allow these packets through.
	 */
	if (hlid == WL12XX_INVALID_LINK_ID ||
	    (!test_bit(hlid, wlvif->links_map)) ||
	    (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
	     !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
			WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
		wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
		ieee80211_free_txskb(hw, skb);
		goto out;
	}

	wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
		     hlid, q, skb->len);
	skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);

	/* both global and per-vif counters track the queue depth */
	wl->tx_queue_count[q]++;
	wlvif->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
	    !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
					WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		wlcore_stop_queue_locked(wl, wlvif, q,
					 WLCORE_QUEUE_STOP_REASON_WATERMARK);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
1258
/*
 * Queue the pre-allocated dummy packet for transmission, and try to send
 * it immediately unless the FW TX path is busy. Returns 0, or a negative
 * error from the immediate TX attempt.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		return wlcore_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
1285
1286 /*
1287 * The size of the dummy packet should be at least 1400 bytes. However, in
1288 * order to minimize the number of bus transactions, aligning it to 512 bytes
1289 * boundaries could be beneficial, performance wise
1290 */
1291 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1292
1293 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1294 {
1295 struct sk_buff *skb;
1296 struct ieee80211_hdr_3addr *hdr;
1297 unsigned int dummy_packet_size;
1298
1299 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1300 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1301
1302 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1303 if (!skb) {
1304 wl1271_warning("Failed to allocate a dummy packet skb");
1305 return NULL;
1306 }
1307
1308 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1309
1310 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1311 memset(hdr, 0, sizeof(*hdr));
1312 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1313 IEEE80211_STYPE_NULLFUNC |
1314 IEEE80211_FCTL_TODS);
1315
1316 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1317
1318 /* Dummy packets require the TID to be management */
1319 skb->priority = WL1271_TID_MGMT;
1320
1321 /* Initialize all fields that might be used */
1322 skb_set_queue_mapping(skb, 0);
1323 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1324
1325 return skb;
1326 }
1327
1328
1329 #ifdef CONFIG_PM
/*
 * Validate that a WoWLAN wake-up pattern can be expressed with the FW's
 * RX filter fields: at most WL1271_RX_FILTER_MAX_FIELDS contiguous
 * segments, whose flattened size fits WL1271_RX_FILTER_MAX_FIELDS_SIZE.
 * Returns 0, -EINVAL (no mask / too many segments) or -E2BIG.
 */
static int
wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
{
	int num_fields = 0, in_field = 0, fields_size = 0;
	int i, pattern_len = 0;

	if (!p->mask) {
		wl1271_warning("No mask in WoWLAN pattern");
		return -EINVAL;
	}

	/*
	 * The pattern is broken up into segments of bytes at different offsets
	 * that need to be checked by the FW filter. Each segment is called
	 * a field in the FW API. We verify that the total number of fields
	 * required for this pattern won't exceed FW limits (8)
	 * as well as the total fields buffer won't exceed the FW limit.
	 * Note that if there's a pattern which crosses Ethernet/IP header
	 * boundary a new field is required.
	 */
	for (i = 0; i < p->pattern_len; i++) {
		if (test_bit(i, (unsigned long *)p->mask)) {
			if (!in_field) {
				/* start of a new segment */
				in_field = 1;
				pattern_len = 1;
			} else {
				if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
					/* crossing the Ethernet/IP boundary
					 * closes the current segment */
					num_fields++;
					fields_size += pattern_len +
						RX_FILTER_FIELD_OVERHEAD;
					pattern_len = 1;
				} else
					pattern_len++;
			}
		} else {
			if (in_field) {
				/* unmasked byte ends the segment */
				in_field = 0;
				fields_size += pattern_len +
					RX_FILTER_FIELD_OVERHEAD;
				num_fields++;
			}
		}
	}

	/* account for a segment running to the end of the pattern */
	if (in_field) {
		fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
		num_fields++;
	}

	if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
		wl1271_warning("RX Filter too complex. Too many segments");
		return -EINVAL;
	}

	if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
		wl1271_warning("RX filter pattern is too big");
		return -E2BIG;
	}

	return 0;
}
1391
1392 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1393 {
1394 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1395 }
1396
1397 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1398 {
1399 int i;
1400
1401 if (filter == NULL)
1402 return;
1403
1404 for (i = 0; i < filter->num_fields; i++)
1405 kfree(filter->fields[i].pattern);
1406
1407 kfree(filter);
1408 }
1409
1410 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1411 u16 offset, u8 flags,
1412 u8 *pattern, u8 len)
1413 {
1414 struct wl12xx_rx_filter_field *field;
1415
1416 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1417 wl1271_warning("Max fields per RX filter. can't alloc another");
1418 return -EINVAL;
1419 }
1420
1421 field = &filter->fields[filter->num_fields];
1422
1423 field->pattern = kzalloc(len, GFP_KERNEL);
1424 if (!field->pattern) {
1425 wl1271_warning("Failed to allocate RX filter pattern");
1426 return -ENOMEM;
1427 }
1428
1429 filter->num_fields++;
1430
1431 field->offset = cpu_to_le16(offset);
1432 field->flags = flags;
1433 field->len = len;
1434 memcpy(field->pattern, pattern, len);
1435
1436 return 0;
1437 }
1438
1439 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1440 {
1441 int i, fields_size = 0;
1442
1443 for (i = 0; i < filter->num_fields; i++)
1444 fields_size += filter->fields[i].len +
1445 sizeof(struct wl12xx_rx_filter_field) -
1446 sizeof(u8 *);
1447
1448 return fields_size;
1449 }
1450
1451 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1452 u8 *buf)
1453 {
1454 int i;
1455 struct wl12xx_rx_filter_field *field;
1456
1457 for (i = 0; i < filter->num_fields; i++) {
1458 field = (struct wl12xx_rx_filter_field *)buf;
1459
1460 field->offset = filter->fields[i].offset;
1461 field->flags = filter->fields[i].flags;
1462 field->len = filter->fields[i].len;
1463
1464 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1465 buf += sizeof(struct wl12xx_rx_filter_field) -
1466 sizeof(u8 *) + field->len;
1467 }
1468 }
1469
/*
 * Allocates an RX filter returned through f
 * which needs to be freed using rx_filter_free()
 *
 * Walks the pattern's bitmask and emits one filter field per contiguous
 * run of significant bytes, splitting any run that crosses the
 * Ethernet/IP header boundary (the two headers use different offset
 * bases in the FW filter flags).
 */
static int wl1271_convert_wowlan_pattern_to_rx_filter(
	struct cfg80211_wowlan_trig_pkt_pattern *p,
	struct wl12xx_rx_filter **f)
{
	int i, j, ret = 0;
	struct wl12xx_rx_filter *filter;
	u16 offset;
	u8 flags, len;

	filter = wl1271_rx_filter_alloc();
	if (!filter) {
		wl1271_warning("Failed to alloc rx filter");
		ret = -ENOMEM;
		goto err;
	}

	i = 0;
	while (i < p->pattern_len) {
		/* skip bytes the mask marks as don't-care */
		if (!test_bit(i, (unsigned long *)p->mask)) {
			i++;
			continue;
		}

		/* find the end of this run of significant bytes */
		for (j = i; j < p->pattern_len; j++) {
			if (!test_bit(j, (unsigned long *)p->mask))
				break;

			/* split a run crossing the eth/IP boundary */
			if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
			    j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
				break;
		}

		/* offset is relative to the header the run lives in */
		if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
			offset = i;
			flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
		} else {
			offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
			flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
		}

		len = j - i;

		ret = wl1271_rx_filter_alloc_field(filter,
						   offset,
						   flags,
						   &p->pattern[i], len);
		if (ret)
			goto err;

		i = j;
	}

	filter->action = FILTER_SIGNAL;

	*f = filter;
	return 0;

err:
	/* safe even when allocation failed: free handles NULL */
	wl1271_rx_filter_free(filter);
	*f = NULL;

	return ret;
}
1537
/*
 * Program the FW RX filters from a WoWLAN configuration. With no usable
 * configuration (NULL, "any", or zero patterns), all filters are cleared
 * and the default action reverts to FILTER_SIGNAL. Otherwise every
 * pattern is validated, translated into an RX filter and enabled, and
 * the default action becomes FILTER_DROP.
 */
static int wl1271_configure_wowlan(struct wl1271 *wl,
				   struct cfg80211_wowlan *wow)
{
	int i, ret;

	/* no usable wowlan config: restore the default (signal) behavior */
	if (!wow || wow->any || !wow->n_patterns) {
		ret = wl1271_acx_default_rx_filter_enable(wl, 0,
							  FILTER_SIGNAL);
		if (ret)
			goto out;

		ret = wl1271_rx_filter_clear_all(wl);
		if (ret)
			goto out;

		return 0;
	}

	if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
		return -EINVAL;

	/* Validate all incoming patterns before clearing current FW state */
	for (i = 0; i < wow->n_patterns; i++) {
		ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
		if (ret) {
			wl1271_warning("Bad wowlan pattern %d", i);
			return ret;
		}
	}

	ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
	if (ret)
		goto out;

	ret = wl1271_rx_filter_clear_all(wl);
	if (ret)
		goto out;

	/* Translate WoWLAN patterns into filters */
	for (i = 0; i < wow->n_patterns; i++) {
		struct cfg80211_wowlan_trig_pkt_pattern *p;
		struct wl12xx_rx_filter *filter = NULL;

		p = &wow->patterns[i];

		ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
		if (ret) {
			wl1271_warning("Failed to create an RX filter from "
				       "wowlan pattern %d", i);
			goto out;
		}

		ret = wl1271_rx_filter_enable(wl, i, 1, filter);

		/* the local copy is not needed after being enabled */
		wl1271_rx_filter_free(filter);
		if (ret)
			goto out;
	}

	/* drop any frame that matches none of the enabled filters */
	ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);

out:
	return ret;
}
1602
/*
 * Prepare a STA-role vif for suspend: program the WoWLAN RX filters and,
 * if the suspend wake-up/listen-interval configuration differs from the
 * runtime one, switch the FW wake-up conditions. No-op when the vif is
 * not associated.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
					struct wl12xx_vif *wlvif,
					struct cfg80211_wowlan *wow)
{
	int ret = 0;

	if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_configure_wowlan(wl, wow);
	if (ret < 0)
		goto out_sleep;

	/* nothing to reprogram if suspend config equals the runtime config */
	if ((wl->conf.conn.suspend_wake_up_event ==
	     wl->conf.conn.wake_up_event) &&
	    (wl->conf.conn.suspend_listen_interval ==
	     wl->conf.conn.listen_interval))
		goto out_sleep;

	ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.suspend_wake_up_event,
				    wl->conf.conn.suspend_listen_interval);

	if (ret < 0)
		wl1271_error("suspend: set wake up conditions failed: %d", ret);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	return ret;
}
1639
1640 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1641 struct wl12xx_vif *wlvif)
1642 {
1643 int ret = 0;
1644
1645 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1646 goto out;
1647
1648 ret = wl1271_ps_elp_wakeup(wl);
1649 if (ret < 0)
1650 goto out;
1651
1652 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1653
1654 wl1271_ps_elp_sleep(wl);
1655 out:
1656 return ret;
1657
1658 }
1659
1660 static int wl1271_configure_suspend(struct wl1271 *wl,
1661 struct wl12xx_vif *wlvif,
1662 struct cfg80211_wowlan *wow)
1663 {
1664 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1665 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1666 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1667 return wl1271_configure_suspend_ap(wl, wlvif);
1668 return 0;
1669 }
1670
/*
 * Undo the suspend-time configuration after resume: for STA vifs clear
 * the WoWLAN filters and restore the runtime wake-up conditions; for AP
 * vifs disable beacon filtering again. Errors are logged, not returned.
 */
static void wl1271_configure_resume(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif)
{
	int ret = 0;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* other role types had nothing configured at suspend time */
	if ((!is_ap) && (!is_sta))
		return;

	if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		return;

	if (is_sta) {
		/* drop the WoWLAN RX filters installed at suspend */
		wl1271_configure_wowlan(wl, NULL);

		/* wake-up conditions were only reprogrammed if they differ */
		if ((wl->conf.conn.suspend_wake_up_event ==
		     wl->conf.conn.wake_up_event) &&
		    (wl->conf.conn.suspend_listen_interval ==
		     wl->conf.conn.listen_interval))
			goto out_sleep;

		ret = wl1271_acx_wake_up_conditions(wl, wlvif,
				    wl->conf.conn.wake_up_event,
				    wl->conf.conn.listen_interval);

		if (ret < 0)
			wl1271_error("resume: wake up conditions failed: %d",
				     ret);

	} else if (is_ap) {
		ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);
}
1712
/*
 * mac80211 suspend callback: configure every vif for WoWLAN/suspend,
 * then quiesce the driver (flush TX, flush deferred work, and make sure
 * the threaded IRQ will not queue new work until resume).
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			     struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow);

	/* we want to perform the recovery before suspending */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		wl1271_warning("postponing suspend to perform recovery");
		return -EBUSY;
	}

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);
	wl->wow_enabled = true;
	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_configure_suspend(wl, wlvif, wow);
		if (ret < 0) {
			mutex_unlock(&wl->mutex);
			wl1271_warning("couldn't prepare device to suspend");
			return ret;
		}
	}
	mutex_unlock(&wl->mutex);
	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wlcore_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wlcore_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->elp_work);

	return 0;
}
1763
/*
 * mac80211 resume callback: re-allow IRQ work enqueuing, run any work
 * that was postponed while suspended, re-queue a pending recovery, and
 * restore the pre-suspend configuration of each vif.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	unsigned long flags;
	bool run_irq_work = false, pending_recovery;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_lock(&wl->mutex);

	/* test the recovery flag before calling any SDIO functions */
	pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
				    &wl->flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");

		/* don't talk to the HW if recovery is pending */
		if (!pending_recovery) {
			ret = wlcore_irq_locked(wl);
			if (ret)
				wl12xx_queue_recovery_work(wl);
		}

		/* balances the disable that postponed the irq work */
		wlcore_enable_interrupts(wl);
	}

	if (pending_recovery) {
		wl1271_warning("queuing forgotten recovery on resume");
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
		goto out;
	}

	wl12xx_for_each_wlvif(wl, wlvif) {
		wl1271_configure_resume(wl, wlvif);
	}

out:
	wl->wow_enabled = false;
	mutex_unlock(&wl->mutex);

	return 0;
}
1822 #endif
1823
1824 static int wl1271_op_start(struct ieee80211_hw *hw)
1825 {
1826 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1827
1828 /*
1829 * We have to delay the booting of the hardware because
1830 * we need to know the local MAC address before downloading and
1831 * initializing the firmware. The MAC address cannot be changed
1832 * after boot, and without the proper MAC address, the firmware
1833 * will not function properly.
1834 *
1835 * The MAC address is first known when the corresponding interface
1836 * is added. That is where we will initialize the hardware.
1837 */
1838
1839 return 0;
1840 }
1841
/*
 * Tear down a running device. Called with wl->mutex held; the mutex is
 * temporarily released to let pending work drain. Leaves the device
 * powered off with all software state reset to its defaults.
 */
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
	int i;

	if (wl->state == WLCORE_STATE_OFF) {
		/* balance the interrupt-disable performed when the
		 * recovery was scheduled; nothing else to tear down */
		if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
					&wl->flags))
			wlcore_enable_interrupts(wl);

		return;
	}

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WLCORE_STATE_OFF;

	/*
	 * Use the nosync variant to disable interrupts, so the mutex could be
	 * held while doing so without deadlocking.
	 */
	wlcore_disable_interrupts_nosync(wl);

	mutex_unlock(&wl->mutex);

	/* drain all outstanding work while the mutex is released */
	wlcore_synchronize_interrupts(wl);
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		cancel_work_sync(&wl->recovery_work);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	cancel_delayed_work_sync(&wl->elp_work);
	cancel_delayed_work_sync(&wl->tx_watchdog_work);

	/* let's notify MAC80211 about the remaining pending TX frames */
	mutex_lock(&wl->mutex);
	wl12xx_tx_reset(wl);

	wl1271_power_off(wl);
	/*
	 * In case a recovery was scheduled, interrupts were disabled to avoid
	 * an interrupt storm. Now that the power is down, it is safe to
	 * re-enable interrupts to balance the disable depth
	 */
	if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		wlcore_enable_interrupts(wl);

	/* reset all software state back to power-on defaults */
	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	memset(wl->session_ids, 0, sizeof(wl->session_ids));
	wl->active_sta_count = 0;
	wl->active_link_count = 0;

	/* The system link is always allocated */
	wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
	wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* NOTE(review): only fw_status_1 is freed while fw_status_2 is just
	 * NULLed — presumably both point into one allocation; confirm */
	kfree(wl->fw_status_1);
	wl->fw_status_1 = NULL;
	wl->fw_status_2 = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;

	/*
	 * FW channels must be re-calibrated after recovery,
	 * clear the last Reg-Domain channel configuration.
	 */
	memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
1946
1947 static void wlcore_op_stop(struct ieee80211_hw *hw)
1948 {
1949 struct wl1271 *wl = hw->priv;
1950
1951 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1952
1953 mutex_lock(&wl->mutex);
1954
1955 wlcore_op_stop_locked(wl);
1956
1957 mutex_unlock(&wl->mutex);
1958 }
1959
/*
 * Delayed work fired when a channel switch did not complete in time:
 * report the failure to mac80211 and ask the FW to stop the switch.
 */
static void wlcore_channel_switch_work(struct work_struct *work)
{
	struct delayed_work *dwork;
	struct wl1271 *wl;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	int ret;

	dwork = container_of(work, struct delayed_work, work);
	wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
	wl = wlvif->wl;

	wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* check the channel switch is still ongoing */
	if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
		goto out;

	vif = wl12xx_wlvif_to_vif(wlvif);
	/* second argument false: the switch did not succeed */
	ieee80211_chswitch_done(vif, false);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_cmd_stop_channel_switch(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
1996
1997 static void wlcore_connection_loss_work(struct work_struct *work)
1998 {
1999 struct delayed_work *dwork;
2000 struct wl1271 *wl;
2001 struct ieee80211_vif *vif;
2002 struct wl12xx_vif *wlvif;
2003
2004 dwork = container_of(work, struct delayed_work, work);
2005 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
2006 wl = wlvif->wl;
2007
2008 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
2009
2010 mutex_lock(&wl->mutex);
2011
2012 if (unlikely(wl->state != WLCORE_STATE_ON))
2013 goto out;
2014
2015 /* Call mac80211 connection loss */
2016 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2017 goto out;
2018
2019 vif = wl12xx_wlvif_to_vif(wlvif);
2020 ieee80211_connection_loss(vif);
2021 out:
2022 mutex_unlock(&wl->mutex);
2023 }
2024
2025 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2026 {
2027 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2028 WL12XX_MAX_RATE_POLICIES);
2029 if (policy >= WL12XX_MAX_RATE_POLICIES)
2030 return -EBUSY;
2031
2032 __set_bit(policy, wl->rate_policies_map);
2033 *idx = policy;
2034 return 0;
2035 }
2036
2037 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2038 {
2039 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2040 return;
2041
2042 __clear_bit(*idx, wl->rate_policies_map);
2043 *idx = WL12XX_MAX_RATE_POLICIES;
2044 }
2045
2046 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2047 {
2048 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2049 WLCORE_MAX_KLV_TEMPLATES);
2050 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2051 return -EBUSY;
2052
2053 __set_bit(policy, wl->klv_templates_map);
2054 *idx = policy;
2055 return 0;
2056 }
2057
2058 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2059 {
2060 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2061 return;
2062
2063 __clear_bit(*idx, wl->klv_templates_map);
2064 *idx = WLCORE_MAX_KLV_TEMPLATES;
2065 }
2066
2067 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2068 {
2069 switch (wlvif->bss_type) {
2070 case BSS_TYPE_AP_BSS:
2071 if (wlvif->p2p)
2072 return WL1271_ROLE_P2P_GO;
2073 else
2074 return WL1271_ROLE_AP;
2075
2076 case BSS_TYPE_STA_BSS:
2077 if (wlvif->p2p)
2078 return WL1271_ROLE_P2P_CL;
2079 else
2080 return WL1271_ROLE_STA;
2081
2082 case BSS_TYPE_IBSS:
2083 return WL1271_ROLE_IBSS;
2084
2085 default:
2086 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2087 }
2088 return WL12XX_INVALID_ROLE_TYPE;
2089 }
2090
/*
 * Initialize the per-vif driver data for a newly added interface: derive
 * the bss_type from the mac80211 interface type, allocate rate-policy
 * (and, for STA/IBSS, KLV template) slots, copy the globally-configured
 * values from wl, and set up the vif's work items and timer. Returns
 * -EOPNOTSUPP for unsupported interface types.
 */
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i;

	/* clear everything but the persistent data */
	memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));

	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wlvif->bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wlvif->bss_type = BSS_TYPE_IBSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wlvif->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wlvif->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		wlvif->bss_type = MAX_BSS_TYPE;
		return -EOPNOTSUPP;
	}

	/* no role/link is assigned until the role gets enabled */
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		/* init sta/ibss data */
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
		wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
		wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
		wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
	} else {
		/* init ap data */
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_allocate_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
		/*
		 * TODO: check if basic_rate shouldn't be
		 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		 * instead (the same thing for STA above).
		 */
		wlvif->basic_rate = CONF_TX_ENABLED_RATES;
		/* TODO: this seems to be used only for STA, check it */
		wlvif->rate_set = CONF_TX_ENABLED_RATES;
	}

	wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;

	/*
	 * mac80211 configures some values globally, while we treat them
	 * per-interface. thus, on init, we have to copy them from wl
	 */
	wlvif->band = wl->band;
	wlvif->channel = wl->channel;
	wlvif->power_level = wl->power_level;
	wlvif->channel_type = wl->channel_type;

	INIT_WORK(&wlvif->rx_streaming_enable_work,
		  wl1271_rx_streaming_enable_work);
	INIT_WORK(&wlvif->rx_streaming_disable_work,
		  wl1271_rx_streaming_disable_work);
	INIT_DELAYED_WORK(&wlvif->channel_switch_work,
			  wlcore_channel_switch_work);
	INIT_DELAYED_WORK(&wlvif->connection_loss_work,
			  wlcore_connection_loss_work);
	INIT_LIST_HEAD(&wlvif->list);

	setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
		    (unsigned long) wlvif);
	return 0;
}
2182
2183 static int wl12xx_init_fw(struct wl1271 *wl)
2184 {
2185 int retries = WL1271_BOOT_RETRIES;
2186 bool booted = false;
2187 struct wiphy *wiphy = wl->hw->wiphy;
2188 int ret;
2189
2190 while (retries) {
2191 retries--;
2192 ret = wl12xx_chip_wakeup(wl, false);
2193 if (ret < 0)
2194 goto power_off;
2195
2196 ret = wl->ops->boot(wl);
2197 if (ret < 0)
2198 goto power_off;
2199
2200 ret = wl1271_hw_init(wl);
2201 if (ret < 0)
2202 goto irq_disable;
2203
2204 booted = true;
2205 break;
2206
2207 irq_disable:
2208 mutex_unlock(&wl->mutex);
2209 /* Unlocking the mutex in the middle of handling is
2210 inherently unsafe. In this case we deem it safe to do,
2211 because we need to let any possibly pending IRQ out of
2212 the system (and while we are WLCORE_STATE_OFF the IRQ
2213 work function will not do anything.) Also, any other
2214 possible concurrent operations will fail due to the
2215 current state, hence the wl1271 struct should be safe. */
2216 wlcore_disable_interrupts(wl);
2217 wl1271_flush_deferred_work(wl);
2218 cancel_work_sync(&wl->netstack_work);
2219 mutex_lock(&wl->mutex);
2220 power_off:
2221 wl1271_power_off(wl);
2222 }
2223
2224 if (!booted) {
2225 wl1271_error("firmware boot failed despite %d retries",
2226 WL1271_BOOT_RETRIES);
2227 goto out;
2228 }
2229
2230 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2231
2232 /* update hw/fw version info in wiphy struct */
2233 wiphy->hw_version = wl->chip.id;
2234 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2235 sizeof(wiphy->fw_version));
2236
2237 /*
2238 * Now we know if 11a is supported (info from the NVS), so disable
2239 * 11a channels if not supported
2240 */
2241 if (!wl->enable_11a)
2242 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2243
2244 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2245 wl->enable_11a ? "" : "not ");
2246
2247 wl->state = WLCORE_STATE_ON;
2248 out:
2249 return ret;
2250 }
2251
/*
 * A device role counts as started once a device link id (dev_hlid) has
 * been assigned to the vif.
 */
static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
{
	return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
}
2256
2257 /*
2258 * Check whether a fw switch (i.e. moving from one loaded
2259 * fw to another) is needed. This function is also responsible
2260 * for updating wl->last_vif_count, so it must be called before
2261 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2262 * will be used).
2263 */
2264 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2265 struct vif_counter_data vif_counter_data,
2266 bool add)
2267 {
2268 enum wl12xx_fw_type current_fw = wl->fw_type;
2269 u8 vif_count = vif_counter_data.counter;
2270
2271 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2272 return false;
2273
2274 /* increase the vif count if this is a new vif */
2275 if (add && !vif_counter_data.cur_vif_running)
2276 vif_count++;
2277
2278 wl->last_vif_count = vif_count;
2279
2280 /* no need for fw change if the device is OFF */
2281 if (wl->state == WLCORE_STATE_OFF)
2282 return false;
2283
2284 /* no need for fw change if a single fw is used */
2285 if (!wl->mr_fw_name)
2286 return false;
2287
2288 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2289 return true;
2290 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2291 return true;
2292
2293 return false;
2294 }
2295
2296 /*
2297 * Enter "forced psm". Make sure the sta is in psm against the ap,
2298 * to make the fw switch a bit more disconnection-persistent.
2299 */
2300 static void wl12xx_force_active_psm(struct wl1271 *wl)
2301 {
2302 struct wl12xx_vif *wlvif;
2303
2304 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2305 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2306 }
2307 }
2308
/* context passed to wlcore_hw_queue_iter() while walking active vifs */
struct wlcore_hw_queue_iter_data {
	/* one bit per hw-queue base already claimed by a running vif */
	unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
	/* current vif */
	struct ieee80211_vif *vif;
	/* is the current vif among those iterated */
	bool cur_running;
};
2316
2317 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2318 struct ieee80211_vif *vif)
2319 {
2320 struct wlcore_hw_queue_iter_data *iter_data = data;
2321
2322 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2323 return;
2324
2325 if (iter_data->cur_running || vif == iter_data->vif) {
2326 iter_data->cur_running = true;
2327 return;
2328 }
2329
2330 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2331 }
2332
2333 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2334 struct wl12xx_vif *wlvif)
2335 {
2336 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2337 struct wlcore_hw_queue_iter_data iter_data = {};
2338 int i, q_base;
2339
2340 iter_data.vif = vif;
2341
2342 /* mark all bits taken by active interfaces */
2343 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2344 IEEE80211_IFACE_ITER_RESUME_ALL,
2345 wlcore_hw_queue_iter, &iter_data);
2346
2347 /* the current vif is already running in mac80211 (resume/recovery) */
2348 if (iter_data.cur_running) {
2349 wlvif->hw_queue_base = vif->hw_queue[0];
2350 wl1271_debug(DEBUG_MAC80211,
2351 "using pre-allocated hw queue base %d",
2352 wlvif->hw_queue_base);
2353
2354 /* interface type might have changed type */
2355 goto adjust_cab_queue;
2356 }
2357
2358 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2359 WLCORE_NUM_MAC_ADDRESSES);
2360 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2361 return -EBUSY;
2362
2363 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2364 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2365 wlvif->hw_queue_base);
2366
2367 for (i = 0; i < NUM_TX_QUEUES; i++) {
2368 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2369 /* register hw queues in mac80211 */
2370 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2371 }
2372
2373 adjust_cab_queue:
2374 /* the last places are reserved for cab queues per interface */
2375 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2376 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2377 wlvif->hw_queue_base / NUM_TX_QUEUES;
2378 else
2379 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2380
2381 return 0;
2382 }
2383
/*
 * mac80211 add_interface callback: allocate driver state for a new vif,
 * boot the firmware if this is the first interface, enable a fw role
 * for it and apply per-vif configuration. May hand control to the
 * recovery worker (returning 0) when a single-role <-> multi-role fw
 * switch is required. Called with no locks held; takes wl->mutex.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct vif_counter_data vif_count;
	int ret = 0;
	u8 role_type;

	vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
			     IEEE80211_VIF_SUPPORTS_CQM_RSSI;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	wl12xx_get_vif_count(hw, vif, &vif_count);

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
	    test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl12xx_init_vif_data(wl, vif);
	if (ret < 0)
		goto out;

	wlvif->wl = wl;
	role_type = wl12xx_get_role_type(wl, wlvif);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out;
	}

	ret = wlcore_allocate_hw_queue_base(wl, wlvif);
	if (ret < 0)
		goto out;

	/*
	 * A fw switch is needed (single-role <-> multi-role image):
	 * run the recovery work synchronously; the vif will be re-added
	 * from the recovery path, so report success here.
	 */
	if (wl12xx_need_fw_change(wl, vif_count, true)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		mutex_unlock(&wl->mutex);
		wl1271_recovery_work(&wl->recovery_work);
		return 0;
	}

	/*
	 * TODO: after the nvs issue will be solved, move this block
	 * to start(), and make sure here the driver is ON.
	 */
	if (wl->state == WLCORE_STATE_OFF) {
		/*
		 * we still need this in order to configure the fw
		 * while uploading the nvs
		 */
		memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);

		ret = wl12xx_init_fw(wl);
		if (ret < 0)
			goto out;
	}

	ret = wl12xx_cmd_role_enable(wl, vif->addr,
				     role_type, &wlvif->role_id);
	if (ret < 0)
		goto out;

	ret = wl1271_init_vif_specific(wl, vif);
	if (ret < 0)
		goto out;

	list_add(&wlvif->list, &wl->wlvif_list);
	set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		wl->ap_count++;
	else
		wl->sta_count++;
out:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
2480
/*
 * Tear down a vif: abort any scan/sched-scan/ROC it owns, disable its
 * fw roles (unless a recovery is in progress), reset its tx queues and
 * free all per-vif resources. Called with wl->mutex held; temporarily
 * drops the mutex to cancel the vif's work items.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 struct ieee80211_vif *vif,
					 bool reset_tx_queues)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int i, ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		return;

	/* because of hardware recovery, we may get here twice */
	if (wl->state == WLCORE_STATE_OFF)
		return;

	wl1271_info("down");

	if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
	    wl->scan_wlvif == wlvif) {
		/*
		 * Rearm the tx watchdog just before idling scan. This
		 * prevents just-finished scans from triggering the watchdog
		 */
		wl12xx_rearm_tx_watchdog_locked(wl);

		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan_wlvif = NULL;
		wl->scan.req = NULL;
		/* report the scan as aborted to cfg80211 */
		ieee80211_scan_completed(wl->hw, true);
	}

	if (wl->sched_vif == wlvif) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_vif = NULL;
	}

	if (wl->roc_vif == vif) {
		wl->roc_vif = NULL;
		ieee80211_remain_on_channel_expired(wl->hw);
	}

	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
		    wlvif->bss_type == BSS_TYPE_IBSS) {
			if (wl12xx_dev_role_started(wlvif))
				wl12xx_stop_dev(wl, wlvif);
		}

		ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
		if (ret < 0)
			goto deinit;

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	wl12xx_tx_reset_wlvif(wl, wlvif);

	/* clear all hlids (except system_hlid) */
	wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
	    wlvif->bss_type == BSS_TYPE_IBSS) {
		wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
		wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
	} else {
		wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
		wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
		wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
		wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
		for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
			wl12xx_free_rate_policy(wl,
						&wlvif->ap.ucast_rate_idx[i]);
		wl1271_free_ap_keys(wl, wlvif);
	}

	dev_kfree_skb(wlvif->probereq);
	wlvif->probereq = NULL;
	if (wl->last_wlvif == wlvif)
		wl->last_wlvif = NULL;
	list_del(&wlvif->list);
	memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
	wlvif->role_id = WL12XX_INVALID_ROLE_ID;
	wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;

	if (is_ap)
		wl->ap_count--;
	else
		wl->sta_count--;

	/*
	 * Last AP, have more stations. Configure sleep auth according to STA.
	 * Don't do this on unintended recovery.
	 */
	if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
	    !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
		goto unlock;

	if (wl->ap_count == 0 && is_ap && wl->sta_count) {
		u8 sta_auth = wl->conf.conn.sta_sleep_auth;
		/* Configure for power according to debugfs */
		if (sta_auth != WL1271_PSM_ILLEGAL)
			wl1271_acx_sleep_auth(wl, sta_auth);
		/* Configure for ELP power saving */
		else
			wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
	}

unlock:
	/* drop the mutex while cancelling this vif's pending work items */
	mutex_unlock(&wl->mutex);

	del_timer_sync(&wlvif->rx_streaming_timer);
	cancel_work_sync(&wlvif->rx_streaming_enable_work);
	cancel_work_sync(&wlvif->rx_streaming_disable_work);
	cancel_delayed_work_sync(&wlvif->connection_loss_work);

	mutex_lock(&wl->mutex);
}
2609
/*
 * mac80211 remove_interface callback: tear down the vif if it is still
 * tracked, then trigger an intended recovery if the remaining vif count
 * calls for a single-role <-> multi-role firmware switch.
 */
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl12xx_vif *iter;
	struct vif_counter_data vif_count;

	wl12xx_get_vif_count(hw, vif, &vif_count);
	mutex_lock(&wl->mutex);

	if (wl->state == WLCORE_STATE_OFF ||
	    !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	/*
	 * wl->vif can be null here if someone shuts down the interface
	 * just when hardware recovery has been started.
	 */
	wl12xx_for_each_wlvif(wl, iter) {
		if (iter != wlvif)
			continue;

		__wl1271_op_remove_interface(wl, vif, true);
		break;
	}
	/* the vif passed the INITIALIZED check, so it must be on the list */
	WARN_ON(iter != wlvif);
	if (wl12xx_need_fw_change(wl, vif_count, false)) {
		wl12xx_force_active_psm(wl);
		set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
		wl12xx_queue_recovery_work(wl);
	}
out:
	mutex_unlock(&wl->mutex);
}
2645
2646 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2647 struct ieee80211_vif *vif,
2648 enum nl80211_iftype new_type, bool p2p)
2649 {
2650 struct wl1271 *wl = hw->priv;
2651 int ret;
2652
2653 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2654 wl1271_op_remove_interface(hw, vif);
2655
2656 vif->type = new_type;
2657 vif->p2p = p2p;
2658 ret = wl1271_op_add_interface(hw, vif);
2659
2660 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2661 return ret;
2662 }
2663
2664 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2665 {
2666 int ret;
2667 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2668
2669 /*
2670 * One of the side effects of the JOIN command is that is clears
2671 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2672 * to a WPA/WPA2 access point will therefore kill the data-path.
2673 * Currently the only valid scenario for JOIN during association
2674 * is on roaming, in which case we will also be given new keys.
2675 * Keep the below message for now, unless it starts bothering
2676 * users who really like to roam a lot :)
2677 */
2678 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2679 wl1271_info("JOIN while associated.");
2680
2681 /* clear encryption type */
2682 wlvif->encryption_type = KEY_NONE;
2683
2684 if (is_ibss)
2685 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2686 else {
2687 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2688 /*
2689 * TODO: this is an ugly workaround for wl12xx fw
2690 * bug - we are not able to tx/rx after the first
2691 * start_sta, so make dummy start+stop calls,
2692 * and then call start_sta again.
2693 * this should be fixed in the fw.
2694 */
2695 wl12xx_cmd_role_start_sta(wl, wlvif);
2696 wl12xx_cmd_role_stop_sta(wl, wlvif);
2697 }
2698
2699 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2700 }
2701
2702 return ret;
2703 }
2704
2705 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2706 int offset)
2707 {
2708 u8 ssid_len;
2709 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2710 skb->len - offset);
2711
2712 if (!ptr) {
2713 wl1271_error("No SSID in IEs!");
2714 return -ENOENT;
2715 }
2716
2717 ssid_len = ptr[1];
2718 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2719 wl1271_error("SSID is too long!");
2720 return -EINVAL;
2721 }
2722
2723 wlvif->ssid_len = ssid_len;
2724 memcpy(wlvif->ssid, ptr+2, ssid_len);
2725 return 0;
2726 }
2727
2728 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2729 {
2730 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2731 struct sk_buff *skb;
2732 int ieoffset;
2733
2734 /* we currently only support setting the ssid from the ap probe req */
2735 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2736 return -EINVAL;
2737
2738 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2739 if (!skb)
2740 return -EINVAL;
2741
2742 ieoffset = offsetof(struct ieee80211_mgmt,
2743 u.probe_req.variable);
2744 wl1271_ssid_set(wlvif, skb, ieoffset);
2745 dev_kfree_skb(skb);
2746
2747 return 0;
2748 }
2749
2750 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2751 struct ieee80211_bss_conf *bss_conf,
2752 u32 sta_rate_set)
2753 {
2754 int ieoffset;
2755 int ret;
2756
2757 wlvif->aid = bss_conf->aid;
2758 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2759 wlvif->beacon_int = bss_conf->beacon_int;
2760 wlvif->wmm_enabled = bss_conf->qos;
2761
2762 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2763
2764 /*
2765 * with wl1271, we don't need to update the
2766 * beacon_int and dtim_period, because the firmware
2767 * updates it by itself when the first beacon is
2768 * received after a join.
2769 */
2770 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2771 if (ret < 0)
2772 return ret;
2773
2774 /*
2775 * Get a template for hardware connection maintenance
2776 */
2777 dev_kfree_skb(wlvif->probereq);
2778 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2779 wlvif,
2780 NULL);
2781 ieoffset = offsetof(struct ieee80211_mgmt,
2782 u.probe_req.variable);
2783 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2784
2785 /* enable the connection monitoring feature */
2786 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2787 if (ret < 0)
2788 return ret;
2789
2790 /*
2791 * The join command disable the keep-alive mode, shut down its process,
2792 * and also clear the template config, so we need to reset it all after
2793 * the join. The acx_aid starts the keep-alive process, and the order
2794 * of the commands below is relevant.
2795 */
2796 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2797 if (ret < 0)
2798 return ret;
2799
2800 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2801 if (ret < 0)
2802 return ret;
2803
2804 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2805 if (ret < 0)
2806 return ret;
2807
2808 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2809 wlvif->sta.klv_template_id,
2810 ACX_KEEP_ALIVE_TPL_VALID);
2811 if (ret < 0)
2812 return ret;
2813
2814 /*
2815 * The default fw psm configuration is AUTO, while mac80211 default
2816 * setting is off (ACTIVE), so sync the fw with the correct value.
2817 */
2818 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2819 if (ret < 0)
2820 return ret;
2821
2822 if (sta_rate_set) {
2823 wlvif->rate_set =
2824 wl1271_tx_enabled_rates_get(wl,
2825 sta_rate_set,
2826 wlvif->band);
2827 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2828 if (ret < 0)
2829 return ret;
2830 }
2831
2832 return ret;
2833 }
2834
/*
 * Undo association state for a sta/ibss vif: drop cached association
 * parameters, disable connection monitoring and keep-alive, and abort
 * any in-progress channel switch.
 *
 * NOTE(review): the sta and ibss early-exit tests are asymmetric (the
 * ibss one lacks a '!'), and `return false` from an int function really
 * returns 0; both quirks match the upstream driver, so they are left
 * untouched here — confirm intent before "fixing".
 */
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret;
	bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;

	/* make sure we are connected (sta) joined */
	if (sta &&
	    !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
		return false;

	/* make sure we are joined (ibss) */
	if (!sta &&
	    test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
		return false;

	if (sta) {
		/* use defaults when not associated */
		wlvif->aid = 0;

		/* free probe-request template */
		dev_kfree_skb(wlvif->probereq);
		wlvif->probereq = NULL;

		/* disable connection monitor features */
		ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
		if (ret < 0)
			return ret;

		/* Disable the keep-alive feature */
		ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
		if (ret < 0)
			return ret;
	}

	if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
		struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

		/* abort the pending channel switch and tell mac80211 */
		wl12xx_cmd_stop_channel_switch(wl, wlvif);
		ieee80211_chswitch_done(vif, false);
		cancel_delayed_work(&wlvif->channel_switch_work);
	}

	/* invalidate keep-alive template */
	wl1271_acx_keep_alive_config(wl, wlvif,
				     wlvif->sta.klv_template_id,
				     ACX_KEEP_ALIVE_TPL_INVALID);

	return 0;
}
2884
2885 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2886 {
2887 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2888 wlvif->rate_set = wlvif->basic_rate_set;
2889 }
2890
2891 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2892 struct ieee80211_conf *conf, u32 changed)
2893 {
2894 int ret;
2895
2896 if (conf->power_level != wlvif->power_level) {
2897 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2898 if (ret < 0)
2899 return ret;
2900
2901 wlvif->power_level = conf->power_level;
2902 }
2903
2904 return 0;
2905 }
2906
2907 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2908 {
2909 struct wl1271 *wl = hw->priv;
2910 struct wl12xx_vif *wlvif;
2911 struct ieee80211_conf *conf = &hw->conf;
2912 int ret = 0;
2913
2914 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2915 " changed 0x%x",
2916 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2917 conf->power_level,
2918 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2919 changed);
2920
2921 mutex_lock(&wl->mutex);
2922
2923 if (changed & IEEE80211_CONF_CHANGE_POWER)
2924 wl->power_level = conf->power_level;
2925
2926 if (unlikely(wl->state != WLCORE_STATE_ON))
2927 goto out;
2928
2929 ret = wl1271_ps_elp_wakeup(wl);
2930 if (ret < 0)
2931 goto out;
2932
2933 /* configure each interface */
2934 wl12xx_for_each_wlvif(wl, wlvif) {
2935 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2936 if (ret < 0)
2937 goto out_sleep;
2938 }
2939
2940 out_sleep:
2941 wl1271_ps_elp_sleep(wl);
2942
2943 out:
2944 mutex_unlock(&wl->mutex);
2945
2946 return ret;
2947 }
2948
/*
 * Multicast filter snapshot built in wl1271_op_prepare_multicast() and
 * consumed (then freed) by wl1271_op_configure_filter().
 */
struct wl1271_filter_params {
	/* false when the list exceeded the fw table and hw filtering is off */
	bool enabled;
	int mc_list_length;
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
2954
2955 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2956 struct netdev_hw_addr_list *mc_list)
2957 {
2958 struct wl1271_filter_params *fp;
2959 struct netdev_hw_addr *ha;
2960
2961 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2962 if (!fp) {
2963 wl1271_error("Out of memory setting filters.");
2964 return 0;
2965 }
2966
2967 /* update multicast filtering parameters */
2968 fp->mc_list_length = 0;
2969 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2970 fp->enabled = false;
2971 } else {
2972 fp->enabled = true;
2973 netdev_hw_addr_list_for_each(ha, mc_list) {
2974 memcpy(fp->mc_list[fp->mc_list_length],
2975 ha->addr, ETH_ALEN);
2976 fp->mc_list_length++;
2977 }
2978 }
2979
2980 return (u64)(unsigned long)fp;
2981 }
2982
/* rx filter flags honored in wl1271_op_configure_filter() */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
2989
/*
 * mac80211 configure_filter callback: program the multicast group
 * address table for every non-AP vif using the snapshot built by
 * wl1271_op_prepare_multicast() (passed in @multicast), then free it.
 * Other filter bits have no fw knob; see the note below.
 */
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
				       unsigned int changed,
				       unsigned int *total, u64 multicast)
{
	struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;

	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
		     " total %x", changed, *total);

	mutex_lock(&wl->mutex);

	/* advertise back to mac80211 only the filters we can honor */
	*total &= WL1271_SUPPORTED_FILTERS;
	changed &= WL1271_SUPPORTED_FILTERS;

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
			/* ALLMULTI disables the group table entirely */
			if (*total & FIF_ALLMULTI)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
								   false,
								   NULL, 0);
			else if (fp)
				ret = wl1271_acx_group_address_tbl(wl, wlvif,
							fp->enabled,
							fp->mc_list,
							fp->mc_list_length);
			if (ret < 0)
				goto out_sleep;
		}
	}

	/*
	 * the fw doesn't provide an api to configure the filters. instead,
	 * the filters configuration is based on the active roles / ROC
	 * state.
	 */

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	kfree(fp);
}
3044
3045 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3046 u8 id, u8 key_type, u8 key_size,
3047 const u8 *key, u8 hlid, u32 tx_seq_32,
3048 u16 tx_seq_16)
3049 {
3050 struct wl1271_ap_key *ap_key;
3051 int i;
3052
3053 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3054
3055 if (key_size > MAX_KEY_SIZE)
3056 return -EINVAL;
3057
3058 /*
3059 * Find next free entry in ap_keys. Also check we are not replacing
3060 * an existing key.
3061 */
3062 for (i = 0; i < MAX_NUM_KEYS; i++) {
3063 if (wlvif->ap.recorded_keys[i] == NULL)
3064 break;
3065
3066 if (wlvif->ap.recorded_keys[i]->id == id) {
3067 wl1271_warning("trying to record key replacement");
3068 return -EINVAL;
3069 }
3070 }
3071
3072 if (i == MAX_NUM_KEYS)
3073 return -EBUSY;
3074
3075 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3076 if (!ap_key)
3077 return -ENOMEM;
3078
3079 ap_key->id = id;
3080 ap_key->key_type = key_type;
3081 ap_key->key_size = key_size;
3082 memcpy(ap_key->key, key, key_size);
3083 ap_key->hlid = hlid;
3084 ap_key->tx_seq_32 = tx_seq_32;
3085 ap_key->tx_seq_16 = tx_seq_16;
3086
3087 wlvif->ap.recorded_keys[i] = ap_key;
3088 return 0;
3089 }
3090
3091 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3092 {
3093 int i;
3094
3095 for (i = 0; i < MAX_NUM_KEYS; i++) {
3096 kfree(wlvif->ap.recorded_keys[i]);
3097 wlvif->ap.recorded_keys[i] = NULL;
3098 }
3099 }
3100
3101 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3102 {
3103 int i, ret = 0;
3104 struct wl1271_ap_key *key;
3105 bool wep_key_added = false;
3106
3107 for (i = 0; i < MAX_NUM_KEYS; i++) {
3108 u8 hlid;
3109 if (wlvif->ap.recorded_keys[i] == NULL)
3110 break;
3111
3112 key = wlvif->ap.recorded_keys[i];
3113 hlid = key->hlid;
3114 if (hlid == WL12XX_INVALID_LINK_ID)
3115 hlid = wlvif->ap.bcast_hlid;
3116
3117 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3118 key->id, key->key_type,
3119 key->key_size, key->key,
3120 hlid, key->tx_seq_32,
3121 key->tx_seq_16);
3122 if (ret < 0)
3123 goto out;
3124
3125 if (key->key_type == KEY_WEP)
3126 wep_key_added = true;
3127 }
3128
3129 if (wep_key_added) {
3130 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3131 wlvif->ap.bcast_hlid);
3132 if (ret < 0)
3133 goto out;
3134 }
3135
3136 out:
3137 wl1271_free_ap_keys(wl, wlvif);
3138 return ret;
3139 }
3140
/*
 * Program (or remove) a key in the fw, dispatching between the AP path
 * (per-station or broadcast link keys, possibly recorded for later if
 * the AP has not started) and the STA path (unicast/broadcast keys on
 * the station link). Returns 0 or a negative command status.
 */
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			  u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		/* pairwise keys target the station link, group keys bcast */
		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wlvif->ap.bcast_hlid;
		}

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			/* defer the add until the AP role has started */
			ret = wl1271_record_ap_key(wl, wlvif, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

		/* the default WEP key needs to be configured at least once */
		if (key_type == KEY_WEP) {
			ret = wl12xx_cmd_set_default_wep_key(wl,
							wlvif->default_key,
							wlvif->sta.hlid);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
3225
/*
 * mac80211 set_key callback: wrap the hw-specific key operation with
 * the locking/wakeup boilerplate. TKIP and GEM keys change the fw's
 * spare-block accounting, so the tx queues are stopped and flushed
 * before the key change and released afterwards to keep tx in sync.
 */
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	bool might_change_spare =
		key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
		key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;

	if (might_change_spare) {
		/*
		 * stop the queues and flush to ensure the next packets are
		 * in sync with FW spare block accounting
		 */
		wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
		wl1271_tx_flush(wl);
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out_wake_queues;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_wake_queues;

	ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);

	wl1271_ps_elp_sleep(wl);

out_wake_queues:
	/* always release the queues we stopped above, even on failure */
	if (might_change_spare)
		wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);

	mutex_unlock(&wl->mutex);

	return ret;
}
3269
3270 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3271 struct ieee80211_vif *vif,
3272 struct ieee80211_sta *sta,
3273 struct ieee80211_key_conf *key_conf)
3274 {
3275 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3276 int ret;
3277 u32 tx_seq_32 = 0;
3278 u16 tx_seq_16 = 0;
3279 u8 key_type;
3280 u8 hlid;
3281
3282 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3283
3284 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3285 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3286 key_conf->cipher, key_conf->keyidx,
3287 key_conf->keylen, key_conf->flags);
3288 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3289
3290 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
3291 if (sta) {
3292 struct wl1271_station *wl_sta = (void *)sta->drv_priv;
3293 hlid = wl_sta->hlid;
3294 } else {
3295 hlid = wlvif->ap.bcast_hlid;
3296 }
3297 else
3298 hlid = wlvif->sta.hlid;
3299
3300 if (hlid != WL12XX_INVALID_LINK_ID) {
3301 u64 tx_seq = wl->links[hlid].total_freed_pkts;
3302 tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
3303 tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
3304 }
3305
3306 switch (key_conf->cipher) {
3307 case WLAN_CIPHER_SUITE_WEP40:
3308 case WLAN_CIPHER_SUITE_WEP104:
3309 key_type = KEY_WEP;
3310
3311 key_conf->hw_key_idx = key_conf->keyidx;
3312 break;
3313 case WLAN_CIPHER_SUITE_TKIP:
3314 key_type = KEY_TKIP;
3315 key_conf->hw_key_idx = key_conf->keyidx;
3316 break;
3317 case WLAN_CIPHER_SUITE_CCMP:
3318 key_type = KEY_AES;
3319 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3320 break;
3321 case WL1271_CIPHER_SUITE_GEM:
3322 key_type = KEY_GEM;
3323 break;
3324 default:
3325 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3326
3327 return -EOPNOTSUPP;
3328 }
3329
3330 switch (cmd) {
3331 case SET_KEY:
3332 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3333 key_conf->keyidx, key_type,
3334 key_conf->keylen, key_conf->key,
3335 tx_seq_32, tx_seq_16, sta);
3336 if (ret < 0) {
3337 wl1271_error("Could not add or replace key");
3338 return ret;
3339 }
3340
3341 /*
3342 * reconfiguring arp response if the unicast (or common)
3343 * encryption key type was changed
3344 */
3345 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3346 (sta || key_type == KEY_WEP) &&
3347 wlvif->encryption_type != key_type) {
3348 wlvif->encryption_type = key_type;
3349 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3350 if (ret < 0) {
3351 wl1271_warning("build arp rsp failed: %d", ret);
3352 return ret;
3353 }
3354 }
3355 break;
3356
3357 case DISABLE_KEY:
3358 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3359 key_conf->keyidx, key_type,
3360 key_conf->keylen, key_conf->key,
3361 0, 0, sta);
3362 if (ret < 0) {
3363 wl1271_error("Could not remove key");
3364 return ret;
3365 }
3366 break;
3367
3368 default:
3369 wl1271_error("Unsupported key cmd 0x%x", cmd);
3370 return -EOPNOTSUPP;
3371 }
3372
3373 return ret;
3374 }
3375 EXPORT_SYMBOL_GPL(wlcore_set_key);
3376
/*
 * Push the current regulatory domain to the fw, for chips that need it
 * (WLCORE_QUIRK_REGDOMAIN_CONF). A command failure queues a recovery.
 *
 * NOTE(review): the error path jumps to 'out' without calling
 * wl1271_ps_elp_sleep(); presumably the queued recovery resets elp
 * state anyway — confirm before changing.
 */
void wlcore_regdomain_config(struct wl1271 *wl)
{
	int ret;

	if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
		return;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wlcore_cmd_regdomain_config_locked(wl);
	if (ret < 0) {
		wl12xx_queue_recovery_work(wl);
		goto out;
	}

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3403
/*
 * mac80211 hw_scan callback: kick off a fw scan for the first requested
 * SSID (others are handled inside wlcore_scan). Refuses to scan while
 * any role is in remain-on-channel. Returns 0 on scan start, -EAGAIN
 * when the chip is down, -EBUSY during ROC, or a scan command status.
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct cfg80211_scan_request *req)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	u8 *ssid = NULL;
	size_t len = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		len = req->ssids[0].ssid_len;
	}

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* fail if there is any role in ROC */
	if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
		/* don't allow scanning right now */
		ret = -EBUSY;
		goto out_sleep;
	}

	ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3451
/*
 * mac80211 op: abort an in-progress hardware scan.
 *
 * Stops the firmware scan (unless it already finished), resets the scan
 * state and notifies mac80211 with aborted=true.  The delayed
 * scan_complete_work is cancelled outside wl->mutex to avoid deadlock
 * with the work item taking the same mutex.
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* nothing to cancel */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* only ask the fw to stop if it hasn't completed on its own */
	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl->ops->scan_stop(wl, wlvif);
		if (ret < 0)
			goto out_sleep;
	}

	/*
	 * Rearm the tx watchdog just before idling scan. This
	 * prevents just-finished scans from triggering the watchdog
	 */
	wl12xx_rearm_tx_watchdog_locked(wl);

	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan_wlvif = NULL;
	wl->scan.req = NULL;
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	cancel_delayed_work_sync(&wl->scan_complete_work);
}
3498
/*
 * mac80211 op: start a scheduled (periodic, firmware-driven) scan.
 *
 * On success the vif is recorded in wl->sched_vif so the sched scan can
 * be stopped later (e.g. when the vif associates).  Returns -EAGAIN when
 * the chip is not up.
 */
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct cfg80211_sched_scan_request *req,
				      struct ieee80211_sched_scan_ies *ies)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
	if (ret < 0)
		goto out_sleep;

	/* remember which vif owns the running sched scan */
	wl->sched_vif = wlvif;

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
3533
/*
 * mac80211 op: stop the scheduled scan on this vif.
 * Best-effort: the hw-specific stop result is not propagated (the op
 * returns void).
 */
static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl->ops->sched_scan_stop(wl, wlvif);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
3558
/*
 * mac80211 op: set the fragmentation threshold (device-global).
 * A failing ACX command is only warned about; the error code is still
 * returned to mac80211.
 */
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_frag_threshold(wl, value);
	if (ret < 0)
		wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3586
/*
 * mac80211 op: set the RTS threshold, applied per-role to every active
 * vif.  Failures on individual vifs are warned about but do not stop
 * the loop; the last vif's result is what gets returned.
 */
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl12xx_for_each_wlvif(wl, wlvif) {
		ret = wl1271_acx_rts_threshold(wl, wlvif, value);
		if (ret < 0)
			wl1271_warning("set rts threshold failed: %d", ret);
	}
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
3616
3617 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3618 {
3619 int len;
3620 const u8 *next, *end = skb->data + skb->len;
3621 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3622 skb->len - ieoffset);
3623 if (!ie)
3624 return;
3625 len = ie[1] + 2;
3626 next = ie + len;
3627 memmove(ie, next, end - next);
3628 skb_trim(skb, skb->len - len);
3629 }
3630
3631 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3632 unsigned int oui, u8 oui_type,
3633 int ieoffset)
3634 {
3635 int len;
3636 const u8 *next, *end = skb->data + skb->len;
3637 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3638 skb->data + ieoffset,
3639 skb->len - ieoffset);
3640 if (!ie)
3641 return;
3642 len = ie[1] + 2;
3643 next = ie + len;
3644 memmove(ie, next, end - next);
3645 skb_trim(skb, skb->len - len);
3646 }
3647
3648 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3649 struct ieee80211_vif *vif)
3650 {
3651 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3652 struct sk_buff *skb;
3653 int ret;
3654
3655 skb = ieee80211_proberesp_get(wl->hw, vif);
3656 if (!skb)
3657 return -EOPNOTSUPP;
3658
3659 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3660 CMD_TEMPL_AP_PROBE_RESPONSE,
3661 skb->data,
3662 skb->len, 0,
3663 rates);
3664 dev_kfree_skb(skb);
3665
3666 if (ret < 0)
3667 goto out;
3668
3669 wl1271_debug(DEBUG_AP, "probe response updated");
3670 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3671
3672 out:
3673 return ret;
3674 }
3675
/*
 * Upload an AP probe-response template, splicing in the SSID from
 * bss_conf when the vif has a hidden/empty SSID configured.
 *
 * When wlvif->ssid_len is set the caller's frame is uploaded verbatim.
 * Otherwise the frame is rebuilt in probe_rsp_templ: everything before
 * the original (empty) SSID IE, then an SSID IE built from bss_conf,
 * then the remainder of the original frame.
 */
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
						struct ieee80211_vif *vif,
						u8 *probe_rsp_data,
						size_t probe_rsp_len,
						u32 rates)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
	u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
	int ssid_ie_offset, ie_offset, templ_len;
	const u8 *ptr;

	/* no need to change probe response if the SSID is set correctly */
	if (wlvif->ssid_len > 0)
		return wl1271_cmd_template_set(wl, wlvif->role_id,
					       CMD_TEMPL_AP_PROBE_RESPONSE,
					       probe_rsp_data,
					       probe_rsp_len, 0,
					       rates);

	/* the rebuilt frame grows by ssid_len; make sure it still fits */
	if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
		wl1271_error("probe_rsp template too big");
		return -EINVAL;
	}

	/* start searching from IE offset */
	ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);

	ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
			       probe_rsp_len - ie_offset);
	if (!ptr) {
		wl1271_error("No SSID in beacon!");
		return -EINVAL;
	}

	/* copy everything up to (excluding) the original SSID IE */
	ssid_ie_offset = ptr - probe_rsp_data;
	ptr += (ptr[1] + 2);

	memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);

	/* insert SSID from bss_conf */
	probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
	probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
	memcpy(probe_rsp_templ + ssid_ie_offset + 2,
	       bss_conf->ssid, bss_conf->ssid_len);
	templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;

	/* append everything that followed the original SSID IE */
	memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
	       ptr, probe_rsp_len - (ptr - probe_rsp_data));
	templ_len += probe_rsp_len - (ptr - probe_rsp_data);

	return wl1271_cmd_template_set(wl, wlvif->role_id,
				       CMD_TEMPL_AP_PROBE_RESPONSE,
				       probe_rsp_templ,
				       templ_len, 0,
				       rates);
}
3733
3734 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3735 struct ieee80211_vif *vif,
3736 struct ieee80211_bss_conf *bss_conf,
3737 u32 changed)
3738 {
3739 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3740 int ret = 0;
3741
3742 if (changed & BSS_CHANGED_ERP_SLOT) {
3743 if (bss_conf->use_short_slot)
3744 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3745 else
3746 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3747 if (ret < 0) {
3748 wl1271_warning("Set slot time failed %d", ret);
3749 goto out;
3750 }
3751 }
3752
3753 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3754 if (bss_conf->use_short_preamble)
3755 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3756 else
3757 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3758 }
3759
3760 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3761 if (bss_conf->use_cts_prot)
3762 ret = wl1271_acx_cts_protect(wl, wlvif,
3763 CTSPROTECT_ENABLE);
3764 else
3765 ret = wl1271_acx_cts_protect(wl, wlvif,
3766 CTSPROTECT_DISABLE);
3767 if (ret < 0) {
3768 wl1271_warning("Set ctsprotect failed %d", ret);
3769 goto out;
3770 }
3771 }
3772
3773 out:
3774 return ret;
3775 }
3776
/*
 * Fetch the current beacon from mac80211 and upload it as the beacon
 * template; then derive a probe-response template from the same frame
 * (unless usermode already set one explicitly).
 *
 * The derived probe response drops the TIM and P2P IEs and rewrites the
 * frame control to PROBE_RESP before upload.  The beacon skb is always
 * freed before returning.
 */
static int wlcore_set_beacon_template(struct wl1271 *wl,
				      struct ieee80211_vif *vif,
				      bool is_ap)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct ieee80211_hdr *hdr;
	u32 min_rate;
	int ret;
	int ieoffset = offsetof(struct ieee80211_mgmt,
				u.beacon.variable);
	struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
	u16 tmpl_id;

	if (!beacon) {
		ret = -EINVAL;
		goto out;
	}

	wl1271_debug(DEBUG_MASTER, "beacon updated");

	ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}
	min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
	tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
			  CMD_TEMPL_BEACON;
	ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
				      beacon->data,
				      beacon->len, 0,
				      min_rate);
	if (ret < 0) {
		dev_kfree_skb(beacon);
		goto out;
	}

	/* remember whether the beacon advertises WMM (non-NULL == enabled) */
	wlvif->wmm_enabled =
		cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
					WLAN_OUI_TYPE_MICROSOFT_WMM,
					beacon->data + ieoffset,
					beacon->len - ieoffset);

	/*
	 * In case we already have a probe-resp beacon set explicitly
	 * by usermode, don't use the beacon data.
	 */
	if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
		goto end_bcn;

	/* remove TIM ie from probe response */
	wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);

	/*
	 * remove p2p ie from probe response.
	 * the fw responds to probe requests that don't include
	 * the p2p ie. probe requests with p2p ie will be passed,
	 * and will be responded by the supplicant (the spec
	 * forbids including the p2p ie when responding to probe
	 * requests that didn't include it).
	 */
	wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
				WLAN_OUI_TYPE_WFA_P2P, ieoffset);

	/* turn the beacon into a probe response */
	hdr = (struct ieee80211_hdr *) beacon->data;
	hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					 IEEE80211_STYPE_PROBE_RESP);
	if (is_ap)
		ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
							   beacon->data,
							   beacon->len,
							   min_rate);
	else
		ret = wl1271_cmd_template_set(wl, wlvif->role_id,
					      CMD_TEMPL_PROBE_RESPONSE,
					      beacon->data,
					      beacon->len, 0,
					      min_rate);
end_bcn:
	dev_kfree_skb(beacon);
	if (ret < 0)
		goto out;

out:
	return ret;
}
3863
/*
 * Handle beacon-related bss_conf changes: beacon interval, an explicit
 * usermode probe response (AP only), and the beacon template itself.
 * Returns 0 or the first error (which is also logged).
 */
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
					  struct ieee80211_vif *vif,
					  struct ieee80211_bss_conf *bss_conf,
					  u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret = 0;

	if (changed & BSS_CHANGED_BEACON_INT) {
		wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
			     bss_conf->beacon_int);

		wlvif->beacon_int = bss_conf->beacon_int;
	}

	if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
		u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);

		/* best-effort: a failure here is not propagated */
		wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
	}

	if (changed & BSS_CHANGED_BEACON) {
		ret = wlcore_set_beacon_template(wl, vif, is_ap);
		if (ret < 0)
			goto out;
	}

out:
	if (ret != 0)
		wl1271_error("beacon info change failed: %d", ret);
	return ret;
}
3897
/* AP mode changes */
/*
 * Apply bss_conf changes for an AP-mode vif: basic rates (and the
 * templates that depend on them), beacon info, beacon enable/disable
 * (which starts/stops the AP role), ERP parameters and HT operation.
 * Errors abort the remaining steps but are not propagated (void op).
 */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret = 0;

	if (changed & BSS_CHANGED_BASIC_RATES) {
		u32 rates = bss_conf->basic_rates;

		wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
								    wlvif->band);
		wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
							   wlvif->basic_rate_set);

		ret = wl1271_init_ap_rates(wl, wlvif);
		if (ret < 0) {
			wl1271_error("AP rate policy change failed %d", ret);
			goto out;
		}

		/* rate change invalidates the templates; rebuild them all */
		ret = wl1271_ap_init_templates(wl, vif);
		if (ret < 0)
			goto out;

		ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
		if (ret < 0)
			goto out;

		ret = wlcore_set_beacon_template(wl, vif, true);
		if (ret < 0)
			goto out;
	}

	ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (bss_conf->enable_beacon) {
			/* start the AP role only once */
			if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_start_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				ret = wl1271_ap_init_hwenc(wl, wlvif);
				if (ret < 0)
					goto out;

				set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				wl1271_debug(DEBUG_AP, "started AP");
			}
		} else {
			if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
				ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
				if (ret < 0)
					goto out;

				clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
				clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
					  &wlvif->flags);
				wl1271_debug(DEBUG_AP, "stopped AP");
			}
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	/* Handle HT information change */
	if ((changed & BSS_CHANGED_HT) &&
	    (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
		ret = wl1271_acx_set_ht_information(wl, wlvif,
					bss_conf->ht_operation_mode);
		if (ret < 0) {
			wl1271_warning("Set ht information failed %d", ret);
			goto out;
		}
	}

out:
	return;
}
3984
/*
 * Configure the vif for a newly-learned BSSID: record beacon interval
 * and rate sets, stop any running sched scan on this vif, push rate
 * policies and (qos-)null-data templates, set the SSID and mark the
 * role in use.  Returns 0 or the first command error.
 */
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			    struct ieee80211_bss_conf *bss_conf,
			    u32 sta_rate_set)
{
	u32 rates;
	int ret;

	wl1271_debug(DEBUG_MAC80211,
	     "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
	     bss_conf->bssid, bss_conf->aid,
	     bss_conf->beacon_int,
	     bss_conf->basic_rates, sta_rate_set);

	wlvif->beacon_int = bss_conf->beacon_int;
	rates = bss_conf->basic_rates;
	wlvif->basic_rate_set =
		wl1271_tx_enabled_rates_get(wl, rates,
					    wlvif->band);
	wlvif->basic_rate =
		wl1271_tx_min_rate_get(wl,
				       wlvif->basic_rate_set);

	/* sta_rate_set == 0 means the peer's rates are not known yet */
	if (sta_rate_set)
		wlvif->rate_set =
			wl1271_tx_enabled_rates_get(wl,
						sta_rate_set,
						wlvif->band);

	/* we only support sched_scan while not connected */
	if (wl->sched_vif == wlvif)
		wl->ops->sched_scan_stop(wl, wlvif);

	ret = wl1271_acx_sta_rate_policies(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl12xx_cmd_build_null_data(wl, wlvif);
	if (ret < 0)
		return ret;

	ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
	if (ret < 0)
		return ret;

	wlcore_set_ssid(wl, wlvif);

	set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);

	return 0;
}
4035
4036 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4037 {
4038 int ret;
4039
4040 /* revert back to minimum rates for the current band */
4041 wl1271_set_band_rate(wl, wlvif);
4042 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4043
4044 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4045 if (ret < 0)
4046 return ret;
4047
4048 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4049 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4050 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4051 if (ret < 0)
4052 return ret;
4053 }
4054
4055 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4056 return 0;
4057 }
/* STA/IBSS mode changes */
/*
 * Apply bss_conf changes for a STA or IBSS vif.
 *
 * Rough ordering (which matters): IBSS beacon info, IBSS join/leave,
 * CQM triggers, snapshot of the AP's rates/HT caps under RCU, BSSID
 * set/clear, IBSS rate policies, ERP, the actual join, association
 * state, power save, HT configuration (post-join), and finally ARP
 * filtering.  Any error aborts the remaining steps (void op, nothing
 * is propagated).
 */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
					struct ieee80211_vif *vif,
					struct ieee80211_bss_conf *bss_conf,
					u32 changed)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool do_join = false;
	bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
	bool ibss_joined = false;
	u32 sta_rate_set = 0;
	int ret;
	struct ieee80211_sta *sta;
	bool sta_exists = false;
	struct ieee80211_sta_ht_cap sta_ht_cap;

	if (is_ibss) {
		ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
						     changed);
		if (ret < 0)
			goto out;
	}

	if (changed & BSS_CHANGED_IBSS) {
		if (bss_conf->ibss_joined) {
			set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
			ibss_joined = true;
		} else {
			/* leaving the IBSS: tear down assoc state and role */
			wlcore_unset_assoc(wl, wlvif);
			wl12xx_cmd_role_stop_sta(wl, wlvif);
		}
	}

	if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
		do_join = true;

	/* Need to update the SSID (for filtering etc) */
	if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
		do_join = true;

	if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
		wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
			     bss_conf->enable_beacon ? "enabled" : "disabled");

		do_join = true;
	}

	if (changed & BSS_CHANGED_CQM) {
		bool enable = false;
		if (bss_conf->cqm_rssi_thold)
			enable = true;
		ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
						  bss_conf->cqm_rssi_thold,
						  bss_conf->cqm_rssi_hyst);
		if (ret < 0)
			goto out;
		wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
	}

	if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
		       BSS_CHANGED_ASSOC)) {
		/* snapshot the peer's capabilities under RCU; sta may
		 * disappear once we drop the read lock */
		rcu_read_lock();
		sta = ieee80211_find_sta(vif, bss_conf->bssid);
		if (sta) {
			u8 *rx_mask = sta->ht_cap.mcs.rx_mask;

			/* save the supp_rates of the ap */
			sta_rate_set = sta->supp_rates[wlvif->band];
			if (sta->ht_cap.ht_supported)
				sta_rate_set |=
					(rx_mask[0] << HW_HT_RATES_OFFSET) |
					(rx_mask[1] << HW_MIMO_RATES_OFFSET);
			sta_ht_cap = sta->ht_cap;
			sta_exists = true;
		}

		rcu_read_unlock();
	}

	if (changed & BSS_CHANGED_BSSID) {
		if (!is_zero_ether_addr(bss_conf->bssid)) {
			ret = wlcore_set_bssid(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			/* Need to update the BSSID (for filtering etc) */
			do_join = true;
		} else {
			ret = wlcore_clear_bssid(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	if (changed & BSS_CHANGED_IBSS) {
		wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
			     bss_conf->ibss_joined);

		if (bss_conf->ibss_joined) {
			u32 rates = bss_conf->basic_rates;
			wlvif->basic_rate_set =
				wl1271_tx_enabled_rates_get(wl, rates,
							    wlvif->band);
			wlvif->basic_rate =
				wl1271_tx_min_rate_get(wl,
						       wlvif->basic_rate_set);

			/* by default, use 11b + OFDM rates */
			wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
			ret = wl1271_acx_sta_rate_policies(wl, wlvif);
			if (ret < 0)
				goto out;
		}
	}

	ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
	if (ret < 0)
		goto out;

	if (do_join) {
		ret = wlcore_join(wl, wlvif);
		if (ret < 0) {
			wl1271_warning("cmd join failed %d", ret);
			goto out;
		}
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (bss_conf->assoc) {
			ret = wlcore_set_assoc(wl, wlvif, bss_conf,
					       sta_rate_set);
			if (ret < 0)
				goto out;

			if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
				wl12xx_set_authorized(wl, wlvif);
		} else {
			wlcore_unset_assoc(wl, wlvif);
		}
	}

	if (changed & BSS_CHANGED_PS) {
		if ((bss_conf->ps) &&
		    test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
		    !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			int ps_mode;
			char *ps_mode_str;

			if (wl->conf.conn.forced_ps) {
				ps_mode = STATION_POWER_SAVE_MODE;
				ps_mode_str = "forced";
			} else {
				ps_mode = STATION_AUTO_PS_MODE;
				ps_mode_str = "auto";
			}

			wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);

			ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
			if (ret < 0)
				wl1271_warning("enter %s ps failed %d",
					       ps_mode_str, ret);
		} else if (!bss_conf->ps &&
			   test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
			wl1271_debug(DEBUG_PSM, "auto ps disabled");

			ret = wl1271_ps_set_mode(wl, wlvif,
						 STATION_ACTIVE_MODE);
			if (ret < 0)
				wl1271_warning("exit auto ps failed %d", ret);
		}
	}

	/* Handle new association with HT. Do this after join. */
	if (sta_exists &&
	    (changed & BSS_CHANGED_HT)) {
		bool enabled =
			bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;

		ret = wlcore_hw_set_peer_cap(wl,
					     &sta_ht_cap,
					     enabled,
					     wlvif->rate_set,
					     wlvif->sta.hlid);
		if (ret < 0) {
			wl1271_warning("Set ht cap failed %d", ret);
			goto out;

		}

		if (enabled) {
			ret = wl1271_acx_set_ht_information(wl, wlvif,
						bss_conf->ht_operation_mode);
			if (ret < 0) {
				wl1271_warning("Set ht information failed %d",
					       ret);
				goto out;
			}
		}
	}

	/* Handle arp filtering. Done after join. */
	if ((changed & BSS_CHANGED_ARP_FILTER) ||
	    (!is_ibss && (changed & BSS_CHANGED_QOS))) {
		__be32 addr = bss_conf->arp_addr_list[0];
		wlvif->sta.qos = bss_conf->qos;
		WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);

		if (bss_conf->arp_addr_cnt == 1 && bss_conf->assoc) {
			wlvif->ip_addr = addr;
			/*
			 * The template should have been configured only upon
			 * association. however, it seems that the correct ip
			 * isn't being set (when sending), so we have to
			 * reconfigure the template upon every ip change.
			 */
			ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
			if (ret < 0) {
				wl1271_warning("build arp rsp failed: %d", ret);
				goto out;
			}

			ret = wl1271_acx_arp_ip_filter(wl, wlvif,
				(ACX_ARP_FILTER_ARP_FILTERING |
				 ACX_ARP_FILTER_AUTO_ARP),
				addr);
		} else {
			wlvif->ip_addr = 0;
			ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
		}

		if (ret < 0)
			goto out;
	}

out:
	return;
}
4297
/*
 * mac80211 op: dispatch bss_conf changes to the AP- or STA-specific
 * handler.  Work that must not hold wl->mutex (cancelling the
 * connection-loss work, flushing tx before stopping AP beacons) is
 * done before taking it.
 */
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_bss_conf *bss_conf,
				       u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
		     wlvif->role_id, (int)changed);

	/*
	 * make sure to cancel pending disconnections if our association
	 * state changed
	 */
	if (!is_ap && (changed & BSS_CHANGED_ASSOC))
		cancel_delayed_work_sync(&wlvif->connection_loss_work);

	if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
	    !bss_conf->enable_beacon)
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	if (is_ap)
		wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
	else
		wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
4344
4345 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4346 struct ieee80211_chanctx_conf *ctx)
4347 {
4348 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4349 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4350 cfg80211_get_chandef_type(&ctx->def));
4351 return 0;
4352 }
4353
4354 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4355 struct ieee80211_chanctx_conf *ctx)
4356 {
4357 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4358 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4359 cfg80211_get_chandef_type(&ctx->def));
4360 }
4361
4362 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4363 struct ieee80211_chanctx_conf *ctx,
4364 u32 changed)
4365 {
4366 wl1271_debug(DEBUG_MAC80211,
4367 "mac80211 change chanctx %d (type %d) changed 0x%x",
4368 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4369 cfg80211_get_chandef_type(&ctx->def), changed);
4370 }
4371
4372 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4373 struct ieee80211_vif *vif,
4374 struct ieee80211_chanctx_conf *ctx)
4375 {
4376 struct wl1271 *wl = hw->priv;
4377 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4378 int channel = ieee80211_frequency_to_channel(
4379 ctx->def.chan->center_freq);
4380
4381 wl1271_debug(DEBUG_MAC80211,
4382 "mac80211 assign chanctx (role %d) %d (type %d)",
4383 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4384
4385 mutex_lock(&wl->mutex);
4386
4387 wlvif->band = ctx->def.chan->band;
4388 wlvif->channel = channel;
4389 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4390
4391 /* update default rates according to the band */
4392 wl1271_set_band_rate(wl, wlvif);
4393
4394 mutex_unlock(&wl->mutex);
4395
4396 return 0;
4397 }
4398
4399 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4400 struct ieee80211_vif *vif,
4401 struct ieee80211_chanctx_conf *ctx)
4402 {
4403 struct wl1271 *wl = hw->priv;
4404 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4405
4406 wl1271_debug(DEBUG_MAC80211,
4407 "mac80211 unassign chanctx (role %d) %d (type %d)",
4408 wlvif->role_id,
4409 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4410 cfg80211_get_chandef_type(&ctx->def));
4411
4412 wl1271_tx_flush(wl);
4413 }
4414
/*
 * mac80211 op: configure EDCA/WMM parameters for one tx queue.
 * Selects U-APSD triggering when requested, then programs the AC and
 * TID configuration.  Silently succeeds (ret == 0) when the vif has
 * not been initialized yet.
 */
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif, u16 queue,
			     const struct ieee80211_tx_queue_params *params)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u8 ps_scheme;
	int ret = 0;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);

	if (params->uapsd)
		ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
	else
		ps_scheme = CONF_PS_SCHEME_LEGACY;

	if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/*
	 * the txop is confed in units of 32us by the mac80211,
	 * we need us
	 */
	ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				params->cw_min, params->cw_max,
				params->aifs, params->txop << 5);
	if (ret < 0)
		goto out_sleep;

	ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
				 CONF_CHANNEL_TYPE_EDCF,
				 wl1271_tx_get_queue(queue),
				 ps_scheme, CONF_ACK_POLICY_LEGACY,
				 0, 0);

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4464
/*
 * mac80211 op: read the current TSF from the firmware.
 * Returns ULLONG_MAX when the chip is down or the query fails.
 */
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif)
{

	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	u64 mactime = ULLONG_MAX;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* on failure mactime keeps its ULLONG_MAX sentinel */
	ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
	if (ret < 0)
		goto out_sleep;

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return mactime;
}
4496
4497 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4498 struct survey_info *survey)
4499 {
4500 struct ieee80211_conf *conf = &hw->conf;
4501
4502 if (idx != 0)
4503 return -ENOENT;
4504
4505 survey->channel = conf->channel;
4506 survey->filled = 0;
4507 return 0;
4508 }
4509
/*
 * Allocate a host link ID (HLID) for a newly-added AP-mode station and
 * register it in the vif's station map.  Returns -EBUSY when either the
 * station count or the link table is exhausted.
 */
static int wl1271_allocate_sta(struct wl1271 *wl,
			       struct wl12xx_vif *wlvif,
			       struct ieee80211_sta *sta)
{
	struct wl1271_station *wl_sta;
	int ret;


	if (wl->active_sta_count >= AP_MAX_STATIONS) {
		wl1271_warning("could not allocate HLID - too much stations");
		return -EBUSY;
	}

	wl_sta = (struct wl1271_station *)sta->drv_priv;
	ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
	if (ret < 0) {
		wl1271_warning("could not allocate HLID - too many links");
		return -EBUSY;
	}

	/* use the previous security seq, if this is a recovery/resume */
	wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;

	set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
	memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
	wl->active_sta_count++;
	return 0;
}
4538
/*
 * Release the HLID of an AP-mode station: clear it from the station and
 * power-save maps, preserve the link's tx-sequence counter in the
 * station's private data (for recovery/suspend) and free the link.
 * No-op if the HLID is not marked in the station map.
 */
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
	struct wl1271_station *wl_sta;
	struct ieee80211_sta *sta;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);

	if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
		return;

	clear_bit(hlid, wlvif->ap.sta_hlid_map);
	__clear_bit(hlid, &wl->ap_ps_map);
	__clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);

	/*
	 * save the last used PN in the private part of iee80211_sta,
	 * in case of recovery/suspend
	 */
	rcu_read_lock();
	sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
	if (sta) {
		wl_sta = (void *)sta->drv_priv;
		wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;

		/*
		 * increment the initial seq number on recovery to account for
		 * transmitted packets that we haven't yet got in the FW status
		 */
		if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
			wl_sta->total_freed_pkts +=
					WL1271_TX_SQN_POST_RECOVERY_PADDING;
	}
	rcu_read_unlock();

	wl12xx_free_link(wl, wlvif, &hlid);
	wl->active_sta_count--;

	/*
	 * rearm the tx watchdog when the last STA is freed - give the FW a
	 * chance to return STA-buffered packets before complaining.
	 */
	if (wl->active_sta_count == 0)
		wl12xx_rearm_tx_watchdog_locked(wl);
}
4582
4583 static int wl12xx_sta_add(struct wl1271 *wl,
4584 struct wl12xx_vif *wlvif,
4585 struct ieee80211_sta *sta)
4586 {
4587 struct wl1271_station *wl_sta;
4588 int ret = 0;
4589 u8 hlid;
4590
4591 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4592
4593 ret = wl1271_allocate_sta(wl, wlvif, sta);
4594 if (ret < 0)
4595 return ret;
4596
4597 wl_sta = (struct wl1271_station *)sta->drv_priv;
4598 hlid = wl_sta->hlid;
4599
4600 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4601 if (ret < 0)
4602 wl1271_free_sta(wl, wlvif, hlid);
4603
4604 return ret;
4605 }
4606
4607 static int wl12xx_sta_remove(struct wl1271 *wl,
4608 struct wl12xx_vif *wlvif,
4609 struct ieee80211_sta *sta)
4610 {
4611 struct wl1271_station *wl_sta;
4612 int ret = 0, id;
4613
4614 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4615
4616 wl_sta = (struct wl1271_station *)sta->drv_priv;
4617 id = wl_sta->hlid;
4618 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4619 return -EINVAL;
4620
4621 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4622 if (ret < 0)
4623 return ret;
4624
4625 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4626 return ret;
4627 }
4628
4629 static void wlcore_roc_if_possible(struct wl1271 *wl,
4630 struct wl12xx_vif *wlvif)
4631 {
4632 if (find_first_bit(wl->roc_map,
4633 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4634 return;
4635
4636 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4637 return;
4638
4639 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4640 }
4641
4642 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4643 struct wl12xx_vif *wlvif,
4644 struct wl1271_station *wl_sta,
4645 bool in_connection)
4646 {
4647 if (in_connection) {
4648 if (WARN_ON(wl_sta->in_connection))
4649 return;
4650 wl_sta->in_connection = true;
4651 if (!wlvif->inconn_count++)
4652 wlcore_roc_if_possible(wl, wlvif);
4653 } else {
4654 if (!wl_sta->in_connection)
4655 return;
4656
4657 wl_sta->in_connection = false;
4658 wlvif->inconn_count--;
4659 if (WARN_ON(wlvif->inconn_count < 0))
4660 return;
4661
4662 if (!wlvif->inconn_count)
4663 if (test_bit(wlvif->role_id, wl->roc_map))
4664 wl12xx_croc(wl, wlvif->role_id);
4665 }
4666 }
4667
/*
 * Apply a mac80211 station state transition.
 *
 * AP mode: add/remove the peer in the FW, authorize it (peer state +
 * HT capabilities) and keep the in-connection ROC bookkeeping updated.
 * STA mode: track authorization flags and start/stop ROC around the
 * connection phase.
 *
 * Returns 0 on success or a negative error from the FW commands.
 * Called with the wl mutex held and the chip awake.
 */
static int wl12xx_update_sta_state(struct wl1271 *wl,
				   struct wl12xx_vif *wlvif,
				   struct ieee80211_sta *sta,
				   enum ieee80211_sta_state old_state,
				   enum ieee80211_sta_state new_state)
{
	struct wl1271_station *wl_sta;
	bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
	bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
	int ret;

	wl_sta = (struct wl1271_station *)sta->drv_priv;

	/* Add station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		ret = wl12xx_sta_add(wl, wlvif, sta);
		if (ret)
			return ret;

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
	}

	/* Remove station (AP mode) */
	if (is_ap &&
	    old_state == IEEE80211_STA_NONE &&
	    new_state == IEEE80211_STA_NOTEXIST) {
		/* must not fail */
		wl12xx_sta_remove(wl, wlvif, sta);

		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station (AP mode) */
	if (is_ap &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
		if (ret < 0)
			return ret;

		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
						     wl_sta->hlid);
		if (ret)
			return ret;

		/* the station finished connecting - release the ROC */
		wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
	}

	/* Authorize station */
	if (is_sta &&
	    new_state == IEEE80211_STA_AUTHORIZED) {
		set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		ret = wl12xx_set_authorized(wl, wlvif);
		if (ret)
			return ret;
	}

	/* moving back from authorized - forget the sent state too */
	if (is_sta &&
	    old_state == IEEE80211_STA_AUTHORIZED &&
	    new_state == IEEE80211_STA_ASSOC) {
		clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
		clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
	}

	/* clear ROCs on failure or authorization */
	if (is_sta &&
	    (new_state == IEEE80211_STA_AUTHORIZED ||
	     new_state == IEEE80211_STA_NOTEXIST)) {
		if (test_bit(wlvif->role_id, wl->roc_map))
			wl12xx_croc(wl, wlvif->role_id);
	}

	/* new connection starting - ROC on our channel unless one is active */
	if (is_sta &&
	    old_state == IEEE80211_STA_NOTEXIST &&
	    new_state == IEEE80211_STA_NONE) {
		if (find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
			WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
			wl12xx_roc(wl, wlvif, wlvif->role_id,
				   wlvif->band, wlvif->channel);
		}
	}
	return 0;
}
4753
/*
 * mac80211 sta_state callback.  Performs the transition with the wl
 * mutex held and the chip awake.  Errors on transitions towards a lower
 * state are not reported (0 is returned) - the station is being torn
 * down anyway.
 */
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_sta *sta,
			       enum ieee80211_sta_state old_state,
			       enum ieee80211_sta_state new_state)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
		     sta->aid, old_state, new_state);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	/* swallow errors when the state is moving downwards */
	if (new_state < old_state)
		return 0;
	return ret;
}
4787
/*
 * mac80211 AMPDU (block-ack) session handler.
 *
 * Only RX BA sessions are driven from the host; the FW manages TX BA
 * sessions by itself, so every TX action is rejected with -EINVAL.
 * The per-link ba_bitmap tracks which TIDs have an active RX session
 * and ba_rx_session_count enforces the chip-wide session limit.
 */
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
				  struct ieee80211_vif *vif,
				  enum ieee80211_ampdu_mlme_action action,
				  struct ieee80211_sta *sta, u16 tid, u16 *ssn,
				  u8 buf_size)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	int ret;
	u8 hlid, *ba_bitmap;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
		     tid);

	/* sanity check - the fields in FW are only 8bits wide */
	if (WARN_ON(tid > 0xFF))
		return -ENOTSUPP;

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EAGAIN;
		goto out;
	}

	/* resolve the FW link (hlid) this BA session belongs to */
	if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
		hlid = wlvif->sta.hlid;
	} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)sta->drv_priv;
		hlid = wl_sta->hlid;
	} else {
		ret = -EINVAL;
		goto out;
	}

	ba_bitmap = &wl->links[hlid].ba_bitmap;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
		     tid, action);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (!wlvif->ba_support || !wlvif->ba_allowed) {
			ret = -ENOTSUPP;
			break;
		}

		if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
			ret = -EBUSY;
			wl1271_error("exceeded max RX BA sessions");
			break;
		}

		if (*ba_bitmap & BIT(tid)) {
			ret = -EINVAL;
			wl1271_error("cannot enable RX BA session on active "
				     "tid: %d", tid);
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
							 hlid);
		if (!ret) {
			*ba_bitmap |= BIT(tid);
			wl->ba_rx_session_count++;
		}
		break;

	case IEEE80211_AMPDU_RX_STOP:
		if (!(*ba_bitmap & BIT(tid))) {
			/*
			 * this happens on reconfig - so only output a debug
			 * message for now, and don't fail the function.
			 */
			wl1271_debug(DEBUG_MAC80211,
				     "no active RX BA session on tid: %d",
				     tid);
			ret = 0;
			break;
		}

		ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
							 hlid);
		if (!ret) {
			*ba_bitmap &= ~BIT(tid);
			wl->ba_rx_session_count--;
		}
		break;

	/*
	 * The FW manages BA initiator (TX) sessions on its own, so all
	 * TX AMPDU actions are grouped here and rejected on purpose.
	 */
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = -EINVAL;
		break;

	default:
		wl1271_error("Incorrect ampdu action id=%x\n", action);
		ret = -EINVAL;
	}

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4907
/*
 * mac80211 set_bitrate_mask handler.
 *
 * The per-band enabled-rate masks are always recorded in the vif, even
 * when the chip is off.  When the vif is an unassociated station and
 * the chip is on, the new basic rate and rate policies are also pushed
 * to the FW immediately.
 */
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   const struct cfg80211_bitrate_mask *mask)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int i, ret = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
		     mask->control[NL80211_BAND_2GHZ].legacy,
		     mask->control[NL80211_BAND_5GHZ].legacy);

	mutex_lock(&wl->mutex);

	/* remember the requested masks regardless of the chip state */
	for (i = 0; i < WLCORE_NUM_BANDS; i++)
		wlvif->bitrate_masks[i] =
			wl1271_tx_enabled_rates_get(wl,
						    mask->control[i].legacy,
						    i);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
	    !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out;

		/* apply the new rates to the FW right away */
		wl1271_set_band_rate(wl, wlvif);
		wlvif->basic_rate =
			wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl, wlvif);

		wl1271_ps_elp_sleep(wl);
	}
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
4950
/*
 * mac80211 channel_switch handler.
 *
 * Flushes pending TX, then asks the FW to perform the switch for every
 * station vif.  If the chip is off the switch is immediately reported
 * to mac80211 as failed.  For each vif a delayed work is armed that
 * reports failure 5 seconds past the expected switch time.
 */
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
				     struct ieee80211_channel_switch *ch_switch)
{
	struct wl1271 *wl = hw->priv;
	struct wl12xx_vif *wlvif;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");

	wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WLCORE_STATE_OFF)) {
		/* chip is down - fail the switch for all station vifs */
		wl12xx_for_each_wlvif_sta(wl, wlvif) {
			struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
			ieee80211_chswitch_done(vif, false);
		}
		goto out;
	} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* TODO: change mac80211 to pass vif as param */
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		unsigned long delay_usec;

		ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
		if (ret)
			goto out_sleep;

		set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);

		/* indicate failure 5 seconds after channel switch time */
		delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
			ch_switch->count;
		ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
					     usecs_to_jiffies(delay_usec) +
					     msecs_to_jiffies(5000));
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
}
5002
5003 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
5004 {
5005 struct wl1271 *wl = hw->priv;
5006
5007 wl1271_tx_flush(wl);
5008 }
5009
/*
 * mac80211 remain_on_channel handler.
 *
 * Only one ROC may be pending at any time: if another vif already holds
 * one (roc_vif set or any bit in roc_map), -EBUSY is returned.  Starts
 * the device role on the requested channel and schedules
 * roc_complete_work to end the ROC after 'duration' ms.
 */
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
				       struct ieee80211_vif *vif,
				       struct ieee80211_channel *chan,
				       int duration,
				       enum ieee80211_roc_type type)
{
	struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
	struct wl1271 *wl = hw->priv;
	int channel, ret = 0;

	channel = ieee80211_frequency_to_channel(chan->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
		     channel, wlvif->role_id);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	/* return EBUSY if we can't ROC right now */
	if (WARN_ON(wl->roc_vif ||
		    find_first_bit(wl->roc_map,
				   WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
	if (ret < 0)
		goto out_sleep;

	/* remember the owner and arm the expiry work */
	wl->roc_vif = vif;
	ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
				     msecs_to_jiffies(duration));
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
	return ret;
}
5055
5056 static int __wlcore_roc_completed(struct wl1271 *wl)
5057 {
5058 struct wl12xx_vif *wlvif;
5059 int ret;
5060
5061 /* already completed */
5062 if (unlikely(!wl->roc_vif))
5063 return 0;
5064
5065 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5066
5067 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5068 return -EBUSY;
5069
5070 ret = wl12xx_stop_dev(wl, wlvif);
5071 if (ret < 0)
5072 return ret;
5073
5074 wl->roc_vif = NULL;
5075
5076 return 0;
5077 }
5078
/*
 * End the pending ROC under the wl mutex, waking the chip as needed.
 * Returns -EBUSY if the chip is not operational, otherwise whatever
 * __wlcore_roc_completed() returns.
 */
static int wlcore_roc_completed(struct wl1271 *wl)
{
	int ret;

	wl1271_debug(DEBUG_MAC80211, "roc complete");

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state != WLCORE_STATE_ON)) {
		ret = -EBUSY;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = __wlcore_roc_completed(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
5104
5105 static void wlcore_roc_complete_work(struct work_struct *work)
5106 {
5107 struct delayed_work *dwork;
5108 struct wl1271 *wl;
5109 int ret;
5110
5111 dwork = container_of(work, struct delayed_work, work);
5112 wl = container_of(dwork, struct wl1271, roc_complete_work);
5113
5114 ret = wlcore_roc_completed(wl);
5115 if (!ret)
5116 ieee80211_remain_on_channel_expired(wl->hw);
5117 }
5118
/*
 * mac80211 cancel_remain_on_channel handler: flush TX and synchronously
 * complete any pending ROC.  Always reports success.
 */
static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;

	wl1271_debug(DEBUG_MAC80211, "mac80211 croc");

	/* TODO: per-vif */
	wl1271_tx_flush(wl);

	/*
	 * we can't just flush_work here, because it might deadlock
	 * (as we might get called from the same workqueue)
	 */
	cancel_delayed_work_sync(&wl->roc_complete_work);
	wlcore_roc_completed(wl);

	return 0;
}
5137
5138 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5139 struct ieee80211_vif *vif,
5140 struct ieee80211_sta *sta,
5141 u32 changed)
5142 {
5143 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5144 struct wl1271 *wl = hw->priv;
5145
5146 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5147 }
5148
5149 static int wlcore_op_get_rssi(struct ieee80211_hw *hw,
5150 struct ieee80211_vif *vif,
5151 struct ieee80211_sta *sta,
5152 s8 *rssi_dbm)
5153 {
5154 struct wl1271 *wl = hw->priv;
5155 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5156 int ret = 0;
5157
5158 wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
5159
5160 mutex_lock(&wl->mutex);
5161
5162 if (unlikely(wl->state != WLCORE_STATE_ON))
5163 goto out;
5164
5165 ret = wl1271_ps_elp_wakeup(wl);
5166 if (ret < 0)
5167 goto out_sleep;
5168
5169 ret = wlcore_acx_average_rssi(wl, wlvif, rssi_dbm);
5170 if (ret < 0)
5171 goto out_sleep;
5172
5173 out_sleep:
5174 wl1271_ps_elp_sleep(wl);
5175
5176 out:
5177 mutex_unlock(&wl->mutex);
5178
5179 return ret;
5180 }
5181
5182 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5183 {
5184 struct wl1271 *wl = hw->priv;
5185 bool ret = false;
5186
5187 mutex_lock(&wl->mutex);
5188
5189 if (unlikely(wl->state != WLCORE_STATE_ON))
5190 goto out;
5191
5192 /* packets are considered pending if in the TX queue or the FW */
5193 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5194 out:
5195 mutex_unlock(&wl->mutex);
5196
5197 return ret;
5198 }
5199
/*
 * 2.4GHz legacy (CCK + OFDM) rate table; .bitrate is in units of
 * 100 kbps per mac80211 convention.
 * can't be const, mac80211 writes to this
 */
static struct ieee80211_rate wl1271_rates[] = {
	{ .bitrate = 10,
	  .hw_value = CONF_HW_BIT_RATE_1MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
	{ .bitrate = 20,
	  .hw_value = CONF_HW_BIT_RATE_2MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = CONF_HW_BIT_RATE_11MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5242
/*
 * 2.4GHz channels 1-14; .hw_value holds the channel number.
 * can't be const, mac80211 writes to this
 */
static struct ieee80211_channel wl1271_channels[] = {
	{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
5260
/*
 * 2.4GHz band descriptor tying the channel and rate tables together.
 * can't be const, mac80211 writes to this
 */
static struct ieee80211_supported_band wl1271_band_2ghz = {
	.channels = wl1271_channels,
	.n_channels = ARRAY_SIZE(wl1271_channels),
	.bitrates = wl1271_rates,
	.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
5268
/*
 * 5 GHz data rates for WL1273 (OFDM only - no CCK rates on 5GHz);
 * .bitrate is in units of 100 kbps per mac80211 convention.
 */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
	{ .bitrate = 60,
	  .hw_value = CONF_HW_BIT_RATE_6MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
	{ .bitrate = 90,
	  .hw_value = CONF_HW_BIT_RATE_9MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
	{ .bitrate = 120,
	  .hw_value = CONF_HW_BIT_RATE_12MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
	{ .bitrate = 180,
	  .hw_value = CONF_HW_BIT_RATE_18MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
	{ .bitrate = 240,
	  .hw_value = CONF_HW_BIT_RATE_24MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
	{ .bitrate = 360,
	 .hw_value = CONF_HW_BIT_RATE_36MBPS,
	 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
	{ .bitrate = 480,
	  .hw_value = CONF_HW_BIT_RATE_48MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
	{ .bitrate = 540,
	  .hw_value = CONF_HW_BIT_RATE_54MBPS,
	  .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
5296
/* 5 GHz band channels for WL1273; .hw_value holds the channel number */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
	{ .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
	{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
5334
/* 5GHz band descriptor tying the channel and rate tables together */
static struct ieee80211_supported_band wl1271_band_5ghz = {
	.channels = wl1271_channels_5ghz,
	.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
	.bitrates = wl1271_rates_5ghz,
	.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
5341
/* mac80211 callback table implemented by wlcore */
static const struct ieee80211_ops wl1271_ops = {
	.start = wl1271_op_start,
	.stop = wlcore_op_stop,
	.add_interface = wl1271_op_add_interface,
	.remove_interface = wl1271_op_remove_interface,
	.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
	.suspend = wl1271_op_suspend,
	.resume = wl1271_op_resume,
#endif
	.config = wl1271_op_config,
	.prepare_multicast = wl1271_op_prepare_multicast,
	.configure_filter = wl1271_op_configure_filter,
	.tx = wl1271_op_tx,
	.set_key = wlcore_op_set_key,
	.hw_scan = wl1271_op_hw_scan,
	.cancel_hw_scan = wl1271_op_cancel_hw_scan,
	.sched_scan_start = wl1271_op_sched_scan_start,
	.sched_scan_stop = wl1271_op_sched_scan_stop,
	.bss_info_changed = wl1271_op_bss_info_changed,
	.set_frag_threshold = wl1271_op_set_frag_threshold,
	.set_rts_threshold = wl1271_op_set_rts_threshold,
	.conf_tx = wl1271_op_conf_tx,
	.get_tsf = wl1271_op_get_tsf,
	.get_survey = wl1271_op_get_survey,
	.sta_state = wl12xx_op_sta_state,
	.ampdu_action = wl1271_op_ampdu_action,
	.tx_frames_pending = wl1271_tx_frames_pending,
	.set_bitrate_mask = wl12xx_set_bitrate_mask,
	.channel_switch = wl12xx_op_channel_switch,
	.flush = wlcore_op_flush,
	.remain_on_channel = wlcore_op_remain_on_channel,
	.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
	.add_chanctx = wlcore_op_add_chanctx,
	.remove_chanctx = wlcore_op_remove_chanctx,
	.change_chanctx = wlcore_op_change_chanctx,
	.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
	.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
	.sta_rc_update = wlcore_op_sta_rc_update,
	.get_rssi = wlcore_op_get_rssi,
	CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
5384
5385
5386 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5387 {
5388 u8 idx;
5389
5390 BUG_ON(band >= 2);
5391
5392 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5393 wl1271_error("Illegal RX rate from HW: %d", rate);
5394 return 0;
5395 }
5396
5397 idx = wl->band_rate_to_idx[band][rate];
5398 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5399 wl1271_error("Unsupported RX rate from HW: %d", rate);
5400 return 0;
5401 }
5402
5403 return idx;
5404 }
5405
5406 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5407 struct device_attribute *attr,
5408 char *buf)
5409 {
5410 struct wl1271 *wl = dev_get_drvdata(dev);
5411 ssize_t len;
5412
5413 len = PAGE_SIZE;
5414
5415 mutex_lock(&wl->mutex);
5416 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5417 wl->sg_enabled);
5418 mutex_unlock(&wl->mutex);
5419
5420 return len;
5421
5422 }
5423
/*
 * sysfs write handler for bt_coex_state.  Accepts "0" (off) or any
 * non-zero value (on).  Invalid input is only warned about; the full
 * count is returned in every case so the writer isn't retried.  When
 * the chip is on, the new soft-gemini setting is applied immediately.
 */
static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
						struct device_attribute *attr,
						const char *buf, size_t count)
{
	struct wl1271 *wl = dev_get_drvdata(dev);
	unsigned long res;
	int ret;

	ret = kstrtoul(buf, 10, &res);
	if (ret < 0) {
		wl1271_warning("incorrect value written to bt_coex_mode");
		return count;
	}

	mutex_lock(&wl->mutex);

	/* normalize to 0/1 */
	res = !!res;

	if (res == wl->sg_enabled)
		goto out;

	wl->sg_enabled = res;

	/* setting is remembered; it reaches the FW on the next power-on */
	if (unlikely(wl->state != WLCORE_STATE_ON))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_acx_sg_enable(wl, wl->sg_enabled);
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);
	return count;
}
5461
/* sysfs knob: read/write BT coexistence state (0 - off, 1 - on) */
static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
		   wl1271_sysfs_show_bt_coex_state,
		   wl1271_sysfs_store_bt_coex_state);
5465
5466 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5467 struct device_attribute *attr,
5468 char *buf)
5469 {
5470 struct wl1271 *wl = dev_get_drvdata(dev);
5471 ssize_t len;
5472
5473 len = PAGE_SIZE;
5474
5475 mutex_lock(&wl->mutex);
5476 if (wl->hw_pg_ver >= 0)
5477 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5478 else
5479 len = snprintf(buf, len, "n/a\n");
5480 mutex_unlock(&wl->mutex);
5481
5482 return len;
5483 }
5484
/* sysfs knob: read-only hardware PG version */
static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
		   wl1271_sysfs_show_hw_pg_ver, NULL);
5487
/*
 * sysfs read handler for the FW log.  Blocks until log data is
 * available (or a signal arrives), then copies and consumes as much as
 * fits in the caller's buffer.  Seeking is not supported; consumed data
 * is discarded to make room for new messages.
 */
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
				       struct bin_attribute *bin_attr,
				       char *buffer, loff_t pos, size_t count)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct wl1271 *wl = dev_get_drvdata(dev);
	ssize_t len;
	int ret;

	ret = mutex_lock_interruptible(&wl->mutex);
	if (ret < 0)
		return -ERESTARTSYS;

	/* Let only one thread read the log at a time, blocking others */
	while (wl->fwlog_size == 0) {
		DEFINE_WAIT(wait);

		prepare_to_wait_exclusive(&wl->fwlog_waitq,
					  &wait,
					  TASK_INTERRUPTIBLE);

		/* re-check after queueing so a concurrent wakeup isn't lost */
		if (wl->fwlog_size != 0) {
			finish_wait(&wl->fwlog_waitq, &wait);
			break;
		}

		/* drop the lock while sleeping so the log producer can run */
		mutex_unlock(&wl->mutex);

		schedule();
		finish_wait(&wl->fwlog_waitq, &wait);

		if (signal_pending(current))
			return -ERESTARTSYS;

		ret = mutex_lock_interruptible(&wl->mutex);
		if (ret < 0)
			return -ERESTARTSYS;
	}

	/* Check if the fwlog is still valid */
	if (wl->fwlog_size < 0) {
		mutex_unlock(&wl->mutex);
		return 0;
	}

	/* Seeking is not supported - old logs are not kept. Disregard pos. */
	len = min(count, (size_t)wl->fwlog_size);
	wl->fwlog_size -= len;
	memcpy(buffer, wl->fwlog, len);

	/* Make room for new messages */
	memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);

	mutex_unlock(&wl->mutex);

	return len;
}
5545
/* sysfs binary attribute exposing the FW log (owner-readable only) */
static struct bin_attribute fwlog_attr = {
	.attr = {.name = "fwlog", .mode = S_IRUSR},
	.read = wl1271_sysfs_read_fwlog,
};
5550
5551 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5552 {
5553 int i;
5554
5555 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5556 oui, nic);
5557
5558 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5559 wl1271_warning("NIC part of the MAC address wraps around!");
5560
5561 for (i = 0; i < wl->num_mac_addr; i++) {
5562 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5563 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5564 wl->addresses[i].addr[2] = (u8) oui;
5565 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5566 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5567 wl->addresses[i].addr[5] = (u8) nic;
5568 nic++;
5569 }
5570
5571 /* we may be one address short at the most */
5572 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5573
5574 /*
5575 * turn on the LAA bit in the first address and use it as
5576 * the last address.
5577 */
5578 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5579 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5580 memcpy(&wl->addresses[idx], &wl->addresses[0],
5581 sizeof(wl->addresses[0]));
5582 /* LAA bit */
5583 wl->addresses[idx].addr[2] |= BIT(1);
5584 }
5585
5586 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5587 wl->hw->wiphy->addresses = wl->addresses;
5588 }
5589
/*
 * Power the chip on briefly to read its identity: chip id, hardware PG
 * version and (when the chip-family op exists) the fused MAC address.
 * The chip is always powered off again before returning.
 */
static int wl12xx_get_hw_info(struct wl1271 *wl)
{
	int ret;

	ret = wl12xx_set_power_on(wl);
	if (ret < 0)
		return ret;

	ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
	if (ret < 0)
		goto out;

	/* defaults in case the chip-family op doesn't provide a fused MAC */
	wl->fuse_oui_addr = 0;
	wl->fuse_nic_addr = 0;

	ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
	if (ret < 0)
		goto out;

	if (wl->ops->get_mac)
		ret = wl->ops->get_mac(wl);

out:
	wl1271_power_off(wl);
	return ret;
}
5616
/*
 * Derive the device MAC addresses and register the hw with mac80211.
 * The base address is taken from the NVS when present there, otherwise
 * from the fuse values read earlier.  Idempotent: returns 0 right away
 * if already registered.
 */
static int wl1271_register_hw(struct wl1271 *wl)
{
	int ret;
	u32 oui_addr = 0, nic_addr = 0;

	if (wl->mac80211_registered)
		return 0;

	if (wl->nvs_len >= 12) {
		/* NOTE: The wl->nvs->nvs element must be first, in
		 * order to simplify the casting, we assume it is at
		 * the beginning of the wl->nvs structure.
		 */
		u8 *nvs_ptr = (u8 *)wl->nvs;

		/* the MAC bytes are not contiguous in the NVS image */
		oui_addr =
			(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
		nic_addr =
			(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
	}

	/* if the MAC address is zeroed in the NVS derive from fuse */
	if (oui_addr == 0 && nic_addr == 0) {
		oui_addr = wl->fuse_oui_addr;
		/* fuse has the BD_ADDR, the WLAN addresses are the next two */
		nic_addr = wl->fuse_nic_addr + 1;
	}

	wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);

	ret = ieee80211_register_hw(wl->hw);
	if (ret < 0) {
		wl1271_error("unable to register mac80211 hw: %d", ret);
		goto out;
	}

	wl->mac80211_registered = true;

	wl1271_debugfs_init(wl);

	wl1271_notice("loaded");

out:
	return ret;
}
5662
5663 static void wl1271_unregister_hw(struct wl1271 *wl)
5664 {
5665 if (wl->plt)
5666 wl1271_plt_stop(wl);
5667
5668 ieee80211_unregister_hw(wl->hw);
5669 wl->mac80211_registered = false;
5670
5671 }
5672
/*
 * Concurrent-interface limits advertised to cfg80211: up to three station
 * interfaces plus at most one AP-like interface (AP / P2P-GO / P2P-client).
 */
static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			 BIT(NL80211_IFTYPE_P2P_GO) |
			 BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
};
5685
/*
 * Allowed interface combination (max 3 interfaces total under the limits
 * above).  Deliberately NOT const: num_different_channels is filled in at
 * runtime from wl->num_channels in wl1271_init_ieee80211().
 */
static struct ieee80211_iface_combination
wlcore_iface_combinations[] = {
	{
	  .max_interfaces = 3,
	  .limits = wlcore_iface_limits,
	  .n_limits = ARRAY_SIZE(wlcore_iface_limits),
	},
};
5694
/*
 * Populate the ieee80211_hw / wiphy structures with the capabilities of
 * this device before registration: TX headroom, HW flags, supported
 * interface modes, scan limits, bands, queue layout and allowed interface
 * combinations.  Must run after the chip has been identified (quirks,
 * num_channels and ht_cap are consumed here) and before
 * wl1271_register_hw().
 *
 * Always returns 0.
 */
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
	int i;
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
		WL1271_CIPHER_SUITE_GEM,	/* TI-proprietary GEM cipher */
	};

	/* The tx descriptor buffer */
	wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);

	/* TKIP needs extra per-packet space on chips with this quirk */
	if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
		wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;

	/* unit us */
	/* FIXME: find a proper value */
	wl->hw->channel_change_time = 10000;
	wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;

	wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
		IEEE80211_HW_SUPPORTS_PS |
		IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
		IEEE80211_HW_SUPPORTS_UAPSD |
		IEEE80211_HW_HAS_RATE_CONTROL |
		IEEE80211_HW_CONNECTION_MONITOR |
		IEEE80211_HW_REPORTS_TX_ACK_STATUS |
		IEEE80211_HW_SPECTRUM_MGMT |
		IEEE80211_HW_AP_LINK_PS |
		IEEE80211_HW_AMPDU_AGGREGATION |
		IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
		IEEE80211_HW_QUEUE_CONTROL;

	wl->hw->wiphy->cipher_suites = cipher_suites;
	wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);

	wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
	wl->hw->wiphy->max_scan_ssids = 1;
	wl->hw->wiphy->max_sched_scan_ssids = 16;
	wl->hw->wiphy->max_match_sets = 16;
	/*
	 * Maximum length of elements in scanning probe request templates
	 * should be the maximum length possible for a template, without
	 * the IEEE80211 header of the template
	 */
	wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
			sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
		sizeof(struct ieee80211_header);

	wl->hw->wiphy->max_remain_on_channel_duration = 5000;

	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
				WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;

	/* make sure all our channels fit in the scanned_ch bitmask */
	BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
		     ARRAY_SIZE(wl1271_channels_5ghz) >
		     WL1271_MAX_CHANNELS);
	/*
	 * clear channel flags from the previous usage
	 * and restore max_power & max_antenna_gain values.
	 */
	for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
		wl1271_band_2ghz.channels[i].flags = 0;
		wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
	}

	for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
		wl1271_band_5ghz.channels[i].flags = 0;
		wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
		wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
	}

	/*
	 * We keep local copies of the band structs because we need to
	 * modify them on a per-device basis.
	 */
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
	       sizeof(wl1271_band_2ghz));
	memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_2GHZ],
	       sizeof(*wl->ht_cap));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
	       sizeof(wl1271_band_5ghz));
	memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
	       &wl->ht_cap[IEEE80211_BAND_5GHZ],
	       sizeof(*wl->ht_cap));

	wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
		&wl->bands[IEEE80211_BAND_2GHZ];
	wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
		&wl->bands[IEEE80211_BAND_5GHZ];

	/*
	 * allow 4 queues per mac address we support +
	 * 1 cab queue per mac + one global offchannel Tx queue
	 */
	wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;

	/* the last queue is the offchannel queue */
	wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
	wl->hw->max_rates = 1;

	wl->hw->wiphy->reg_notifier = wl1271_reg_notify;

	/* the FW answers probe-requests in AP-mode */
	wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
	wl->hw->wiphy->probe_resp_offload =
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
		NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;

	/* allowed interface combinations */
	wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
	wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
	wl->hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(wlcore_iface_combinations);

	SET_IEEE80211_DEV(wl->hw, wl->dev);

	wl->hw->sta_data_size = sizeof(struct wl1271_station);
	wl->hw->vif_data_size = sizeof(struct wl12xx_vif);

	wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;

	return 0;
}
5829
#define WL1271_DEFAULT_CHANNEL 0

/*
 * Allocate and initialize the shared wlcore state.
 *
 * Allocates the ieee80211_hw, the chip-specific private area (priv_size,
 * owned by the lower driver), the aggregation buffer, the dummy packet,
 * one page of FW log buffer and the event mailbox, and initializes all
 * locks, queues and work items.  Hardware is not touched here.
 *
 * On failure, everything allocated so far is released via the goto-cleanup
 * chain and an ERR_PTR-encoded error is returned.  On success the caller
 * owns the returned hw and releases it with wlcore_free_hw().
 */
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
				     u32 mbox_size)
{
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	int i, j, ret;
	unsigned int order;

	BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);

	/* priv_size is NOT passed here; the chip-private area is a separate
	 * kzalloc below, reachable through wl->priv */
	hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
	if (!hw) {
		wl1271_error("could not alloc ieee80211_hw");
		ret = -ENOMEM;
		goto err_hw_alloc;
	}

	wl = hw->priv;
	memset(wl, 0, sizeof(*wl));

	wl->priv = kzalloc(priv_size, GFP_KERNEL);
	if (!wl->priv) {
		wl1271_error("could not alloc wl priv");
		ret = -ENOMEM;
		goto err_priv_alloc;
	}

	INIT_LIST_HEAD(&wl->wlvif_list);

	wl->hw = hw;

	for (i = 0; i < NUM_TX_QUEUES; i++)
		for (j = 0; j < WL12XX_MAX_LINKS; j++)
			skb_queue_head_init(&wl->links[j].tx_queue[i]);

	skb_queue_head_init(&wl->deferred_rx_queue);
	skb_queue_head_init(&wl->deferred_tx_queue);

	INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
	INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
	INIT_WORK(&wl->tx_work, wl1271_tx_work);
	INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
	INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
	INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
	INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);

	/* freezable so in-flight work can't race system suspend */
	wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
	if (!wl->freezable_wq) {
		ret = -ENOMEM;
		goto err_hw;
	}

	wl->channel = WL1271_DEFAULT_CHANNEL;
	wl->rx_counter = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->band = IEEE80211_BAND_2GHZ;
	wl->channel_type = NL80211_CHAN_NO_HT;
	wl->flags = 0;
	wl->sg_enabled = true;
	wl->sleep_auth = WL1271_PSM_ILLEGAL;
	wl->recovery_count = 0;
	wl->hw_pg_ver = -1;	/* -1 = not read from HW yet */
	wl->ap_ps_map = 0;
	wl->ap_fw_ps_map = 0;
	wl->quirks = 0;
	wl->platform_quirks = 0;
	wl->system_hlid = WL12XX_SYSTEM_HLID;
	wl->active_sta_count = 0;
	wl->active_link_count = 0;
	wl->fwlog_size = 0;
	init_waitqueue_head(&wl->fwlog_waitq);

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
	/* NOTE(review): wl was memset to 0 above and num_tx_desc is
	 * presumably only set later by the lower driver's setup, so this
	 * loop looks like a no-op here - confirm against wl->ops->setup */
	for (i = 0; i < wl->num_tx_desc; i++)
		wl->tx_frames[i] = NULL;

	spin_lock_init(&wl->wl_lock);

	wl->state = WLCORE_STATE_OFF;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	mutex_init(&wl->mutex);
	mutex_init(&wl->flush_mutex);
	init_completion(&wl->nvs_loading_complete);

	order = get_order(aggr_buf_size);
	wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
	if (!wl->aggr_buf) {
		ret = -ENOMEM;
		goto err_wq;
	}
	wl->aggr_buf_size = aggr_buf_size;

	wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
	if (!wl->dummy_packet) {
		ret = -ENOMEM;
		goto err_aggr;
	}

	/* Allocate one page for the FW log */
	wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
	if (!wl->fwlog) {
		ret = -ENOMEM;
		goto err_dummy_packet;
	}

	/* GFP_DMA: the mailbox is read into via the bus DMA path */
	wl->mbox_size = mbox_size;
	wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
	if (!wl->mbox) {
		ret = -ENOMEM;
		goto err_fwlog;
	}

	wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
	if (!wl->buffer_32) {
		ret = -ENOMEM;
		goto err_mbox;
	}

	return hw;

err_mbox:
	kfree(wl->mbox);

err_fwlog:
	free_page((unsigned long)wl->fwlog);

err_dummy_packet:
	dev_kfree_skb(wl->dummy_packet);

err_aggr:
	free_pages((unsigned long)wl->aggr_buf, order);

err_wq:
	destroy_workqueue(wl->freezable_wq);

err_hw:
	/* NOTE(review): debugfs was never initialized on this path;
	 * wl1271_debugfs_exit() is presumably safe on a fresh wl - verify */
	wl1271_debugfs_exit(wl);
	kfree(wl->priv);

err_priv_alloc:
	ieee80211_free_hw(hw);

err_hw_alloc:

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5982
/*
 * Release everything allocated by wlcore_alloc_hw() and wlcore_nvs_cb(),
 * in reverse order of allocation.  Also unblocks any readers sleeping on
 * the fwlog waitqueue and removes the sysfs/debugfs entries.
 *
 * Always returns 0.
 */
int wlcore_free_hw(struct wl1271 *wl)
{
	/* Unblock any fwlog readers */
	mutex_lock(&wl->mutex);
	wl->fwlog_size = -1;	/* sentinel: tells readers the log is gone */
	wake_up_interruptible_all(&wl->fwlog_waitq);
	mutex_unlock(&wl->mutex);

	device_remove_bin_file(wl->dev, &fwlog_attr);

	device_remove_file(wl->dev, &dev_attr_hw_pg_ver);

	device_remove_file(wl->dev, &dev_attr_bt_coex_state);
	kfree(wl->buffer_32);
	kfree(wl->mbox);
	free_page((unsigned long)wl->fwlog);
	dev_kfree_skb(wl->dummy_packet);
	free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));

	wl1271_debugfs_exit(wl);

	/* firmware image was vmalloc'ed by the loader */
	vfree(wl->fw);
	wl->fw = NULL;
	wl->fw_type = WL12XX_FW_TYPE_NONE;
	kfree(wl->nvs);
	wl->nvs = NULL;

	kfree(wl->fw_status_1);
	kfree(wl->tx_res_if);
	destroy_workqueue(wl->freezable_wq);

	kfree(wl->priv);
	ieee80211_free_hw(wl->hw);

	return 0;
}
6020
6021 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
6022 {
6023 struct wl1271 *wl = context;
6024 struct platform_device *pdev = wl->pdev;
6025 struct wlcore_platdev_data *pdev_data = pdev->dev.platform_data;
6026 struct wl12xx_platform_data *pdata = pdev_data->pdata;
6027 unsigned long irqflags;
6028 int ret;
6029
6030 if (fw) {
6031 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
6032 if (!wl->nvs) {
6033 wl1271_error("Could not allocate nvs data");
6034 goto out;
6035 }
6036 wl->nvs_len = fw->size;
6037 } else {
6038 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
6039 WL12XX_NVS_NAME);
6040 wl->nvs = NULL;
6041 wl->nvs_len = 0;
6042 }
6043
6044 ret = wl->ops->setup(wl);
6045 if (ret < 0)
6046 goto out_free_nvs;
6047
6048 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
6049
6050 /* adjust some runtime configuration parameters */
6051 wlcore_adjust_conf(wl);
6052
6053 wl->irq = platform_get_irq(pdev, 0);
6054 wl->platform_quirks = pdata->platform_quirks;
6055 wl->if_ops = pdev_data->if_ops;
6056
6057 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
6058 irqflags = IRQF_TRIGGER_RISING;
6059 else
6060 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6061
6062 ret = request_threaded_irq(wl->irq, NULL, wlcore_irq,
6063 irqflags, pdev->name, wl);
6064 if (ret < 0) {
6065 wl1271_error("request_irq() failed: %d", ret);
6066 goto out_free_nvs;
6067 }
6068
6069 #ifdef CONFIG_PM
6070 ret = enable_irq_wake(wl->irq);
6071 if (!ret) {
6072 wl->irq_wake_enabled = true;
6073 device_init_wakeup(wl->dev, 1);
6074 if (pdata->pwr_in_suspend) {
6075 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
6076 wl->hw->wiphy->wowlan.n_patterns =
6077 WL1271_MAX_RX_FILTERS;
6078 wl->hw->wiphy->wowlan.pattern_min_len = 1;
6079 wl->hw->wiphy->wowlan.pattern_max_len =
6080 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
6081 }
6082 }
6083 #endif
6084 disable_irq(wl->irq);
6085
6086 ret = wl12xx_get_hw_info(wl);
6087 if (ret < 0) {
6088 wl1271_error("couldn't get hw info");
6089 goto out_irq;
6090 }
6091
6092 ret = wl->ops->identify_chip(wl);
6093 if (ret < 0)
6094 goto out_irq;
6095
6096 ret = wl1271_init_ieee80211(wl);
6097 if (ret)
6098 goto out_irq;
6099
6100 ret = wl1271_register_hw(wl);
6101 if (ret)
6102 goto out_irq;
6103
6104 /* Create sysfs file to control bt coex state */
6105 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
6106 if (ret < 0) {
6107 wl1271_error("failed to create sysfs file bt_coex_state");
6108 goto out_unreg;
6109 }
6110
6111 /* Create sysfs file to get HW PG version */
6112 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
6113 if (ret < 0) {
6114 wl1271_error("failed to create sysfs file hw_pg_ver");
6115 goto out_bt_coex_state;
6116 }
6117
6118 /* Create sysfs file for the FW log */
6119 ret = device_create_bin_file(wl->dev, &fwlog_attr);
6120 if (ret < 0) {
6121 wl1271_error("failed to create sysfs file fwlog");
6122 goto out_hw_pg_ver;
6123 }
6124
6125 wl->initialized = true;
6126 goto out;
6127
6128 out_hw_pg_ver:
6129 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
6130
6131 out_bt_coex_state:
6132 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
6133
6134 out_unreg:
6135 wl1271_unregister_hw(wl);
6136
6137 out_irq:
6138 free_irq(wl->irq, wl);
6139
6140 out_free_nvs:
6141 kfree(wl->nvs);
6142
6143 out:
6144 release_firmware(fw);
6145 complete_all(&wl->nvs_loading_complete);
6146 }
6147
6148 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6149 {
6150 int ret;
6151
6152 if (!wl->ops || !wl->ptable)
6153 return -EINVAL;
6154
6155 wl->dev = &pdev->dev;
6156 wl->pdev = pdev;
6157 platform_set_drvdata(pdev, wl);
6158
6159 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6160 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6161 wl, wlcore_nvs_cb);
6162 if (ret < 0) {
6163 wl1271_error("request_firmware_nowait failed: %d", ret);
6164 complete_all(&wl->nvs_loading_complete);
6165 }
6166
6167 return ret;
6168 }
6169 EXPORT_SYMBOL_GPL(wlcore_probe);
6170
6171 int wlcore_remove(struct platform_device *pdev)
6172 {
6173 struct wl1271 *wl = platform_get_drvdata(pdev);
6174
6175 wait_for_completion(&wl->nvs_loading_complete);
6176 if (!wl->initialized)
6177 return 0;
6178
6179 if (wl->irq_wake_enabled) {
6180 device_init_wakeup(wl->dev, 0);
6181 disable_irq_wake(wl->irq);
6182 }
6183 wl1271_unregister_hw(wl);
6184 free_irq(wl->irq, wl);
6185 wlcore_free_hw(wl);
6186
6187 return 0;
6188 }
6189 EXPORT_SYMBOL_GPL(wlcore_remove);
6190
/* Runtime-tunable debug bitmask (see debug.h for the DEBUG_* bits);
 * exported so the lower drivers share the same level. */
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");

/* FW logger mode; string is parsed elsewhere (perm 0: not sysfs-writable) */
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
		 "FW logger options: continuous, ondemand, dbgpins or disable");

/* Debug aid: crash instead of recovering so the state can be inspected */
module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");

module_param(no_recovery, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
MODULE_FIRMWARE(WL12XX_NVS_NAME);