1
2 /*
3 * This file is part of wl1271
4 *
5 * Copyright (C) 2008-2010 Nokia Corporation
6 *
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
21 * 02110-1301 USA
22 *
23 */
24
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
37
38 #include "wlcore.h"
39 #include "debug.h"
40 #include "wl12xx_80211.h"
41 #include "io.h"
42 #include "event.h"
43 #include "tx.h"
44 #include "rx.h"
45 #include "ps.h"
46 #include "init.h"
47 #include "debugfs.h"
48 #include "cmd.h"
49 #include "boot.h"
50 #include "testmode.h"
51 #include "scan.h"
52 #include "hw_ops.h"
53
54 #define WL1271_BOOT_RETRIES 3
55
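/*
 * Optional module parameters (see wlcore_adjust_conf()). A value of -1,
 * or a NULL fwlog_param string, means "not set" and leaves the platform
 * defaults in wl->conf untouched.
 */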
58 static char *fwlog_param;
59 static int bug_on_recovery = -1;
60 static int no_recovery = -1;
61
62 static void __wl1271_op_remove_interface(struct wl1271 *wl,
63 struct ieee80211_vif *vif,
64 bool reset_tx_queues);
65 static void wlcore_op_stop_locked(struct wl1271 *wl);
66 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
67
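/*
 * Notify the firmware that association has completed for a STA vif
 * (wl12xx_cmd_set_peer_state). The STA_STATE_SENT flag guarantees the
 * command is sent at most once per association.
 */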
68 static int wl12xx_set_authorized(struct wl1271 *wl,
69 struct wl12xx_vif *wlvif)
70 {
71 int ret;
72
73 if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
74 return -EINVAL;
75
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
77 return 0;
78
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
80 return 0;
81
82 ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
83 if (ret < 0)
84 return ret;
85
86 wl1271_info("Association completed.");
87 return 0;
88 }
89
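/*
 * cfg80211 regulatory notifier: force passive scanning and no IBSS on
 * 5GHz radar channels and, if the chip is up, push the new regulatory
 * domain configuration to the firmware.
 */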
90 static int wl1271_reg_notify(struct wiphy *wiphy,
91 struct regulatory_request *request)
92 {
93 struct ieee80211_supported_band *band;
94 struct ieee80211_channel *ch;
95 int i;
96 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
97 struct wl1271 *wl = hw->priv;
98
99 band = wiphy->bands[IEEE80211_BAND_5GHZ];
100 for (i = 0; i < band->n_channels; i++) {
101 ch = &band->channels[i];
102 if (ch->flags & IEEE80211_CHAN_DISABLED)
103 continue;
104
105 if (ch->flags & IEEE80211_CHAN_RADAR)
106 ch->flags |= IEEE80211_CHAN_NO_IBSS |
107 IEEE80211_CHAN_PASSIVE_SCAN;
108
109 }
110
111 if (likely(wl->state == WLCORE_STATE_ON))
112 wlcore_regdomain_config(wl);
113
114 return 0;
115 }
116
117 static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
118 bool enable)
119 {
120 int ret = 0;
121
122 /* we should hold wl->mutex */
123 ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
124 if (ret < 0)
125 goto out;
126
127 if (enable)
128 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
129 else
130 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
131 out:
132 return ret;
133 }
134
135 /*
136 * this function is called when the rx_streaming interval
137 * has been changed or rx_streaming should be disabled
138 */
139 int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
140 {
141 int ret = 0;
142 int period = wl->conf.rx_streaming.interval;
143
144 /* don't reconfigure if rx_streaming is disabled */
145 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
146 goto out;
147
148 /* reconfigure/disable according to new streaming_period */
149 if (period &&
150 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
151 (wl->conf.rx_streaming.always ||
152 test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
153 ret = wl1271_set_rx_streaming(wl, wlvif, true);
154 else {
155 ret = wl1271_set_rx_streaming(wl, wlvif, false);
156 /* don't cancel_work_sync since we might deadlock */
157 del_timer_sync(&wlvif->rx_streaming_timer);
158 }
159 out:
160 return ret;
161 }
162
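/*
 * Work item: wake the chip and enable RX streaming, then arm
 * rx_streaming_timer to disable it again after the configured
 * duration of inactivity.
 */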
163 static void wl1271_rx_streaming_enable_work(struct work_struct *work)
164 {
165 int ret;
166 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
167 rx_streaming_enable_work);
168 struct wl1271 *wl = wlvif->wl;
169
170 mutex_lock(&wl->mutex);
171
172 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
173 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
174 (!wl->conf.rx_streaming.always &&
175 !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
176 goto out;
177
178 if (!wl->conf.rx_streaming.interval)
179 goto out;
180
181 ret = wl1271_ps_elp_wakeup(wl);
182 if (ret < 0)
183 goto out;
184
185 ret = wl1271_set_rx_streaming(wl, wlvif, true);
186 if (ret < 0)
187 goto out_sleep;
188
189 /* stop it after some time of inactivity */
190 mod_timer(&wlvif->rx_streaming_timer,
191 jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
192
193 out_sleep:
194 wl1271_ps_elp_sleep(wl);
195 out:
196 mutex_unlock(&wl->mutex);
197 }
198
199 static void wl1271_rx_streaming_disable_work(struct work_struct *work)
200 {
201 int ret;
202 struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
203 rx_streaming_disable_work);
204 struct wl1271 *wl = wlvif->wl;
205
206 mutex_lock(&wl->mutex);
207
208 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
209 goto out;
210
211 ret = wl1271_ps_elp_wakeup(wl);
212 if (ret < 0)
213 goto out;
214
215 ret = wl1271_set_rx_streaming(wl, wlvif, false);
216 if (ret)
217 goto out_sleep;
218
219 out_sleep:
220 wl1271_ps_elp_sleep(wl);
221 out:
222 mutex_unlock(&wl->mutex);
223 }
224
225 static void wl1271_rx_streaming_timer(unsigned long data)
226 {
227 struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
228 struct wl1271 *wl = wlvif->wl;
229 ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
230 }
231
232 /* wl->mutex must be taken */
233 void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
234 {
235 /* if the watchdog is not armed, don't do anything */
236 if (wl->tx_allocated_blocks == 0)
237 return;
238
239 cancel_delayed_work(&wl->tx_watchdog_work);
240 ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
241 msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
242 }
243
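/*
 * Tx watchdog: runs when the firmware has not freed any Tx blocks for
 * tx_watchdog_timeout ms. ROC, an active scan or an AP with connected
 * stations can legitimately stall Tx, so the timer is simply re-armed
 * in those cases; otherwise a firmware recovery is triggered.
 */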
244 static void wl12xx_tx_watchdog_work(struct work_struct *work)
245 {
246 struct delayed_work *dwork;
247 struct wl1271 *wl;
248
249 dwork = container_of(work, struct delayed_work, work);
250 wl = container_of(dwork, struct wl1271, tx_watchdog_work);
251
252 mutex_lock(&wl->mutex);
253
254 if (unlikely(wl->state != WLCORE_STATE_ON))
255 goto out;
256
257 /* Tx went out in the meantime - everything is ok */
258 if (unlikely(wl->tx_allocated_blocks == 0))
259 goto out;
260
261 /*
262 * if a ROC is in progress, we might not have any Tx for a long
263 * time (e.g. pending Tx on the non-ROC channels)
264 */
265 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
266 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
267 wl->conf.tx.tx_watchdog_timeout);
268 wl12xx_rearm_tx_watchdog_locked(wl);
269 goto out;
270 }
271
272 /*
273 * if a scan is in progress, we might not have any Tx for a long
274 * time
275 */
276 if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
277 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
278 wl->conf.tx.tx_watchdog_timeout);
279 wl12xx_rearm_tx_watchdog_locked(wl);
280 goto out;
281 }
282
283 /*
284 * AP might cache a frame for a long time for a sleeping station,
285 * so rearm the timer if there's an AP interface with stations. If
286 * Tx is genuinely stuck we will hopefully discover it when all
287 * stations are removed due to inactivity.
288 */
289 if (wl->active_sta_count) {
290 wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
291 " %d stations",
292 wl->conf.tx.tx_watchdog_timeout,
293 wl->active_sta_count);
294 wl12xx_rearm_tx_watchdog_locked(wl);
295 goto out;
296 }
297
298 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
299 wl->conf.tx.tx_watchdog_timeout);
300 wl12xx_queue_recovery_work(wl);
301
302 out:
303 mutex_unlock(&wl->mutex);
304 }
305
306 static void wlcore_adjust_conf(struct wl1271 *wl)
307 {
308 /* Adjust settings according to optional module parameters */
309
310 if (fwlog_param) {
311 if (!strcmp(fwlog_param, "continuous")) {
312 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
313 } else if (!strcmp(fwlog_param, "ondemand")) {
314 wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND;
315 } else if (!strcmp(fwlog_param, "dbgpins")) {
316 wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
317 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
318 } else if (!strcmp(fwlog_param, "disable")) {
319 wl->conf.fwlog.mem_blocks = 0;
320 wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
321 } else {
322 wl1271_error("Unknown fwlog parameter %s", fwlog_param);
323 }
324 }
325
326 if (bug_on_recovery != -1)
327 wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
328
329 if (no_recovery != -1)
330 wl->conf.recovery.no_recovery = (u8) no_recovery;
331 }
332
333 static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
334 struct wl12xx_vif *wlvif,
335 u8 hlid, u8 tx_pkts)
336 {
337 bool fw_ps, single_link;
338
339 fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
340 single_link = (wl->active_link_count == 1);
341
342 /*
343 * Wake up from high-level PS if the STA is asleep with too few
344 * packets in FW or if the STA is awake.
345 */
346 if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
347 wl12xx_ps_link_end(wl, wlvif, hlid);
348
349 /*
350 * Start high-level PS if the STA is asleep with enough blocks in FW.
351 * Make an exception if this is the only connected link. In this
352 * case FW-memory congestion is less of a problem.
353 */
354 else if (!single_link && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
355 wl12xx_ps_link_start(wl, wlvif, hlid, true);
356 }
357
358 static void wl12xx_irq_update_links_status(struct wl1271 *wl,
359 struct wl12xx_vif *wlvif,
360 struct wl_fw_status_2 *status)
361 {
362 u32 cur_fw_ps_map;
363 u8 hlid;
364
365 cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
366 if (wl->ap_fw_ps_map != cur_fw_ps_map) {
367 wl1271_debug(DEBUG_PSM,
368 "link ps prev 0x%x cur 0x%x changed 0x%x",
369 wl->ap_fw_ps_map, cur_fw_ps_map,
370 wl->ap_fw_ps_map ^ cur_fw_ps_map);
371
372 wl->ap_fw_ps_map = cur_fw_ps_map;
373 }
374
375 for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
376 wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
377 wl->links[hlid].allocated_pkts);
378 }
379
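/*
 * Read the firmware status area and update the driver's accounting:
 * per-queue and per-link freed-packet counters, the number of free Tx
 * blocks (re-arming or cancelling the Tx watchdog accordingly), AP link
 * PS state and the host/chipset time offset.
 */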
380 static int wlcore_fw_status(struct wl1271 *wl,
381 struct wl_fw_status_1 *status_1,
382 struct wl_fw_status_2 *status_2)
383 {
384 struct wl12xx_vif *wlvif;
385 struct timespec ts;
386 u32 old_tx_blk_count = wl->tx_blocks_available;
387 int avail, freed_blocks;
388 int i;
389 size_t status_len;
390 int ret;
391 struct wl1271_link *lnk;
392
393 status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
394 sizeof(*status_2) + wl->fw_status_priv_len;
395
396 ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
397 status_len, false);
398 if (ret < 0)
399 return ret;
400
401 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
403 status_1->intr,
404 status_1->fw_rx_counter,
405 status_1->drv_rx_counter,
406 status_1->tx_results_counter);
407
408 for (i = 0; i < NUM_TX_QUEUES; i++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl->tx_allocated_pkts[i] -=
411 (status_2->counters.tx_released_pkts[i] -
412 wl->tx_pkts_freed[i]) & 0xff;
413
414 wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
415 }
416
417
418 for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
419 lnk = &wl->links[i];
420 /* prevent wrap-around in freed-packets counter */
421 lnk->allocated_pkts -=
422 (status_2->counters.tx_lnk_free_pkts[i] -
423 lnk->prev_freed_pkts) & 0xff;
424
425 lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
426 }
427
428 /* prevent wrap-around in total blocks counter */
429 if (likely(wl->tx_blocks_freed <=
430 le32_to_cpu(status_2->total_released_blks)))
431 freed_blocks = le32_to_cpu(status_2->total_released_blks) -
432 wl->tx_blocks_freed;
433 else
434 freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
435 le32_to_cpu(status_2->total_released_blks);
436
437 wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
438
439 wl->tx_allocated_blocks -= freed_blocks;
440
441 /*
442 * If the FW freed some blocks:
443 * If we still have allocated blocks - re-arm the timer, Tx is
444 * not stuck. Otherwise, cancel the timer (no Tx currently).
445 */
446 if (freed_blocks) {
447 if (wl->tx_allocated_blocks)
448 wl12xx_rearm_tx_watchdog_locked(wl);
449 else
450 cancel_delayed_work(&wl->tx_watchdog_work);
451 }
452
453 avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
454
455 /*
456 * The FW might change the total number of TX memblocks before
457 * we get a notification about blocks being released. Thus, the
458 * available blocks calculation might yield a temporary result
459 * which is lower than the actual available blocks. Keeping in
460 * mind that only blocks that were allocated can be moved from
461 * TX to RX, tx_blocks_available should never decrease here.
462 */
463 wl->tx_blocks_available = max((int)wl->tx_blocks_available,
464 avail);
465
466 /* if more blocks are available now, tx work can be scheduled */
467 if (wl->tx_blocks_available > old_tx_blk_count)
468 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
469
470 /* for AP update num of allocated TX blocks per link and ps status */
471 wl12xx_for_each_wlvif_ap(wl, wlvif) {
472 wl12xx_irq_update_links_status(wl, wlvif, status_2);
473 }
474
475 /* update the host-chipset time offset */
476 getnstimeofday(&ts);
477 wl->time_offset = (timespec_to_ns(&ts) >> 10) -
478 (s64)le32_to_cpu(status_2->fw_localtime);
479
480 wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
481
482 return 0;
483 }
484
485 static void wl1271_flush_deferred_work(struct wl1271 *wl)
486 {
487 struct sk_buff *skb;
488
489 /* Pass all received frames to the network stack */
490 while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
491 ieee80211_rx_ni(wl->hw, skb);
492
493 /* Return sent skbs to the network stack */
494 while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
495 ieee80211_tx_status_ni(wl->hw, skb);
496 }
497
498 static void wl1271_netstack_work(struct work_struct *work)
499 {
500 struct wl1271 *wl =
501 container_of(work, struct wl1271, netstack_work);
502
503 do {
504 wl1271_flush_deferred_work(wl);
505 } while (skb_queue_len(&wl->deferred_rx_queue));
506 }
507
508 #define WL1271_IRQ_MAX_LOOPS 256
509
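/*
 * Main interrupt processing loop, called with wl->mutex held. Reads the
 * FW status, handles RX/TX completions and firmware events, and loops
 * until no interrupt bits remain (at most WL1271_IRQ_MAX_LOOPS times,
 * or once on edge-triggered IRQ platforms). Returns a negative error
 * to request a recovery.
 */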
510 static int wlcore_irq_locked(struct wl1271 *wl)
511 {
512 int ret = 0;
513 u32 intr;
514 int loopcount = WL1271_IRQ_MAX_LOOPS;
515 bool done = false;
516 unsigned int defer_count;
517 unsigned long flags;
518
519 /*
520 * In case edge triggered interrupt must be used, we cannot iterate
521 * more than once without introducing race conditions with the hardirq.
522 */
523 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
524 loopcount = 1;
525
526 wl1271_debug(DEBUG_IRQ, "IRQ work");
527
528 if (unlikely(wl->state != WLCORE_STATE_ON))
529 goto out;
530
531 ret = wl1271_ps_elp_wakeup(wl);
532 if (ret < 0)
533 goto out;
534
535 while (!done && loopcount--) {
536 /*
537 * In order to avoid a race with the hardirq, clear the flag
538 * before acknowledging the chip. Since the mutex is held,
539 * wl1271_ps_elp_wakeup cannot be called concurrently.
540 */
541 clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
542 smp_mb__after_clear_bit();
543
544 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
545 if (ret < 0)
546 goto out;
547
548 wlcore_hw_tx_immediate_compl(wl);
549
550 intr = le32_to_cpu(wl->fw_status_1->intr);
551 intr &= WLCORE_ALL_INTR_MASK;
552 if (!intr) {
553 done = true;
554 continue;
555 }
556
557 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
558 wl1271_error("HW watchdog interrupt received! starting recovery.");
559 wl->watchdog_recovery = true;
560 ret = -EIO;
561
562 /* restarting the chip. ignore any other interrupt. */
563 goto out;
564 }
565
566 if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
567 wl1271_error("SW watchdog interrupt received! "
568 "starting recovery.");
569 wl->watchdog_recovery = true;
570 ret = -EIO;
571
572 /* restarting the chip. ignore any other interrupt. */
573 goto out;
574 }
575
576 if (likely(intr & WL1271_ACX_INTR_DATA)) {
577 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
578
579 ret = wlcore_rx(wl, wl->fw_status_1);
580 if (ret < 0)
581 goto out;
582
583 /* Check if any tx blocks were freed */
584 spin_lock_irqsave(&wl->wl_lock, flags);
585 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
586 wl1271_tx_total_queue_count(wl) > 0) {
587 spin_unlock_irqrestore(&wl->wl_lock, flags);
588 /*
589 * In order to avoid starvation of the TX path,
590 * call the work function directly.
591 */
592 ret = wlcore_tx_work_locked(wl);
593 if (ret < 0)
594 goto out;
595 } else {
596 spin_unlock_irqrestore(&wl->wl_lock, flags);
597 }
598
599 /* check for tx results */
600 ret = wlcore_hw_tx_delayed_compl(wl);
601 if (ret < 0)
602 goto out;
603
604 /* Make sure the deferred queues don't get too long */
605 defer_count = skb_queue_len(&wl->deferred_tx_queue) +
606 skb_queue_len(&wl->deferred_rx_queue);
607 if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
608 wl1271_flush_deferred_work(wl);
609 }
610
611 if (intr & WL1271_ACX_INTR_EVENT_A) {
612 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
613 ret = wl1271_event_handle(wl, 0);
614 if (ret < 0)
615 goto out;
616 }
617
618 if (intr & WL1271_ACX_INTR_EVENT_B) {
619 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
620 ret = wl1271_event_handle(wl, 1);
621 if (ret < 0)
622 goto out;
623 }
624
625 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
626 wl1271_debug(DEBUG_IRQ,
627 "WL1271_ACX_INTR_INIT_COMPLETE");
628
629 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
630 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
631 }
632
633 wl1271_ps_elp_sleep(wl);
634
635 out:
636 return ret;
637 }
638
639 static irqreturn_t wlcore_irq(int irq, void *cookie)
640 {
641 int ret;
642 unsigned long flags;
643 struct wl1271 *wl = cookie;
644
645 /* TX might be handled here, avoid redundant work */
646 set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
647 cancel_work_sync(&wl->tx_work);
648
649 mutex_lock(&wl->mutex);
650
651 ret = wlcore_irq_locked(wl);
652 if (ret)
653 wl12xx_queue_recovery_work(wl);
654
655 spin_lock_irqsave(&wl->wl_lock, flags);
656 /* In case TX was not handled here, queue TX work */
657 clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
658 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
659 wl1271_tx_total_queue_count(wl) > 0)
660 ieee80211_queue_work(wl->hw, &wl->tx_work);
661 spin_unlock_irqrestore(&wl->wl_lock, flags);
662
663 mutex_unlock(&wl->mutex);
664
665 return IRQ_HANDLED;
666 }
667
668 struct vif_counter_data {
669 u8 counter;
670
671 struct ieee80211_vif *cur_vif;
672 bool cur_vif_running;
673 };
674
675 static void wl12xx_vif_count_iter(void *data, u8 *mac,
676 struct ieee80211_vif *vif)
677 {
678 struct vif_counter_data *counter = data;
679
680 counter->counter++;
681 if (counter->cur_vif == vif)
682 counter->cur_vif_running = true;
683 }
684
685 /* caller must not hold wl->mutex, as it might deadlock */
686 static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
687 struct ieee80211_vif *cur_vif,
688 struct vif_counter_data *data)
689 {
690 memset(data, 0, sizeof(*data));
691 data->cur_vif = cur_vif;
692
693 ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
694 wl12xx_vif_count_iter, data);
695 }
696
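/*
 * Request and cache the firmware image appropriate for the current
 * mode: PLT, multi-role (more than one vif) or single-role. If the
 * right image type is already loaded this is a no-op.
 */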
697 static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
698 {
699 const struct firmware *fw;
700 const char *fw_name;
701 enum wl12xx_fw_type fw_type;
702 int ret;
703
704 if (plt) {
705 fw_type = WL12XX_FW_TYPE_PLT;
706 fw_name = wl->plt_fw_name;
707 } else {
708 /*
709 * we can't call wl12xx_get_vif_count() here because
710 * wl->mutex is taken, so use the cached last_vif_count value
711 */
712 if (wl->last_vif_count > 1 && wl->mr_fw_name) {
713 fw_type = WL12XX_FW_TYPE_MULTI;
714 fw_name = wl->mr_fw_name;
715 } else {
716 fw_type = WL12XX_FW_TYPE_NORMAL;
717 fw_name = wl->sr_fw_name;
718 }
719 }
720
721 if (wl->fw_type == fw_type)
722 return 0;
723
724 wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
725
726 ret = request_firmware(&fw, fw_name, wl->dev);
727
728 if (ret < 0) {
729 wl1271_error("could not get firmware %s: %d", fw_name, ret);
730 return ret;
731 }
732
733 if (fw->size % 4) {
734 wl1271_error("firmware size is not a multiple of 32 bits: %zu",
735 fw->size);
736 ret = -EILSEQ;
737 goto out;
738 }
739
740 vfree(wl->fw);
741 wl->fw_type = WL12XX_FW_TYPE_NONE;
742 wl->fw_len = fw->size;
743 wl->fw = vmalloc(wl->fw_len);
744
745 if (!wl->fw) {
746 wl1271_error("could not allocate memory for the firmware");
747 ret = -ENOMEM;
748 goto out;
749 }
750
751 memcpy(wl->fw, fw->data, wl->fw_len);
752 ret = 0;
753 wl->fw_type = fw_type;
754 out:
755 release_firmware(fw);
756
757 return ret;
758 }
759
760 void wl12xx_queue_recovery_work(struct wl1271 *wl)
761 {
762 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
763
764 /* Avoid a recursive recovery */
765 if (wl->state == WLCORE_STATE_ON) {
766 wl->state = WLCORE_STATE_RESTARTING;
767 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
768 wlcore_disable_interrupts_nosync(wl);
769 ieee80211_queue_work(wl->hw, &wl->recovery_work);
770 }
771 }
772
773 size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
774 {
775 size_t len = 0;
776
777 /* The FW log is a length-value list, find where the log ends */
778 while (len < maxlen) {
779 if (memblock[len] == 0)
780 break;
781 if (len + memblock[len] + 1 > maxlen)
782 break;
783 len += memblock[len] + 1;
784 }
785
786 /* Make sure we have enough room */
787 len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));
788
789 /* Fill the FW log file, consumed by the sysfs fwlog entry */
790 memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
791 wl->fwlog_size += len;
792
793 return len;
794 }
795
796 #define WLCORE_FW_LOG_END 0x2000000
797
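/*
 * On a firmware panic, walk the linked list of FW log memory blocks in
 * chip memory and append their contents to wl->fwlog, then wake any
 * reader waiting on the sysfs fwlog entry.
 */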
798 static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
799 {
800 u32 addr;
801 u32 offset;
802 u32 end_of_log;
803 u8 *block;
804 int ret;
805
806 if ((wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
807 (wl->conf.fwlog.mem_blocks == 0))
808 return;
809
810 wl1271_info("Reading FW panic log");
811
812 block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
813 if (!block)
814 return;
815
816 /*
817 * Make sure the chip is awake and the logger isn't active.
818 * Do not send a stop fwlog command if the fw is hanged or if
819 * dbgpins are used (due to some fw bug).
820 */
821 if (wl1271_ps_elp_wakeup(wl))
822 goto out;
823 if (!wl->watchdog_recovery &&
824 wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
825 wl12xx_cmd_stop_fwlog(wl);
826
827 /* Read the first memory block address */
828 ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
829 if (ret < 0)
830 goto out;
831
832 addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
833 if (!addr)
834 goto out;
835
836 if (wl->conf.fwlog.mode == WL12XX_FWLOG_CONTINUOUS) {
837 offset = sizeof(addr) + sizeof(struct wl1271_rx_descriptor);
838 end_of_log = WLCORE_FW_LOG_END;
839 } else {
840 offset = sizeof(addr);
841 end_of_log = addr;
842 }
843
844 /* Traverse the memory blocks linked list */
845 do {
846 memset(block, 0, WL12XX_HW_BLOCK_SIZE);
847 ret = wlcore_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
848 false);
849 if (ret < 0)
850 goto out;
851
852 /*
853 * Memory blocks are linked to one another. The first 4 bytes
854 * of each memory block hold the hardware address of the next
855 * one. The last memory block points to the first one in
856 * on demand mode and is equal to 0x2000000 in continuous mode.
857 */
858 addr = le32_to_cpup((__le32 *)block);
859 if (!wl12xx_copy_fwlog(wl, block + offset,
860 WL12XX_HW_BLOCK_SIZE - offset))
861 break;
862 } while (addr && (addr != end_of_log));
863
864 wake_up_interruptible(&wl->fwlog_waitq);
865
866 out:
867 kfree(block);
868 }
869
870 static void wlcore_print_recovery(struct wl1271 *wl)
871 {
872 u32 pc = 0;
873 u32 hint_sts = 0;
874 int ret;
875
876 wl1271_info("Hardware recovery in progress. FW ver: %s",
877 wl->chip.fw_ver_str);
878
879 /* change partitions momentarily so we can read the FW pc */
880 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
881 if (ret < 0)
882 return;
883
884 ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
885 if (ret < 0)
886 return;
887
888 ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
889 if (ret < 0)
890 return;
891
892 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
893 pc, hint_sts, ++wl->recovery_count);
894
895 wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
896 }
897
898
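/*
 * Recovery work: dump the FW panic log and recovery info, bump the Tx
 * security sequence numbers, tear down all vifs, stop the hardware and
 * ask mac80211 to restart it. Honors the bug_on_recovery and
 * no_recovery settings.
 */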
899 static void wl1271_recovery_work(struct work_struct *work)
900 {
901 struct wl1271 *wl =
902 container_of(work, struct wl1271, recovery_work);
903 struct wl12xx_vif *wlvif;
904 struct ieee80211_vif *vif;
905
906 mutex_lock(&wl->mutex);
907
908 if (wl->state == WLCORE_STATE_OFF || wl->plt)
909 goto out_unlock;
910
911 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
912 wl12xx_read_fwlog_panic(wl);
913 wlcore_print_recovery(wl);
914 }
915
916 BUG_ON(wl->conf.recovery.bug_on_recovery &&
917 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
918
919 if (wl->conf.recovery.no_recovery) {
920 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
921 goto out_unlock;
922 }
923
924 /*
925 * Advance security sequence number to overcome potential progress
926 * in the firmware during recovery. This doesn't hurt if the network is
927 * not encrypted.
928 */
929 wl12xx_for_each_wlvif(wl, wlvif) {
930 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
931 test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
932 wlvif->tx_security_seq +=
933 WL1271_TX_SQN_POST_RECOVERY_PADDING;
934 }
935
936 /* Prevent spurious TX during FW restart */
937 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
938
939 /* reboot the chipset */
940 while (!list_empty(&wl->wlvif_list)) {
941 wlvif = list_first_entry(&wl->wlvif_list,
942 struct wl12xx_vif, list);
943 vif = wl12xx_wlvif_to_vif(wlvif);
944 __wl1271_op_remove_interface(wl, vif, false);
945 }
946
947 wlcore_op_stop_locked(wl);
948
949 ieee80211_restart_hw(wl->hw);
950
951 /*
952 * It's safe to enable TX now - the queues are stopped after a request
953 * to restart the HW.
954 */
955 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
956
957 out_unlock:
958 wl->watchdog_recovery = false;
959 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
960 mutex_unlock(&wl->mutex);
961 }
962
963 static int wlcore_fw_wakeup(struct wl1271 *wl)
964 {
965 return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
966 }
967
968 static int wl1271_setup(struct wl1271 *wl)
969 {
970 wl->fw_status_1 = kmalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
971 sizeof(*wl->fw_status_2) +
972 wl->fw_status_priv_len, GFP_KERNEL);
973 if (!wl->fw_status_1)
974 return -ENOMEM;
975
976 wl->fw_status_2 = (struct wl_fw_status_2 *)
977 (((u8 *) wl->fw_status_1) +
978 WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
979
980 wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
981 if (!wl->tx_res_if) {
982 kfree(wl->fw_status_1);
983 return -ENOMEM;
984 }
985
986 return 0;
987 }
988
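/*
 * Power on and reset the chip, switch to the boot partition and wake
 * the firmware core out of ELP. On failure the chip is powered off
 * again.
 */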
989 static int wl12xx_set_power_on(struct wl1271 *wl)
990 {
991 int ret;
992
993 msleep(WL1271_PRE_POWER_ON_SLEEP);
994 ret = wl1271_power_on(wl);
995 if (ret < 0)
996 goto out;
997 msleep(WL1271_POWER_ON_SLEEP);
998 wl1271_io_reset(wl);
999 wl1271_io_init(wl);
1000
1001 ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
1002 if (ret < 0)
1003 goto fail;
1004
1005 /* ELP module wake up */
1006 ret = wlcore_fw_wakeup(wl);
1007 if (ret < 0)
1008 goto fail;
1009
1010 out:
1011 return ret;
1012
1013 fail:
1014 wl1271_power_off(wl);
1015 return ret;
1016 }
1017
1018 static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
1019 {
1020 int ret = 0;
1021
1022 ret = wl12xx_set_power_on(wl);
1023 if (ret < 0)
1024 goto out;
1025
1026 /*
1027 * For wl127x based devices we could use the default block
1028 * size (512 bytes), but due to a bug in the sdio driver, we
1029 * need to set it explicitly after the chip is powered on. To
1030 * simplify the code and since the performance impact is
1031 * negligible, we use the same block size for all different
1032 * chip types.
1033 *
1034 * Check if the bus supports blocksize alignment and, if it
1035 * doesn't, make sure we don't have the quirk.
1036 */
1037 if (!wl1271_set_block_size(wl))
1038 wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
1039
1040 /* TODO: make sure the lower driver has set things up correctly */
1041
1042 ret = wl1271_setup(wl);
1043 if (ret < 0)
1044 goto out;
1045
1046 ret = wl12xx_fetch_firmware(wl, plt);
1047 if (ret < 0)
1048 goto out;
1049
1050 out:
1051 return ret;
1052 }
1053
1054 int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
1055 {
1056 int retries = WL1271_BOOT_RETRIES;
1057 struct wiphy *wiphy = wl->hw->wiphy;
1058
1059 static const char* const PLT_MODE[] = {
1060 "PLT_OFF",
1061 "PLT_ON",
1062 "PLT_FEM_DETECT"
1063 };
1064
1065 int ret;
1066
1067 mutex_lock(&wl->mutex);
1068
1069 wl1271_notice("power up");
1070
1071 if (wl->state != WLCORE_STATE_OFF) {
1072 wl1271_error("cannot go into PLT state because not "
1073 "in off state: %d", wl->state);
1074 ret = -EBUSY;
1075 goto out;
1076 }
1077
1078 /* Indicate to lower levels that we are now in PLT mode */
1079 wl->plt = true;
1080 wl->plt_mode = plt_mode;
1081
1082 while (retries) {
1083 retries--;
1084 ret = wl12xx_chip_wakeup(wl, true);
1085 if (ret < 0)
1086 goto power_off;
1087
1088 ret = wl->ops->plt_init(wl);
1089 if (ret < 0)
1090 goto power_off;
1091
1092 wl->state = WLCORE_STATE_ON;
1093 wl1271_notice("firmware booted in PLT mode %s (%s)",
1094 PLT_MODE[plt_mode],
1095 wl->chip.fw_ver_str);
1096
1097 /* update hw/fw version info in wiphy struct */
1098 wiphy->hw_version = wl->chip.id;
1099 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
1100 sizeof(wiphy->fw_version));
1101
1102 goto out;
1103
1104 power_off:
1105 wl1271_power_off(wl);
1106 }
1107
1108 wl->plt = false;
1109 wl->plt_mode = PLT_OFF;
1110
1111 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1112 WL1271_BOOT_RETRIES);
1113 out:
1114 mutex_unlock(&wl->mutex);
1115
1116 return ret;
1117 }
1118
1119 int wl1271_plt_stop(struct wl1271 *wl)
1120 {
1121 int ret = 0;
1122
1123 wl1271_notice("power down");
1124
1125 /*
1126 * Interrupts must be disabled before setting the state to OFF.
1127 * Otherwise, the interrupt handler might be called and exit without
1128 * reading the interrupt status.
1129 */
1130 wlcore_disable_interrupts(wl);
1131 mutex_lock(&wl->mutex);
1132 if (!wl->plt) {
1133 mutex_unlock(&wl->mutex);
1134
1135 /*
1136 * This will not necessarily enable interrupts as interrupts
1137 * may have been disabled when op_stop was called. It will,
1138 * however, balance the above call to disable_interrupts().
1139 */
1140 wlcore_enable_interrupts(wl);
1141
1142 wl1271_error("cannot power down because not in PLT "
1143 "state: %d", wl->state);
1144 ret = -EBUSY;
1145 goto out;
1146 }
1147
1148 mutex_unlock(&wl->mutex);
1149
1150 wl1271_flush_deferred_work(wl);
1151 cancel_work_sync(&wl->netstack_work);
1152 cancel_work_sync(&wl->recovery_work);
1153 cancel_delayed_work_sync(&wl->elp_work);
1154 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1155
1156 mutex_lock(&wl->mutex);
1157 wl1271_power_off(wl);
1158 wl->flags = 0;
1159 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1160 wl->state = WLCORE_STATE_OFF;
1161 wl->plt = false;
1162 wl->plt_mode = PLT_OFF;
1163 wl->rx_counter = 0;
1164 mutex_unlock(&wl->mutex);
1165
1166 out:
1167 return ret;
1168 }
1169
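/*
 * mac80211 .tx handler: map the skb to a link (hlid) and AC queue,
 * queue it on the per-link Tx queue, apply the high-watermark flow
 * control, and kick tx_work unless the FW is busy or Tx is already
 * being handled in the IRQ path.
 */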
1170 static void wl1271_op_tx(struct ieee80211_hw *hw,
1171 struct ieee80211_tx_control *control,
1172 struct sk_buff *skb)
1173 {
1174 struct wl1271 *wl = hw->priv;
1175 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1176 struct ieee80211_vif *vif = info->control.vif;
1177 struct wl12xx_vif *wlvif = NULL;
1178 unsigned long flags;
1179 int q, mapping;
1180 u8 hlid;
1181
1182 if (!vif) {
1183 wl1271_debug(DEBUG_TX, "DROP skb with no vif");
1184 ieee80211_free_txskb(hw, skb);
1185 return;
1186 }
1187
1188 wlvif = wl12xx_vif_to_data(vif);
1189 mapping = skb_get_queue_mapping(skb);
1190 q = wl1271_tx_get_queue(mapping);
1191
1192 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
1193
1194 spin_lock_irqsave(&wl->wl_lock, flags);
1195
1196 /*
1197 * drop the packet if the link is invalid or the queue is stopped
1198 * for any reason but watermark. Watermark is a "soft"-stop so we
1199 * allow these packets through.
1200 */
1201 if (hlid == WL12XX_INVALID_LINK_ID ||
1202 (!test_bit(hlid, wlvif->links_map)) ||
1203 (wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
1204 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1205 WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
1206 wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
1207 ieee80211_free_txskb(hw, skb);
1208 goto out;
1209 }
1210
1211 wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
1212 hlid, q, skb->len);
1213 skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
1214
1215 wl->tx_queue_count[q]++;
1216 wlvif->tx_queue_count[q]++;
1217
1218 /*
1219 * The workqueue is slow to process the tx_queue and we need to stop
1220 * the queue here, otherwise the queue will get too long.
1221 */
1222 if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
1223 !wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
1224 WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
1225 wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
1226 wlcore_stop_queue_locked(wl, wlvif, q,
1227 WLCORE_QUEUE_STOP_REASON_WATERMARK);
1228 }
1229
1230 /*
1231 * The chip specific setup must run before the first TX packet -
1232 * before that, the tx_work will not be initialized!
1233 */
1234
1235 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
1236 !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
1237 ieee80211_queue_work(wl->hw, &wl->tx_work);
1238
1239 out:
1240 spin_unlock_irqrestore(&wl->wl_lock, flags);
1241 }
1242
1243 int wl1271_tx_dummy_packet(struct wl1271 *wl)
1244 {
1245 unsigned long flags;
1246 int q;
1247
1248 /* no need to queue a new dummy packet if one is already pending */
1249 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
1250 return 0;
1251
1252 q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
1253
1254 spin_lock_irqsave(&wl->wl_lock, flags);
1255 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
1256 wl->tx_queue_count[q]++;
1257 spin_unlock_irqrestore(&wl->wl_lock, flags);
1258
1259 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1260 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
1261 return wlcore_tx_work_locked(wl);
1262
1263 /*
1264 * If the FW TX is busy, TX work will be scheduled by the threaded
1265 * interrupt handler function
1266 */
1267 return 0;
1268 }
1269
1270 /*
1271 * The size of the dummy packet should be at least 1400 bytes. However, in
1272 * order to minimize the number of bus transactions, aligning it to 512-byte
1273 * boundaries could be beneficial, performance-wise.
1274 */
1275 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1276
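/*
 * Build the dummy packet: a zero-padded nullfunc data frame that is
 * sent to the firmware (see wl1271_tx_dummy_packet) when it runs low
 * on RX memory blocks.
 */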
1277 static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
1278 {
1279 struct sk_buff *skb;
1280 struct ieee80211_hdr_3addr *hdr;
1281 unsigned int dummy_packet_size;
1282
1283 dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
1284 sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
1285
1286 skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
1287 if (!skb) {
1288 wl1271_warning("Failed to allocate a dummy packet skb");
1289 return NULL;
1290 }
1291
1292 skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
1293
1294 hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr));
1295 memset(hdr, 0, sizeof(*hdr));
1296 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
1297 IEEE80211_STYPE_NULLFUNC |
1298 IEEE80211_FCTL_TODS);
1299
1300 memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size);
1301
1302 /* Dummy packets require the TID to be management */
1303 skb->priority = WL1271_TID_MGMT;
1304
1305 /* Initialize all fields that might be used */
1306 skb_set_queue_mapping(skb, 0);
1307 memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
1308
1309 return skb;
1310 }
1311
1312
1313 #ifdef CONFIG_PM
1314 static int
1315 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern *p)
1316 {
1317 int num_fields = 0, in_field = 0, fields_size = 0;
1318 int i, pattern_len = 0;
1319
1320 if (!p->mask) {
1321 wl1271_warning("No mask in WoWLAN pattern");
1322 return -EINVAL;
1323 }
1324
1325 /*
1326 * The pattern is broken up into segments of bytes at different offsets
1327 * that need to be checked by the FW filter. Each segment is called
1328 * a field in the FW API. We verify that the total number of fields
1329 * required for this pattern won't exceed FW limits (8)
1330 * and that the total fields buffer won't exceed the FW limit.
1331 * Note that if there's a pattern which crosses Ethernet/IP header
1332 * boundary a new field is required.
1333 */
1334 for (i = 0; i < p->pattern_len; i++) {
1335 if (test_bit(i, (unsigned long *)p->mask)) {
1336 if (!in_field) {
1337 in_field = 1;
1338 pattern_len = 1;
1339 } else {
1340 if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1341 num_fields++;
1342 fields_size += pattern_len +
1343 RX_FILTER_FIELD_OVERHEAD;
1344 pattern_len = 1;
1345 } else
1346 pattern_len++;
1347 }
1348 } else {
1349 if (in_field) {
1350 in_field = 0;
1351 fields_size += pattern_len +
1352 RX_FILTER_FIELD_OVERHEAD;
1353 num_fields++;
1354 }
1355 }
1356 }
1357
1358 if (in_field) {
1359 fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
1360 num_fields++;
1361 }
1362
1363 if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
1364 wl1271_warning("RX Filter too complex. Too many segments");
1365 return -EINVAL;
1366 }
1367
1368 if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
1369 wl1271_warning("RX filter pattern is too big");
1370 return -E2BIG;
1371 }
1372
1373 return 0;
1374 }
1375
1376 struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
1377 {
1378 return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
1379 }
1380
1381 void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
1382 {
1383 int i;
1384
1385 if (filter == NULL)
1386 return;
1387
1388 for (i = 0; i < filter->num_fields; i++)
1389 kfree(filter->fields[i].pattern);
1390
1391 kfree(filter);
1392 }
1393
1394 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
1395 u16 offset, u8 flags,
1396 u8 *pattern, u8 len)
1397 {
1398 struct wl12xx_rx_filter_field *field;
1399
1400 if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
1401 wl1271_warning("Max fields per RX filter. can't alloc another");
1402 return -EINVAL;
1403 }
1404
1405 field = &filter->fields[filter->num_fields];
1406
1407 field->pattern = kzalloc(len, GFP_KERNEL);
1408 if (!field->pattern) {
1409 wl1271_warning("Failed to allocate RX filter pattern");
1410 return -ENOMEM;
1411 }
1412
1413 filter->num_fields++;
1414
1415 field->offset = cpu_to_le16(offset);
1416 field->flags = flags;
1417 field->len = len;
1418 memcpy(field->pattern, pattern, len);
1419
1420 return 0;
1421 }
1422
1423 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
1424 {
1425 int i, fields_size = 0;
1426
1427 for (i = 0; i < filter->num_fields; i++)
1428 fields_size += filter->fields[i].len +
1429 sizeof(struct wl12xx_rx_filter_field) -
1430 sizeof(u8 *);
1431
1432 return fields_size;
1433 }
1434
1435 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
1436 u8 *buf)
1437 {
1438 int i;
1439 struct wl12xx_rx_filter_field *field;
1440
1441 for (i = 0; i < filter->num_fields; i++) {
1442 field = (struct wl12xx_rx_filter_field *)buf;
1443
1444 field->offset = filter->fields[i].offset;
1445 field->flags = filter->fields[i].flags;
1446 field->len = filter->fields[i].len;
1447
1448 memcpy(&field->pattern, filter->fields[i].pattern, field->len);
1449 buf += sizeof(struct wl12xx_rx_filter_field) -
1450 sizeof(u8 *) + field->len;
1451 }
1452 }
1453
1454 /*
1455 * Allocates an RX filter, returned through f, which must be freed
1456 * with wl1271_rx_filter_free()
1457 */
1458 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1459 struct cfg80211_wowlan_trig_pkt_pattern *p,
1460 struct wl12xx_rx_filter **f)
1461 {
1462 int i, j, ret = 0;
1463 struct wl12xx_rx_filter *filter;
1464 u16 offset;
1465 u8 flags, len;
1466
1467 filter = wl1271_rx_filter_alloc();
1468 if (!filter) {
1469 wl1271_warning("Failed to alloc rx filter");
1470 ret = -ENOMEM;
1471 goto err;
1472 }
1473
1474 i = 0;
1475 while (i < p->pattern_len) {
1476 if (!test_bit(i, (unsigned long *)p->mask)) {
1477 i++;
1478 continue;
1479 }
1480
1481 for (j = i; j < p->pattern_len; j++) {
1482 if (!test_bit(j, (unsigned long *)p->mask))
1483 break;
1484
1485 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
1486 j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
1487 break;
1488 }
1489
1490 if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
1491 offset = i;
1492 flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
1493 } else {
1494 offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
1495 flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
1496 }
1497
1498 len = j - i;
1499
1500 ret = wl1271_rx_filter_alloc_field(filter,
1501 offset,
1502 flags,
1503 &p->pattern[i], len);
1504 if (ret)
1505 goto err;
1506
1507 i = j;
1508 }
1509
1510 filter->action = FILTER_SIGNAL;
1511
1512 *f = filter;
1513 return 0;
1514
1515 err:
1516 wl1271_rx_filter_free(filter);
1517 *f = NULL;
1518
1519 return ret;
1520 }
1521
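/*
 * Program the firmware RX filters for WoWLAN: with no patterns (or an
 * "any" wakeup) simply clear all filters and signal every frame;
 * otherwise validate each cfg80211 pattern, convert it to an RX filter,
 * enable it, and make dropping the default action.
 */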
1522 static int wl1271_configure_wowlan(struct wl1271 *wl,
1523 struct cfg80211_wowlan *wow)
1524 {
1525 int i, ret;
1526
1527 if (!wow || wow->any || !wow->n_patterns) {
1528 ret = wl1271_acx_default_rx_filter_enable(wl, 0,
1529 FILTER_SIGNAL);
1530 if (ret)
1531 goto out;
1532
1533 ret = wl1271_rx_filter_clear_all(wl);
1534 if (ret)
1535 goto out;
1536
1537 return 0;
1538 }
1539
1540 if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
1541 return -EINVAL;
1542
1543 /* Validate all incoming patterns before clearing current FW state */
1544 for (i = 0; i < wow->n_patterns; i++) {
1545 ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
1546 if (ret) {
1547 wl1271_warning("Bad wowlan pattern %d", i);
1548 return ret;
1549 }
1550 }
1551
1552 ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
1553 if (ret)
1554 goto out;
1555
1556 ret = wl1271_rx_filter_clear_all(wl);
1557 if (ret)
1558 goto out;
1559
1560 /* Translate WoWLAN patterns into filters */
1561 for (i = 0; i < wow->n_patterns; i++) {
1562 struct cfg80211_wowlan_trig_pkt_pattern *p;
1563 struct wl12xx_rx_filter *filter = NULL;
1564
1565 p = &wow->patterns[i];
1566
1567 ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
1568 if (ret) {
1569 wl1271_warning("Failed to create an RX filter from "
1570 "wowlan pattern %d", i);
1571 goto out;
1572 }
1573
1574 ret = wl1271_rx_filter_enable(wl, i, 1, filter);
1575
1576 wl1271_rx_filter_free(filter);
1577 if (ret)
1578 goto out;
1579 }
1580
1581 ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
1582
1583 out:
1584 return ret;
1585 }
1586
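/*
 * Suspend path for a STA vif: program the WoWLAN filters and, if they
 * differ from the normal settings, switch to the suspend wake-up
 * conditions and listen interval.
 */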
1587 static int wl1271_configure_suspend_sta(struct wl1271 *wl,
1588 struct wl12xx_vif *wlvif,
1589 struct cfg80211_wowlan *wow)
1590 {
1591 int ret = 0;
1592
1593 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1594 goto out;
1595
1596 ret = wl1271_ps_elp_wakeup(wl);
1597 if (ret < 0)
1598 goto out;
1599
1600 ret = wl1271_configure_wowlan(wl, wow);
1601 if (ret < 0)
1602 goto out_sleep;
1603
1604 if ((wl->conf.conn.suspend_wake_up_event ==
1605 wl->conf.conn.wake_up_event) &&
1606 (wl->conf.conn.suspend_listen_interval ==
1607 wl->conf.conn.listen_interval))
1608 goto out_sleep;
1609
1610 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1611 wl->conf.conn.suspend_wake_up_event,
1612 wl->conf.conn.suspend_listen_interval);
1613
1614 if (ret < 0)
1615 wl1271_error("suspend: set wake up conditions failed: %d", ret);
1616
1617 out_sleep:
1618 wl1271_ps_elp_sleep(wl);
1619 out:
1620 return ret;
1621
1622 }
1623
1624 static int wl1271_configure_suspend_ap(struct wl1271 *wl,
1625 struct wl12xx_vif *wlvif)
1626 {
1627 int ret = 0;
1628
1629 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
1630 goto out;
1631
1632 ret = wl1271_ps_elp_wakeup(wl);
1633 if (ret < 0)
1634 goto out;
1635
1636 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
1637
1638 wl1271_ps_elp_sleep(wl);
1639 out:
1640 return ret;
1641
1642 }
1643
1644 static int wl1271_configure_suspend(struct wl1271 *wl,
1645 struct wl12xx_vif *wlvif,
1646 struct cfg80211_wowlan *wow)
1647 {
1648 if (wlvif->bss_type == BSS_TYPE_STA_BSS)
1649 return wl1271_configure_suspend_sta(wl, wlvif, wow);
1650 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
1651 return wl1271_configure_suspend_ap(wl, wlvif);
1652 return 0;
1653 }
1654
1655 static void wl1271_configure_resume(struct wl1271 *wl,
1656 struct wl12xx_vif *wlvif)
1657 {
1658 int ret = 0;
1659 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
1660 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
1661
1662 if ((!is_ap) && (!is_sta))
1663 return;
1664
1665 if (is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
1666 return;
1667
1668 ret = wl1271_ps_elp_wakeup(wl);
1669 if (ret < 0)
1670 return;
1671
1672 if (is_sta) {
1673 wl1271_configure_wowlan(wl, NULL);
1674
1675 if ((wl->conf.conn.suspend_wake_up_event ==
1676 wl->conf.conn.wake_up_event) &&
1677 (wl->conf.conn.suspend_listen_interval ==
1678 wl->conf.conn.listen_interval))
1679 goto out_sleep;
1680
1681 ret = wl1271_acx_wake_up_conditions(wl, wlvif,
1682 wl->conf.conn.wake_up_event,
1683 wl->conf.conn.listen_interval);
1684
1685 if (ret < 0)
1686 wl1271_error("resume: wake up conditions failed: %d",
1687 ret);
1688
1689 } else if (is_ap) {
1690 ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
1691 }
1692
1693 out_sleep:
1694 wl1271_ps_elp_sleep(wl);
1695 }
1696
1697 static int wl1271_op_suspend(struct ieee80211_hw *hw,
1698 struct cfg80211_wowlan *wow)
1699 {
1700 struct wl1271 *wl = hw->priv;
1701 struct wl12xx_vif *wlvif;
1702 int ret;
1703
1704 wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
1705 WARN_ON(!wow);
1706
1707 /* we want to perform the recovery before suspending */
1708 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
1709 wl1271_warning("postponing suspend to perform recovery");
1710 return -EBUSY;
1711 }
1712
1713 wl1271_tx_flush(wl);
1714
1715 mutex_lock(&wl->mutex);
1716 wl->wow_enabled = true;
1717 wl12xx_for_each_wlvif(wl, wlvif) {
1718 ret = wl1271_configure_suspend(wl, wlvif, wow);
1719 if (ret < 0) {
1720 mutex_unlock(&wl->mutex);
1721 wl1271_warning("couldn't prepare device to suspend");
1722 return ret;
1723 }
1724 }
1725 mutex_unlock(&wl->mutex);
1726 /* flush any remaining work */
1727 wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
1728
1729 /*
1730 * disable and re-enable interrupts in order to flush
1731 * the threaded_irq
1732 */
1733 wlcore_disable_interrupts(wl);
1734
1735 /*
1736 * set suspended flag to avoid triggering a new threaded_irq
1737 * work. no need for spinlock as interrupts are disabled.
1738 */
1739 set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1740
1741 wlcore_enable_interrupts(wl);
1742 flush_work(&wl->tx_work);
1743 flush_delayed_work(&wl->elp_work);
1744
1745 return 0;
1746 }
1747
1748 static int wl1271_op_resume(struct ieee80211_hw *hw)
1749 {
1750 struct wl1271 *wl = hw->priv;
1751 struct wl12xx_vif *wlvif;
1752 unsigned long flags;
1753 bool run_irq_work = false, pending_recovery;
1754 int ret;
1755
1756 wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
1757 wl->wow_enabled);
1758 WARN_ON(!wl->wow_enabled);
1759
1760 /*
1761 * re-enable irq_work enqueuing, and call irq_work directly if
1762 * there is a pending work.
1763 */
1764 spin_lock_irqsave(&wl->wl_lock, flags);
1765 clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
1766 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
1767 run_irq_work = true;
1768 spin_unlock_irqrestore(&wl->wl_lock, flags);
1769
1770 mutex_lock(&wl->mutex);
1771
1772 /* test the recovery flag before calling any SDIO functions */
1773 pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1774 &wl->flags);
1775
1776 if (run_irq_work) {
1777 wl1271_debug(DEBUG_MAC80211,
1778 "run postponed irq_work directly");
1779
1780 /* don't talk to the HW if recovery is pending */
1781 if (!pending_recovery) {
1782 ret = wlcore_irq_locked(wl);
1783 if (ret)
1784 wl12xx_queue_recovery_work(wl);
1785 }
1786
1787 wlcore_enable_interrupts(wl);
1788 }
1789
1790 if (pending_recovery) {
1791 wl1271_warning("queuing forgotten recovery on resume");
1792 ieee80211_queue_work(wl->hw, &wl->recovery_work);
1793 goto out;
1794 }
1795
1796 wl12xx_for_each_wlvif(wl, wlvif) {
1797 wl1271_configure_resume(wl, wlvif);
1798 }
1799
1800 out:
1801 wl->wow_enabled = false;
1802 mutex_unlock(&wl->mutex);
1803
1804 return 0;
1805 }
1806 #endif
1807
1808 static int wl1271_op_start(struct ieee80211_hw *hw)
1809 {
1810 wl1271_debug(DEBUG_MAC80211, "mac80211 start");
1811
1812 /*
1813 * We have to delay the booting of the hardware because
1814 * we need to know the local MAC address before downloading and
1815 * initializing the firmware. The MAC address cannot be changed
1816 * after boot, and without the proper MAC address, the firmware
1817 * will not function properly.
1818 *
1819 * The MAC address is first known when the corresponding interface
1820 * is added. That is where we will initialize the hardware.
1821 */
1822
1823 return 0;
1824 }
1825
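/*
 * Stop the hardware with wl->mutex held: mark the state OFF, quiesce
 * interrupts and work items (the mutex is dropped around the sync
 * cancellations), flush pending Tx back to mac80211, power the chip
 * down and reset all bookkeeping to its initial values.
 */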
1826 static void wlcore_op_stop_locked(struct wl1271 *wl)
1827 {
1828 int i;
1829
1830 if (wl->state == WLCORE_STATE_OFF) {
1831 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
1832 &wl->flags))
1833 wlcore_enable_interrupts(wl);
1834
1835 return;
1836 }
1837
1838 /*
1839 * this must be before the cancel_work calls below, so that the work
1840 * functions don't perform further work.
1841 */
1842 wl->state = WLCORE_STATE_OFF;
1843
1844 /*
1845 * Use the nosync variant to disable interrupts, so the mutex could be
1846 * held while doing so without deadlocking.
1847 */
1848 wlcore_disable_interrupts_nosync(wl);
1849
1850 mutex_unlock(&wl->mutex);
1851
1852 wlcore_synchronize_interrupts(wl);
1853 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1854 cancel_work_sync(&wl->recovery_work);
1855 wl1271_flush_deferred_work(wl);
1856 cancel_delayed_work_sync(&wl->scan_complete_work);
1857 cancel_work_sync(&wl->netstack_work);
1858 cancel_work_sync(&wl->tx_work);
1859 cancel_delayed_work_sync(&wl->elp_work);
1860 cancel_delayed_work_sync(&wl->tx_watchdog_work);
1861
1862 /* let's notify MAC80211 about the remaining pending TX frames */
1863 mutex_lock(&wl->mutex);
1864 wl12xx_tx_reset(wl);
1865
1866 wl1271_power_off(wl);
1867 /*
1868 * In case a recovery was scheduled, interrupts were disabled to avoid
1869 * an interrupt storm. Now that the power is down, it is safe to
1870 * re-enable interrupts to balance the disable depth
1871 */
1872 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
1873 wlcore_enable_interrupts(wl);
1874
1875 wl->band = IEEE80211_BAND_2GHZ;
1876
1877 wl->rx_counter = 0;
1878 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1879 wl->channel_type = NL80211_CHAN_NO_HT;
1880 wl->tx_blocks_available = 0;
1881 wl->tx_allocated_blocks = 0;
1882 wl->tx_results_count = 0;
1883 wl->tx_packets_count = 0;
1884 wl->time_offset = 0;
1885 wl->ap_fw_ps_map = 0;
1886 wl->ap_ps_map = 0;
1887 wl->sleep_auth = WL1271_PSM_ILLEGAL;
1888 memset(wl->roles_map, 0, sizeof(wl->roles_map));
1889 memset(wl->links_map, 0, sizeof(wl->links_map));
1890 memset(wl->roc_map, 0, sizeof(wl->roc_map));
1891 memset(wl->session_ids, 0, sizeof(wl->session_ids));
1892 wl->active_sta_count = 0;
1893 wl->active_link_count = 0;
1894
1895 /* The system link is always allocated */
1896 wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
1897 wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
1898 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
1899
1900 /*
1901 * this is performed after the cancel_work calls and the associated
1902 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1903 * get executed before all these vars have been reset.
1904 */
1905 wl->flags = 0;
1906
1907 wl->tx_blocks_freed = 0;
1908
1909 for (i = 0; i < NUM_TX_QUEUES; i++) {
1910 wl->tx_pkts_freed[i] = 0;
1911 wl->tx_allocated_pkts[i] = 0;
1912 }
1913
1914 wl1271_debugfs_reset(wl);
1915
1916 kfree(wl->fw_status_1);
1917 wl->fw_status_1 = NULL;
1918 wl->fw_status_2 = NULL;
1919 kfree(wl->tx_res_if);
1920 wl->tx_res_if = NULL;
1921 kfree(wl->target_mem_map);
1922 wl->target_mem_map = NULL;
1923
1924 /*
1925 * FW channels must be re-calibrated after recovery,
1926 * clear the last Reg-Domain channel configuration.
1927 */
1928 memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
1929 }
1930
1931 static void wlcore_op_stop(struct ieee80211_hw *hw)
1932 {
1933 struct wl1271 *wl = hw->priv;
1934
1935 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
1936
1937 mutex_lock(&wl->mutex);
1938
1939 wlcore_op_stop_locked(wl);
1940
1941 mutex_unlock(&wl->mutex);
1942 }
1943
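/*
 * Delayed work that fires if a requested channel switch did not
 * complete in time: report failure to mac80211 and tell the firmware
 * to stop the switch.
 */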
1944 static void wlcore_channel_switch_work(struct work_struct *work)
1945 {
1946 struct delayed_work *dwork;
1947 struct wl1271 *wl;
1948 struct ieee80211_vif *vif;
1949 struct wl12xx_vif *wlvif;
1950 int ret;
1951
1952 dwork = container_of(work, struct delayed_work, work);
1953 wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
1954 wl = wlvif->wl;
1955
1956 wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
1957
1958 mutex_lock(&wl->mutex);
1959
1960 if (unlikely(wl->state != WLCORE_STATE_ON))
1961 goto out;
1962
1963 /* check the channel switch is still ongoing */
1964 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
1965 goto out;
1966
1967 vif = wl12xx_wlvif_to_vif(wlvif);
1968 ieee80211_chswitch_done(vif, false);
1969
1970 ret = wl1271_ps_elp_wakeup(wl);
1971 if (ret < 0)
1972 goto out;
1973
1974 wl12xx_cmd_stop_channel_switch(wl, wlvif);
1975
1976 wl1271_ps_elp_sleep(wl);
1977 out:
1978 mutex_unlock(&wl->mutex);
1979 }
1980
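/*
 * Delayed work that reports a connection loss for the vif to mac80211,
 * unless it is no longer associated.
 */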
1981 static void wlcore_connection_loss_work(struct work_struct *work)
1982 {
1983 struct delayed_work *dwork;
1984 struct wl1271 *wl;
1985 struct ieee80211_vif *vif;
1986 struct wl12xx_vif *wlvif;
1987
1988 dwork = container_of(work, struct delayed_work, work);
1989 wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
1990 wl = wlvif->wl;
1991
1992 wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
1993
1994 mutex_lock(&wl->mutex);
1995
1996 if (unlikely(wl->state != WLCORE_STATE_ON))
1997 goto out;
1998
1999 /* Call mac80211 connection loss */
2000 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2001 goto out;
2002
2003 vif = wl12xx_wlvif_to_vif(wlvif);
2004 ieee80211_connection_loss(vif);
2005 out:
2006 mutex_unlock(&wl->mutex);
2007 }
2008
2009 static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
2010 {
2011 u8 policy = find_first_zero_bit(wl->rate_policies_map,
2012 WL12XX_MAX_RATE_POLICIES);
2013 if (policy >= WL12XX_MAX_RATE_POLICIES)
2014 return -EBUSY;
2015
2016 __set_bit(policy, wl->rate_policies_map);
2017 *idx = policy;
2018 return 0;
2019 }
2020
2021 static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
2022 {
2023 if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
2024 return;
2025
2026 __clear_bit(*idx, wl->rate_policies_map);
2027 *idx = WL12XX_MAX_RATE_POLICIES;
2028 }
2029
2030 static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
2031 {
2032 u8 policy = find_first_zero_bit(wl->klv_templates_map,
2033 WLCORE_MAX_KLV_TEMPLATES);
2034 if (policy >= WLCORE_MAX_KLV_TEMPLATES)
2035 return -EBUSY;
2036
2037 __set_bit(policy, wl->klv_templates_map);
2038 *idx = policy;
2039 return 0;
2040 }
2041
2042 static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
2043 {
2044 if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
2045 return;
2046
2047 __clear_bit(*idx, wl->klv_templates_map);
2048 *idx = WLCORE_MAX_KLV_TEMPLATES;
2049 }
2050
2051 static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2052 {
2053 switch (wlvif->bss_type) {
2054 case BSS_TYPE_AP_BSS:
2055 if (wlvif->p2p)
2056 return WL1271_ROLE_P2P_GO;
2057 else
2058 return WL1271_ROLE_AP;
2059
2060 case BSS_TYPE_STA_BSS:
2061 if (wlvif->p2p)
2062 return WL1271_ROLE_P2P_CL;
2063 else
2064 return WL1271_ROLE_STA;
2065
2066 case BSS_TYPE_IBSS:
2067 return WL1271_ROLE_IBSS;
2068
2069 default:
2070 wl1271_error("invalid bss_type: %d", wlvif->bss_type);
2071 }
2072 return WL12XX_INVALID_ROLE_TYPE;
2073 }
2074
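/*
 * (Re)initialize the per-vif driver state: everything up to the
 * 'persistent' member is zeroed, the bss/role type is derived from the
 * mac80211 interface type, rate policies (and, for STA/IBSS, a KLV
 * template) are allocated, and the global band/channel/power settings
 * are copied in, since mac80211 configures them globally while we track
 * them per interface.
 */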
2075 static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
2076 {
2077 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2078 int i;
2079
2080 /* clear everything but the persistent data */
2081 memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
2082
2083 switch (ieee80211_vif_type_p2p(vif)) {
2084 case NL80211_IFTYPE_P2P_CLIENT:
2085 wlvif->p2p = 1;
2086 /* fall-through */
2087 case NL80211_IFTYPE_STATION:
2088 wlvif->bss_type = BSS_TYPE_STA_BSS;
2089 break;
2090 case NL80211_IFTYPE_ADHOC:
2091 wlvif->bss_type = BSS_TYPE_IBSS;
2092 break;
2093 case NL80211_IFTYPE_P2P_GO:
2094 wlvif->p2p = 1;
2095 /* fall-through */
2096 case NL80211_IFTYPE_AP:
2097 wlvif->bss_type = BSS_TYPE_AP_BSS;
2098 break;
2099 default:
2100 wlvif->bss_type = MAX_BSS_TYPE;
2101 return -EOPNOTSUPP;
2102 }
2103
2104 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2105 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2106 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2107
2108 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2109 wlvif->bss_type == BSS_TYPE_IBSS) {
2110 /* init sta/ibss data */
2111 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2112 wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2113 wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2114 wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2115 wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
2116 wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
2117 wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
2118 wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
2119 } else {
2120 /* init ap data */
2121 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2122 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2123 wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2124 wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2125 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2126 wl12xx_allocate_rate_policy(wl,
2127 &wlvif->ap.ucast_rate_idx[i]);
2128 wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
2129 /*
2130 * TODO: check if basic_rate shouldn't be
2131 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2132 * instead (the same thing for STA above).
2133 */
2134 wlvif->basic_rate = CONF_TX_ENABLED_RATES;
2135 /* TODO: this seems to be used only for STA, check it */
2136 wlvif->rate_set = CONF_TX_ENABLED_RATES;
2137 }
2138
2139 wlvif->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
2140 wlvif->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
2141 wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
2142
2143 /*
2144 * mac80211 configures some values globally, while we treat them
2145 * per-interface. Thus, on init, we have to copy them from wl.
2146 */
2147 wlvif->band = wl->band;
2148 wlvif->channel = wl->channel;
2149 wlvif->power_level = wl->power_level;
2150 wlvif->channel_type = wl->channel_type;
2151
2152 INIT_WORK(&wlvif->rx_streaming_enable_work,
2153 wl1271_rx_streaming_enable_work);
2154 INIT_WORK(&wlvif->rx_streaming_disable_work,
2155 wl1271_rx_streaming_disable_work);
2156 INIT_DELAYED_WORK(&wlvif->channel_switch_work,
2157 wlcore_channel_switch_work);
2158 INIT_DELAYED_WORK(&wlvif->connection_loss_work,
2159 wlcore_connection_loss_work);
2160 INIT_LIST_HEAD(&wlvif->list);
2161
2162 setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
2163 (unsigned long) wlvif);
2164 return 0;
2165 }
2166
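/*
 * Power the chip on and boot the firmware, retrying up to
 * WL1271_BOOT_RETRIES times. On success the wiphy hw/fw version fields
 * are filled in, 5 GHz channels are dropped when the NVS reports no 11a
 * support, and the core state moves to WLCORE_STATE_ON.
 */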
2167 static bool wl12xx_init_fw(struct wl1271 *wl)
2168 {
2169 int retries = WL1271_BOOT_RETRIES;
2170 bool booted = false;
2171 struct wiphy *wiphy = wl->hw->wiphy;
2172 int ret;
2173
2174 while (retries) {
2175 retries--;
2176 ret = wl12xx_chip_wakeup(wl, false);
2177 if (ret < 0)
2178 goto power_off;
2179
2180 ret = wl->ops->boot(wl);
2181 if (ret < 0)
2182 goto power_off;
2183
2184 ret = wl1271_hw_init(wl);
2185 if (ret < 0)
2186 goto irq_disable;
2187
2188 booted = true;
2189 break;
2190
2191 irq_disable:
2192 mutex_unlock(&wl->mutex);
2193 /* Unlocking the mutex in the middle of handling is
2194 inherently unsafe. In this case we deem it safe to do,
2195 because we need to let any possibly pending IRQ out of
2196 the system (and while we are WLCORE_STATE_OFF the IRQ
2197 work function will not do anything.) Also, any other
2198 possible concurrent operations will fail due to the
2199 current state, hence the wl1271 struct should be safe. */
2200 wlcore_disable_interrupts(wl);
2201 wl1271_flush_deferred_work(wl);
2202 cancel_work_sync(&wl->netstack_work);
2203 mutex_lock(&wl->mutex);
2204 power_off:
2205 wl1271_power_off(wl);
2206 }
2207
2208 if (!booted) {
2209 wl1271_error("firmware boot failed despite %d retries",
2210 WL1271_BOOT_RETRIES);
2211 goto out;
2212 }
2213
2214 wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
2215
2216 /* update hw/fw version info in wiphy struct */
2217 wiphy->hw_version = wl->chip.id;
2218 strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
2219 sizeof(wiphy->fw_version));
2220
2221 /*
2222 * Now we know if 11a is supported (info from the NVS), so disable
2223 * 11a channels if not supported
2224 */
2225 if (!wl->enable_11a)
2226 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
2227
2228 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
2229 wl->enable_11a ? "" : "not ");
2230
2231 wl->state = WLCORE_STATE_ON;
2232 out:
2233 return booted;
2234 }
2235
2236 static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
2237 {
2238 return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
2239 }
2240
2241 /*
2242 * Check whether a fw switch (i.e. moving from one loaded
2243 * fw to another) is needed. This function is also responsible
2244 * for updating wl->last_vif_count, so it must be called before
2245 * loading a non-plt fw, so that the correct fw
2246 * (single-role/multi-role) will be used.
2247 */
2248 static bool wl12xx_need_fw_change(struct wl1271 *wl,
2249 struct vif_counter_data vif_counter_data,
2250 bool add)
2251 {
2252 enum wl12xx_fw_type current_fw = wl->fw_type;
2253 u8 vif_count = vif_counter_data.counter;
2254
2255 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
2256 return false;
2257
2258 /* increase the vif count if this is a new vif */
2259 if (add && !vif_counter_data.cur_vif_running)
2260 vif_count++;
2261
2262 wl->last_vif_count = vif_count;
2263
2264 /* no need for fw change if the device is OFF */
2265 if (wl->state == WLCORE_STATE_OFF)
2266 return false;
2267
2268 /* no need for fw change if a single fw is used */
2269 if (!wl->mr_fw_name)
2270 return false;
2271
2272 if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
2273 return true;
2274 if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
2275 return true;
2276
2277 return false;
2278 }
2279
2280 /*
2281 * Enter "forced psm". Make sure the sta is in psm against the ap,
2282 * so the connection is more likely to survive the fw switch.
2283 */
2284 static void wl12xx_force_active_psm(struct wl1271 *wl)
2285 {
2286 struct wl12xx_vif *wlvif;
2287
2288 wl12xx_for_each_wlvif_sta(wl, wlvif) {
2289 wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
2290 }
2291 }
2292
2293 struct wlcore_hw_queue_iter_data {
2294 unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
2295 /* current vif */
2296 struct ieee80211_vif *vif;
2297 /* is the current vif among those iterated */
2298 bool cur_running;
2299 };
2300
2301 static void wlcore_hw_queue_iter(void *data, u8 *mac,
2302 struct ieee80211_vif *vif)
2303 {
2304 struct wlcore_hw_queue_iter_data *iter_data = data;
2305
2306 if (WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
2307 return;
2308
2309 if (iter_data->cur_running || vif == iter_data->vif) {
2310 iter_data->cur_running = true;
2311 return;
2312 }
2313
2314 __set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
2315 }
2316
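/*
 * Each vif owns a contiguous block of NUM_TX_QUEUES mac80211 hw queues.
 * The iterator above marks the blocks already claimed by other running
 * vifs; wlcore_allocate_hw_queue_base() then either reuses the block
 * mac80211 already assigned to this vif (resume/recovery) or picks the
 * first free one. The queue indices above all the data blocks are
 * reserved as one cab (content-after-beacon) queue per interface, used
 * only for AP-type vifs.
 */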
2317 static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
2318 struct wl12xx_vif *wlvif)
2319 {
2320 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2321 struct wlcore_hw_queue_iter_data iter_data = {};
2322 int i, q_base;
2323
2324 iter_data.vif = vif;
2325
2326 /* mark all bits taken by active interfaces */
2327 ieee80211_iterate_active_interfaces_atomic(wl->hw,
2328 IEEE80211_IFACE_ITER_RESUME_ALL,
2329 wlcore_hw_queue_iter, &iter_data);
2330
2331 /* the current vif is already running in mac80211 (resume/recovery) */
2332 if (iter_data.cur_running) {
2333 wlvif->hw_queue_base = vif->hw_queue[0];
2334 wl1271_debug(DEBUG_MAC80211,
2335 "using pre-allocated hw queue base %d",
2336 wlvif->hw_queue_base);
2337
2338 /* the interface type might have changed */
2339 goto adjust_cab_queue;
2340 }
2341
2342 q_base = find_first_zero_bit(iter_data.hw_queue_map,
2343 WLCORE_NUM_MAC_ADDRESSES);
2344 if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
2345 return -EBUSY;
2346
2347 wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
2348 wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
2349 wlvif->hw_queue_base);
2350
2351 for (i = 0; i < NUM_TX_QUEUES; i++) {
2352 wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
2353 /* register hw queues in mac80211 */
2354 vif->hw_queue[i] = wlvif->hw_queue_base + i;
2355 }
2356
2357 adjust_cab_queue:
2358 /* the last places are reserved for cab queues per interface */
2359 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2360 vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
2361 wlvif->hw_queue_base / NUM_TX_QUEUES;
2362 else
2363 vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
2364
2365 return 0;
2366 }
2367
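/*
 * mac80211 add_interface: set up the per-vif state and a hw-queue
 * block, switch firmware (via an intended recovery) if the vif count
 * now requires the other single-/multi-role image, boot the firmware on
 * first use, then enable the role and run the per-vif init sequence.
 */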
2368 static int wl1271_op_add_interface(struct ieee80211_hw *hw,
2369 struct ieee80211_vif *vif)
2370 {
2371 struct wl1271 *wl = hw->priv;
2372 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2373 struct vif_counter_data vif_count;
2374 int ret = 0;
2375 u8 role_type;
2376 bool booted = false;
2377
2378 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
2379 IEEE80211_VIF_SUPPORTS_CQM_RSSI;
2380
2381 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
2382 ieee80211_vif_type_p2p(vif), vif->addr);
2383
2384 wl12xx_get_vif_count(hw, vif, &vif_count);
2385
2386 mutex_lock(&wl->mutex);
2387 ret = wl1271_ps_elp_wakeup(wl);
2388 if (ret < 0)
2389 goto out_unlock;
2390
2391 /*
2392 * In some corner-case HW recovery scenarios it's possible to
2393 * get here before __wl1271_op_remove_interface has completed, so
2394 * opt out if that is the case.
2395 */
2396 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
2397 test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
2398 ret = -EBUSY;
2399 goto out;
2400 }
2401
2402
2403 ret = wl12xx_init_vif_data(wl, vif);
2404 if (ret < 0)
2405 goto out;
2406
2407 wlvif->wl = wl;
2408 role_type = wl12xx_get_role_type(wl, wlvif);
2409 if (role_type == WL12XX_INVALID_ROLE_TYPE) {
2410 ret = -EINVAL;
2411 goto out;
2412 }
2413
2414 ret = wlcore_allocate_hw_queue_base(wl, wlvif);
2415 if (ret < 0)
2416 goto out;
2417
2418 if (wl12xx_need_fw_change(wl, vif_count, true)) {
2419 wl12xx_force_active_psm(wl);
2420 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2421 mutex_unlock(&wl->mutex);
2422 wl1271_recovery_work(&wl->recovery_work);
2423 return 0;
2424 }
2425
2426 /*
2427 * TODO: once the nvs issue is solved, move this block
2428 * to start(), and make sure the driver is ON here.
2429 */
2430 if (wl->state == WLCORE_STATE_OFF) {
2431 /*
2432 * we still need this in order to configure the fw
2433 * while uploading the nvs
2434 */
2435 memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
2436
2437 booted = wl12xx_init_fw(wl);
2438 if (!booted) {
2439 ret = -EINVAL;
2440 goto out;
2441 }
2442 }
2443
2444 ret = wl12xx_cmd_role_enable(wl, vif->addr,
2445 role_type, &wlvif->role_id);
2446 if (ret < 0)
2447 goto out;
2448
2449 ret = wl1271_init_vif_specific(wl, vif);
2450 if (ret < 0)
2451 goto out;
2452
2453 list_add(&wlvif->list, &wl->wlvif_list);
2454 set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
2455
2456 if (wlvif->bss_type == BSS_TYPE_AP_BSS)
2457 wl->ap_count++;
2458 else
2459 wl->sta_count++;
2460 out:
2461 wl1271_ps_elp_sleep(wl);
2462 out_unlock:
2463 mutex_unlock(&wl->mutex);
2464
2465 return ret;
2466 }
2467
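/*
 * Tear a vif down: abort any scan/sched-scan/ROC it owns, disable its
 * firmware roles (unless a recovery is in progress), release its rate
 * policies, keys and hlids, and remove it from the vif list. Called
 * with wl->mutex held; the mutex is dropped briefly at the end so the
 * per-vif works can be cancelled.
 */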
2468 static void __wl1271_op_remove_interface(struct wl1271 *wl,
2469 struct ieee80211_vif *vif,
2470 bool reset_tx_queues)
2471 {
2472 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2473 int i, ret;
2474 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
2475
2476 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
2477
2478 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2479 return;
2480
2481 /* because of hardware recovery, we may get here twice */
2482 if (wl->state == WLCORE_STATE_OFF)
2483 return;
2484
2485 wl1271_info("down");
2486
2487 if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
2488 wl->scan_wlvif == wlvif) {
2489 /*
2490 * Rearm the tx watchdog just before idling scan. This
2491 * prevents just-finished scans from triggering the watchdog
2492 */
2493 wl12xx_rearm_tx_watchdog_locked(wl);
2494
2495 wl->scan.state = WL1271_SCAN_STATE_IDLE;
2496 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
2497 wl->scan_wlvif = NULL;
2498 wl->scan.req = NULL;
2499 ieee80211_scan_completed(wl->hw, true);
2500 }
2501
2502 if (wl->sched_vif == wlvif) {
2503 ieee80211_sched_scan_stopped(wl->hw);
2504 wl->sched_vif = NULL;
2505 }
2506
2507 if (wl->roc_vif == vif) {
2508 wl->roc_vif = NULL;
2509 ieee80211_remain_on_channel_expired(wl->hw);
2510 }
2511
2512 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
2513 /* disable active roles */
2514 ret = wl1271_ps_elp_wakeup(wl);
2515 if (ret < 0)
2516 goto deinit;
2517
2518 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2519 wlvif->bss_type == BSS_TYPE_IBSS) {
2520 if (wl12xx_dev_role_started(wlvif))
2521 wl12xx_stop_dev(wl, wlvif);
2522 }
2523
2524 ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
2525 if (ret < 0)
2526 goto deinit;
2527
2528 wl1271_ps_elp_sleep(wl);
2529 }
2530 deinit:
2531 /* clear all hlids (except system_hlid) */
2532 wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
2533
2534 if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
2535 wlvif->bss_type == BSS_TYPE_IBSS) {
2536 wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
2537 wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
2538 wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
2539 wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
2540 wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
2541 } else {
2542 wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
2543 wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
2544 wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
2545 wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
2546 for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
2547 wl12xx_free_rate_policy(wl,
2548 &wlvif->ap.ucast_rate_idx[i]);
2549 wl1271_free_ap_keys(wl, wlvif);
2550 }
2551
2552 dev_kfree_skb(wlvif->probereq);
2553 wlvif->probereq = NULL;
2554 wl12xx_tx_reset_wlvif(wl, wlvif);
2555 if (wl->last_wlvif == wlvif)
2556 wl->last_wlvif = NULL;
2557 list_del(&wlvif->list);
2558 memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
2559 wlvif->role_id = WL12XX_INVALID_ROLE_ID;
2560 wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
2561
2562 if (is_ap)
2563 wl->ap_count--;
2564 else
2565 wl->sta_count--;
2566
2567 /*
2568 * Last AP removed, but STA interfaces remain. Configure sleep auth
2569 * according to the STA setting. Don't do this on unintended recovery.
2570 */
2571 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
2572 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
2573 goto unlock;
2574
2575 if (wl->ap_count == 0 && is_ap && wl->sta_count) {
2576 u8 sta_auth = wl->conf.conn.sta_sleep_auth;
2577 /* Configure for power according to debugfs */
2578 if (sta_auth != WL1271_PSM_ILLEGAL)
2579 wl1271_acx_sleep_auth(wl, sta_auth);
2580 /* Configure for ELP power saving */
2581 else
2582 wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
2583 }
2584
2585 unlock:
2586 mutex_unlock(&wl->mutex);
2587
2588 del_timer_sync(&wlvif->rx_streaming_timer);
2589 cancel_work_sync(&wlvif->rx_streaming_enable_work);
2590 cancel_work_sync(&wlvif->rx_streaming_disable_work);
2591 cancel_delayed_work_sync(&wlvif->connection_loss_work);
2592
2593 mutex_lock(&wl->mutex);
2594 }
2595
2596 static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
2597 struct ieee80211_vif *vif)
2598 {
2599 struct wl1271 *wl = hw->priv;
2600 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
2601 struct wl12xx_vif *iter;
2602 struct vif_counter_data vif_count;
2603
2604 wl12xx_get_vif_count(hw, vif, &vif_count);
2605 mutex_lock(&wl->mutex);
2606
2607 if (wl->state == WLCORE_STATE_OFF ||
2608 !test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
2609 goto out;
2610
2611 /*
2612 * wl->vif can be null here if someone shuts down the interface
2613 * just when hardware recovery has been started.
2614 */
2615 wl12xx_for_each_wlvif(wl, iter) {
2616 if (iter != wlvif)
2617 continue;
2618
2619 __wl1271_op_remove_interface(wl, vif, true);
2620 break;
2621 }
2622 WARN_ON(iter != wlvif);
2623 if (wl12xx_need_fw_change(wl, vif_count, false)) {
2624 wl12xx_force_active_psm(wl);
2625 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
2626 wl12xx_queue_recovery_work(wl);
2627 }
2628 out:
2629 mutex_unlock(&wl->mutex);
2630 }
2631
2632 static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
2633 struct ieee80211_vif *vif,
2634 enum nl80211_iftype new_type, bool p2p)
2635 {
2636 struct wl1271 *wl = hw->priv;
2637 int ret;
2638
2639 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2640 wl1271_op_remove_interface(hw, vif);
2641
2642 vif->type = new_type;
2643 vif->p2p = p2p;
2644 ret = wl1271_op_add_interface(hw, vif);
2645
2646 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
2647 return ret;
2648 }
2649
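/*
 * Issue the firmware join: role_start_ibss for IBSS, otherwise
 * role_start_sta (preceded by a dummy start+stop pair on chips with the
 * WLCORE_QUIRK_START_STA_FAILS quirk).
 */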
2650 static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2651 {
2652 int ret;
2653 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
2654
2655 /*
2656 * One of the side effects of the JOIN command is that it clears
2657 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2658 * to a WPA/WPA2 access point will therefore kill the data-path.
2659 * Currently the only valid scenario for JOIN during association
2660 * is on roaming, in which case we will also be given new keys.
2661 * Keep the below message for now, unless it starts bothering
2662 * users who really like to roam a lot :)
2663 */
2664 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2665 wl1271_info("JOIN while associated.");
2666
2667 /* clear encryption type */
2668 wlvif->encryption_type = KEY_NONE;
2669
2670 if (is_ibss)
2671 ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
2672 else {
2673 if (wl->quirks & WLCORE_QUIRK_START_STA_FAILS) {
2674 /*
2675 * TODO: this is an ugly workaround for a wl12xx fw
2676 * bug - we are not able to tx/rx after the first
2677 * start_sta, so issue dummy start+stop calls,
2678 * and then call start_sta again.
2679 * This should be fixed in the fw.
2680 */
2681 wl12xx_cmd_role_start_sta(wl, wlvif);
2682 wl12xx_cmd_role_stop_sta(wl, wlvif);
2683 }
2684
2685 ret = wl12xx_cmd_role_start_sta(wl, wlvif);
2686 }
2687
2688 return ret;
2689 }
2690
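/*
 * Extract the SSID element from a template frame (beacon / probe
 * request) starting at the given IE offset and cache it in
 * wlvif->ssid / wlvif->ssid_len, validating its length first.
 */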
2691 static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
2692 int offset)
2693 {
2694 u8 ssid_len;
2695 const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
2696 skb->len - offset);
2697
2698 if (!ptr) {
2699 wl1271_error("No SSID in IEs!");
2700 return -ENOENT;
2701 }
2702
2703 ssid_len = ptr[1];
2704 if (ssid_len > IEEE80211_MAX_SSID_LEN) {
2705 wl1271_error("SSID is too long!");
2706 return -EINVAL;
2707 }
2708
2709 wlvif->ssid_len = ssid_len;
2710 memcpy(wlvif->ssid, ptr+2, ssid_len);
2711 return 0;
2712 }
2713
2714 static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2715 {
2716 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2717 struct sk_buff *skb;
2718 int ieoffset;
2719
2720 /* we currently only support setting the ssid from the ap probe req */
2721 if (wlvif->bss_type != BSS_TYPE_STA_BSS)
2722 return -EINVAL;
2723
2724 skb = ieee80211_ap_probereq_get(wl->hw, vif);
2725 if (!skb)
2726 return -EINVAL;
2727
2728 ieoffset = offsetof(struct ieee80211_mgmt,
2729 u.probe_req.variable);
2730 wl1271_ssid_set(wlvif, skb, ieoffset);
2731 dev_kfree_skb(skb);
2732
2733 return 0;
2734 }
2735
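/*
 * Post-association setup for a STA vif: cache the association
 * parameters, build the ps-poll and AP probe-request templates, enable
 * connection monitoring and keep-alive, set the AID, and sync the
 * power-save mode and rate policies with the firmware.
 */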
2736 static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2737 struct ieee80211_bss_conf *bss_conf,
2738 u32 sta_rate_set)
2739 {
2740 int ieoffset;
2741 int ret;
2742
2743 wlvif->aid = bss_conf->aid;
2744 wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
2745 wlvif->beacon_int = bss_conf->beacon_int;
2746 wlvif->wmm_enabled = bss_conf->qos;
2747
2748 set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
2749
2750 /*
2751 * With wl1271, we don't need to update the
2752 * beacon_int and dtim_period, because the firmware
2753 * updates them by itself when the first beacon is
2754 * received after a join.
2755 */
2756 ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
2757 if (ret < 0)
2758 return ret;
2759
2760 /*
2761 * Get a template for hardware connection maintenance
2762 */
2763 dev_kfree_skb(wlvif->probereq);
2764 wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
2765 wlvif,
2766 NULL);
2767 ieoffset = offsetof(struct ieee80211_mgmt,
2768 u.probe_req.variable);
2769 wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
2770
2771 /* enable the connection monitoring feature */
2772 ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
2773 if (ret < 0)
2774 return ret;
2775
2776 /*
2777 * The join command disables the keep-alive mode, shuts down its
2778 * process, and also clears the template config, so we need to reset
2779 * it all after the join. The acx_aid starts the keep-alive process,
2780 * and the order of the commands below matters.
2781 */
2782 ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
2783 if (ret < 0)
2784 return ret;
2785
2786 ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
2787 if (ret < 0)
2788 return ret;
2789
2790 ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
2791 if (ret < 0)
2792 return ret;
2793
2794 ret = wl1271_acx_keep_alive_config(wl, wlvif,
2795 wlvif->sta.klv_template_id,
2796 ACX_KEEP_ALIVE_TPL_VALID);
2797 if (ret < 0)
2798 return ret;
2799
2800 /*
2801 * The default fw psm configuration is AUTO, while mac80211's default
2802 * setting is off (ACTIVE), so sync the fw with the correct value.
2803 */
2804 ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
2805 if (ret < 0)
2806 return ret;
2807
2808 if (sta_rate_set) {
2809 wlvif->rate_set =
2810 wl1271_tx_enabled_rates_get(wl,
2811 sta_rate_set,
2812 wlvif->band);
2813 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
2814 if (ret < 0)
2815 return ret;
2816 }
2817
2818 return ret;
2819 }
2820
2821 static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2822 {
2823 int ret;
2824 bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
2825
2826 /* bail out unless we were associated (sta) */
2827 if (sta &&
2828 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
2829 return false;
2830
2831 /* make sure we are joined (ibss) */
2832 if (!sta &&
2833 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
2834 return false;
2835
2836 if (sta) {
2837 /* use defaults when not associated */
2838 wlvif->aid = 0;
2839
2840 /* free probe-request template */
2841 dev_kfree_skb(wlvif->probereq);
2842 wlvif->probereq = NULL;
2843
2844 /* disable connection monitor features */
2845 ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
2846 if (ret < 0)
2847 return ret;
2848
2849 /* Disable the keep-alive feature */
2850 ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
2851 if (ret < 0)
2852 return ret;
2853 }
2854
2855 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
2856 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
2857
2858 wl12xx_cmd_stop_channel_switch(wl, wlvif);
2859 ieee80211_chswitch_done(vif, false);
2860 cancel_delayed_work(&wlvif->channel_switch_work);
2861 }
2862
2863 /* invalidate keep-alive template */
2864 wl1271_acx_keep_alive_config(wl, wlvif,
2865 wlvif->sta.klv_template_id,
2866 ACX_KEEP_ALIVE_TPL_INVALID);
2867
2868 /* reset TX security counters on a clean disconnect */
2869 wlvif->tx_security_last_seq_lsb = 0;
2870 wlvif->tx_security_seq = 0;
2871
2872 return 0;
2873 }
2874
2875 static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
2876 {
2877 wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
2878 wlvif->rate_set = wlvif->basic_rate_set;
2879 }
2880
2881 static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
2882 struct ieee80211_conf *conf, u32 changed)
2883 {
2884 int ret;
2885
2886 if (conf->power_level != wlvif->power_level) {
2887 ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
2888 if (ret < 0)
2889 return ret;
2890
2891 wlvif->power_level = conf->power_level;
2892 }
2893
2894 return 0;
2895 }
2896
2897 static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
2898 {
2899 struct wl1271 *wl = hw->priv;
2900 struct wl12xx_vif *wlvif;
2901 struct ieee80211_conf *conf = &hw->conf;
2902 int ret = 0;
2903
2904 wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
2905 " changed 0x%x",
2906 conf->flags & IEEE80211_CONF_PS ? "on" : "off",
2907 conf->power_level,
2908 conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
2909 changed);
2910
2911 mutex_lock(&wl->mutex);
2912
2913 if (changed & IEEE80211_CONF_CHANGE_POWER)
2914 wl->power_level = conf->power_level;
2915
2916 if (unlikely(wl->state != WLCORE_STATE_ON))
2917 goto out;
2918
2919 ret = wl1271_ps_elp_wakeup(wl);
2920 if (ret < 0)
2921 goto out;
2922
2923 /* configure each interface */
2924 wl12xx_for_each_wlvif(wl, wlvif) {
2925 ret = wl12xx_config_vif(wl, wlvif, conf, changed);
2926 if (ret < 0)
2927 goto out_sleep;
2928 }
2929
2930 out_sleep:
2931 wl1271_ps_elp_sleep(wl);
2932
2933 out:
2934 mutex_unlock(&wl->mutex);
2935
2936 return ret;
2937 }
2938
2939 struct wl1271_filter_params {
2940 bool enabled;
2941 int mc_list_length;
2942 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
2943 };
2944
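/*
 * mac80211 hands us the interface's multicast list here; if it fits in
 * ACX_MC_ADDRESS_GROUP_MAX entries it is copied into a
 * wl1271_filter_params, otherwise address filtering is disabled. The
 * struct is returned as an opaque u64 cookie that
 * wl1271_op_configure_filter() below unpacks and frees.
 */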
2945 static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
2946 struct netdev_hw_addr_list *mc_list)
2947 {
2948 struct wl1271_filter_params *fp;
2949 struct netdev_hw_addr *ha;
2950
2951 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
2952 if (!fp) {
2953 wl1271_error("Out of memory setting filters.");
2954 return 0;
2955 }
2956
2957 /* update multicast filtering parameters */
2958 fp->mc_list_length = 0;
2959 if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
2960 fp->enabled = false;
2961 } else {
2962 fp->enabled = true;
2963 netdev_hw_addr_list_for_each(ha, mc_list) {
2964 memcpy(fp->mc_list[fp->mc_list_length],
2965 ha->addr, ETH_ALEN);
2966 fp->mc_list_length++;
2967 }
2968 }
2969
2970 return (u64)(unsigned long)fp;
2971 }
2972
2973 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2974 FIF_ALLMULTI | \
2975 FIF_FCSFAIL | \
2976 FIF_BCN_PRBRESP_PROMISC | \
2977 FIF_CONTROL | \
2978 FIF_OTHER_BSS)
2979
2980 static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
2981 unsigned int changed,
2982 unsigned int *total, u64 multicast)
2983 {
2984 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
2985 struct wl1271 *wl = hw->priv;
2986 struct wl12xx_vif *wlvif;
2987
2988 int ret;
2989
2990 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
2991 " total %x", changed, *total);
2992
2993 mutex_lock(&wl->mutex);
2994
2995 *total &= WL1271_SUPPORTED_FILTERS;
2996 changed &= WL1271_SUPPORTED_FILTERS;
2997
2998 if (unlikely(wl->state != WLCORE_STATE_ON))
2999 goto out;
3000
3001 ret = wl1271_ps_elp_wakeup(wl);
3002 if (ret < 0)
3003 goto out;
3004
3005 wl12xx_for_each_wlvif(wl, wlvif) {
3006 if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
3007 if (*total & FIF_ALLMULTI)
3008 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3009 false,
3010 NULL, 0);
3011 else if (fp)
3012 ret = wl1271_acx_group_address_tbl(wl, wlvif,
3013 fp->enabled,
3014 fp->mc_list,
3015 fp->mc_list_length);
3016 if (ret < 0)
3017 goto out_sleep;
3018 }
3019 }
3020
3021 /*
3022 * the fw doesn't provide an api to configure the filters. Instead,
3023 * the filter configuration is based on the active roles / ROC
3024 * state.
3025 */
3026
3027 out_sleep:
3028 wl1271_ps_elp_sleep(wl);
3029
3030 out:
3031 mutex_unlock(&wl->mutex);
3032 kfree(fp);
3033 }
3034
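/*
 * Keys that mac80211 sets before the AP role has been started are not
 * pushed to the firmware yet; they are recorded here and applied later
 * by wl1271_ap_init_hwenc(), once the role is up.
 */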
3035 static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3036 u8 id, u8 key_type, u8 key_size,
3037 const u8 *key, u8 hlid, u32 tx_seq_32,
3038 u16 tx_seq_16)
3039 {
3040 struct wl1271_ap_key *ap_key;
3041 int i;
3042
3043 wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
3044
3045 if (key_size > MAX_KEY_SIZE)
3046 return -EINVAL;
3047
3048 /*
3049 * Find next free entry in ap_keys. Also check we are not replacing
3050 * an existing key.
3051 */
3052 for (i = 0; i < MAX_NUM_KEYS; i++) {
3053 if (wlvif->ap.recorded_keys[i] == NULL)
3054 break;
3055
3056 if (wlvif->ap.recorded_keys[i]->id == id) {
3057 wl1271_warning("trying to record key replacement");
3058 return -EINVAL;
3059 }
3060 }
3061
3062 if (i == MAX_NUM_KEYS)
3063 return -EBUSY;
3064
3065 ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
3066 if (!ap_key)
3067 return -ENOMEM;
3068
3069 ap_key->id = id;
3070 ap_key->key_type = key_type;
3071 ap_key->key_size = key_size;
3072 memcpy(ap_key->key, key, key_size);
3073 ap_key->hlid = hlid;
3074 ap_key->tx_seq_32 = tx_seq_32;
3075 ap_key->tx_seq_16 = tx_seq_16;
3076
3077 wlvif->ap.recorded_keys[i] = ap_key;
3078 return 0;
3079 }
3080
3081 static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3082 {
3083 int i;
3084
3085 for (i = 0; i < MAX_NUM_KEYS; i++) {
3086 kfree(wlvif->ap.recorded_keys[i]);
3087 wlvif->ap.recorded_keys[i] = NULL;
3088 }
3089 }
3090
3091 static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
3092 {
3093 int i, ret = 0;
3094 struct wl1271_ap_key *key;
3095 bool wep_key_added = false;
3096
3097 for (i = 0; i < MAX_NUM_KEYS; i++) {
3098 u8 hlid;
3099 if (wlvif->ap.recorded_keys[i] == NULL)
3100 break;
3101
3102 key = wlvif->ap.recorded_keys[i];
3103 hlid = key->hlid;
3104 if (hlid == WL12XX_INVALID_LINK_ID)
3105 hlid = wlvif->ap.bcast_hlid;
3106
3107 ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3108 key->id, key->key_type,
3109 key->key_size, key->key,
3110 hlid, key->tx_seq_32,
3111 key->tx_seq_16);
3112 if (ret < 0)
3113 goto out;
3114
3115 if (key->key_type == KEY_WEP)
3116 wep_key_added = true;
3117 }
3118
3119 if (wep_key_added) {
3120 ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
3121 wlvif->ap.bcast_hlid);
3122 if (ret < 0)
3123 goto out;
3124 }
3125
3126 out:
3127 wl1271_free_ap_keys(wl, wlvif);
3128 return ret;
3129 }
3130
3131 static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3132 u16 action, u8 id, u8 key_type,
3133 u8 key_size, const u8 *key, u32 tx_seq_32,
3134 u16 tx_seq_16, struct ieee80211_sta *sta)
3135 {
3136 int ret;
3137 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3138
3139 if (is_ap) {
3140 struct wl1271_station *wl_sta;
3141 u8 hlid;
3142
3143 if (sta) {
3144 wl_sta = (struct wl1271_station *)sta->drv_priv;
3145 hlid = wl_sta->hlid;
3146 } else {
3147 hlid = wlvif->ap.bcast_hlid;
3148 }
3149
3150 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3151 /*
3152 * We do not support removing keys after AP shutdown.
3153 * Pretend we do to make mac80211 happy.
3154 */
3155 if (action != KEY_ADD_OR_REPLACE)
3156 return 0;
3157
3158 ret = wl1271_record_ap_key(wl, wlvif, id,
3159 key_type, key_size,
3160 key, hlid, tx_seq_32,
3161 tx_seq_16);
3162 } else {
3163 ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
3164 id, key_type, key_size,
3165 key, hlid, tx_seq_32,
3166 tx_seq_16);
3167 }
3168
3169 if (ret < 0)
3170 return ret;
3171 } else {
3172 const u8 *addr;
3173 static const u8 bcast_addr[ETH_ALEN] = {
3174 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3175 };
3176
3177 addr = sta ? sta->addr : bcast_addr;
3178
3179 if (is_zero_ether_addr(addr)) {
3180 /* We don't support TX-only encryption */
3181 return -EOPNOTSUPP;
3182 }
3183
3184 /* The wl1271 does not allow removing unicast keys - they
3185 will be cleared automatically on the next CMD_JOIN. Ignore the
3186 request silently, as we don't want mac80211 to emit
3187 an error message. */
3188 if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
3189 return 0;
3190
3191 /* don't remove key if hlid was already deleted */
3192 if (action == KEY_REMOVE &&
3193 wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
3194 return 0;
3195
3196 ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
3197 id, key_type, key_size,
3198 key, addr, tx_seq_32,
3199 tx_seq_16);
3200 if (ret < 0)
3201 return ret;
3202
3203 /* the default WEP key needs to be configured at least once */
3204 if (key_type == KEY_WEP) {
3205 ret = wl12xx_cmd_set_default_wep_key(wl,
3206 wlvif->default_key,
3207 wlvif->sta.hlid);
3208 if (ret < 0)
3209 return ret;
3210 }
3211 }
3212
3213 return 0;
3214 }
3215
3216 static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3217 struct ieee80211_vif *vif,
3218 struct ieee80211_sta *sta,
3219 struct ieee80211_key_conf *key_conf)
3220 {
3221 struct wl1271 *wl = hw->priv;
3222 int ret;
3223 bool might_change_spare =
3224 key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
3225 key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
3226
3227 if (might_change_spare) {
3228 /*
3229 * stop the queues and flush to ensure the next packets are
3230 * in sync with FW spare block accounting
3231 */
3232 wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3233 wl1271_tx_flush(wl);
3234 }
3235
3236 mutex_lock(&wl->mutex);
3237
3238 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3239 ret = -EAGAIN;
3240 goto out_wake_queues;
3241 }
3242
3243 ret = wl1271_ps_elp_wakeup(wl);
3244 if (ret < 0)
3245 goto out_wake_queues;
3246
3247 ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
3248
3249 wl1271_ps_elp_sleep(wl);
3250
3251 out_wake_queues:
3252 if (might_change_spare)
3253 wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
3254
3255 mutex_unlock(&wl->mutex);
3256
3257 return ret;
3258 }
3259
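/*
 * Translate the mac80211 cipher suite into a firmware key type; for
 * TKIP, CCMP and GEM the current TX security sequence counter is passed
 * along as well (presumably so replay counters stay consistent across
 * rekeying). SET_KEY maps to KEY_ADD_OR_REPLACE and DISABLE_KEY to
 * KEY_REMOVE in wl1271_set_key().
 */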
3260 int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
3261 struct ieee80211_vif *vif,
3262 struct ieee80211_sta *sta,
3263 struct ieee80211_key_conf *key_conf)
3264 {
3265 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3266 int ret;
3267 u32 tx_seq_32 = 0;
3268 u16 tx_seq_16 = 0;
3269 u8 key_type;
3270
3271 wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
3272
3273 wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
3274 wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3275 key_conf->cipher, key_conf->keyidx,
3276 key_conf->keylen, key_conf->flags);
3277 wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
3278
3279 switch (key_conf->cipher) {
3280 case WLAN_CIPHER_SUITE_WEP40:
3281 case WLAN_CIPHER_SUITE_WEP104:
3282 key_type = KEY_WEP;
3283
3284 key_conf->hw_key_idx = key_conf->keyidx;
3285 break;
3286 case WLAN_CIPHER_SUITE_TKIP:
3287 key_type = KEY_TKIP;
3288
3289 key_conf->hw_key_idx = key_conf->keyidx;
3290 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3291 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3292 break;
3293 case WLAN_CIPHER_SUITE_CCMP:
3294 key_type = KEY_AES;
3295
3296 key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
3297 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3298 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3299 break;
3300 case WL1271_CIPHER_SUITE_GEM:
3301 key_type = KEY_GEM;
3302 tx_seq_32 = WL1271_TX_SECURITY_HI32(wlvif->tx_security_seq);
3303 tx_seq_16 = WL1271_TX_SECURITY_LO16(wlvif->tx_security_seq);
3304 break;
3305 default:
3306 wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
3307
3308 return -EOPNOTSUPP;
3309 }
3310
3311 switch (cmd) {
3312 case SET_KEY:
3313 ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
3314 key_conf->keyidx, key_type,
3315 key_conf->keylen, key_conf->key,
3316 tx_seq_32, tx_seq_16, sta);
3317 if (ret < 0) {
3318 wl1271_error("Could not add or replace key");
3319 return ret;
3320 }
3321
3322 /*
3323 * reconfigure the arp response if the unicast (or common)
3324 * encryption key type was changed
3325 */
3326 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
3327 (sta || key_type == KEY_WEP) &&
3328 wlvif->encryption_type != key_type) {
3329 wlvif->encryption_type = key_type;
3330 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
3331 if (ret < 0) {
3332 wl1271_warning("build arp rsp failed: %d", ret);
3333 return ret;
3334 }
3335 }
3336 break;
3337
3338 case DISABLE_KEY:
3339 ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
3340 key_conf->keyidx, key_type,
3341 key_conf->keylen, key_conf->key,
3342 0, 0, sta);
3343 if (ret < 0) {
3344 wl1271_error("Could not remove key");
3345 return ret;
3346 }
3347 break;
3348
3349 default:
3350 wl1271_error("Unsupported key cmd 0x%x", cmd);
3351 return -EOPNOTSUPP;
3352 }
3353
3354 return ret;
3355 }
3356 EXPORT_SYMBOL_GPL(wlcore_set_key);
3357
3358 void wlcore_regdomain_config(struct wl1271 *wl)
3359 {
3360 int ret;
3361
3362 if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
3363 return;
3364
3365 mutex_lock(&wl->mutex);
3366 ret = wl1271_ps_elp_wakeup(wl);
3367 if (ret < 0)
3368 goto out;
3369
3370 ret = wlcore_cmd_regdomain_config_locked(wl);
3371 if (ret < 0) {
3372 wl12xx_queue_recovery_work(wl);
3373 goto out;
3374 }
3375
3376 wl1271_ps_elp_sleep(wl);
3377 out:
3378 mutex_unlock(&wl->mutex);
3379 }
3380
3381 static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
3382 struct ieee80211_vif *vif,
3383 struct cfg80211_scan_request *req)
3384 {
3385 struct wl1271 *wl = hw->priv;
3386 int ret;
3387 u8 *ssid = NULL;
3388 size_t len = 0;
3389
3390 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
3391
3392 if (req->n_ssids) {
3393 ssid = req->ssids[0].ssid;
3394 len = req->ssids[0].ssid_len;
3395 }
3396
3397 mutex_lock(&wl->mutex);
3398
3399 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3400 /*
3401 * We cannot return -EBUSY here because cfg80211 will expect
3402 * a call to ieee80211_scan_completed if we do - in this case
3403 * there won't be any call.
3404 */
3405 ret = -EAGAIN;
3406 goto out;
3407 }
3408
3409 ret = wl1271_ps_elp_wakeup(wl);
3410 if (ret < 0)
3411 goto out;
3412
3413 /* fail if there is any role in ROC */
3414 if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
3415 /* don't allow scanning right now */
3416 ret = -EBUSY;
3417 goto out_sleep;
3418 }
3419
3420 ret = wlcore_scan(hw->priv, vif, ssid, len, req);
3421 out_sleep:
3422 wl1271_ps_elp_sleep(wl);
3423 out:
3424 mutex_unlock(&wl->mutex);
3425
3426 return ret;
3427 }
3428
3429 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
3430 struct ieee80211_vif *vif)
3431 {
3432 struct wl1271 *wl = hw->priv;
3433 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3434 int ret;
3435
3436 wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
3437
3438 mutex_lock(&wl->mutex);
3439
3440 if (unlikely(wl->state != WLCORE_STATE_ON))
3441 goto out;
3442
3443 if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
3444 goto out;
3445
3446 ret = wl1271_ps_elp_wakeup(wl);
3447 if (ret < 0)
3448 goto out;
3449
3450 if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
3451 ret = wl->ops->scan_stop(wl, wlvif);
3452 if (ret < 0)
3453 goto out_sleep;
3454 }
3455
3456 /*
3457 * Rearm the tx watchdog just before idling scan. This
3458 * prevents just-finished scans from triggering the watchdog
3459 */
3460 wl12xx_rearm_tx_watchdog_locked(wl);
3461
3462 wl->scan.state = WL1271_SCAN_STATE_IDLE;
3463 memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
3464 wl->scan_wlvif = NULL;
3465 wl->scan.req = NULL;
3466 ieee80211_scan_completed(wl->hw, true);
3467
3468 out_sleep:
3469 wl1271_ps_elp_sleep(wl);
3470 out:
3471 mutex_unlock(&wl->mutex);
3472
3473 cancel_delayed_work_sync(&wl->scan_complete_work);
3474 }
3475
3476 static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
3477 struct ieee80211_vif *vif,
3478 struct cfg80211_sched_scan_request *req,
3479 struct ieee80211_sched_scan_ies *ies)
3480 {
3481 struct wl1271 *wl = hw->priv;
3482 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3483 int ret;
3484
3485 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
3486
3487 mutex_lock(&wl->mutex);
3488
3489 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3490 ret = -EAGAIN;
3491 goto out;
3492 }
3493
3494 ret = wl1271_ps_elp_wakeup(wl);
3495 if (ret < 0)
3496 goto out;
3497
3498 ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
3499 if (ret < 0)
3500 goto out_sleep;
3501
3502 wl->sched_vif = wlvif;
3503
3504 out_sleep:
3505 wl1271_ps_elp_sleep(wl);
3506 out:
3507 mutex_unlock(&wl->mutex);
3508 return ret;
3509 }
3510
3511 static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
3512 struct ieee80211_vif *vif)
3513 {
3514 struct wl1271 *wl = hw->priv;
3515 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3516 int ret;
3517
3518 wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
3519
3520 mutex_lock(&wl->mutex);
3521
3522 if (unlikely(wl->state != WLCORE_STATE_ON))
3523 goto out;
3524
3525 ret = wl1271_ps_elp_wakeup(wl);
3526 if (ret < 0)
3527 goto out;
3528
3529 wl->ops->sched_scan_stop(wl, wlvif);
3530
3531 wl1271_ps_elp_sleep(wl);
3532 out:
3533 mutex_unlock(&wl->mutex);
3534 }
3535
3536 static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
3537 {
3538 struct wl1271 *wl = hw->priv;
3539 int ret = 0;
3540
3541 mutex_lock(&wl->mutex);
3542
3543 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3544 ret = -EAGAIN;
3545 goto out;
3546 }
3547
3548 ret = wl1271_ps_elp_wakeup(wl);
3549 if (ret < 0)
3550 goto out;
3551
3552 ret = wl1271_acx_frag_threshold(wl, value);
3553 if (ret < 0)
3554 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
3555
3556 wl1271_ps_elp_sleep(wl);
3557
3558 out:
3559 mutex_unlock(&wl->mutex);
3560
3561 return ret;
3562 }
3563
3564 static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
3565 {
3566 struct wl1271 *wl = hw->priv;
3567 struct wl12xx_vif *wlvif;
3568 int ret = 0;
3569
3570 mutex_lock(&wl->mutex);
3571
3572 if (unlikely(wl->state != WLCORE_STATE_ON)) {
3573 ret = -EAGAIN;
3574 goto out;
3575 }
3576
3577 ret = wl1271_ps_elp_wakeup(wl);
3578 if (ret < 0)
3579 goto out;
3580
3581 wl12xx_for_each_wlvif(wl, wlvif) {
3582 ret = wl1271_acx_rts_threshold(wl, wlvif, value);
3583 if (ret < 0)
3584 wl1271_warning("set rts threshold failed: %d", ret);
3585 }
3586 wl1271_ps_elp_sleep(wl);
3587
3588 out:
3589 mutex_unlock(&wl->mutex);
3590
3591 return ret;
3592 }
3593
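/*
 * Strip an information element out of a template skb in place: locate
 * the IE, memmove() the rest of the frame over it and trim the skb by
 * the element's total length (two header bytes plus payload).
 */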
3594 static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
3595 {
3596 int len;
3597 const u8 *next, *end = skb->data + skb->len;
3598 u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
3599 skb->len - ieoffset);
3600 if (!ie)
3601 return;
3602 len = ie[1] + 2;
3603 next = ie + len;
3604 memmove(ie, next, end - next);
3605 skb_trim(skb, skb->len - len);
3606 }
3607
3608 static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
3609 unsigned int oui, u8 oui_type,
3610 int ieoffset)
3611 {
3612 int len;
3613 const u8 *next, *end = skb->data + skb->len;
3614 u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
3615 skb->data + ieoffset,
3616 skb->len - ieoffset);
3617 if (!ie)
3618 return;
3619 len = ie[1] + 2;
3620 next = ie + len;
3621 memmove(ie, next, end - next);
3622 skb_trim(skb, skb->len - len);
3623 }
3624
3625 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
3626 struct ieee80211_vif *vif)
3627 {
3628 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3629 struct sk_buff *skb;
3630 int ret;
3631
3632 skb = ieee80211_proberesp_get(wl->hw, vif);
3633 if (!skb)
3634 return -EOPNOTSUPP;
3635
3636 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3637 CMD_TEMPL_AP_PROBE_RESPONSE,
3638 skb->data,
3639 skb->len, 0,
3640 rates);
3641 dev_kfree_skb(skb);
3642
3643 if (ret < 0)
3644 goto out;
3645
3646 wl1271_debug(DEBUG_AP, "probe response updated");
3647 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
3648
3649 out:
3650 return ret;
3651 }
3652
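/*
 * Build the AP probe-response template when the SSID is not already
 * known to the driver (presumably the hidden-SSID case): the SSID
 * element found in the template is replaced with the one from
 * bss_conf before the template is handed to the firmware.
 */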
3653 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
3654 struct ieee80211_vif *vif,
3655 u8 *probe_rsp_data,
3656 size_t probe_rsp_len,
3657 u32 rates)
3658 {
3659 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3660 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
3661 u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
3662 int ssid_ie_offset, ie_offset, templ_len;
3663 const u8 *ptr;
3664
3665 /* no need to change probe response if the SSID is set correctly */
3666 if (wlvif->ssid_len > 0)
3667 return wl1271_cmd_template_set(wl, wlvif->role_id,
3668 CMD_TEMPL_AP_PROBE_RESPONSE,
3669 probe_rsp_data,
3670 probe_rsp_len, 0,
3671 rates);
3672
3673 if (probe_rsp_len + bss_conf->ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
3674 wl1271_error("probe_rsp template too big");
3675 return -EINVAL;
3676 }
3677
3678 /* start searching from IE offset */
3679 ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
3680
3681 ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
3682 probe_rsp_len - ie_offset);
3683 if (!ptr) {
3684 wl1271_error("No SSID in beacon!");
3685 return -EINVAL;
3686 }
3687
3688 ssid_ie_offset = ptr - probe_rsp_data;
3689 ptr += (ptr[1] + 2);
3690
3691 memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
3692
3693 /* insert SSID from bss_conf */
3694 probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
3695 probe_rsp_templ[ssid_ie_offset + 1] = bss_conf->ssid_len;
3696 memcpy(probe_rsp_templ + ssid_ie_offset + 2,
3697 bss_conf->ssid, bss_conf->ssid_len);
3698 templ_len = ssid_ie_offset + 2 + bss_conf->ssid_len;
3699
3700 memcpy(probe_rsp_templ + ssid_ie_offset + 2 + bss_conf->ssid_len,
3701 ptr, probe_rsp_len - (ptr - probe_rsp_data));
3702 templ_len += probe_rsp_len - (ptr - probe_rsp_data);
3703
3704 return wl1271_cmd_template_set(wl, wlvif->role_id,
3705 CMD_TEMPL_AP_PROBE_RESPONSE,
3706 probe_rsp_templ,
3707 templ_len, 0,
3708 rates);
3709 }
3710
3711 static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
3712 struct ieee80211_vif *vif,
3713 struct ieee80211_bss_conf *bss_conf,
3714 u32 changed)
3715 {
3716 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3717 int ret = 0;
3718
3719 if (changed & BSS_CHANGED_ERP_SLOT) {
3720 if (bss_conf->use_short_slot)
3721 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
3722 else
3723 ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
3724 if (ret < 0) {
3725 wl1271_warning("Set slot time failed %d", ret);
3726 goto out;
3727 }
3728 }
3729
3730 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
3731 if (bss_conf->use_short_preamble)
3732 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
3733 else
3734 wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
3735 }
3736
3737 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
3738 if (bss_conf->use_cts_prot)
3739 ret = wl1271_acx_cts_protect(wl, wlvif,
3740 CTSPROTECT_ENABLE);
3741 else
3742 ret = wl1271_acx_cts_protect(wl, wlvif,
3743 CTSPROTECT_DISABLE);
3744 if (ret < 0) {
3745 wl1271_warning("Set ctsprotect failed %d", ret);
3746 goto out;
3747 }
3748 }
3749
3750 out:
3751 return ret;
3752 }
3753
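/*
 * Upload the current beacon as the firmware beacon template and, unless
 * userspace already provided an explicit probe response, derive a
 * probe-response template from it: strip the TIM and P2P IEs, rewrite
 * the frame control to PROBE_RESP and upload the result as well.
 */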
3754 static int wlcore_set_beacon_template(struct wl1271 *wl,
3755 struct ieee80211_vif *vif,
3756 bool is_ap)
3757 {
3758 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3759 struct ieee80211_hdr *hdr;
3760 u32 min_rate;
3761 int ret;
3762 int ieoffset = offsetof(struct ieee80211_mgmt,
3763 u.beacon.variable);
3764 struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif);
3765 u16 tmpl_id;
3766
3767 if (!beacon) {
3768 ret = -EINVAL;
3769 goto out;
3770 }
3771
3772 wl1271_debug(DEBUG_MASTER, "beacon updated");
3773
3774 ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
3775 if (ret < 0) {
3776 dev_kfree_skb(beacon);
3777 goto out;
3778 }
3779 min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3780 tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
3781 CMD_TEMPL_BEACON;
3782 ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
3783 beacon->data,
3784 beacon->len, 0,
3785 min_rate);
3786 if (ret < 0) {
3787 dev_kfree_skb(beacon);
3788 goto out;
3789 }
3790
3791 wlvif->wmm_enabled =
3792 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
3793 WLAN_OUI_TYPE_MICROSOFT_WMM,
3794 beacon->data + ieoffset,
3795 beacon->len - ieoffset);
3796
3797 /*
3798 * In case a probe-resp template was already set explicitly by
3799 * userspace, don't build one from the beacon data.
3800 */
3801 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
3802 goto end_bcn;
3803
3804 /* remove TIM ie from probe response */
3805 wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
3806
3807 /*
3808 * remove the p2p ie from the probe response.
3809 * The fw responds to probe requests that don't include
3810 * the p2p ie. Probe requests with a p2p ie will be passed up
3811 * and answered by the supplicant (the spec
3812 * forbids including the p2p ie when responding to probe
3813 * requests that didn't include it).
3814 */
3815 wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
3816 WLAN_OUI_TYPE_WFA_P2P, ieoffset);
3817
3818 hdr = (struct ieee80211_hdr *) beacon->data;
3819 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
3820 IEEE80211_STYPE_PROBE_RESP);
3821 if (is_ap)
3822 ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
3823 beacon->data,
3824 beacon->len,
3825 min_rate);
3826 else
3827 ret = wl1271_cmd_template_set(wl, wlvif->role_id,
3828 CMD_TEMPL_PROBE_RESPONSE,
3829 beacon->data,
3830 beacon->len, 0,
3831 min_rate);
3832 end_bcn:
3833 dev_kfree_skb(beacon);
3834 if (ret < 0)
3835 goto out;
3836
3837 out:
3838 return ret;
3839 }
3840
3841 static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
3842 struct ieee80211_vif *vif,
3843 struct ieee80211_bss_conf *bss_conf,
3844 u32 changed)
3845 {
3846 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3847 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
3848 int ret = 0;
3849
3850 if (changed & BSS_CHANGED_BEACON_INT) {
3851 wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
3852 bss_conf->beacon_int);
3853
3854 wlvif->beacon_int = bss_conf->beacon_int;
3855 }
3856
3857 if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
3858 u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
3859
3860 wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
3861 }
3862
3863 if (changed & BSS_CHANGED_BEACON) {
3864 ret = wlcore_set_beacon_template(wl, vif, is_ap);
3865 if (ret < 0)
3866 goto out;
3867 }
3868
3869 out:
3870 if (ret != 0)
3871 wl1271_error("beacon info change failed: %d", ret);
3872 return ret;
3873 }
3874
3875 /* AP mode changes */
3876 static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
3877 struct ieee80211_vif *vif,
3878 struct ieee80211_bss_conf *bss_conf,
3879 u32 changed)
3880 {
3881 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
3882 int ret = 0;
3883
3884 if (changed & BSS_CHANGED_BASIC_RATES) {
3885 u32 rates = bss_conf->basic_rates;
3886
3887 wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
3888 wlvif->band);
3889 wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
3890 wlvif->basic_rate_set);
3891
3892 ret = wl1271_init_ap_rates(wl, wlvif);
3893 if (ret < 0) {
3894 wl1271_error("AP rate policy change failed %d", ret);
3895 goto out;
3896 }
3897
3898 ret = wl1271_ap_init_templates(wl, vif);
3899 if (ret < 0)
3900 goto out;
3901
3902 ret = wl1271_ap_set_probe_resp_tmpl(wl, wlvif->basic_rate, vif);
3903 if (ret < 0)
3904 goto out;
3905
3906 ret = wlcore_set_beacon_template(wl, vif, true);
3907 if (ret < 0)
3908 goto out;
3909 }
3910
3911 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
3912 if (ret < 0)
3913 goto out;
3914
3915 if (changed & BSS_CHANGED_BEACON_ENABLED) {
3916 if (bss_conf->enable_beacon) {
3917 if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3918 ret = wl12xx_cmd_role_start_ap(wl, wlvif);
3919 if (ret < 0)
3920 goto out;
3921
3922 ret = wl1271_ap_init_hwenc(wl, wlvif);
3923 if (ret < 0)
3924 goto out;
3925
3926 set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3927 wl1271_debug(DEBUG_AP, "started AP");
3928 }
3929 } else {
3930 if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
3931 ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
3932 if (ret < 0)
3933 goto out;
3934
3935 clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
3936 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
3937 &wlvif->flags);
3938 wl1271_debug(DEBUG_AP, "stopped AP");
3939 }
3940 }
3941 }
3942
3943 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
3944 if (ret < 0)
3945 goto out;
3946
3947 /* Handle HT information change */
3948 if ((changed & BSS_CHANGED_HT) &&
3949 (bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
3950 ret = wl1271_acx_set_ht_information(wl, wlvif,
3951 bss_conf->ht_operation_mode);
3952 if (ret < 0) {
3953 wl1271_warning("Set ht information failed %d", ret);
3954 goto out;
3955 }
3956 }
3957
3958 out:
3959 return;
3960 }
3961
3962 static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
3963 struct ieee80211_bss_conf *bss_conf,
3964 u32 sta_rate_set)
3965 {
3966 u32 rates;
3967 int ret;
3968
3969 wl1271_debug(DEBUG_MAC80211,
3970 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3971 bss_conf->bssid, bss_conf->aid,
3972 bss_conf->beacon_int,
3973 bss_conf->basic_rates, sta_rate_set);
3974
3975 wlvif->beacon_int = bss_conf->beacon_int;
3976 rates = bss_conf->basic_rates;
3977 wlvif->basic_rate_set =
3978 wl1271_tx_enabled_rates_get(wl, rates,
3979 wlvif->band);
3980 wlvif->basic_rate =
3981 wl1271_tx_min_rate_get(wl,
3982 wlvif->basic_rate_set);
3983
3984 if (sta_rate_set)
3985 wlvif->rate_set =
3986 wl1271_tx_enabled_rates_get(wl,
3987 sta_rate_set,
3988 wlvif->band);
3989
3990 /* we only support sched_scan while not connected */
3991 if (wl->sched_vif == wlvif)
3992 wl->ops->sched_scan_stop(wl, wlvif);
3993
3994 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
3995 if (ret < 0)
3996 return ret;
3997
3998 ret = wl12xx_cmd_build_null_data(wl, wlvif);
3999 if (ret < 0)
4000 return ret;
4001
4002 ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
4003 if (ret < 0)
4004 return ret;
4005
4006 wlcore_set_ssid(wl, wlvif);
4007
4008 set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4009
4010 return 0;
4011 }
4012
4013 static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
4014 {
4015 int ret;
4016
4017 /* revert to minimum rates for the current band */
4018 wl1271_set_band_rate(wl, wlvif);
4019 wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4020
4021 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4022 if (ret < 0)
4023 return ret;
4024
4025 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4026 test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
4027 ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
4028 if (ret < 0)
4029 return ret;
4030 }
4031
4032 clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
4033 return 0;
4034 }
4035 /* STA/IBSS mode changes */
4036 static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
4037 struct ieee80211_vif *vif,
4038 struct ieee80211_bss_conf *bss_conf,
4039 u32 changed)
4040 {
4041 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4042 bool do_join = false;
4043 bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
4044 bool ibss_joined = false;
4045 u32 sta_rate_set = 0;
4046 int ret;
4047 struct ieee80211_sta *sta;
4048 bool sta_exists = false;
4049 struct ieee80211_sta_ht_cap sta_ht_cap;
4050
4051 if (is_ibss) {
4052 ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
4053 changed);
4054 if (ret < 0)
4055 goto out;
4056 }
4057
4058 if (changed & BSS_CHANGED_IBSS) {
4059 if (bss_conf->ibss_joined) {
4060 set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
4061 ibss_joined = true;
4062 } else {
4063 wlcore_unset_assoc(wl, wlvif);
4064 wl12xx_cmd_role_stop_sta(wl, wlvif);
4065 }
4066 }
4067
4068 if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
4069 do_join = true;
4070
4071 /* Need to update the SSID (for filtering etc) */
4072 if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
4073 do_join = true;
4074
4075 if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
4076 wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
4077 bss_conf->enable_beacon ? "enabled" : "disabled");
4078
4079 do_join = true;
4080 }
4081
4082 if (changed & BSS_CHANGED_CQM) {
4083 bool enable = false;
4084 if (bss_conf->cqm_rssi_thold)
4085 enable = true;
4086 ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
4087 bss_conf->cqm_rssi_thold,
4088 bss_conf->cqm_rssi_hyst);
4089 if (ret < 0)
4090 goto out;
4091 wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
4092 }
4093
4094 if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
4095 BSS_CHANGED_ASSOC)) {
4096 rcu_read_lock();
4097 sta = ieee80211_find_sta(vif, bss_conf->bssid);
4098 if (sta) {
4099 u8 *rx_mask = sta->ht_cap.mcs.rx_mask;
4100
4101 /* save the supp_rates of the ap */
4102 sta_rate_set = sta->supp_rates[wlvif->band];
4103 if (sta->ht_cap.ht_supported)
4104 sta_rate_set |=
4105 (rx_mask[0] << HW_HT_RATES_OFFSET) |
4106 (rx_mask[1] << HW_MIMO_RATES_OFFSET);
4107 sta_ht_cap = sta->ht_cap;
4108 sta_exists = true;
4109 }
4110
4111 rcu_read_unlock();
4112 }
4113
4114 if (changed & BSS_CHANGED_BSSID) {
4115 if (!is_zero_ether_addr(bss_conf->bssid)) {
4116 ret = wlcore_set_bssid(wl, wlvif, bss_conf,
4117 sta_rate_set);
4118 if (ret < 0)
4119 goto out;
4120
4121 /* Need to update the BSSID (for filtering etc) */
4122 do_join = true;
4123 } else {
4124 ret = wlcore_clear_bssid(wl, wlvif);
4125 if (ret < 0)
4126 goto out;
4127 }
4128 }
4129
4130 if (changed & BSS_CHANGED_IBSS) {
4131 wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
4132 bss_conf->ibss_joined);
4133
4134 if (bss_conf->ibss_joined) {
4135 u32 rates = bss_conf->basic_rates;
4136 wlvif->basic_rate_set =
4137 wl1271_tx_enabled_rates_get(wl, rates,
4138 wlvif->band);
4139 wlvif->basic_rate =
4140 wl1271_tx_min_rate_get(wl,
4141 wlvif->basic_rate_set);
4142
4143 /* by default, use 11b + OFDM rates */
4144 wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
4145 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4146 if (ret < 0)
4147 goto out;
4148 }
4149 }
4150
4151 ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
4152 if (ret < 0)
4153 goto out;
4154
4155 if (do_join) {
4156 ret = wlcore_join(wl, wlvif);
4157 if (ret < 0) {
4158 wl1271_warning("cmd join failed %d", ret);
4159 goto out;
4160 }
4161 }
4162
4163 if (changed & BSS_CHANGED_ASSOC) {
4164 if (bss_conf->assoc) {
4165 ret = wlcore_set_assoc(wl, wlvif, bss_conf,
4166 sta_rate_set);
4167 if (ret < 0)
4168 goto out;
4169
4170 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
4171 wl12xx_set_authorized(wl, wlvif);
4172 } else {
4173 wlcore_unset_assoc(wl, wlvif);
4174 }
4175 }
4176
4177 if (changed & BSS_CHANGED_PS) {
4178 if ((bss_conf->ps) &&
4179 test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
4180 !test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4181 int ps_mode;
4182 char *ps_mode_str;
4183
4184 if (wl->conf.conn.forced_ps) {
4185 ps_mode = STATION_POWER_SAVE_MODE;
4186 ps_mode_str = "forced";
4187 } else {
4188 ps_mode = STATION_AUTO_PS_MODE;
4189 ps_mode_str = "auto";
4190 }
4191
4192 wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
4193
4194 ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
4195 if (ret < 0)
4196 wl1271_warning("enter %s ps failed %d",
4197 ps_mode_str, ret);
4198 } else if (!bss_conf->ps &&
4199 test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
4200 wl1271_debug(DEBUG_PSM, "auto ps disabled");
4201
4202 ret = wl1271_ps_set_mode(wl, wlvif,
4203 STATION_ACTIVE_MODE);
4204 if (ret < 0)
4205 wl1271_warning("exit auto ps failed %d", ret);
4206 }
4207 }
4208
4209 /* Handle new association with HT. Do this after join. */
4210 if (sta_exists &&
4211 (changed & BSS_CHANGED_HT)) {
4212 bool enabled =
4213 bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
4214
4215 ret = wlcore_hw_set_peer_cap(wl,
4216 &sta_ht_cap,
4217 enabled,
4218 wlvif->rate_set,
4219 wlvif->sta.hlid);
4220 if (ret < 0) {
4221 wl1271_warning("Set ht cap failed %d", ret);
4222 goto out;
4223
4224 }
4225
4226 if (enabled) {
4227 ret = wl1271_acx_set_ht_information(wl, wlvif,
4228 bss_conf->ht_operation_mode);
4229 if (ret < 0) {
4230 wl1271_warning("Set ht information failed %d",
4231 ret);
4232 goto out;
4233 }
4234 }
4235 }
4236
4237 /* Handle ARP filtering. Done after join. */
4238 if ((changed & BSS_CHANGED_ARP_FILTER) ||
4239 (!is_ibss && (changed & BSS_CHANGED_QOS))) {
4240 __be32 addr = bss_conf->arp_addr_list[0];
4241 wlvif->sta.qos = bss_conf->qos;
4242 WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
4243
4244 if (bss_conf->arp_addr_cnt == 1 &&
4245 bss_conf->arp_filter_enabled) {
4246 wlvif->ip_addr = addr;
4247 /*
4248 * The template should have been configured only upon
4249 * association. However, it seems that the correct IP
4250 * isn't being set (when sending), so we have to
4251 * reconfigure the template upon every IP change.
4252 */
4253 ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
4254 if (ret < 0) {
4255 wl1271_warning("build arp rsp failed: %d", ret);
4256 goto out;
4257 }
4258
4259 ret = wl1271_acx_arp_ip_filter(wl, wlvif,
4260 (ACX_ARP_FILTER_ARP_FILTERING |
4261 ACX_ARP_FILTER_AUTO_ARP),
4262 addr);
4263 } else {
4264 wlvif->ip_addr = 0;
4265 ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
4266 }
4267
4268 if (ret < 0)
4269 goto out;
4270 }
4271
4272 out:
4273 return;
4274 }
4275
4276 static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
4277 struct ieee80211_vif *vif,
4278 struct ieee80211_bss_conf *bss_conf,
4279 u32 changed)
4280 {
4281 struct wl1271 *wl = hw->priv;
4282 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4283 bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
4284 int ret;
4285
4286 wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
4287 wlvif->role_id, (int)changed);
4288
4289 /*
4290 * make sure to cancel pending disconnections if our association
4291 * state changed
4292 */
4293 if (!is_ap && (changed & BSS_CHANGED_ASSOC))
4294 cancel_delayed_work_sync(&wlvif->connection_loss_work);
4295
4296 if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
4297 !bss_conf->enable_beacon)
4298 wl1271_tx_flush(wl);
4299
4300 mutex_lock(&wl->mutex);
4301
4302 if (unlikely(wl->state != WLCORE_STATE_ON))
4303 goto out;
4304
4305 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
4306 goto out;
4307
4308 ret = wl1271_ps_elp_wakeup(wl);
4309 if (ret < 0)
4310 goto out;
4311
4312 if (is_ap)
4313 wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
4314 else
4315 wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
4316
4317 wl1271_ps_elp_sleep(wl);
4318
4319 out:
4320 mutex_unlock(&wl->mutex);
4321 }
4322
4323 static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
4324 struct ieee80211_chanctx_conf *ctx)
4325 {
4326 wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
4327 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4328 cfg80211_get_chandef_type(&ctx->def));
4329 return 0;
4330 }
4331
4332 static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
4333 struct ieee80211_chanctx_conf *ctx)
4334 {
4335 wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
4336 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4337 cfg80211_get_chandef_type(&ctx->def));
4338 }
4339
4340 static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
4341 struct ieee80211_chanctx_conf *ctx,
4342 u32 changed)
4343 {
4344 wl1271_debug(DEBUG_MAC80211,
4345 "mac80211 change chanctx %d (type %d) changed 0x%x",
4346 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4347 cfg80211_get_chandef_type(&ctx->def), changed);
4348 }
4349
4350 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
4351 struct ieee80211_vif *vif,
4352 struct ieee80211_chanctx_conf *ctx)
4353 {
4354 struct wl1271 *wl = hw->priv;
4355 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4356 int channel = ieee80211_frequency_to_channel(
4357 ctx->def.chan->center_freq);
4358
4359 wl1271_debug(DEBUG_MAC80211,
4360 "mac80211 assign chanctx (role %d) %d (type %d)",
4361 wlvif->role_id, channel, cfg80211_get_chandef_type(&ctx->def));
4362
4363 mutex_lock(&wl->mutex);
4364
4365 wlvif->band = ctx->def.chan->band;
4366 wlvif->channel = channel;
4367 wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
4368
4369 /* update default rates according to the band */
4370 wl1271_set_band_rate(wl, wlvif);
4371
4372 mutex_unlock(&wl->mutex);
4373
4374 return 0;
4375 }
4376
4377 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
4378 struct ieee80211_vif *vif,
4379 struct ieee80211_chanctx_conf *ctx)
4380 {
4381 struct wl1271 *wl = hw->priv;
4382 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4383
4384 wl1271_debug(DEBUG_MAC80211,
4385 "mac80211 unassign chanctx (role %d) %d (type %d)",
4386 wlvif->role_id,
4387 ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
4388 cfg80211_get_chandef_type(&ctx->def));
4389
4390 wl1271_tx_flush(wl);
4391 }
4392
4393 static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
4394 struct ieee80211_vif *vif, u16 queue,
4395 const struct ieee80211_tx_queue_params *params)
4396 {
4397 struct wl1271 *wl = hw->priv;
4398 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4399 u8 ps_scheme;
4400 int ret = 0;
4401
4402 mutex_lock(&wl->mutex);
4403
4404 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
4405
4406 if (params->uapsd)
4407 ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
4408 else
4409 ps_scheme = CONF_PS_SCHEME_LEGACY;
4410
4411 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
4412 goto out;
4413
4414 ret = wl1271_ps_elp_wakeup(wl);
4415 if (ret < 0)
4416 goto out;
4417
4418 /*
4419 * mac80211 configures the txop in units of 32us, while the
4420 * firmware needs it in microseconds, hence the << 5 below.
4421 */
4422 ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4423 params->cw_min, params->cw_max,
4424 params->aifs, params->txop << 5);
4425 if (ret < 0)
4426 goto out_sleep;
4427
4428 ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
4429 CONF_CHANNEL_TYPE_EDCF,
4430 wl1271_tx_get_queue(queue),
4431 ps_scheme, CONF_ACK_POLICY_LEGACY,
4432 0, 0);
4433
4434 out_sleep:
4435 wl1271_ps_elp_sleep(wl);
4436
4437 out:
4438 mutex_unlock(&wl->mutex);
4439
4440 return ret;
4441 }
4442
4443 static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
4444 struct ieee80211_vif *vif)
4445 {
4446
4447 struct wl1271 *wl = hw->priv;
4448 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4449 u64 mactime = ULLONG_MAX;
4450 int ret;
4451
4452 wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
4453
4454 mutex_lock(&wl->mutex);
4455
4456 if (unlikely(wl->state != WLCORE_STATE_ON))
4457 goto out;
4458
4459 ret = wl1271_ps_elp_wakeup(wl);
4460 if (ret < 0)
4461 goto out;
4462
4463 ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
4464 if (ret < 0)
4465 goto out_sleep;
4466
4467 out_sleep:
4468 wl1271_ps_elp_sleep(wl);
4469
4470 out:
4471 mutex_unlock(&wl->mutex);
4472 return mactime;
4473 }
4474
4475 static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
4476 struct survey_info *survey)
4477 {
4478 struct ieee80211_conf *conf = &hw->conf;
4479
4480 if (idx != 0)
4481 return -ENOENT;
4482
4483 survey->channel = conf->channel;
4484 survey->filled = 0;
4485 return 0;
4486 }
4487
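/*
 * Reserve a host link ID (HLID) for a newly added AP-mode station and
 * record its MAC address; fails with -EBUSY when the AP is already
 * serving the maximum number of stations or links.
 */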
4488 static int wl1271_allocate_sta(struct wl1271 *wl,
4489 struct wl12xx_vif *wlvif,
4490 struct ieee80211_sta *sta)
4491 {
4492 struct wl1271_station *wl_sta;
4493 int ret;
4494
4495
4496 if (wl->active_sta_count >= AP_MAX_STATIONS) {
4497 wl1271_warning("could not allocate HLID - too much stations");
4498 return -EBUSY;
4499 }
4500
4501 wl_sta = (struct wl1271_station *)sta->drv_priv;
4502 ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
4503 if (ret < 0) {
4504 wl1271_warning("could not allocate HLID - too many links");
4505 return -EBUSY;
4506 }
4507
4508 set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
4509 memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
4510 wl->active_sta_count++;
4511 return 0;
4512 }
4513
4514 void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
4515 {
4516 if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
4517 return;
4518
4519 clear_bit(hlid, wlvif->ap.sta_hlid_map);
4520 __clear_bit(hlid, &wl->ap_ps_map);
4521 __clear_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
4522 wl12xx_free_link(wl, wlvif, &hlid);
4523 wl->active_sta_count--;
4524
4525 /*
4526 * rearm the tx watchdog when the last STA is freed - give the FW a
4527 * chance to return STA-buffered packets before complaining.
4528 */
4529 if (wl->active_sta_count == 0)
4530 wl12xx_rearm_tx_watchdog_locked(wl);
4531 }
4532
4533 static int wl12xx_sta_add(struct wl1271 *wl,
4534 struct wl12xx_vif *wlvif,
4535 struct ieee80211_sta *sta)
4536 {
4537 struct wl1271_station *wl_sta;
4538 int ret = 0;
4539 u8 hlid;
4540
4541 wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
4542
4543 ret = wl1271_allocate_sta(wl, wlvif, sta);
4544 if (ret < 0)
4545 return ret;
4546
4547 wl_sta = (struct wl1271_station *)sta->drv_priv;
4548 hlid = wl_sta->hlid;
4549
4550 ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
4551 if (ret < 0)
4552 wl1271_free_sta(wl, wlvif, hlid);
4553
4554 return ret;
4555 }
4556
4557 static int wl12xx_sta_remove(struct wl1271 *wl,
4558 struct wl12xx_vif *wlvif,
4559 struct ieee80211_sta *sta)
4560 {
4561 struct wl1271_station *wl_sta;
4562 int ret = 0, id;
4563
4564 wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
4565
4566 wl_sta = (struct wl1271_station *)sta->drv_priv;
4567 id = wl_sta->hlid;
4568 if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
4569 return -EINVAL;
4570
4571 ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
4572 if (ret < 0)
4573 return ret;
4574
4575 wl1271_free_sta(wl, wlvif, wl_sta->hlid);
4576 return ret;
4577 }
4578
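/*
 * Start a ROC on this role while a station is connecting, but only if no
 * other role is currently ROCing (roc_map has one bit set per active ROC).
 */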
4579 static void wlcore_roc_if_possible(struct wl1271 *wl,
4580 struct wl12xx_vif *wlvif)
4581 {
4582 if (find_first_bit(wl->roc_map,
4583 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
4584 return;
4585
4586 if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
4587 return;
4588
4589 wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
4590 }
4591
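/*
 * Track stations that are in the middle of connecting. The first such
 * station triggers a ROC on the role so we stay on channel during the
 * handshake; when the last one finishes, the ROC is cancelled (CROC).
 */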
4592 static void wlcore_update_inconn_sta(struct wl1271 *wl,
4593 struct wl12xx_vif *wlvif,
4594 struct wl1271_station *wl_sta,
4595 bool in_connection)
4596 {
4597 if (in_connection) {
4598 if (WARN_ON(wl_sta->in_connection))
4599 return;
4600 wl_sta->in_connection = true;
4601 if (!wlvif->inconn_count++)
4602 wlcore_roc_if_possible(wl, wlvif);
4603 } else {
4604 if (!wl_sta->in_connection)
4605 return;
4606
4607 wl_sta->in_connection = false;
4608 wlvif->inconn_count--;
4609 if (WARN_ON(wlvif->inconn_count < 0))
4610 return;
4611
4612 if (!wlvif->inconn_count)
4613 if (test_bit(wlvif->role_id, wl->roc_map))
4614 wl12xx_croc(wl, wlvif->role_id);
4615 }
4616 }
4617
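/*
 * Translate mac80211 station state transitions into firmware commands:
 * add/remove/authorize peers in AP mode, authorize the AP in STA mode,
 * and manage the connection-time ROC accordingly.
 */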
4618 static int wl12xx_update_sta_state(struct wl1271 *wl,
4619 struct wl12xx_vif *wlvif,
4620 struct ieee80211_sta *sta,
4621 enum ieee80211_sta_state old_state,
4622 enum ieee80211_sta_state new_state)
4623 {
4624 struct wl1271_station *wl_sta;
4625 u8 hlid;
4626 bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
4627 bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
4628 int ret;
4629
4630 wl_sta = (struct wl1271_station *)sta->drv_priv;
4631 hlid = wl_sta->hlid;
4632
4633 /* Add station (AP mode) */
4634 if (is_ap &&
4635 old_state == IEEE80211_STA_NOTEXIST &&
4636 new_state == IEEE80211_STA_NONE) {
4637 ret = wl12xx_sta_add(wl, wlvif, sta);
4638 if (ret)
4639 return ret;
4640
4641 wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
4642 }
4643
4644 /* Remove station (AP mode) */
4645 if (is_ap &&
4646 old_state == IEEE80211_STA_NONE &&
4647 new_state == IEEE80211_STA_NOTEXIST) {
4648 /* must not fail */
4649 wl12xx_sta_remove(wl, wlvif, sta);
4650
4651 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4652 }
4653
4654 /* Authorize station (AP mode) */
4655 if (is_ap &&
4656 new_state == IEEE80211_STA_AUTHORIZED) {
4657 ret = wl12xx_cmd_set_peer_state(wl, wlvif, hlid);
4658 if (ret < 0)
4659 return ret;
4660
4661 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true,
4662 hlid);
4663 if (ret)
4664 return ret;
4665
4666 wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
4667 }
4668
4669 /* Authorize station */
4670 if (is_sta &&
4671 new_state == IEEE80211_STA_AUTHORIZED) {
4672 set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4673 ret = wl12xx_set_authorized(wl, wlvif);
4674 if (ret)
4675 return ret;
4676 }
4677
4678 if (is_sta &&
4679 old_state == IEEE80211_STA_AUTHORIZED &&
4680 new_state == IEEE80211_STA_ASSOC) {
4681 clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
4682 clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
4683 }
4684
4685 /* clear ROCs on failure or authorization */
4686 if (is_sta &&
4687 (new_state == IEEE80211_STA_AUTHORIZED ||
4688 new_state == IEEE80211_STA_NOTEXIST)) {
4689 if (test_bit(wlvif->role_id, wl->roc_map))
4690 wl12xx_croc(wl, wlvif->role_id);
4691 }
4692
4693 if (is_sta &&
4694 old_state == IEEE80211_STA_NOTEXIST &&
4695 new_state == IEEE80211_STA_NONE) {
4696 if (find_first_bit(wl->roc_map,
4697 WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
4698 WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
4699 wl12xx_roc(wl, wlvif, wlvif->role_id,
4700 wlvif->band, wlvif->channel);
4701 }
4702 }
4703 return 0;
4704 }
4705
4706 static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
4707 struct ieee80211_vif *vif,
4708 struct ieee80211_sta *sta,
4709 enum ieee80211_sta_state old_state,
4710 enum ieee80211_sta_state new_state)
4711 {
4712 struct wl1271 *wl = hw->priv;
4713 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4714 int ret;
4715
4716 wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
4717 sta->aid, old_state, new_state);
4718
4719 mutex_lock(&wl->mutex);
4720
4721 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4722 ret = -EBUSY;
4723 goto out;
4724 }
4725
4726 ret = wl1271_ps_elp_wakeup(wl);
4727 if (ret < 0)
4728 goto out;
4729
4730 ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
4731
4732 wl1271_ps_elp_sleep(wl);
4733 out:
4734 mutex_unlock(&wl->mutex);
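	/* never report failure for transitions to a lower state; teardown
	 * must proceed regardless of the firmware result */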
4735 if (new_state < old_state)
4736 return 0;
4737 return ret;
4738 }
4739
4740 static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
4741 struct ieee80211_vif *vif,
4742 enum ieee80211_ampdu_mlme_action action,
4743 struct ieee80211_sta *sta, u16 tid, u16 *ssn,
4744 u8 buf_size)
4745 {
4746 struct wl1271 *wl = hw->priv;
4747 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4748 int ret;
4749 u8 hlid, *ba_bitmap;
4750
4751 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
4752 tid);
4753
4754 /* sanity check - the fields in FW are only 8bits wide */
4755 if (WARN_ON(tid > 0xFF))
4756 return -ENOTSUPP;
4757
4758 mutex_lock(&wl->mutex);
4759
4760 if (unlikely(wl->state != WLCORE_STATE_ON)) {
4761 ret = -EAGAIN;
4762 goto out;
4763 }
4764
4765 if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
4766 hlid = wlvif->sta.hlid;
4767 } else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
4768 struct wl1271_station *wl_sta;
4769
4770 wl_sta = (struct wl1271_station *)sta->drv_priv;
4771 hlid = wl_sta->hlid;
4772 } else {
4773 ret = -EINVAL;
4774 goto out;
4775 }
4776
4777 ba_bitmap = &wl->links[hlid].ba_bitmap;
4778
4779 ret = wl1271_ps_elp_wakeup(wl);
4780 if (ret < 0)
4781 goto out;
4782
4783 wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
4784 tid, action);
4785
4786 switch (action) {
4787 case IEEE80211_AMPDU_RX_START:
4788 if (!wlvif->ba_support || !wlvif->ba_allowed) {
4789 ret = -ENOTSUPP;
4790 break;
4791 }
4792
4793 if (wl->ba_rx_session_count >= RX_BA_MAX_SESSIONS) {
4794 ret = -EBUSY;
4795 wl1271_error("exceeded max RX BA sessions");
4796 break;
4797 }
4798
4799 if (*ba_bitmap & BIT(tid)) {
4800 ret = -EINVAL;
4801 wl1271_error("cannot enable RX BA session on active "
4802 "tid: %d", tid);
4803 break;
4804 }
4805
4806 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
4807 hlid);
4808 if (!ret) {
4809 *ba_bitmap |= BIT(tid);
4810 wl->ba_rx_session_count++;
4811 }
4812 break;
4813
4814 case IEEE80211_AMPDU_RX_STOP:
4815 if (!(*ba_bitmap & BIT(tid))) {
4816 /*
4817 * this happens on reconfig - so only output a debug
4818 * message for now, and don't fail the function.
4819 */
4820 wl1271_debug(DEBUG_MAC80211,
4821 "no active RX BA session on tid: %d",
4822 tid);
4823 ret = 0;
4824 break;
4825 }
4826
4827 ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
4828 hlid);
4829 if (!ret) {
4830 *ba_bitmap &= ~BIT(tid);
4831 wl->ba_rx_session_count--;
4832 }
4833 break;
4834
4835 /*
4836 * BA initiator (TX) sessions are managed by the FW independently,
4837 * so fall through here on purpose for all TX AMPDU actions.
4838 */
4839 case IEEE80211_AMPDU_TX_START:
4840 case IEEE80211_AMPDU_TX_STOP_CONT:
4841 case IEEE80211_AMPDU_TX_STOP_FLUSH:
4842 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
4843 case IEEE80211_AMPDU_TX_OPERATIONAL:
4844 ret = -EINVAL;
4845 break;
4846
4847 default:
4848 wl1271_error("Incorrect ampdu action id=%x\n", action);
4849 ret = -EINVAL;
4850 }
4851
4852 wl1271_ps_elp_sleep(wl);
4853
4854 out:
4855 mutex_unlock(&wl->mutex);
4856
4857 return ret;
4858 }
4859
4860 static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
4861 struct ieee80211_vif *vif,
4862 const struct cfg80211_bitrate_mask *mask)
4863 {
4864 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4865 struct wl1271 *wl = hw->priv;
4866 int i, ret = 0;
4867
4868 wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
4869 mask->control[NL80211_BAND_2GHZ].legacy,
4870 mask->control[NL80211_BAND_5GHZ].legacy);
4871
4872 mutex_lock(&wl->mutex);
4873
4874 for (i = 0; i < WLCORE_NUM_BANDS; i++)
4875 wlvif->bitrate_masks[i] =
4876 wl1271_tx_enabled_rates_get(wl,
4877 mask->control[i].legacy,
4878 i);
4879
4880 if (unlikely(wl->state != WLCORE_STATE_ON))
4881 goto out;
4882
4883 if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
4884 !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
4885
4886 ret = wl1271_ps_elp_wakeup(wl);
4887 if (ret < 0)
4888 goto out;
4889
4890 wl1271_set_band_rate(wl, wlvif);
4891 wlvif->basic_rate =
4892 wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
4893 ret = wl1271_acx_sta_rate_policies(wl, wlvif);
4894
4895 wl1271_ps_elp_sleep(wl);
4896 }
4897 out:
4898 mutex_unlock(&wl->mutex);
4899
4900 return ret;
4901 }
4902
4903 static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
4904 struct ieee80211_channel_switch *ch_switch)
4905 {
4906 struct wl1271 *wl = hw->priv;
4907 struct wl12xx_vif *wlvif;
4908 int ret;
4909
4910 wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
4911
4912 wl1271_tx_flush(wl);
4913
4914 mutex_lock(&wl->mutex);
4915
4916 if (unlikely(wl->state == WLCORE_STATE_OFF)) {
4917 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4918 struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
4919 ieee80211_chswitch_done(vif, false);
4920 }
4921 goto out;
4922 } else if (unlikely(wl->state != WLCORE_STATE_ON)) {
4923 goto out;
4924 }
4925
4926 ret = wl1271_ps_elp_wakeup(wl);
4927 if (ret < 0)
4928 goto out;
4929
4930 /* TODO: change mac80211 to pass vif as param */
4931 wl12xx_for_each_wlvif_sta(wl, wlvif) {
4932 unsigned long delay_usec;
4933
4934 ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
4935 if (ret)
4936 goto out_sleep;
4937
4938 set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
4939
4940 /* indicate failure 5 seconds after channel switch time */
4941 delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
4942 ch_switch->count;
4943 ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
4944 usecs_to_jiffies(delay_usec) +
4945 msecs_to_jiffies(5000));
4946 }
4947
4948 out_sleep:
4949 wl1271_ps_elp_sleep(wl);
4950
4951 out:
4952 mutex_unlock(&wl->mutex);
4953 }
4954
4955 static void wlcore_op_flush(struct ieee80211_hw *hw, bool drop)
4956 {
4957 struct wl1271 *wl = hw->priv;
4958
4959 wl1271_tx_flush(wl);
4960 }
4961
4962 static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
4963 struct ieee80211_vif *vif,
4964 struct ieee80211_channel *chan,
4965 int duration)
4966 {
4967 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
4968 struct wl1271 *wl = hw->priv;
4969 int channel, ret = 0;
4970
4971 channel = ieee80211_frequency_to_channel(chan->center_freq);
4972
4973 wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
4974 channel, wlvif->role_id);
4975
4976 mutex_lock(&wl->mutex);
4977
4978 if (unlikely(wl->state != WLCORE_STATE_ON))
4979 goto out;
4980
4981 /* return EBUSY if we can't ROC right now */
4982 if (WARN_ON(wl->roc_vif ||
4983 find_first_bit(wl->roc_map,
4984 WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)) {
4985 ret = -EBUSY;
4986 goto out;
4987 }
4988
4989 ret = wl1271_ps_elp_wakeup(wl);
4990 if (ret < 0)
4991 goto out;
4992
4993 ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
4994 if (ret < 0)
4995 goto out_sleep;
4996
4997 wl->roc_vif = vif;
4998 ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
4999 msecs_to_jiffies(duration));
5000 out_sleep:
5001 wl1271_ps_elp_sleep(wl);
5002 out:
5003 mutex_unlock(&wl->mutex);
5004 return ret;
5005 }
5006
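/*
 * Tear down a finished remain-on-channel request: stop the device role
 * that was started for it and clear roc_vif. Called with the mutex held
 * and the chip awake.
 */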
5007 static int __wlcore_roc_completed(struct wl1271 *wl)
5008 {
5009 struct wl12xx_vif *wlvif;
5010 int ret;
5011
5012 /* already completed */
5013 if (unlikely(!wl->roc_vif))
5014 return 0;
5015
5016 wlvif = wl12xx_vif_to_data(wl->roc_vif);
5017
5018 if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
5019 return -EBUSY;
5020
5021 ret = wl12xx_stop_dev(wl, wlvif);
5022 if (ret < 0)
5023 return ret;
5024
5025 wl->roc_vif = NULL;
5026
5027 return 0;
5028 }
5029
5030 static int wlcore_roc_completed(struct wl1271 *wl)
5031 {
5032 int ret;
5033
5034 wl1271_debug(DEBUG_MAC80211, "roc complete");
5035
5036 mutex_lock(&wl->mutex);
5037
5038 if (unlikely(wl->state != WLCORE_STATE_ON)) {
5039 ret = -EBUSY;
5040 goto out;
5041 }
5042
5043 ret = wl1271_ps_elp_wakeup(wl);
5044 if (ret < 0)
5045 goto out;
5046
5047 ret = __wlcore_roc_completed(wl);
5048
5049 wl1271_ps_elp_sleep(wl);
5050 out:
5051 mutex_unlock(&wl->mutex);
5052
5053 return ret;
5054 }
5055
5056 static void wlcore_roc_complete_work(struct work_struct *work)
5057 {
5058 struct delayed_work *dwork;
5059 struct wl1271 *wl;
5060 int ret;
5061
5062 dwork = container_of(work, struct delayed_work, work);
5063 wl = container_of(dwork, struct wl1271, roc_complete_work);
5064
5065 ret = wlcore_roc_completed(wl);
5066 if (!ret)
5067 ieee80211_remain_on_channel_expired(wl->hw);
5068 }
5069
5070 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw)
5071 {
5072 struct wl1271 *wl = hw->priv;
5073
5074 wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
5075
5076 /* TODO: per-vif */
5077 wl1271_tx_flush(wl);
5078
5079 /*
5080 * we can't just flush_work here, because it might deadlock
5081 * (as we might get called from the same workqueue)
5082 */
5083 cancel_delayed_work_sync(&wl->roc_complete_work);
5084 wlcore_roc_completed(wl);
5085
5086 return 0;
5087 }
5088
5089 static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
5090 struct ieee80211_vif *vif,
5091 struct ieee80211_sta *sta,
5092 u32 changed)
5093 {
5094 struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
5095 struct wl1271 *wl = hw->priv;
5096
5097 wlcore_hw_sta_rc_update(wl, wlvif, sta, changed);
5098 }
5099
5100 static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
5101 {
5102 struct wl1271 *wl = hw->priv;
5103 bool ret = false;
5104
5105 mutex_lock(&wl->mutex);
5106
5107 if (unlikely(wl->state != WLCORE_STATE_ON))
5108 goto out;
5109
5110 /* packets are considered pending if in the TX queue or the FW */
5111 ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
5112 out:
5113 mutex_unlock(&wl->mutex);
5114
5115 return ret;
5116 }
5117
5118 /* can't be const, mac80211 writes to this */
5119 static struct ieee80211_rate wl1271_rates[] = {
5120 { .bitrate = 10,
5121 .hw_value = CONF_HW_BIT_RATE_1MBPS,
5122 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
5123 { .bitrate = 20,
5124 .hw_value = CONF_HW_BIT_RATE_2MBPS,
5125 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
5126 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5127 { .bitrate = 55,
5128 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
5129 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
5130 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5131 { .bitrate = 110,
5132 .hw_value = CONF_HW_BIT_RATE_11MBPS,
5133 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
5134 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
5135 { .bitrate = 60,
5136 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5137 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5138 { .bitrate = 90,
5139 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5140 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5141 { .bitrate = 120,
5142 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5143 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5144 { .bitrate = 180,
5145 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5146 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5147 { .bitrate = 240,
5148 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5149 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5150 { .bitrate = 360,
5151 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5152 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5153 { .bitrate = 480,
5154 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5155 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5156 { .bitrate = 540,
5157 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5158 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5159 };
5160
5161 /* can't be const, mac80211 writes to this */
5162 static struct ieee80211_channel wl1271_channels[] = {
5163 { .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
5164 { .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
5165 { .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
5166 { .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
5167 { .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
5168 { .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
5169 { .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
5170 { .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
5171 { .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
5172 { .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
5173 { .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
5174 { .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
5175 { .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
5176 { .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
5177 };
5178
5179 /* can't be const, mac80211 writes to this */
5180 static struct ieee80211_supported_band wl1271_band_2ghz = {
5181 .channels = wl1271_channels,
5182 .n_channels = ARRAY_SIZE(wl1271_channels),
5183 .bitrates = wl1271_rates,
5184 .n_bitrates = ARRAY_SIZE(wl1271_rates),
5185 };
5186
5187 /* 5 GHz data rates for WL1273 */
5188 static struct ieee80211_rate wl1271_rates_5ghz[] = {
5189 { .bitrate = 60,
5190 .hw_value = CONF_HW_BIT_RATE_6MBPS,
5191 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
5192 { .bitrate = 90,
5193 .hw_value = CONF_HW_BIT_RATE_9MBPS,
5194 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
5195 { .bitrate = 120,
5196 .hw_value = CONF_HW_BIT_RATE_12MBPS,
5197 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
5198 { .bitrate = 180,
5199 .hw_value = CONF_HW_BIT_RATE_18MBPS,
5200 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
5201 { .bitrate = 240,
5202 .hw_value = CONF_HW_BIT_RATE_24MBPS,
5203 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
5204 { .bitrate = 360,
5205 .hw_value = CONF_HW_BIT_RATE_36MBPS,
5206 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
5207 { .bitrate = 480,
5208 .hw_value = CONF_HW_BIT_RATE_48MBPS,
5209 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
5210 { .bitrate = 540,
5211 .hw_value = CONF_HW_BIT_RATE_54MBPS,
5212 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
5213 };
5214
5215 /* 5 GHz band channels for WL1273 */
5216 static struct ieee80211_channel wl1271_channels_5ghz[] = {
5217 { .hw_value = 7, .center_freq = 5035, .max_power = WLCORE_MAX_TXPWR },
5218 { .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
5219 { .hw_value = 9, .center_freq = 5045, .max_power = WLCORE_MAX_TXPWR },
5220 { .hw_value = 11, .center_freq = 5055, .max_power = WLCORE_MAX_TXPWR },
5221 { .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
5222 { .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
5223 { .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
5224 { .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
5225 { .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
5226 { .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
5227 { .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
5228 { .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
5229 { .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
5230 { .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
5231 { .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
5232 { .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
5233 { .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
5234 { .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
5235 { .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
5236 { .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
5237 { .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
5238 { .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
5239 { .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
5240 { .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
5241 { .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
5242 { .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
5243 { .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
5244 { .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
5245 { .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
5246 { .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
5247 { .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
5248 { .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
5249 { .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
5250 { .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
5251 };
5252
5253 static struct ieee80211_supported_band wl1271_band_5ghz = {
5254 .channels = wl1271_channels_5ghz,
5255 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
5256 .bitrates = wl1271_rates_5ghz,
5257 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
5258 };
5259
5260 static const struct ieee80211_ops wl1271_ops = {
5261 .start = wl1271_op_start,
5262 .stop = wlcore_op_stop,
5263 .add_interface = wl1271_op_add_interface,
5264 .remove_interface = wl1271_op_remove_interface,
5265 .change_interface = wl12xx_op_change_interface,
5266 #ifdef CONFIG_PM
5267 .suspend = wl1271_op_suspend,
5268 .resume = wl1271_op_resume,
5269 #endif
5270 .config = wl1271_op_config,
5271 .prepare_multicast = wl1271_op_prepare_multicast,
5272 .configure_filter = wl1271_op_configure_filter,
5273 .tx = wl1271_op_tx,
5274 .set_key = wlcore_op_set_key,
5275 .hw_scan = wl1271_op_hw_scan,
5276 .cancel_hw_scan = wl1271_op_cancel_hw_scan,
5277 .sched_scan_start = wl1271_op_sched_scan_start,
5278 .sched_scan_stop = wl1271_op_sched_scan_stop,
5279 .bss_info_changed = wl1271_op_bss_info_changed,
5280 .set_frag_threshold = wl1271_op_set_frag_threshold,
5281 .set_rts_threshold = wl1271_op_set_rts_threshold,
5282 .conf_tx = wl1271_op_conf_tx,
5283 .get_tsf = wl1271_op_get_tsf,
5284 .get_survey = wl1271_op_get_survey,
5285 .sta_state = wl12xx_op_sta_state,
5286 .ampdu_action = wl1271_op_ampdu_action,
5287 .tx_frames_pending = wl1271_tx_frames_pending,
5288 .set_bitrate_mask = wl12xx_set_bitrate_mask,
5289 .channel_switch = wl12xx_op_channel_switch,
5290 .flush = wlcore_op_flush,
5291 .remain_on_channel = wlcore_op_remain_on_channel,
5292 .cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
5293 .add_chanctx = wlcore_op_add_chanctx,
5294 .remove_chanctx = wlcore_op_remove_chanctx,
5295 .change_chanctx = wlcore_op_change_chanctx,
5296 .assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
5297 .unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
5298 .sta_rc_update = wlcore_op_sta_rc_update,
5299 CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
5300 };
5301
5302
5303 u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum ieee80211_band band)
5304 {
5305 u8 idx;
5306
5307 BUG_ON(band >= 2);
5308
5309 if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
5310 wl1271_error("Illegal RX rate from HW: %d", rate);
5311 return 0;
5312 }
5313
5314 idx = wl->band_rate_to_idx[band][rate];
5315 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
5316 wl1271_error("Unsupported RX rate from HW: %d", rate);
5317 return 0;
5318 }
5319
5320 return idx;
5321 }
5322
5323 static ssize_t wl1271_sysfs_show_bt_coex_state(struct device *dev,
5324 struct device_attribute *attr,
5325 char *buf)
5326 {
5327 struct wl1271 *wl = dev_get_drvdata(dev);
5328 ssize_t len;
5329
5330 len = PAGE_SIZE;
5331
5332 mutex_lock(&wl->mutex);
5333 len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
5334 wl->sg_enabled);
5335 mutex_unlock(&wl->mutex);
5336
5337 return len;
5338
5339 }
5340
5341 static ssize_t wl1271_sysfs_store_bt_coex_state(struct device *dev,
5342 struct device_attribute *attr,
5343 const char *buf, size_t count)
5344 {
5345 struct wl1271 *wl = dev_get_drvdata(dev);
5346 unsigned long res;
5347 int ret;
5348
5349 ret = kstrtoul(buf, 10, &res);
5350 if (ret < 0) {
5351 wl1271_warning("incorrect value written to bt_coex_mode");
5352 return count;
5353 }
5354
5355 mutex_lock(&wl->mutex);
5356
5357 res = !!res;
5358
5359 if (res == wl->sg_enabled)
5360 goto out;
5361
5362 wl->sg_enabled = res;
5363
5364 if (unlikely(wl->state != WLCORE_STATE_ON))
5365 goto out;
5366
5367 ret = wl1271_ps_elp_wakeup(wl);
5368 if (ret < 0)
5369 goto out;
5370
5371 wl1271_acx_sg_enable(wl, wl->sg_enabled);
5372 wl1271_ps_elp_sleep(wl);
5373
5374 out:
5375 mutex_unlock(&wl->mutex);
5376 return count;
5377 }
5378
5379 static DEVICE_ATTR(bt_coex_state, S_IRUGO | S_IWUSR,
5380 wl1271_sysfs_show_bt_coex_state,
5381 wl1271_sysfs_store_bt_coex_state);
5382
5383 static ssize_t wl1271_sysfs_show_hw_pg_ver(struct device *dev,
5384 struct device_attribute *attr,
5385 char *buf)
5386 {
5387 struct wl1271 *wl = dev_get_drvdata(dev);
5388 ssize_t len;
5389
5390 len = PAGE_SIZE;
5391
5392 mutex_lock(&wl->mutex);
5393 if (wl->hw_pg_ver >= 0)
5394 len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
5395 else
5396 len = snprintf(buf, len, "n/a\n");
5397 mutex_unlock(&wl->mutex);
5398
5399 return len;
5400 }
5401
5402 static DEVICE_ATTR(hw_pg_ver, S_IRUGO,
5403 wl1271_sysfs_show_hw_pg_ver, NULL);
5404
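/*
 * Blocking read of the firmware log: sleep until log data arrives (or a
 * signal is delivered), then copy out whatever is available and discard
 * it from the driver's buffer. A negative fwlog_size means the log is
 * gone (e.g. on unload) and readers get EOF.
 */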
5405 static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
5406 struct bin_attribute *bin_attr,
5407 char *buffer, loff_t pos, size_t count)
5408 {
5409 struct device *dev = container_of(kobj, struct device, kobj);
5410 struct wl1271 *wl = dev_get_drvdata(dev);
5411 ssize_t len;
5412 int ret;
5413
5414 ret = mutex_lock_interruptible(&wl->mutex);
5415 if (ret < 0)
5416 return -ERESTARTSYS;
5417
5418 /* Let only one thread read the log at a time, blocking others */
5419 while (wl->fwlog_size == 0) {
5420 DEFINE_WAIT(wait);
5421
5422 prepare_to_wait_exclusive(&wl->fwlog_waitq,
5423 &wait,
5424 TASK_INTERRUPTIBLE);
5425
5426 if (wl->fwlog_size != 0) {
5427 finish_wait(&wl->fwlog_waitq, &wait);
5428 break;
5429 }
5430
5431 mutex_unlock(&wl->mutex);
5432
5433 schedule();
5434 finish_wait(&wl->fwlog_waitq, &wait);
5435
5436 if (signal_pending(current))
5437 return -ERESTARTSYS;
5438
5439 ret = mutex_lock_interruptible(&wl->mutex);
5440 if (ret < 0)
5441 return -ERESTARTSYS;
5442 }
5443
5444 /* Check if the fwlog is still valid */
5445 if (wl->fwlog_size < 0) {
5446 mutex_unlock(&wl->mutex);
5447 return 0;
5448 }
5449
5450 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5451 len = min(count, (size_t)wl->fwlog_size);
5452 wl->fwlog_size -= len;
5453 memcpy(buffer, wl->fwlog, len);
5454
5455 /* Make room for new messages */
5456 memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
5457
5458 mutex_unlock(&wl->mutex);
5459
5460 return len;
5461 }
5462
5463 static struct bin_attribute fwlog_attr = {
5464 .attr = {.name = "fwlog", .mode = S_IRUSR},
5465 .read = wl1271_sysfs_read_fwlog,
5466 };
5467
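/*
 * Derive the set of wiphy MAC addresses from a base OUI/NIC pair by
 * incrementing the NIC part for each address.
 */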
5468 static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
5469 {
5470 int i;
5471
5472 wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
5473 oui, nic);
5474
5475 if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
5476 wl1271_warning("NIC part of the MAC address wraps around!");
5477
5478 for (i = 0; i < wl->num_mac_addr; i++) {
5479 wl->addresses[i].addr[0] = (u8)(oui >> 16);
5480 wl->addresses[i].addr[1] = (u8)(oui >> 8);
5481 wl->addresses[i].addr[2] = (u8) oui;
5482 wl->addresses[i].addr[3] = (u8)(nic >> 16);
5483 wl->addresses[i].addr[4] = (u8)(nic >> 8);
5484 wl->addresses[i].addr[5] = (u8) nic;
5485 nic++;
5486 }
5487
5488 /* we may be one address short at most */
5489 WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
5490
5491 /*
5492 * copy the first address, turn on its locally administered (LAA)
5493 * bit and use the result as the last address.
5494 */
5495 if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
5496 int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
5497 memcpy(&wl->addresses[idx], &wl->addresses[0],
5498 sizeof(wl->addresses[0]));
5499 /* the LAA bit is bit 1 of the first octet */
5500 wl->addresses[idx].addr[0] |= BIT(1);
5501 }
5502
5503 wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
5504 wl->hw->wiphy->addresses = wl->addresses;
5505 }
5506
5507 static int wl12xx_get_hw_info(struct wl1271 *wl)
5508 {
5509 int ret;
5510
5511 ret = wl12xx_set_power_on(wl);
5512 if (ret < 0)
5513 return ret;
5514
5515 ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
5516 if (ret < 0)
5517 goto out;
5518
5519 wl->fuse_oui_addr = 0;
5520 wl->fuse_nic_addr = 0;
5521
5522 ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
5523 if (ret < 0)
5524 goto out;
5525
5526 if (wl->ops->get_mac)
5527 ret = wl->ops->get_mac(wl);
5528
5529 out:
5530 wl1271_power_off(wl);
5531 return ret;
5532 }
5533
5534 static int wl1271_register_hw(struct wl1271 *wl)
5535 {
5536 int ret;
5537 u32 oui_addr = 0, nic_addr = 0;
5538
5539 if (wl->mac80211_registered)
5540 return 0;
5541
5542 if (wl->nvs_len >= 12) {
5543 /* NOTE: The wl->nvs->nvs element must be first, in
5544 * order to simplify the casting, we assume it is at
5545 * the beginning of the wl->nvs structure.
5546 */
5547 u8 *nvs_ptr = (u8 *)wl->nvs;
5548
5549 oui_addr =
5550 (nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
5551 nic_addr =
5552 (nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
5553 }
5554
5555 /* if the MAC address is zeroed in the NVS derive from fuse */
5556 if (oui_addr == 0 && nic_addr == 0) {
5557 oui_addr = wl->fuse_oui_addr;
5558 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5559 nic_addr = wl->fuse_nic_addr + 1;
5560 }
5561
5562 wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
5563
5564 ret = ieee80211_register_hw(wl->hw);
5565 if (ret < 0) {
5566 wl1271_error("unable to register mac80211 hw: %d", ret);
5567 goto out;
5568 }
5569
5570 wl->mac80211_registered = true;
5571
5572 wl1271_debugfs_init(wl);
5573
5574 wl1271_notice("loaded");
5575
5576 out:
5577 return ret;
5578 }
5579
5580 static void wl1271_unregister_hw(struct wl1271 *wl)
5581 {
5582 if (wl->plt)
5583 wl1271_plt_stop(wl);
5584
5585 ieee80211_unregister_hw(wl->hw);
5586 wl->mac80211_registered = false;
5587
5588 }
5589
5590 static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
5591 {
5592 .max = 3,
5593 .types = BIT(NL80211_IFTYPE_STATION),
5594 },
5595 {
5596 .max = 1,
5597 .types = BIT(NL80211_IFTYPE_AP) |
5598 BIT(NL80211_IFTYPE_P2P_GO) |
5599 BIT(NL80211_IFTYPE_P2P_CLIENT),
5600 },
5601 };
5602
5603 static struct ieee80211_iface_combination
5604 wlcore_iface_combinations[] = {
5605 {
5606 .max_interfaces = 3,
5607 .limits = wlcore_iface_limits,
5608 .n_limits = ARRAY_SIZE(wlcore_iface_limits),
5609 },
5610 };
5611
5612 static int wl1271_init_ieee80211(struct wl1271 *wl)
5613 {
5614 int i;
5615 static const u32 cipher_suites[] = {
5616 WLAN_CIPHER_SUITE_WEP40,
5617 WLAN_CIPHER_SUITE_WEP104,
5618 WLAN_CIPHER_SUITE_TKIP,
5619 WLAN_CIPHER_SUITE_CCMP,
5620 WL1271_CIPHER_SUITE_GEM,
5621 };
5622
5623 /* The tx descriptor buffer */
5624 wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
5625
5626 if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
5627 wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
5628
5629 /* channel_change_time is expressed in microseconds */
5630 /* FIXME: find a proper value */
5631 wl->hw->channel_change_time = 10000;
5632 wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
5633
5634 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
5635 IEEE80211_HW_SUPPORTS_PS |
5636 IEEE80211_HW_SUPPORTS_DYNAMIC_PS |
5637 IEEE80211_HW_SUPPORTS_UAPSD |
5638 IEEE80211_HW_HAS_RATE_CONTROL |
5639 IEEE80211_HW_CONNECTION_MONITOR |
5640 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
5641 IEEE80211_HW_SPECTRUM_MGMT |
5642 IEEE80211_HW_AP_LINK_PS |
5643 IEEE80211_HW_AMPDU_AGGREGATION |
5644 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
5645 IEEE80211_HW_SCAN_WHILE_IDLE |
5646 IEEE80211_HW_QUEUE_CONTROL;
5647
5648 wl->hw->wiphy->cipher_suites = cipher_suites;
5649 wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
5650
5651 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
5652 BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) |
5653 BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO);
5654 wl->hw->wiphy->max_scan_ssids = 1;
5655 wl->hw->wiphy->max_sched_scan_ssids = 16;
5656 wl->hw->wiphy->max_match_sets = 16;
5657 /*
5658 * The maximum length of IEs in scanning probe request templates
5659 * is the maximum possible template length, minus the template's
5660 * IEEE 802.11 header.
5661 */
5662 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5663 sizeof(struct ieee80211_header);
5664
5665 wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
5666 sizeof(struct ieee80211_header);
5667
5668 wl->hw->wiphy->max_remain_on_channel_duration = 5000;
5669
5670 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
5671 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
5672
5673 /* make sure all our channels fit in the scanned_ch bitmask */
5674 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
5675 ARRAY_SIZE(wl1271_channels_5ghz) >
5676 WL1271_MAX_CHANNELS);
5677 /*
5678 * clear channel flags from the previous usage
5679 * and restore max_power & max_antenna_gain values.
5680 */
5681 for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
5682 wl1271_band_2ghz.channels[i].flags = 0;
5683 wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5684 wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
5685 }
5686
5687 for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
5688 wl1271_band_5ghz.channels[i].flags = 0;
5689 wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
5690 wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
5691 }
5692
5693 /*
5694 * We keep local copies of the band structs because we need to
5695 * modify them on a per-device basis.
5696 */
5697 memcpy(&wl->bands[IEEE80211_BAND_2GHZ], &wl1271_band_2ghz,
5698 sizeof(wl1271_band_2ghz));
5699 memcpy(&wl->bands[IEEE80211_BAND_2GHZ].ht_cap,
5700 &wl->ht_cap[IEEE80211_BAND_2GHZ],
5701 sizeof(*wl->ht_cap));
5702 memcpy(&wl->bands[IEEE80211_BAND_5GHZ], &wl1271_band_5ghz,
5703 sizeof(wl1271_band_5ghz));
5704 memcpy(&wl->bands[IEEE80211_BAND_5GHZ].ht_cap,
5705 &wl->ht_cap[IEEE80211_BAND_5GHZ],
5706 sizeof(*wl->ht_cap));
5707
5708 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
5709 &wl->bands[IEEE80211_BAND_2GHZ];
5710 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
5711 &wl->bands[IEEE80211_BAND_5GHZ];
5712
5713 /*
5714 * allow 4 queues per mac address we support +
5715 * 1 CAB (content-after-beacon) queue per mac + one global offchannel Tx queue
5716 */
5717 wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
5718
5719 /* the last queue is the offchannel queue */
5720 wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
5721 wl->hw->max_rates = 1;
5722
5723 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
5724
5725 /* the FW answers probe-requests in AP-mode */
5726 wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
5727 wl->hw->wiphy->probe_resp_offload =
5728 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
5729 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
5730 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
5731
5732 /* allowed interface combinations */
5733 wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
5734 wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
5735 wl->hw->wiphy->n_iface_combinations =
5736 ARRAY_SIZE(wlcore_iface_combinations);
5737
5738 SET_IEEE80211_DEV(wl->hw, wl->dev);
5739
5740 wl->hw->sta_data_size = sizeof(struct wl1271_station);
5741 wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
5742
5743 wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
5744
5745 return 0;
5746 }
5747
5748 #define WL1271_DEFAULT_CHANNEL 0
5749
5750 struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
5751 u32 mbox_size)
5752 {
5753 struct ieee80211_hw *hw;
5754 struct wl1271 *wl;
5755 int i, j, ret;
5756 unsigned int order;
5757
5758 BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
5759
5760 hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
5761 if (!hw) {
5762 wl1271_error("could not alloc ieee80211_hw");
5763 ret = -ENOMEM;
5764 goto err_hw_alloc;
5765 }
5766
5767 wl = hw->priv;
5768 memset(wl, 0, sizeof(*wl));
5769
5770 wl->priv = kzalloc(priv_size, GFP_KERNEL);
5771 if (!wl->priv) {
5772 wl1271_error("could not alloc wl priv");
5773 ret = -ENOMEM;
5774 goto err_priv_alloc;
5775 }
5776
5777 INIT_LIST_HEAD(&wl->wlvif_list);
5778
5779 wl->hw = hw;
5780
5781 for (i = 0; i < NUM_TX_QUEUES; i++)
5782 for (j = 0; j < WL12XX_MAX_LINKS; j++)
5783 skb_queue_head_init(&wl->links[j].tx_queue[i]);
5784
5785 skb_queue_head_init(&wl->deferred_rx_queue);
5786 skb_queue_head_init(&wl->deferred_tx_queue);
5787
5788 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
5789 INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
5790 INIT_WORK(&wl->tx_work, wl1271_tx_work);
5791 INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
5792 INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
5793 INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
5794 INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
5795
5796 wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
5797 if (!wl->freezable_wq) {
5798 ret = -ENOMEM;
5799 goto err_hw;
5800 }
5801
5802 wl->channel = WL1271_DEFAULT_CHANNEL;
5803 wl->rx_counter = 0;
5804 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
5805 wl->band = IEEE80211_BAND_2GHZ;
5806 wl->channel_type = NL80211_CHAN_NO_HT;
5807 wl->flags = 0;
5808 wl->sg_enabled = true;
5809 wl->sleep_auth = WL1271_PSM_ILLEGAL;
5810 wl->recovery_count = 0;
5811 wl->hw_pg_ver = -1;
5812 wl->ap_ps_map = 0;
5813 wl->ap_fw_ps_map = 0;
5814 wl->quirks = 0;
5815 wl->platform_quirks = 0;
5816 wl->system_hlid = WL12XX_SYSTEM_HLID;
5817 wl->active_sta_count = 0;
5818 wl->active_link_count = 0;
5819 wl->fwlog_size = 0;
5820 init_waitqueue_head(&wl->fwlog_waitq);
5821
5822 /* The system link is always allocated */
5823 __set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
5824
5825 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
5826 for (i = 0; i < wl->num_tx_desc; i++)
5827 wl->tx_frames[i] = NULL;
5828
5829 spin_lock_init(&wl->wl_lock);
5830
5831 wl->state = WLCORE_STATE_OFF;
5832 wl->fw_type = WL12XX_FW_TYPE_NONE;
5833 mutex_init(&wl->mutex);
5834 mutex_init(&wl->flush_mutex);
5835 init_completion(&wl->nvs_loading_complete);
5836
5837 order = get_order(aggr_buf_size);
5838 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
5839 if (!wl->aggr_buf) {
5840 ret = -ENOMEM;
5841 goto err_wq;
5842 }
5843 wl->aggr_buf_size = aggr_buf_size;
5844
5845 wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
5846 if (!wl->dummy_packet) {
5847 ret = -ENOMEM;
5848 goto err_aggr;
5849 }
5850
5851 /* Allocate one page for the FW log */
5852 wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
5853 if (!wl->fwlog) {
5854 ret = -ENOMEM;
5855 goto err_dummy_packet;
5856 }
5857
5858 wl->mbox_size = mbox_size;
5859 wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
5860 if (!wl->mbox) {
5861 ret = -ENOMEM;
5862 goto err_fwlog;
5863 }
5864
5865 wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
5866 if (!wl->buffer_32) {
5867 ret = -ENOMEM;
5868 goto err_mbox;
5869 }
5870
5871 return hw;
5872
5873 err_mbox:
5874 kfree(wl->mbox);
5875
5876 err_fwlog:
5877 free_page((unsigned long)wl->fwlog);
5878
5879 err_dummy_packet:
5880 dev_kfree_skb(wl->dummy_packet);
5881
5882 err_aggr:
5883 free_pages((unsigned long)wl->aggr_buf, order);
5884
5885 err_wq:
5886 destroy_workqueue(wl->freezable_wq);
5887
5888 err_hw:
5889 wl1271_debugfs_exit(wl);
5890 kfree(wl->priv);
5891
5892 err_priv_alloc:
5893 ieee80211_free_hw(hw);
5894
5895 err_hw_alloc:
5896
5897 return ERR_PTR(ret);
5898 }
5899 EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
5900
5901 int wlcore_free_hw(struct wl1271 *wl)
5902 {
5903 /* Unblock any fwlog readers */
5904 mutex_lock(&wl->mutex);
5905 wl->fwlog_size = -1;
5906 wake_up_interruptible_all(&wl->fwlog_waitq);
5907 mutex_unlock(&wl->mutex);
5908
5909 device_remove_bin_file(wl->dev, &fwlog_attr);
5910
5911 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
5912
5913 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
5914 kfree(wl->buffer_32);
5915 kfree(wl->mbox);
5916 free_page((unsigned long)wl->fwlog);
5917 dev_kfree_skb(wl->dummy_packet);
5918 free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
5919
5920 wl1271_debugfs_exit(wl);
5921
5922 vfree(wl->fw);
5923 wl->fw = NULL;
5924 wl->fw_type = WL12XX_FW_TYPE_NONE;
5925 kfree(wl->nvs);
5926 wl->nvs = NULL;
5927
5928 kfree(wl->fw_status_1);
5929 kfree(wl->tx_res_if);
5930 destroy_workqueue(wl->freezable_wq);
5931
5932 kfree(wl->priv);
5933 ieee80211_free_hw(wl->hw);
5934
5935 return 0;
5936 }
5937 EXPORT_SYMBOL_GPL(wlcore_free_hw);
5938
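/*
 * Hard IRQ handler: wake any ELP waiter and normally defer the real work
 * to the threaded handler. While suspended, just flag the pending work,
 * keep the IRQ disabled and raise a PM wakeup event instead.
 */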
5939 static irqreturn_t wl12xx_hardirq(int irq, void *cookie)
5940 {
5941 struct wl1271 *wl = cookie;
5942 unsigned long flags;
5943
5944 wl1271_debug(DEBUG_IRQ, "IRQ");
5945
5946 /* complete the ELP wakeup, if anyone is waiting for it */
5947 spin_lock_irqsave(&wl->wl_lock, flags);
5948 set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
5949 if (wl->elp_compl) {
5950 complete(wl->elp_compl);
5951 wl->elp_compl = NULL;
5952 }
5953
5954 if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
5955 /* don't enqueue the work right now; just mark it as pending */
5956 set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
5957 wl1271_debug(DEBUG_IRQ, "should not enqueue work");
5958 disable_irq_nosync(wl->irq);
5959 pm_wakeup_event(wl->dev, 0);
5960 spin_unlock_irqrestore(&wl->wl_lock, flags);
5961 return IRQ_HANDLED;
5962 }
5963 spin_unlock_irqrestore(&wl->wl_lock, flags);
5964
5965 return IRQ_WAKE_THREAD;
5966 }
5967
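/*
 * Completion callback for the asynchronous NVS firmware request: stash
 * the NVS image (if any), finish lower-driver setup, hook up the IRQ,
 * read the chip ID and register with mac80211 and sysfs.
 */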
5968 static void wlcore_nvs_cb(const struct firmware *fw, void *context)
5969 {
5970 struct wl1271 *wl = context;
5971 struct platform_device *pdev = wl->pdev;
5972 struct wl12xx_platform_data *pdata = pdev->dev.platform_data;
5973 unsigned long irqflags;
5974 int ret;
5975
5976 if (fw) {
5977 wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
5978 if (!wl->nvs) {
5979 wl1271_error("Could not allocate nvs data");
5980 goto out;
5981 }
5982 wl->nvs_len = fw->size;
5983 } else {
5984 wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
5985 WL12XX_NVS_NAME);
5986 wl->nvs = NULL;
5987 wl->nvs_len = 0;
5988 }
5989
5990 ret = wl->ops->setup(wl);
5991 if (ret < 0)
5992 goto out_free_nvs;
5993
5994 BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
5995
5996 /* adjust some runtime configuration parameters */
5997 wlcore_adjust_conf(wl);
5998
5999 wl->irq = platform_get_irq(pdev, 0);
6000 wl->platform_quirks = pdata->platform_quirks;
6001 wl->set_power = pdata->set_power;
6002 wl->if_ops = pdata->ops;
6003
6004 if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
6005 irqflags = IRQF_TRIGGER_RISING;
6006 else
6007 irqflags = IRQF_TRIGGER_HIGH | IRQF_ONESHOT;
6008
6009 ret = request_threaded_irq(wl->irq, wl12xx_hardirq, wlcore_irq,
6010 irqflags,
6011 pdev->name, wl);
6012 if (ret < 0) {
6013 wl1271_error("request_irq() failed: %d", ret);
6014 goto out_free_nvs;
6015 }
6016
6017 #ifdef CONFIG_PM
6018 ret = enable_irq_wake(wl->irq);
6019 if (!ret) {
6020 wl->irq_wake_enabled = true;
6021 device_init_wakeup(wl->dev, 1);
6022 if (pdata->pwr_in_suspend) {
6023 wl->hw->wiphy->wowlan.flags = WIPHY_WOWLAN_ANY;
6024 wl->hw->wiphy->wowlan.n_patterns =
6025 WL1271_MAX_RX_FILTERS;
6026 wl->hw->wiphy->wowlan.pattern_min_len = 1;
6027 wl->hw->wiphy->wowlan.pattern_max_len =
6028 WL1271_RX_FILTER_MAX_PATTERN_SIZE;
6029 }
6030 }
6031 #endif
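	/*
	 * Keep the IRQ masked for now; it is re-enabled later, once the chip
	 * has been booted and the driver is ready to service interrupts.
	 */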
6032 disable_irq(wl->irq);
6033
6034 ret = wl12xx_get_hw_info(wl);
6035 if (ret < 0) {
6036 wl1271_error("couldn't get hw info");
6037 goto out_irq;
6038 }
6039
6040 ret = wl->ops->identify_chip(wl);
6041 if (ret < 0)
6042 goto out_irq;
6043
6044 ret = wl1271_init_ieee80211(wl);
6045 if (ret)
6046 goto out_irq;
6047
6048 ret = wl1271_register_hw(wl);
6049 if (ret)
6050 goto out_irq;
6051
6052 /* Create sysfs file to control bt coex state */
6053 ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
6054 if (ret < 0) {
6055 wl1271_error("failed to create sysfs file bt_coex_state");
6056 goto out_unreg;
6057 }
6058
6059 /* Create sysfs file to get HW PG version */
6060 ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
6061 if (ret < 0) {
6062 wl1271_error("failed to create sysfs file hw_pg_ver");
6063 goto out_bt_coex_state;
6064 }
6065
6066 /* Create sysfs file for the FW log */
6067 ret = device_create_bin_file(wl->dev, &fwlog_attr);
6068 if (ret < 0) {
6069 wl1271_error("failed to create sysfs file fwlog");
6070 goto out_hw_pg_ver;
6071 }
6072
6073 wl->initialized = true;
6074 goto out;
6075
6076 out_hw_pg_ver:
6077 device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
6078
6079 out_bt_coex_state:
6080 device_remove_file(wl->dev, &dev_attr_bt_coex_state);
6081
6082 out_unreg:
6083 wl1271_unregister_hw(wl);
6084
6085 out_irq:
6086 free_irq(wl->irq, wl);
6087
6088 out_free_nvs:
6089 kfree(wl->nvs);
6090
6091 out:
6092 release_firmware(fw);
6093 complete_all(&wl->nvs_loading_complete);
6094 }
6095
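/*
 * wlcore_probe() itself only records the device pointers and fires an
 * asynchronous request for the NVS calibration file; everything else (IRQ
 * setup, chip identification, mac80211 registration, sysfs files) happens in
 * wlcore_nvs_cb() once that request completes.  wlcore_remove() synchronizes
 * with this by waiting on nvs_loading_complete and checking wl->initialized.
 */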
6096 int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
6097 {
6098 int ret;
6099
6100 if (!wl->ops || !wl->ptable)
6101 return -EINVAL;
6102
6103 wl->dev = &pdev->dev;
6104 wl->pdev = pdev;
6105 platform_set_drvdata(pdev, wl);
6106
6107 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
6108 WL12XX_NVS_NAME, &pdev->dev, GFP_KERNEL,
6109 wl, wlcore_nvs_cb);
6110 if (ret < 0) {
6111 wl1271_error("request_firmware_nowait failed: %d", ret);
6112 complete_all(&wl->nvs_loading_complete);
6113 }
6114
6115 return ret;
6116 }
6117 EXPORT_SYMBOL_GPL(wlcore_probe);
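
/*
 * Typical usage from a chip-specific driver (wl12xx/wl18xx) -- shown only as
 * an illustrative sketch, with the wlcore_alloc_hw() arguments elided and the
 * probe function name hypothetical:
 *
 *	static int wlxxxx_probe(struct platform_device *pdev)
 *	{
 *		struct ieee80211_hw *hw;
 *		struct wl1271 *wl;
 *
 *		hw = wlcore_alloc_hw(...);
 *		if (IS_ERR(hw))
 *			return PTR_ERR(hw);
 *
 *		wl = hw->priv;
 *		... fill in wl->ops and wl->ptable here ...
 *		return wlcore_probe(wl, pdev);
 *	}
 */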
6118
6119 int wlcore_remove(struct platform_device *pdev)
6120 {
6121 struct wl1271 *wl = platform_get_drvdata(pdev);
6122
6123 wait_for_completion(&wl->nvs_loading_complete);
6124 if (!wl->initialized)
6125 return 0;
6126
6127 if (wl->irq_wake_enabled) {
6128 device_init_wakeup(wl->dev, 0);
6129 disable_irq_wake(wl->irq);
6130 }
6131 wl1271_unregister_hw(wl);
6132 free_irq(wl->irq, wl);
6133 wlcore_free_hw(wl);
6134
6135 return 0;
6136 }
6137 EXPORT_SYMBOL_GPL(wlcore_remove);
6138
6139 u32 wl12xx_debug_level = DEBUG_NONE;
6140 EXPORT_SYMBOL_GPL(wl12xx_debug_level);
6141 module_param_named(debug_level, wl12xx_debug_level, uint, S_IRUSR | S_IWUSR);
6142 MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
6143
6144 module_param_named(fwlog, fwlog_param, charp, 0);
6145 MODULE_PARM_DESC(fwlog,
6146 "FW logger options: continuous, ondemand, dbgpins or disable");
6147
6148 module_param(bug_on_recovery, int, S_IRUSR | S_IWUSR);
6149 MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
6150
6151 module_param(no_recovery, int, S_IRUSR | S_IWUSR);
6152 MODULE_PARM_DESC(no_recovery, "Prevent HW recovery; the FW will remain stuck.");
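
/*
 * The module parameters above belong to the wlcore module.  As an
 * illustrative example only, firmware logging and verbose debugging could be
 * enabled at load time with something like a modprobe.d options line:
 *
 *	options wlcore fwlog=ondemand debug_level=0x...
 *
 * where debug_level is a bitmask built from the DEBUG_* flags in debug.h.
 */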
6153
6154 MODULE_LICENSE("GPL");
6155 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6156 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6157 MODULE_FIRMWARE(WL12XX_NVS_NAME);