3 * This file is part of wl1271
5 * Copyright (C) 2008-2010 Nokia Corporation
7 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * version 2 as published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
25 #include <linux/module.h>
26 #include <linux/firmware.h>
27 #include <linux/delay.h>
28 #include <linux/spi/spi.h>
29 #include <linux/crc32.h>
30 #include <linux/etherdevice.h>
31 #include <linux/vmalloc.h>
32 #include <linux/platform_device.h>
33 #include <linux/slab.h>
34 #include <linux/wl12xx.h>
35 #include <linux/sched.h>
36 #include <linux/interrupt.h>
40 #include "wl12xx_80211.h"
54 #define WL1271_BOOT_RETRIES 3
56 #define WL1271_BOOT_RETRIES 3
/*
 * Optional module parameters, consumed once in wlcore_adjust_conf().
 * NULL / -1 mean "not specified": leave the platform defaults untouched.
 */
58 static char *fwlog_param
;
59 static int bug_on_recovery
= -1;
60 static int no_recovery
= -1;
/* Forward declarations for helpers defined later in this file. */
62 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
63 struct ieee80211_vif
*vif
,
64 bool reset_tx_queues
);
65 static void wlcore_op_stop_locked(struct wl1271
*wl
);
66 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
);
/*
 * Inform the firmware that the station on @wlvif has completed
 * association (peer state -> connected).
 * NOTE(review): this extraction is missing interior lines (braces,
 * returns, the ret declaration); code tokens left untouched.
 */
68 static int wl12xx_set_authorized(struct wl1271
*wl
,
69 struct wl12xx_vif
*wlvif
)
/* only meaningful for a station-type BSS */
73 if (WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
))
/* nothing to report until we are actually associated */
76 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
/* send the peer-state update only once per association */
79 if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
))
82 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, wlvif
->sta
.hlid
);
86 wl1271_info("Association completed.");
90 static int wl1271_reg_notify(struct wiphy
*wiphy
,
91 struct regulatory_request
*request
)
93 struct ieee80211_supported_band
*band
;
94 struct ieee80211_channel
*ch
;
96 struct ieee80211_hw
*hw
= wiphy_to_ieee80211_hw(wiphy
);
97 struct wl1271
*wl
= hw
->priv
;
99 band
= wiphy
->bands
[IEEE80211_BAND_5GHZ
];
100 for (i
= 0; i
< band
->n_channels
; i
++) {
101 ch
= &band
->channels
[i
];
102 if (ch
->flags
& IEEE80211_CHAN_DISABLED
)
105 if (ch
->flags
& IEEE80211_CHAN_RADAR
)
106 ch
->flags
|= IEEE80211_CHAN_NO_IBSS
|
107 IEEE80211_CHAN_PASSIVE_SCAN
;
111 if (likely(wl
->state
== WLCORE_STATE_ON
))
112 wlcore_regdomain_config(wl
);
/*
 * Enable/disable the rx-streaming (fast PS polling) ACX and keep the
 * per-vif RX_STREAMING_STARTED flag in sync with the firmware state.
 * NOTE(review): this extraction is missing interior lines (braces,
 * the enable parameter, error check, return); code tokens left untouched.
 */
117 static int wl1271_set_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
122 /* we should hold wl->mutex */
123 ret
= wl1271_acx_ps_rx_streaming(wl
, wlvif
, enable
);
/* mirror the new firmware state in the vif flags */
128 set_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
130 clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
);
136 * this function is being called when the rx_streaming interval
137 * has beed changed or rx_streaming should be disabled
139 int wl1271_recalc_rx_streaming(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
142 int period
= wl
->conf
.rx_streaming
.interval
;
144 /* don't reconfigure if rx_streaming is disabled */
145 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
148 /* reconfigure/disable according to new streaming_period */
150 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
151 (wl
->conf
.rx_streaming
.always
||
152 test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
153 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
155 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
156 /* don't cancel_work_sync since we might deadlock */
157 del_timer_sync(&wlvif
->rx_streaming_timer
);
163 static void wl1271_rx_streaming_enable_work(struct work_struct
*work
)
166 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
167 rx_streaming_enable_work
);
168 struct wl1271
*wl
= wlvif
->wl
;
170 mutex_lock(&wl
->mutex
);
172 if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
) ||
173 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
174 (!wl
->conf
.rx_streaming
.always
&&
175 !test_bit(WL1271_FLAG_SOFT_GEMINI
, &wl
->flags
)))
178 if (!wl
->conf
.rx_streaming
.interval
)
181 ret
= wl1271_ps_elp_wakeup(wl
);
185 ret
= wl1271_set_rx_streaming(wl
, wlvif
, true);
189 /* stop it after some time of inactivity */
190 mod_timer(&wlvif
->rx_streaming_timer
,
191 jiffies
+ msecs_to_jiffies(wl
->conf
.rx_streaming
.duration
));
194 wl1271_ps_elp_sleep(wl
);
196 mutex_unlock(&wl
->mutex
);
199 static void wl1271_rx_streaming_disable_work(struct work_struct
*work
)
202 struct wl12xx_vif
*wlvif
= container_of(work
, struct wl12xx_vif
,
203 rx_streaming_disable_work
);
204 struct wl1271
*wl
= wlvif
->wl
;
206 mutex_lock(&wl
->mutex
);
208 if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED
, &wlvif
->flags
))
211 ret
= wl1271_ps_elp_wakeup(wl
);
215 ret
= wl1271_set_rx_streaming(wl
, wlvif
, false);
220 wl1271_ps_elp_sleep(wl
);
222 mutex_unlock(&wl
->mutex
);
225 static void wl1271_rx_streaming_timer(unsigned long data
)
227 struct wl12xx_vif
*wlvif
= (struct wl12xx_vif
*)data
;
228 struct wl1271
*wl
= wlvif
->wl
;
229 ieee80211_queue_work(wl
->hw
, &wlvif
->rx_streaming_disable_work
);
232 /* wl->mutex must be taken */
233 void wl12xx_rearm_tx_watchdog_locked(struct wl1271
*wl
)
235 /* if the watchdog is not armed, don't do anything */
236 if (wl
->tx_allocated_blocks
== 0)
239 cancel_delayed_work(&wl
->tx_watchdog_work
);
240 ieee80211_queue_delayed_work(wl
->hw
, &wl
->tx_watchdog_work
,
241 msecs_to_jiffies(wl
->conf
.tx
.tx_watchdog_timeout
));
244 static void wl12xx_tx_watchdog_work(struct work_struct
*work
)
246 struct delayed_work
*dwork
;
249 dwork
= container_of(work
, struct delayed_work
, work
);
250 wl
= container_of(dwork
, struct wl1271
, tx_watchdog_work
);
252 mutex_lock(&wl
->mutex
);
254 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
257 /* Tx went out in the meantime - everything is ok */
258 if (unlikely(wl
->tx_allocated_blocks
== 0))
262 * if a ROC is in progress, we might not have any Tx for a long
263 * time (e.g. pending Tx on the non-ROC channels)
265 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
266 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to ROC",
267 wl
->conf
.tx
.tx_watchdog_timeout
);
268 wl12xx_rearm_tx_watchdog_locked(wl
);
273 * if a scan is in progress, we might not have any Tx for a long
276 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
) {
277 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms due to scan",
278 wl
->conf
.tx
.tx_watchdog_timeout
);
279 wl12xx_rearm_tx_watchdog_locked(wl
);
284 * AP might cache a frame for a long time for a sleeping station,
285 * so rearm the timer if there's an AP interface with stations. If
286 * Tx is genuinely stuck we will most hopefully discover it when all
287 * stations are removed due to inactivity.
289 if (wl
->active_sta_count
) {
290 wl1271_debug(DEBUG_TX
, "No Tx (in FW) for %d ms. AP has "
292 wl
->conf
.tx
.tx_watchdog_timeout
,
293 wl
->active_sta_count
);
294 wl12xx_rearm_tx_watchdog_locked(wl
);
298 wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
299 wl
->conf
.tx
.tx_watchdog_timeout
);
300 wl12xx_queue_recovery_work(wl
);
303 mutex_unlock(&wl
->mutex
);
306 static void wlcore_adjust_conf(struct wl1271
*wl
)
308 /* Adjust settings according to optional module parameters */
311 if (!strcmp(fwlog_param
, "continuous")) {
312 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
313 } else if (!strcmp(fwlog_param
, "ondemand")) {
314 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_ON_DEMAND
;
315 } else if (!strcmp(fwlog_param
, "dbgpins")) {
316 wl
->conf
.fwlog
.mode
= WL12XX_FWLOG_CONTINUOUS
;
317 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_DBG_PINS
;
318 } else if (!strcmp(fwlog_param
, "disable")) {
319 wl
->conf
.fwlog
.mem_blocks
= 0;
320 wl
->conf
.fwlog
.output
= WL12XX_FWLOG_OUTPUT_NONE
;
322 wl1271_error("Unknown fwlog parameter %s", fwlog_param
);
326 if (bug_on_recovery
!= -1)
327 wl
->conf
.recovery
.bug_on_recovery
= (u8
) bug_on_recovery
;
329 if (no_recovery
!= -1)
330 wl
->conf
.recovery
.no_recovery
= (u8
) no_recovery
;
333 static void wl12xx_irq_ps_regulate_link(struct wl1271
*wl
,
334 struct wl12xx_vif
*wlvif
,
337 bool fw_ps
, single_link
;
339 fw_ps
= test_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
340 single_link
= (wl
->active_link_count
== 1);
343 * Wake up from high level PS if the STA is asleep with too little
344 * packets in FW or if the STA is awake.
346 if (!fw_ps
|| tx_pkts
< WL1271_PS_STA_MAX_PACKETS
)
347 wl12xx_ps_link_end(wl
, wlvif
, hlid
);
350 * Start high-level PS if the STA is asleep with enough blocks in FW.
351 * Make an exception if this is the only connected link. In this
352 * case FW-memory congestion is less of a problem.
354 else if (!single_link
&& fw_ps
&& tx_pkts
>= WL1271_PS_STA_MAX_PACKETS
)
355 wl12xx_ps_link_start(wl
, wlvif
, hlid
, true);
358 static void wl12xx_irq_update_links_status(struct wl1271
*wl
,
359 struct wl12xx_vif
*wlvif
,
360 struct wl_fw_status_2
*status
)
365 cur_fw_ps_map
= le32_to_cpu(status
->link_ps_bitmap
);
366 if (wl
->ap_fw_ps_map
!= cur_fw_ps_map
) {
367 wl1271_debug(DEBUG_PSM
,
368 "link ps prev 0x%x cur 0x%x changed 0x%x",
369 wl
->ap_fw_ps_map
, cur_fw_ps_map
,
370 wl
->ap_fw_ps_map
^ cur_fw_ps_map
);
372 wl
->ap_fw_ps_map
= cur_fw_ps_map
;
375 for_each_set_bit(hlid
, wlvif
->ap
.sta_hlid_map
, WL12XX_MAX_LINKS
)
376 wl12xx_irq_ps_regulate_link(wl
, wlvif
, hlid
,
377 wl
->links
[hlid
].allocated_pkts
);
380 static int wlcore_fw_status(struct wl1271
*wl
,
381 struct wl_fw_status_1
*status_1
,
382 struct wl_fw_status_2
*status_2
)
384 struct wl12xx_vif
*wlvif
;
386 u32 old_tx_blk_count
= wl
->tx_blocks_available
;
387 int avail
, freed_blocks
;
391 struct wl1271_link
*lnk
;
393 status_len
= WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
394 sizeof(*status_2
) + wl
->fw_status_priv_len
;
396 ret
= wlcore_raw_read_data(wl
, REG_RAW_FW_STATUS_ADDR
, status_1
,
401 wl1271_debug(DEBUG_IRQ
, "intr: 0x%x (fw_rx_counter = %d, "
402 "drv_rx_counter = %d, tx_results_counter = %d)",
404 status_1
->fw_rx_counter
,
405 status_1
->drv_rx_counter
,
406 status_1
->tx_results_counter
);
408 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
409 /* prevent wrap-around in freed-packets counter */
410 wl
->tx_allocated_pkts
[i
] -=
411 (status_2
->counters
.tx_released_pkts
[i
] -
412 wl
->tx_pkts_freed
[i
]) & 0xff;
414 wl
->tx_pkts_freed
[i
] = status_2
->counters
.tx_released_pkts
[i
];
418 for_each_set_bit(i
, wl
->links_map
, WL12XX_MAX_LINKS
) {
420 /* prevent wrap-around in freed-packets counter */
421 lnk
->allocated_pkts
-=
422 (status_2
->counters
.tx_lnk_free_pkts
[i
] -
423 lnk
->prev_freed_pkts
) & 0xff;
425 lnk
->prev_freed_pkts
= status_2
->counters
.tx_lnk_free_pkts
[i
];
428 /* prevent wrap-around in total blocks counter */
429 if (likely(wl
->tx_blocks_freed
<=
430 le32_to_cpu(status_2
->total_released_blks
)))
431 freed_blocks
= le32_to_cpu(status_2
->total_released_blks
) -
434 freed_blocks
= 0x100000000LL
- wl
->tx_blocks_freed
+
435 le32_to_cpu(status_2
->total_released_blks
);
437 wl
->tx_blocks_freed
= le32_to_cpu(status_2
->total_released_blks
);
439 wl
->tx_allocated_blocks
-= freed_blocks
;
442 * If the FW freed some blocks:
443 * If we still have allocated blocks - re-arm the timer, Tx is
444 * not stuck. Otherwise, cancel the timer (no Tx currently).
447 if (wl
->tx_allocated_blocks
)
448 wl12xx_rearm_tx_watchdog_locked(wl
);
450 cancel_delayed_work(&wl
->tx_watchdog_work
);
453 avail
= le32_to_cpu(status_2
->tx_total
) - wl
->tx_allocated_blocks
;
456 * The FW might change the total number of TX memblocks before
457 * we get a notification about blocks being released. Thus, the
458 * available blocks calculation might yield a temporary result
459 * which is lower than the actual available blocks. Keeping in
460 * mind that only blocks that were allocated can be moved from
461 * TX to RX, tx_blocks_available should never decrease here.
463 wl
->tx_blocks_available
= max((int)wl
->tx_blocks_available
,
466 /* if more blocks are available now, tx work can be scheduled */
467 if (wl
->tx_blocks_available
> old_tx_blk_count
)
468 clear_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
);
470 /* for AP update num of allocated TX blocks per link and ps status */
471 wl12xx_for_each_wlvif_ap(wl
, wlvif
) {
472 wl12xx_irq_update_links_status(wl
, wlvif
, status_2
);
475 /* update the host-chipset time offset */
477 wl
->time_offset
= (timespec_to_ns(&ts
) >> 10) -
478 (s64
)le32_to_cpu(status_2
->fw_localtime
);
480 wl
->fw_fast_lnk_map
= le32_to_cpu(status_2
->link_fast_bitmap
);
485 static void wl1271_flush_deferred_work(struct wl1271
*wl
)
489 /* Pass all received frames to the network stack */
490 while ((skb
= skb_dequeue(&wl
->deferred_rx_queue
)))
491 ieee80211_rx_ni(wl
->hw
, skb
);
493 /* Return sent skbs to the network stack */
494 while ((skb
= skb_dequeue(&wl
->deferred_tx_queue
)))
495 ieee80211_tx_status_ni(wl
->hw
, skb
);
498 static void wl1271_netstack_work(struct work_struct
*work
)
501 container_of(work
, struct wl1271
, netstack_work
);
504 wl1271_flush_deferred_work(wl
);
505 } while (skb_queue_len(&wl
->deferred_rx_queue
));
508 #define WL1271_IRQ_MAX_LOOPS 256
510 static int wlcore_irq_locked(struct wl1271
*wl
)
514 int loopcount
= WL1271_IRQ_MAX_LOOPS
;
516 unsigned int defer_count
;
520 * In case edge triggered interrupt must be used, we cannot iterate
521 * more than once without introducing race conditions with the hardirq.
523 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
526 wl1271_debug(DEBUG_IRQ
, "IRQ work");
528 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
531 ret
= wl1271_ps_elp_wakeup(wl
);
535 while (!done
&& loopcount
--) {
537 * In order to avoid a race with the hardirq, clear the flag
538 * before acknowledging the chip. Since the mutex is held,
539 * wl1271_ps_elp_wakeup cannot be called concurrently.
541 clear_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
542 smp_mb__after_clear_bit();
544 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
548 wlcore_hw_tx_immediate_compl(wl
);
550 intr
= le32_to_cpu(wl
->fw_status_1
->intr
);
551 intr
&= WLCORE_ALL_INTR_MASK
;
557 if (unlikely(intr
& WL1271_ACX_INTR_WATCHDOG
)) {
558 wl1271_error("HW watchdog interrupt received! starting recovery.");
559 wl
->watchdog_recovery
= true;
562 /* restarting the chip. ignore any other interrupt. */
566 if (unlikely(intr
& WL1271_ACX_SW_INTR_WATCHDOG
)) {
567 wl1271_error("SW watchdog interrupt received! "
568 "starting recovery.");
569 wl
->watchdog_recovery
= true;
572 /* restarting the chip. ignore any other interrupt. */
576 if (likely(intr
& WL1271_ACX_INTR_DATA
)) {
577 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_DATA");
579 ret
= wlcore_rx(wl
, wl
->fw_status_1
);
583 /* Check if any tx blocks were freed */
584 spin_lock_irqsave(&wl
->wl_lock
, flags
);
585 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
586 wl1271_tx_total_queue_count(wl
) > 0) {
587 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
589 * In order to avoid starvation of the TX path,
590 * call the work function directly.
592 ret
= wlcore_tx_work_locked(wl
);
596 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
599 /* check for tx results */
600 ret
= wlcore_hw_tx_delayed_compl(wl
);
604 /* Make sure the deferred queues don't get too long */
605 defer_count
= skb_queue_len(&wl
->deferred_tx_queue
) +
606 skb_queue_len(&wl
->deferred_rx_queue
);
607 if (defer_count
> WL1271_DEFERRED_QUEUE_LIMIT
)
608 wl1271_flush_deferred_work(wl
);
611 if (intr
& WL1271_ACX_INTR_EVENT_A
) {
612 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_A");
613 ret
= wl1271_event_handle(wl
, 0);
618 if (intr
& WL1271_ACX_INTR_EVENT_B
) {
619 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_EVENT_B");
620 ret
= wl1271_event_handle(wl
, 1);
625 if (intr
& WL1271_ACX_INTR_INIT_COMPLETE
)
626 wl1271_debug(DEBUG_IRQ
,
627 "WL1271_ACX_INTR_INIT_COMPLETE");
629 if (intr
& WL1271_ACX_INTR_HW_AVAILABLE
)
630 wl1271_debug(DEBUG_IRQ
, "WL1271_ACX_INTR_HW_AVAILABLE");
633 wl1271_ps_elp_sleep(wl
);
639 static irqreturn_t
wlcore_irq(int irq
, void *cookie
)
643 struct wl1271
*wl
= cookie
;
645 /* TX might be handled here, avoid redundant work */
646 set_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
647 cancel_work_sync(&wl
->tx_work
);
649 mutex_lock(&wl
->mutex
);
651 ret
= wlcore_irq_locked(wl
);
653 wl12xx_queue_recovery_work(wl
);
655 spin_lock_irqsave(&wl
->wl_lock
, flags
);
656 /* In case TX was not handled here, queue TX work */
657 clear_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
);
658 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
659 wl1271_tx_total_queue_count(wl
) > 0)
660 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
661 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
663 mutex_unlock(&wl
->mutex
);
668 struct vif_counter_data
{
671 struct ieee80211_vif
*cur_vif
;
672 bool cur_vif_running
;
675 static void wl12xx_vif_count_iter(void *data
, u8
*mac
,
676 struct ieee80211_vif
*vif
)
678 struct vif_counter_data
*counter
= data
;
681 if (counter
->cur_vif
== vif
)
682 counter
->cur_vif_running
= true;
685 /* caller must not hold wl->mutex, as it might deadlock */
686 static void wl12xx_get_vif_count(struct ieee80211_hw
*hw
,
687 struct ieee80211_vif
*cur_vif
,
688 struct vif_counter_data
*data
)
690 memset(data
, 0, sizeof(*data
));
691 data
->cur_vif
= cur_vif
;
693 ieee80211_iterate_active_interfaces(hw
, IEEE80211_IFACE_ITER_RESUME_ALL
,
694 wl12xx_vif_count_iter
, data
);
697 static int wl12xx_fetch_firmware(struct wl1271
*wl
, bool plt
)
699 const struct firmware
*fw
;
701 enum wl12xx_fw_type fw_type
;
705 fw_type
= WL12XX_FW_TYPE_PLT
;
706 fw_name
= wl
->plt_fw_name
;
709 * we can't call wl12xx_get_vif_count() here because
710 * wl->mutex is taken, so use the cached last_vif_count value
712 if (wl
->last_vif_count
> 1 && wl
->mr_fw_name
) {
713 fw_type
= WL12XX_FW_TYPE_MULTI
;
714 fw_name
= wl
->mr_fw_name
;
716 fw_type
= WL12XX_FW_TYPE_NORMAL
;
717 fw_name
= wl
->sr_fw_name
;
721 if (wl
->fw_type
== fw_type
)
724 wl1271_debug(DEBUG_BOOT
, "booting firmware %s", fw_name
);
726 ret
= request_firmware(&fw
, fw_name
, wl
->dev
);
729 wl1271_error("could not get firmware %s: %d", fw_name
, ret
);
734 wl1271_error("firmware size is not multiple of 32 bits: %zu",
741 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
742 wl
->fw_len
= fw
->size
;
743 wl
->fw
= vmalloc(wl
->fw_len
);
746 wl1271_error("could not allocate memory for the firmware");
751 memcpy(wl
->fw
, fw
->data
, wl
->fw_len
);
753 wl
->fw_type
= fw_type
;
755 release_firmware(fw
);
760 void wl12xx_queue_recovery_work(struct wl1271
*wl
)
762 WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
764 /* Avoid a recursive recovery */
765 if (wl
->state
== WLCORE_STATE_ON
) {
766 wl
->state
= WLCORE_STATE_RESTARTING
;
767 set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
768 wlcore_disable_interrupts_nosync(wl
);
769 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
773 size_t wl12xx_copy_fwlog(struct wl1271
*wl
, u8
*memblock
, size_t maxlen
)
777 /* The FW log is a length-value list, find where the log end */
778 while (len
< maxlen
) {
779 if (memblock
[len
] == 0)
781 if (len
+ memblock
[len
] + 1 > maxlen
)
783 len
+= memblock
[len
] + 1;
786 /* Make sure we have enough room */
787 len
= min(len
, (size_t)(PAGE_SIZE
- wl
->fwlog_size
));
789 /* Fill the FW log file, consumed by the sysfs fwlog entry */
790 memcpy(wl
->fwlog
+ wl
->fwlog_size
, memblock
, len
);
791 wl
->fwlog_size
+= len
;
796 #define WLCORE_FW_LOG_END 0x2000000
798 static void wl12xx_read_fwlog_panic(struct wl1271
*wl
)
806 if ((wl
->quirks
& WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED
) ||
807 (wl
->conf
.fwlog
.mem_blocks
== 0))
810 wl1271_info("Reading FW panic log");
812 block
= kmalloc(WL12XX_HW_BLOCK_SIZE
, GFP_KERNEL
);
817 * Make sure the chip is awake and the logger isn't active.
818 * Do not send a stop fwlog command if the fw is hanged or if
819 * dbgpins are used (due to some fw bug).
821 if (wl1271_ps_elp_wakeup(wl
))
823 if (!wl
->watchdog_recovery
&&
824 wl
->conf
.fwlog
.output
!= WL12XX_FWLOG_OUTPUT_DBG_PINS
)
825 wl12xx_cmd_stop_fwlog(wl
);
827 /* Read the first memory block address */
828 ret
= wlcore_fw_status(wl
, wl
->fw_status_1
, wl
->fw_status_2
);
832 addr
= le32_to_cpu(wl
->fw_status_2
->log_start_addr
);
836 if (wl
->conf
.fwlog
.mode
== WL12XX_FWLOG_CONTINUOUS
) {
837 offset
= sizeof(addr
) + sizeof(struct wl1271_rx_descriptor
);
838 end_of_log
= WLCORE_FW_LOG_END
;
840 offset
= sizeof(addr
);
844 /* Traverse the memory blocks linked list */
846 memset(block
, 0, WL12XX_HW_BLOCK_SIZE
);
847 ret
= wlcore_read_hwaddr(wl
, addr
, block
, WL12XX_HW_BLOCK_SIZE
,
853 * Memory blocks are linked to one another. The first 4 bytes
854 * of each memory block hold the hardware address of the next
855 * one. The last memory block points to the first one in
856 * on demand mode and is equal to 0x2000000 in continuous mode.
858 addr
= le32_to_cpup((__le32
*)block
);
859 if (!wl12xx_copy_fwlog(wl
, block
+ offset
,
860 WL12XX_HW_BLOCK_SIZE
- offset
))
862 } while (addr
&& (addr
!= end_of_log
));
864 wake_up_interruptible(&wl
->fwlog_waitq
);
870 static void wlcore_print_recovery(struct wl1271
*wl
)
876 wl1271_info("Hardware recovery in progress. FW ver: %s",
877 wl
->chip
.fw_ver_str
);
879 /* change partitions momentarily so we can read the FW pc */
880 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
884 ret
= wlcore_read_reg(wl
, REG_PC_ON_RECOVERY
, &pc
);
888 ret
= wlcore_read_reg(wl
, REG_INTERRUPT_NO_CLEAR
, &hint_sts
);
892 wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
893 pc
, hint_sts
, ++wl
->recovery_count
);
895 wlcore_set_partition(wl
, &wl
->ptable
[PART_WORK
]);
899 static void wl1271_recovery_work(struct work_struct
*work
)
902 container_of(work
, struct wl1271
, recovery_work
);
903 struct wl12xx_vif
*wlvif
;
904 struct ieee80211_vif
*vif
;
906 mutex_lock(&wl
->mutex
);
908 if (wl
->state
== WLCORE_STATE_OFF
|| wl
->plt
)
911 if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
)) {
912 wl12xx_read_fwlog_panic(wl
);
913 wlcore_print_recovery(wl
);
916 BUG_ON(wl
->conf
.recovery
.bug_on_recovery
&&
917 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
));
919 if (wl
->conf
.recovery
.no_recovery
) {
920 wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
925 * Advance security sequence number to overcome potential progress
926 * in the firmware during recovery. This doens't hurt if the network is
929 wl12xx_for_each_wlvif(wl
, wlvif
) {
930 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) ||
931 test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
932 wlvif
->tx_security_seq
+=
933 WL1271_TX_SQN_POST_RECOVERY_PADDING
;
936 /* Prevent spurious TX during FW restart */
937 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
939 /* reboot the chipset */
940 while (!list_empty(&wl
->wlvif_list
)) {
941 wlvif
= list_first_entry(&wl
->wlvif_list
,
942 struct wl12xx_vif
, list
);
943 vif
= wl12xx_wlvif_to_vif(wlvif
);
944 __wl1271_op_remove_interface(wl
, vif
, false);
947 wlcore_op_stop_locked(wl
);
949 ieee80211_restart_hw(wl
->hw
);
952 * Its safe to enable TX now - the queues are stopped after a request
955 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_FW_RESTART
);
958 wl
->watchdog_recovery
= false;
959 clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
);
960 mutex_unlock(&wl
->mutex
);
963 static int wlcore_fw_wakeup(struct wl1271
*wl
)
965 return wlcore_raw_write32(wl
, HW_ACCESS_ELP_CTRL_REG
, ELPCTRL_WAKE_UP
);
968 static int wl1271_setup(struct wl1271
*wl
)
970 wl
->fw_status_1
= kmalloc(WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
) +
971 sizeof(*wl
->fw_status_2
) +
972 wl
->fw_status_priv_len
, GFP_KERNEL
);
973 if (!wl
->fw_status_1
)
976 wl
->fw_status_2
= (struct wl_fw_status_2
*)
977 (((u8
*) wl
->fw_status_1
) +
978 WLCORE_FW_STATUS_1_LEN(wl
->num_rx_desc
));
980 wl
->tx_res_if
= kmalloc(sizeof(*wl
->tx_res_if
), GFP_KERNEL
);
981 if (!wl
->tx_res_if
) {
982 kfree(wl
->fw_status_1
);
989 static int wl12xx_set_power_on(struct wl1271
*wl
)
993 msleep(WL1271_PRE_POWER_ON_SLEEP
);
994 ret
= wl1271_power_on(wl
);
997 msleep(WL1271_POWER_ON_SLEEP
);
1001 ret
= wlcore_set_partition(wl
, &wl
->ptable
[PART_BOOT
]);
1005 /* ELP module wake up */
1006 ret
= wlcore_fw_wakeup(wl
);
1014 wl1271_power_off(wl
);
1018 static int wl12xx_chip_wakeup(struct wl1271
*wl
, bool plt
)
1022 ret
= wl12xx_set_power_on(wl
);
1027 * For wl127x based devices we could use the default block
1028 * size (512 bytes), but due to a bug in the sdio driver, we
1029 * need to set it explicitly after the chip is powered on. To
1030 * simplify the code and since the performance impact is
1031 * negligible, we use the same block size for all different
1034 * Check if the bus supports blocksize alignment and, if it
1035 * doesn't, make sure we don't have the quirk.
1037 if (!wl1271_set_block_size(wl
))
1038 wl
->quirks
&= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN
;
1040 /* TODO: make sure the lower driver has set things up correctly */
1042 ret
= wl1271_setup(wl
);
1046 ret
= wl12xx_fetch_firmware(wl
, plt
);
1054 int wl1271_plt_start(struct wl1271
*wl
, const enum plt_mode plt_mode
)
1056 int retries
= WL1271_BOOT_RETRIES
;
1057 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
1059 static const char* const PLT_MODE
[] = {
1067 mutex_lock(&wl
->mutex
);
1069 wl1271_notice("power up");
1071 if (wl
->state
!= WLCORE_STATE_OFF
) {
1072 wl1271_error("cannot go into PLT state because not "
1073 "in off state: %d", wl
->state
);
1078 /* Indicate to lower levels that we are now in PLT mode */
1080 wl
->plt_mode
= plt_mode
;
1084 ret
= wl12xx_chip_wakeup(wl
, true);
1088 ret
= wl
->ops
->plt_init(wl
);
1092 wl
->state
= WLCORE_STATE_ON
;
1093 wl1271_notice("firmware booted in PLT mode %s (%s)",
1095 wl
->chip
.fw_ver_str
);
1097 /* update hw/fw version info in wiphy struct */
1098 wiphy
->hw_version
= wl
->chip
.id
;
1099 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
1100 sizeof(wiphy
->fw_version
));
1105 wl1271_power_off(wl
);
1109 wl
->plt_mode
= PLT_OFF
;
1111 wl1271_error("firmware boot in PLT mode failed despite %d retries",
1112 WL1271_BOOT_RETRIES
);
1114 mutex_unlock(&wl
->mutex
);
1119 int wl1271_plt_stop(struct wl1271
*wl
)
1123 wl1271_notice("power down");
1126 * Interrupts must be disabled before setting the state to OFF.
1127 * Otherwise, the interrupt handler might be called and exit without
1128 * reading the interrupt status.
1130 wlcore_disable_interrupts(wl
);
1131 mutex_lock(&wl
->mutex
);
1133 mutex_unlock(&wl
->mutex
);
1136 * This will not necessarily enable interrupts as interrupts
1137 * may have been disabled when op_stop was called. It will,
1138 * however, balance the above call to disable_interrupts().
1140 wlcore_enable_interrupts(wl
);
1142 wl1271_error("cannot power down because not in PLT "
1143 "state: %d", wl
->state
);
1148 mutex_unlock(&wl
->mutex
);
1150 wl1271_flush_deferred_work(wl
);
1151 cancel_work_sync(&wl
->netstack_work
);
1152 cancel_work_sync(&wl
->recovery_work
);
1153 cancel_delayed_work_sync(&wl
->elp_work
);
1154 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1156 mutex_lock(&wl
->mutex
);
1157 wl1271_power_off(wl
);
1159 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1160 wl
->state
= WLCORE_STATE_OFF
;
1162 wl
->plt_mode
= PLT_OFF
;
1164 mutex_unlock(&wl
->mutex
);
1170 static void wl1271_op_tx(struct ieee80211_hw
*hw
,
1171 struct ieee80211_tx_control
*control
,
1172 struct sk_buff
*skb
)
1174 struct wl1271
*wl
= hw
->priv
;
1175 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
1176 struct ieee80211_vif
*vif
= info
->control
.vif
;
1177 struct wl12xx_vif
*wlvif
= NULL
;
1178 unsigned long flags
;
1183 wl1271_debug(DEBUG_TX
, "DROP skb with no vif");
1184 ieee80211_free_txskb(hw
, skb
);
1188 wlvif
= wl12xx_vif_to_data(vif
);
1189 mapping
= skb_get_queue_mapping(skb
);
1190 q
= wl1271_tx_get_queue(mapping
);
1192 hlid
= wl12xx_tx_get_hlid(wl
, wlvif
, skb
, control
->sta
);
1194 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1197 * drop the packet if the link is invalid or the queue is stopped
1198 * for any reason but watermark. Watermark is a "soft"-stop so we
1199 * allow these packets through.
1201 if (hlid
== WL12XX_INVALID_LINK_ID
||
1202 (!test_bit(hlid
, wlvif
->links_map
)) ||
1203 (wlcore_is_queue_stopped_locked(wl
, wlvif
, q
) &&
1204 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1205 WLCORE_QUEUE_STOP_REASON_WATERMARK
))) {
1206 wl1271_debug(DEBUG_TX
, "DROP skb hlid %d q %d", hlid
, q
);
1207 ieee80211_free_txskb(hw
, skb
);
1211 wl1271_debug(DEBUG_TX
, "queue skb hlid %d q %d len %d",
1213 skb_queue_tail(&wl
->links
[hlid
].tx_queue
[q
], skb
);
1215 wl
->tx_queue_count
[q
]++;
1216 wlvif
->tx_queue_count
[q
]++;
1219 * The workqueue is slow to process the tx_queue and we need stop
1220 * the queue here, otherwise the queue will get too long.
1222 if (wlvif
->tx_queue_count
[q
] >= WL1271_TX_QUEUE_HIGH_WATERMARK
&&
1223 !wlcore_is_queue_stopped_by_reason_locked(wl
, wlvif
, q
,
1224 WLCORE_QUEUE_STOP_REASON_WATERMARK
)) {
1225 wl1271_debug(DEBUG_TX
, "op_tx: stopping queues for q %d", q
);
1226 wlcore_stop_queue_locked(wl
, wlvif
, q
,
1227 WLCORE_QUEUE_STOP_REASON_WATERMARK
);
1231 * The chip specific setup must run before the first TX packet -
1232 * before that, the tx_work will not be initialized!
1235 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
) &&
1236 !test_bit(WL1271_FLAG_TX_PENDING
, &wl
->flags
))
1237 ieee80211_queue_work(wl
->hw
, &wl
->tx_work
);
1240 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1243 int wl1271_tx_dummy_packet(struct wl1271
*wl
)
1245 unsigned long flags
;
1248 /* no need to queue a new dummy packet if one is already pending */
1249 if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
))
1252 q
= wl1271_tx_get_queue(skb_get_queue_mapping(wl
->dummy_packet
));
1254 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1255 set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING
, &wl
->flags
);
1256 wl
->tx_queue_count
[q
]++;
1257 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1259 /* The FW is low on RX memory blocks, so send the dummy packet asap */
1260 if (!test_bit(WL1271_FLAG_FW_TX_BUSY
, &wl
->flags
))
1261 return wlcore_tx_work_locked(wl
);
1264 * If the FW TX is busy, TX work will be scheduled by the threaded
1265 * interrupt handler function
1271 * The size of the dummy packet should be at least 1400 bytes. However, in
1272 * order to minimize the number of bus transactions, aligning it to 512 bytes
1273 * boundaries could be beneficial, performance wise
1275 #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
1277 static struct sk_buff
*wl12xx_alloc_dummy_packet(struct wl1271
*wl
)
1279 struct sk_buff
*skb
;
1280 struct ieee80211_hdr_3addr
*hdr
;
1281 unsigned int dummy_packet_size
;
1283 dummy_packet_size
= TOTAL_TX_DUMMY_PACKET_SIZE
-
1284 sizeof(struct wl1271_tx_hw_descr
) - sizeof(*hdr
);
1286 skb
= dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE
);
1288 wl1271_warning("Failed to allocate a dummy packet skb");
1292 skb_reserve(skb
, sizeof(struct wl1271_tx_hw_descr
));
1294 hdr
= (struct ieee80211_hdr_3addr
*) skb_put(skb
, sizeof(*hdr
));
1295 memset(hdr
, 0, sizeof(*hdr
));
1296 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_DATA
|
1297 IEEE80211_STYPE_NULLFUNC
|
1298 IEEE80211_FCTL_TODS
);
1300 memset(skb_put(skb
, dummy_packet_size
), 0, dummy_packet_size
);
1302 /* Dummy packets require the TID to be management */
1303 skb
->priority
= WL1271_TID_MGMT
;
1305 /* Initialize all fields that might be used */
1306 skb_set_queue_mapping(skb
, 0);
1307 memset(IEEE80211_SKB_CB(skb
), 0, sizeof(struct ieee80211_tx_info
));
1315 wl1271_validate_wowlan_pattern(struct cfg80211_wowlan_trig_pkt_pattern
*p
)
1317 int num_fields
= 0, in_field
= 0, fields_size
= 0;
1318 int i
, pattern_len
= 0;
1321 wl1271_warning("No mask in WoWLAN pattern");
1326 * The pattern is broken up into segments of bytes at different offsets
1327 * that need to be checked by the FW filter. Each segment is called
1328 * a field in the FW API. We verify that the total number of fields
1329 * required for this pattern won't exceed FW limits (8)
1330 * as well as the total fields buffer won't exceed the FW limit.
1331 * Note that if there's a pattern which crosses Ethernet/IP header
1332 * boundary a new field is required.
1334 for (i
= 0; i
< p
->pattern_len
; i
++) {
1335 if (test_bit(i
, (unsigned long *)p
->mask
)) {
1340 if (i
== WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1342 fields_size
+= pattern_len
+
1343 RX_FILTER_FIELD_OVERHEAD
;
1351 fields_size
+= pattern_len
+
1352 RX_FILTER_FIELD_OVERHEAD
;
1359 fields_size
+= pattern_len
+ RX_FILTER_FIELD_OVERHEAD
;
1363 if (num_fields
> WL1271_RX_FILTER_MAX_FIELDS
) {
1364 wl1271_warning("RX Filter too complex. Too many segments");
1368 if (fields_size
> WL1271_RX_FILTER_MAX_FIELDS_SIZE
) {
1369 wl1271_warning("RX filter pattern is too big");
1376 struct wl12xx_rx_filter
*wl1271_rx_filter_alloc(void)
1378 return kzalloc(sizeof(struct wl12xx_rx_filter
), GFP_KERNEL
);
1381 void wl1271_rx_filter_free(struct wl12xx_rx_filter
*filter
)
1388 for (i
= 0; i
< filter
->num_fields
; i
++)
1389 kfree(filter
->fields
[i
].pattern
);
1394 int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter
*filter
,
1395 u16 offset
, u8 flags
,
1396 u8
*pattern
, u8 len
)
1398 struct wl12xx_rx_filter_field
*field
;
1400 if (filter
->num_fields
== WL1271_RX_FILTER_MAX_FIELDS
) {
1401 wl1271_warning("Max fields per RX filter. can't alloc another");
1405 field
= &filter
->fields
[filter
->num_fields
];
1407 field
->pattern
= kzalloc(len
, GFP_KERNEL
);
1408 if (!field
->pattern
) {
1409 wl1271_warning("Failed to allocate RX filter pattern");
1413 filter
->num_fields
++;
1415 field
->offset
= cpu_to_le16(offset
);
1416 field
->flags
= flags
;
1418 memcpy(field
->pattern
, pattern
, len
);
1423 int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter
*filter
)
1425 int i
, fields_size
= 0;
1427 for (i
= 0; i
< filter
->num_fields
; i
++)
1428 fields_size
+= filter
->fields
[i
].len
+
1429 sizeof(struct wl12xx_rx_filter_field
) -
1435 void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter
*filter
,
1439 struct wl12xx_rx_filter_field
*field
;
1441 for (i
= 0; i
< filter
->num_fields
; i
++) {
1442 field
= (struct wl12xx_rx_filter_field
*)buf
;
1444 field
->offset
= filter
->fields
[i
].offset
;
1445 field
->flags
= filter
->fields
[i
].flags
;
1446 field
->len
= filter
->fields
[i
].len
;
1448 memcpy(&field
->pattern
, filter
->fields
[i
].pattern
, field
->len
);
1449 buf
+= sizeof(struct wl12xx_rx_filter_field
) -
1450 sizeof(u8
*) + field
->len
;
1455 * Allocates an RX filter returned through f
1456 * which needs to be freed using rx_filter_free()
1458 static int wl1271_convert_wowlan_pattern_to_rx_filter(
1459 struct cfg80211_wowlan_trig_pkt_pattern
*p
,
1460 struct wl12xx_rx_filter
**f
)
1463 struct wl12xx_rx_filter
*filter
;
1467 filter
= wl1271_rx_filter_alloc();
1469 wl1271_warning("Failed to alloc rx filter");
1475 while (i
< p
->pattern_len
) {
1476 if (!test_bit(i
, (unsigned long *)p
->mask
)) {
1481 for (j
= i
; j
< p
->pattern_len
; j
++) {
1482 if (!test_bit(j
, (unsigned long *)p
->mask
))
1485 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
&&
1486 j
>= WL1271_RX_FILTER_ETH_HEADER_SIZE
)
1490 if (i
< WL1271_RX_FILTER_ETH_HEADER_SIZE
) {
1492 flags
= WL1271_RX_FILTER_FLAG_ETHERNET_HEADER
;
1494 offset
= i
- WL1271_RX_FILTER_ETH_HEADER_SIZE
;
1495 flags
= WL1271_RX_FILTER_FLAG_IP_HEADER
;
1500 ret
= wl1271_rx_filter_alloc_field(filter
,
1503 &p
->pattern
[i
], len
);
1510 filter
->action
= FILTER_SIGNAL
;
1516 wl1271_rx_filter_free(filter
);
1522 static int wl1271_configure_wowlan(struct wl1271
*wl
,
1523 struct cfg80211_wowlan
*wow
)
1527 if (!wow
|| wow
->any
|| !wow
->n_patterns
) {
1528 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0,
1533 ret
= wl1271_rx_filter_clear_all(wl
);
1540 if (WARN_ON(wow
->n_patterns
> WL1271_MAX_RX_FILTERS
))
1543 /* Validate all incoming patterns before clearing current FW state */
1544 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1545 ret
= wl1271_validate_wowlan_pattern(&wow
->patterns
[i
]);
1547 wl1271_warning("Bad wowlan pattern %d", i
);
1552 ret
= wl1271_acx_default_rx_filter_enable(wl
, 0, FILTER_SIGNAL
);
1556 ret
= wl1271_rx_filter_clear_all(wl
);
1560 /* Translate WoWLAN patterns into filters */
1561 for (i
= 0; i
< wow
->n_patterns
; i
++) {
1562 struct cfg80211_wowlan_trig_pkt_pattern
*p
;
1563 struct wl12xx_rx_filter
*filter
= NULL
;
1565 p
= &wow
->patterns
[i
];
1567 ret
= wl1271_convert_wowlan_pattern_to_rx_filter(p
, &filter
);
1569 wl1271_warning("Failed to create an RX filter from "
1570 "wowlan pattern %d", i
);
1574 ret
= wl1271_rx_filter_enable(wl
, i
, 1, filter
);
1576 wl1271_rx_filter_free(filter
);
1581 ret
= wl1271_acx_default_rx_filter_enable(wl
, 1, FILTER_DROP
);
1587 static int wl1271_configure_suspend_sta(struct wl1271
*wl
,
1588 struct wl12xx_vif
*wlvif
,
1589 struct cfg80211_wowlan
*wow
)
1593 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1596 ret
= wl1271_ps_elp_wakeup(wl
);
1600 ret
= wl1271_configure_wowlan(wl
, wow
);
1604 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1605 wl
->conf
.conn
.wake_up_event
) &&
1606 (wl
->conf
.conn
.suspend_listen_interval
==
1607 wl
->conf
.conn
.listen_interval
))
1610 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1611 wl
->conf
.conn
.suspend_wake_up_event
,
1612 wl
->conf
.conn
.suspend_listen_interval
);
1615 wl1271_error("suspend: set wake up conditions failed: %d", ret
);
1618 wl1271_ps_elp_sleep(wl
);
1624 static int wl1271_configure_suspend_ap(struct wl1271
*wl
,
1625 struct wl12xx_vif
*wlvif
)
1629 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
))
1632 ret
= wl1271_ps_elp_wakeup(wl
);
1636 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, true);
1638 wl1271_ps_elp_sleep(wl
);
1644 static int wl1271_configure_suspend(struct wl1271
*wl
,
1645 struct wl12xx_vif
*wlvif
,
1646 struct cfg80211_wowlan
*wow
)
1648 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
)
1649 return wl1271_configure_suspend_sta(wl
, wlvif
, wow
);
1650 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
1651 return wl1271_configure_suspend_ap(wl
, wlvif
);
1655 static void wl1271_configure_resume(struct wl1271
*wl
,
1656 struct wl12xx_vif
*wlvif
)
1659 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
1660 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
1662 if ((!is_ap
) && (!is_sta
))
1665 if (is_sta
&& !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
1668 ret
= wl1271_ps_elp_wakeup(wl
);
1673 wl1271_configure_wowlan(wl
, NULL
);
1675 if ((wl
->conf
.conn
.suspend_wake_up_event
==
1676 wl
->conf
.conn
.wake_up_event
) &&
1677 (wl
->conf
.conn
.suspend_listen_interval
==
1678 wl
->conf
.conn
.listen_interval
))
1681 ret
= wl1271_acx_wake_up_conditions(wl
, wlvif
,
1682 wl
->conf
.conn
.wake_up_event
,
1683 wl
->conf
.conn
.listen_interval
);
1686 wl1271_error("resume: wake up conditions failed: %d",
1690 ret
= wl1271_acx_beacon_filter_opt(wl
, wlvif
, false);
1694 wl1271_ps_elp_sleep(wl
);
1697 static int wl1271_op_suspend(struct ieee80211_hw
*hw
,
1698 struct cfg80211_wowlan
*wow
)
1700 struct wl1271
*wl
= hw
->priv
;
1701 struct wl12xx_vif
*wlvif
;
1704 wl1271_debug(DEBUG_MAC80211
, "mac80211 suspend wow=%d", !!wow
);
1707 /* we want to perform the recovery before suspending */
1708 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
1709 wl1271_warning("postponing suspend to perform recovery");
1713 wl1271_tx_flush(wl
);
1715 mutex_lock(&wl
->mutex
);
1716 wl
->wow_enabled
= true;
1717 wl12xx_for_each_wlvif(wl
, wlvif
) {
1718 ret
= wl1271_configure_suspend(wl
, wlvif
, wow
);
1720 mutex_unlock(&wl
->mutex
);
1721 wl1271_warning("couldn't prepare device to suspend");
1725 mutex_unlock(&wl
->mutex
);
1726 /* flush any remaining work */
1727 wl1271_debug(DEBUG_MAC80211
, "flushing remaining works");
1730 * disable and re-enable interrupts in order to flush
1733 wlcore_disable_interrupts(wl
);
1736 * set suspended flag to avoid triggering a new threaded_irq
1737 * work. no need for spinlock as interrupts are disabled.
1739 set_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1741 wlcore_enable_interrupts(wl
);
1742 flush_work(&wl
->tx_work
);
1743 flush_delayed_work(&wl
->elp_work
);
1748 static int wl1271_op_resume(struct ieee80211_hw
*hw
)
1750 struct wl1271
*wl
= hw
->priv
;
1751 struct wl12xx_vif
*wlvif
;
1752 unsigned long flags
;
1753 bool run_irq_work
= false, pending_recovery
;
1756 wl1271_debug(DEBUG_MAC80211
, "mac80211 resume wow=%d",
1758 WARN_ON(!wl
->wow_enabled
);
1761 * re-enable irq_work enqueuing, and call irq_work directly if
1762 * there is a pending work.
1764 spin_lock_irqsave(&wl
->wl_lock
, flags
);
1765 clear_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
);
1766 if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
))
1767 run_irq_work
= true;
1768 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
1770 mutex_lock(&wl
->mutex
);
1772 /* test the recovery flag before calling any SDIO functions */
1773 pending_recovery
= test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1777 wl1271_debug(DEBUG_MAC80211
,
1778 "run postponed irq_work directly");
1780 /* don't talk to the HW if recovery is pending */
1781 if (!pending_recovery
) {
1782 ret
= wlcore_irq_locked(wl
);
1784 wl12xx_queue_recovery_work(wl
);
1787 wlcore_enable_interrupts(wl
);
1790 if (pending_recovery
) {
1791 wl1271_warning("queuing forgotten recovery on resume");
1792 ieee80211_queue_work(wl
->hw
, &wl
->recovery_work
);
1796 wl12xx_for_each_wlvif(wl
, wlvif
) {
1797 wl1271_configure_resume(wl
, wlvif
);
1801 wl
->wow_enabled
= false;
1802 mutex_unlock(&wl
->mutex
);
1808 static int wl1271_op_start(struct ieee80211_hw
*hw
)
1810 wl1271_debug(DEBUG_MAC80211
, "mac80211 start");
1813 * We have to delay the booting of the hardware because
1814 * we need to know the local MAC address before downloading and
1815 * initializing the firmware. The MAC address cannot be changed
1816 * after boot, and without the proper MAC address, the firmware
1817 * will not function properly.
1819 * The MAC address is first known when the corresponding interface
1820 * is added. That is where we will initialize the hardware.
1826 static void wlcore_op_stop_locked(struct wl1271
*wl
)
1830 if (wl
->state
== WLCORE_STATE_OFF
) {
1831 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
,
1833 wlcore_enable_interrupts(wl
);
1839 * this must be before the cancel_work calls below, so that the work
1840 * functions don't perform further work.
1842 wl
->state
= WLCORE_STATE_OFF
;
1845 * Use the nosync variant to disable interrupts, so the mutex could be
1846 * held while doing so without deadlocking.
1848 wlcore_disable_interrupts_nosync(wl
);
1850 mutex_unlock(&wl
->mutex
);
1852 wlcore_synchronize_interrupts(wl
);
1853 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1854 cancel_work_sync(&wl
->recovery_work
);
1855 wl1271_flush_deferred_work(wl
);
1856 cancel_delayed_work_sync(&wl
->scan_complete_work
);
1857 cancel_work_sync(&wl
->netstack_work
);
1858 cancel_work_sync(&wl
->tx_work
);
1859 cancel_delayed_work_sync(&wl
->elp_work
);
1860 cancel_delayed_work_sync(&wl
->tx_watchdog_work
);
1862 /* let's notify MAC80211 about the remaining pending TX frames */
1863 mutex_lock(&wl
->mutex
);
1864 wl12xx_tx_reset(wl
);
1866 wl1271_power_off(wl
);
1868 * In case a recovery was scheduled, interrupts were disabled to avoid
1869 * an interrupt storm. Now that the power is down, it is safe to
1870 * re-enable interrupts to balance the disable depth
1872 if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
))
1873 wlcore_enable_interrupts(wl
);
1875 wl
->band
= IEEE80211_BAND_2GHZ
;
1878 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
1879 wl
->channel_type
= NL80211_CHAN_NO_HT
;
1880 wl
->tx_blocks_available
= 0;
1881 wl
->tx_allocated_blocks
= 0;
1882 wl
->tx_results_count
= 0;
1883 wl
->tx_packets_count
= 0;
1884 wl
->time_offset
= 0;
1885 wl
->ap_fw_ps_map
= 0;
1887 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
1888 memset(wl
->roles_map
, 0, sizeof(wl
->roles_map
));
1889 memset(wl
->links_map
, 0, sizeof(wl
->links_map
));
1890 memset(wl
->roc_map
, 0, sizeof(wl
->roc_map
));
1891 memset(wl
->session_ids
, 0, sizeof(wl
->session_ids
));
1892 wl
->active_sta_count
= 0;
1893 wl
->active_link_count
= 0;
1895 /* The system link is always allocated */
1896 wl
->links
[WL12XX_SYSTEM_HLID
].allocated_pkts
= 0;
1897 wl
->links
[WL12XX_SYSTEM_HLID
].prev_freed_pkts
= 0;
1898 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
1901 * this is performed after the cancel_work calls and the associated
1902 * mutex_lock, so that wl1271_op_add_interface does not accidentally
1903 * get executed before all these vars have been reset.
1907 wl
->tx_blocks_freed
= 0;
1909 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
1910 wl
->tx_pkts_freed
[i
] = 0;
1911 wl
->tx_allocated_pkts
[i
] = 0;
1914 wl1271_debugfs_reset(wl
);
1916 kfree(wl
->fw_status_1
);
1917 wl
->fw_status_1
= NULL
;
1918 wl
->fw_status_2
= NULL
;
1919 kfree(wl
->tx_res_if
);
1920 wl
->tx_res_if
= NULL
;
1921 kfree(wl
->target_mem_map
);
1922 wl
->target_mem_map
= NULL
;
1925 * FW channels must be re-calibrated after recovery,
1926 * clear the last Reg-Domain channel configuration.
1928 memset(wl
->reg_ch_conf_last
, 0, sizeof(wl
->reg_ch_conf_last
));
1931 static void wlcore_op_stop(struct ieee80211_hw
*hw
)
1933 struct wl1271
*wl
= hw
->priv
;
1935 wl1271_debug(DEBUG_MAC80211
, "mac80211 stop");
1937 mutex_lock(&wl
->mutex
);
1939 wlcore_op_stop_locked(wl
);
1941 mutex_unlock(&wl
->mutex
);
1944 static void wlcore_channel_switch_work(struct work_struct
*work
)
1946 struct delayed_work
*dwork
;
1948 struct ieee80211_vif
*vif
;
1949 struct wl12xx_vif
*wlvif
;
1952 dwork
= container_of(work
, struct delayed_work
, work
);
1953 wlvif
= container_of(dwork
, struct wl12xx_vif
, channel_switch_work
);
1956 wl1271_info("channel switch failed (role_id: %d).", wlvif
->role_id
);
1958 mutex_lock(&wl
->mutex
);
1960 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
1963 /* check the channel switch is still ongoing */
1964 if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
))
1967 vif
= wl12xx_wlvif_to_vif(wlvif
);
1968 ieee80211_chswitch_done(vif
, false);
1970 ret
= wl1271_ps_elp_wakeup(wl
);
1974 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
1976 wl1271_ps_elp_sleep(wl
);
1978 mutex_unlock(&wl
->mutex
);
1981 static void wlcore_connection_loss_work(struct work_struct
*work
)
1983 struct delayed_work
*dwork
;
1985 struct ieee80211_vif
*vif
;
1986 struct wl12xx_vif
*wlvif
;
1988 dwork
= container_of(work
, struct delayed_work
, work
);
1989 wlvif
= container_of(dwork
, struct wl12xx_vif
, connection_loss_work
);
1992 wl1271_info("Connection loss work (role_id: %d).", wlvif
->role_id
);
1994 mutex_lock(&wl
->mutex
);
1996 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
1999 /* Call mac80211 connection loss */
2000 if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2003 vif
= wl12xx_wlvif_to_vif(wlvif
);
2004 ieee80211_connection_loss(vif
);
2006 mutex_unlock(&wl
->mutex
);
2009 static int wl12xx_allocate_rate_policy(struct wl1271
*wl
, u8
*idx
)
2011 u8 policy
= find_first_zero_bit(wl
->rate_policies_map
,
2012 WL12XX_MAX_RATE_POLICIES
);
2013 if (policy
>= WL12XX_MAX_RATE_POLICIES
)
2016 __set_bit(policy
, wl
->rate_policies_map
);
2021 static void wl12xx_free_rate_policy(struct wl1271
*wl
, u8
*idx
)
2023 if (WARN_ON(*idx
>= WL12XX_MAX_RATE_POLICIES
))
2026 __clear_bit(*idx
, wl
->rate_policies_map
);
2027 *idx
= WL12XX_MAX_RATE_POLICIES
;
2030 static int wlcore_allocate_klv_template(struct wl1271
*wl
, u8
*idx
)
2032 u8 policy
= find_first_zero_bit(wl
->klv_templates_map
,
2033 WLCORE_MAX_KLV_TEMPLATES
);
2034 if (policy
>= WLCORE_MAX_KLV_TEMPLATES
)
2037 __set_bit(policy
, wl
->klv_templates_map
);
2042 static void wlcore_free_klv_template(struct wl1271
*wl
, u8
*idx
)
2044 if (WARN_ON(*idx
>= WLCORE_MAX_KLV_TEMPLATES
))
2047 __clear_bit(*idx
, wl
->klv_templates_map
);
2048 *idx
= WLCORE_MAX_KLV_TEMPLATES
;
2051 static u8
wl12xx_get_role_type(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2053 switch (wlvif
->bss_type
) {
2054 case BSS_TYPE_AP_BSS
:
2056 return WL1271_ROLE_P2P_GO
;
2058 return WL1271_ROLE_AP
;
2060 case BSS_TYPE_STA_BSS
:
2062 return WL1271_ROLE_P2P_CL
;
2064 return WL1271_ROLE_STA
;
2067 return WL1271_ROLE_IBSS
;
2070 wl1271_error("invalid bss_type: %d", wlvif
->bss_type
);
2072 return WL12XX_INVALID_ROLE_TYPE
;
2075 static int wl12xx_init_vif_data(struct wl1271
*wl
, struct ieee80211_vif
*vif
)
2077 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2080 /* clear everything but the persistent data */
2081 memset(wlvif
, 0, offsetof(struct wl12xx_vif
, persistent
));
2083 switch (ieee80211_vif_type_p2p(vif
)) {
2084 case NL80211_IFTYPE_P2P_CLIENT
:
2087 case NL80211_IFTYPE_STATION
:
2088 wlvif
->bss_type
= BSS_TYPE_STA_BSS
;
2090 case NL80211_IFTYPE_ADHOC
:
2091 wlvif
->bss_type
= BSS_TYPE_IBSS
;
2093 case NL80211_IFTYPE_P2P_GO
:
2096 case NL80211_IFTYPE_AP
:
2097 wlvif
->bss_type
= BSS_TYPE_AP_BSS
;
2100 wlvif
->bss_type
= MAX_BSS_TYPE
;
2104 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2105 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2106 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2108 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2109 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2110 /* init sta/ibss data */
2111 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2112 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2113 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2114 wl12xx_allocate_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2115 wlcore_allocate_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2116 wlvif
->basic_rate_set
= CONF_TX_RATE_MASK_BASIC
;
2117 wlvif
->basic_rate
= CONF_TX_RATE_MASK_BASIC
;
2118 wlvif
->rate_set
= CONF_TX_RATE_MASK_BASIC
;
2121 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2122 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2123 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2124 wl12xx_allocate_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2125 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2126 wl12xx_allocate_rate_policy(wl
,
2127 &wlvif
->ap
.ucast_rate_idx
[i
]);
2128 wlvif
->basic_rate_set
= CONF_TX_ENABLED_RATES
;
2130 * TODO: check if basic_rate shouldn't be
2131 * wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
2132 * instead (the same thing for STA above).
2134 wlvif
->basic_rate
= CONF_TX_ENABLED_RATES
;
2135 /* TODO: this seems to be used only for STA, check it */
2136 wlvif
->rate_set
= CONF_TX_ENABLED_RATES
;
2139 wlvif
->bitrate_masks
[IEEE80211_BAND_2GHZ
] = wl
->conf
.tx
.basic_rate
;
2140 wlvif
->bitrate_masks
[IEEE80211_BAND_5GHZ
] = wl
->conf
.tx
.basic_rate_5
;
2141 wlvif
->beacon_int
= WL1271_DEFAULT_BEACON_INT
;
2144 * mac80211 configures some values globally, while we treat them
2145 * per-interface. thus, on init, we have to copy them from wl
2147 wlvif
->band
= wl
->band
;
2148 wlvif
->channel
= wl
->channel
;
2149 wlvif
->power_level
= wl
->power_level
;
2150 wlvif
->channel_type
= wl
->channel_type
;
2152 INIT_WORK(&wlvif
->rx_streaming_enable_work
,
2153 wl1271_rx_streaming_enable_work
);
2154 INIT_WORK(&wlvif
->rx_streaming_disable_work
,
2155 wl1271_rx_streaming_disable_work
);
2156 INIT_DELAYED_WORK(&wlvif
->channel_switch_work
,
2157 wlcore_channel_switch_work
);
2158 INIT_DELAYED_WORK(&wlvif
->connection_loss_work
,
2159 wlcore_connection_loss_work
);
2160 INIT_LIST_HEAD(&wlvif
->list
);
2162 setup_timer(&wlvif
->rx_streaming_timer
, wl1271_rx_streaming_timer
,
2163 (unsigned long) wlvif
);
2167 static bool wl12xx_init_fw(struct wl1271
*wl
)
2169 int retries
= WL1271_BOOT_RETRIES
;
2170 bool booted
= false;
2171 struct wiphy
*wiphy
= wl
->hw
->wiphy
;
2176 ret
= wl12xx_chip_wakeup(wl
, false);
2180 ret
= wl
->ops
->boot(wl
);
2184 ret
= wl1271_hw_init(wl
);
2192 mutex_unlock(&wl
->mutex
);
2193 /* Unlocking the mutex in the middle of handling is
2194 inherently unsafe. In this case we deem it safe to do,
2195 because we need to let any possibly pending IRQ out of
2196 the system (and while we are WLCORE_STATE_OFF the IRQ
2197 work function will not do anything.) Also, any other
2198 possible concurrent operations will fail due to the
2199 current state, hence the wl1271 struct should be safe. */
2200 wlcore_disable_interrupts(wl
);
2201 wl1271_flush_deferred_work(wl
);
2202 cancel_work_sync(&wl
->netstack_work
);
2203 mutex_lock(&wl
->mutex
);
2205 wl1271_power_off(wl
);
2209 wl1271_error("firmware boot failed despite %d retries",
2210 WL1271_BOOT_RETRIES
);
2214 wl1271_info("firmware booted (%s)", wl
->chip
.fw_ver_str
);
2216 /* update hw/fw version info in wiphy struct */
2217 wiphy
->hw_version
= wl
->chip
.id
;
2218 strncpy(wiphy
->fw_version
, wl
->chip
.fw_ver_str
,
2219 sizeof(wiphy
->fw_version
));
2222 * Now we know if 11a is supported (info from the NVS), so disable
2223 * 11a channels if not supported
2225 if (!wl
->enable_11a
)
2226 wiphy
->bands
[IEEE80211_BAND_5GHZ
]->n_channels
= 0;
2228 wl1271_debug(DEBUG_MAC80211
, "11a is %ssupported",
2229 wl
->enable_11a
? "" : "not ");
2231 wl
->state
= WLCORE_STATE_ON
;
2236 static bool wl12xx_dev_role_started(struct wl12xx_vif
*wlvif
)
2238 return wlvif
->dev_hlid
!= WL12XX_INVALID_LINK_ID
;
2242 * Check whether a fw switch (i.e. moving from one loaded
2243 * fw to another) is needed. This function is also responsible
2244 * for updating wl->last_vif_count, so it must be called before
2245 * loading a non-plt fw (so the correct fw (single-role/multi-role)
2248 static bool wl12xx_need_fw_change(struct wl1271
*wl
,
2249 struct vif_counter_data vif_counter_data
,
2252 enum wl12xx_fw_type current_fw
= wl
->fw_type
;
2253 u8 vif_count
= vif_counter_data
.counter
;
2255 if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
))
2258 /* increase the vif count if this is a new vif */
2259 if (add
&& !vif_counter_data
.cur_vif_running
)
2262 wl
->last_vif_count
= vif_count
;
2264 /* no need for fw change if the device is OFF */
2265 if (wl
->state
== WLCORE_STATE_OFF
)
2268 /* no need for fw change if a single fw is used */
2269 if (!wl
->mr_fw_name
)
2272 if (vif_count
> 1 && current_fw
== WL12XX_FW_TYPE_NORMAL
)
2274 if (vif_count
<= 1 && current_fw
== WL12XX_FW_TYPE_MULTI
)
2281 * Enter "forced psm". Make sure the sta is in psm against the ap,
2282 * to make the fw switch a bit more disconnection-persistent.
2284 static void wl12xx_force_active_psm(struct wl1271
*wl
)
2286 struct wl12xx_vif
*wlvif
;
2288 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
2289 wl1271_ps_set_mode(wl
, wlvif
, STATION_POWER_SAVE_MODE
);
2293 struct wlcore_hw_queue_iter_data
{
2294 unsigned long hw_queue_map
[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES
)];
2296 struct ieee80211_vif
*vif
;
2297 /* is the current vif among those iterated */
2301 static void wlcore_hw_queue_iter(void *data
, u8
*mac
,
2302 struct ieee80211_vif
*vif
)
2304 struct wlcore_hw_queue_iter_data
*iter_data
= data
;
2306 if (WARN_ON_ONCE(vif
->hw_queue
[0] == IEEE80211_INVAL_HW_QUEUE
))
2309 if (iter_data
->cur_running
|| vif
== iter_data
->vif
) {
2310 iter_data
->cur_running
= true;
2314 __set_bit(vif
->hw_queue
[0] / NUM_TX_QUEUES
, iter_data
->hw_queue_map
);
2317 static int wlcore_allocate_hw_queue_base(struct wl1271
*wl
,
2318 struct wl12xx_vif
*wlvif
)
2320 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2321 struct wlcore_hw_queue_iter_data iter_data
= {};
2324 iter_data
.vif
= vif
;
2326 /* mark all bits taken by active interfaces */
2327 ieee80211_iterate_active_interfaces_atomic(wl
->hw
,
2328 IEEE80211_IFACE_ITER_RESUME_ALL
,
2329 wlcore_hw_queue_iter
, &iter_data
);
2331 /* the current vif is already running in mac80211 (resume/recovery) */
2332 if (iter_data
.cur_running
) {
2333 wlvif
->hw_queue_base
= vif
->hw_queue
[0];
2334 wl1271_debug(DEBUG_MAC80211
,
2335 "using pre-allocated hw queue base %d",
2336 wlvif
->hw_queue_base
);
2338 /* interface type might have changed type */
2339 goto adjust_cab_queue
;
2342 q_base
= find_first_zero_bit(iter_data
.hw_queue_map
,
2343 WLCORE_NUM_MAC_ADDRESSES
);
2344 if (q_base
>= WLCORE_NUM_MAC_ADDRESSES
)
2347 wlvif
->hw_queue_base
= q_base
* NUM_TX_QUEUES
;
2348 wl1271_debug(DEBUG_MAC80211
, "allocating hw queue base: %d",
2349 wlvif
->hw_queue_base
);
2351 for (i
= 0; i
< NUM_TX_QUEUES
; i
++) {
2352 wl
->queue_stop_reasons
[wlvif
->hw_queue_base
+ i
] = 0;
2353 /* register hw queues in mac80211 */
2354 vif
->hw_queue
[i
] = wlvif
->hw_queue_base
+ i
;
2358 /* the last places are reserved for cab queues per interface */
2359 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2360 vif
->cab_queue
= NUM_TX_QUEUES
* WLCORE_NUM_MAC_ADDRESSES
+
2361 wlvif
->hw_queue_base
/ NUM_TX_QUEUES
;
2363 vif
->cab_queue
= IEEE80211_INVAL_HW_QUEUE
;
2368 static int wl1271_op_add_interface(struct ieee80211_hw
*hw
,
2369 struct ieee80211_vif
*vif
)
2371 struct wl1271
*wl
= hw
->priv
;
2372 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2373 struct vif_counter_data vif_count
;
2376 bool booted
= false;
2378 vif
->driver_flags
|= IEEE80211_VIF_BEACON_FILTER
|
2379 IEEE80211_VIF_SUPPORTS_CQM_RSSI
;
2381 wl1271_debug(DEBUG_MAC80211
, "mac80211 add interface type %d mac %pM",
2382 ieee80211_vif_type_p2p(vif
), vif
->addr
);
2384 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2386 mutex_lock(&wl
->mutex
);
2387 ret
= wl1271_ps_elp_wakeup(wl
);
2392 * in some very corner case HW recovery scenarios its possible to
2393 * get here before __wl1271_op_remove_interface is complete, so
2394 * opt out if that is the case.
2396 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) ||
2397 test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)) {
2403 ret
= wl12xx_init_vif_data(wl
, vif
);
2408 role_type
= wl12xx_get_role_type(wl
, wlvif
);
2409 if (role_type
== WL12XX_INVALID_ROLE_TYPE
) {
2414 ret
= wlcore_allocate_hw_queue_base(wl
, wlvif
);
2418 if (wl12xx_need_fw_change(wl
, vif_count
, true)) {
2419 wl12xx_force_active_psm(wl
);
2420 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2421 mutex_unlock(&wl
->mutex
);
2422 wl1271_recovery_work(&wl
->recovery_work
);
2427 * TODO: after the nvs issue will be solved, move this block
2428 * to start(), and make sure here the driver is ON.
2430 if (wl
->state
== WLCORE_STATE_OFF
) {
2432 * we still need this in order to configure the fw
2433 * while uploading the nvs
2435 memcpy(wl
->addresses
[0].addr
, vif
->addr
, ETH_ALEN
);
2437 booted
= wl12xx_init_fw(wl
);
2444 ret
= wl12xx_cmd_role_enable(wl
, vif
->addr
,
2445 role_type
, &wlvif
->role_id
);
2449 ret
= wl1271_init_vif_specific(wl
, vif
);
2453 list_add(&wlvif
->list
, &wl
->wlvif_list
);
2454 set_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
);
2456 if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
)
2461 wl1271_ps_elp_sleep(wl
);
2463 mutex_unlock(&wl
->mutex
);
2468 static void __wl1271_op_remove_interface(struct wl1271
*wl
,
2469 struct ieee80211_vif
*vif
,
2470 bool reset_tx_queues
)
2472 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2474 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
2476 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove interface");
2478 if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2481 /* because of hardware recovery, we may get here twice */
2482 if (wl
->state
== WLCORE_STATE_OFF
)
2485 wl1271_info("down");
2487 if (wl
->scan
.state
!= WL1271_SCAN_STATE_IDLE
&&
2488 wl
->scan_wlvif
== wlvif
) {
2490 * Rearm the tx watchdog just before idling scan. This
2491 * prevents just-finished scans from triggering the watchdog
2493 wl12xx_rearm_tx_watchdog_locked(wl
);
2495 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
2496 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
2497 wl
->scan_wlvif
= NULL
;
2498 wl
->scan
.req
= NULL
;
2499 ieee80211_scan_completed(wl
->hw
, true);
2502 if (wl
->sched_vif
== wlvif
) {
2503 ieee80211_sched_scan_stopped(wl
->hw
);
2504 wl
->sched_vif
= NULL
;
2507 if (wl
->roc_vif
== vif
) {
2509 ieee80211_remain_on_channel_expired(wl
->hw
);
2512 if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
)) {
2513 /* disable active roles */
2514 ret
= wl1271_ps_elp_wakeup(wl
);
2518 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2519 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2520 if (wl12xx_dev_role_started(wlvif
))
2521 wl12xx_stop_dev(wl
, wlvif
);
2524 ret
= wl12xx_cmd_role_disable(wl
, &wlvif
->role_id
);
2528 wl1271_ps_elp_sleep(wl
);
2531 /* clear all hlids (except system_hlid) */
2532 wlvif
->dev_hlid
= WL12XX_INVALID_LINK_ID
;
2534 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
||
2535 wlvif
->bss_type
== BSS_TYPE_IBSS
) {
2536 wlvif
->sta
.hlid
= WL12XX_INVALID_LINK_ID
;
2537 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.basic_rate_idx
);
2538 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.ap_rate_idx
);
2539 wl12xx_free_rate_policy(wl
, &wlvif
->sta
.p2p_rate_idx
);
2540 wlcore_free_klv_template(wl
, &wlvif
->sta
.klv_template_id
);
2542 wlvif
->ap
.bcast_hlid
= WL12XX_INVALID_LINK_ID
;
2543 wlvif
->ap
.global_hlid
= WL12XX_INVALID_LINK_ID
;
2544 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.mgmt_rate_idx
);
2545 wl12xx_free_rate_policy(wl
, &wlvif
->ap
.bcast_rate_idx
);
2546 for (i
= 0; i
< CONF_TX_MAX_AC_COUNT
; i
++)
2547 wl12xx_free_rate_policy(wl
,
2548 &wlvif
->ap
.ucast_rate_idx
[i
]);
2549 wl1271_free_ap_keys(wl
, wlvif
);
2552 dev_kfree_skb(wlvif
->probereq
);
2553 wlvif
->probereq
= NULL
;
2554 wl12xx_tx_reset_wlvif(wl
, wlvif
);
2555 if (wl
->last_wlvif
== wlvif
)
2556 wl
->last_wlvif
= NULL
;
2557 list_del(&wlvif
->list
);
2558 memset(wlvif
->ap
.sta_hlid_map
, 0, sizeof(wlvif
->ap
.sta_hlid_map
));
2559 wlvif
->role_id
= WL12XX_INVALID_ROLE_ID
;
2560 wlvif
->dev_role_id
= WL12XX_INVALID_ROLE_ID
;
2568 * Last AP, have more stations. Configure sleep auth according to STA.
2569 * Don't do this on unintended recovery.
2571 if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS
, &wl
->flags
) &&
2572 !test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
))
2575 if (wl
->ap_count
== 0 && is_ap
&& wl
->sta_count
) {
2576 u8 sta_auth
= wl
->conf
.conn
.sta_sleep_auth
;
2577 /* Configure for power according to debugfs */
2578 if (sta_auth
!= WL1271_PSM_ILLEGAL
)
2579 wl1271_acx_sleep_auth(wl
, sta_auth
);
2580 /* Configure for ELP power saving */
2582 wl1271_acx_sleep_auth(wl
, WL1271_PSM_ELP
);
2586 mutex_unlock(&wl
->mutex
);
2588 del_timer_sync(&wlvif
->rx_streaming_timer
);
2589 cancel_work_sync(&wlvif
->rx_streaming_enable_work
);
2590 cancel_work_sync(&wlvif
->rx_streaming_disable_work
);
2591 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
2593 mutex_lock(&wl
->mutex
);
2596 static void wl1271_op_remove_interface(struct ieee80211_hw
*hw
,
2597 struct ieee80211_vif
*vif
)
2599 struct wl1271
*wl
= hw
->priv
;
2600 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
2601 struct wl12xx_vif
*iter
;
2602 struct vif_counter_data vif_count
;
2604 wl12xx_get_vif_count(hw
, vif
, &vif_count
);
2605 mutex_lock(&wl
->mutex
);
2607 if (wl
->state
== WLCORE_STATE_OFF
||
2608 !test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
2612 * wl->vif can be null here if someone shuts down the interface
2613 * just when hardware recovery has been started.
2615 wl12xx_for_each_wlvif(wl
, iter
) {
2619 __wl1271_op_remove_interface(wl
, vif
, true);
2622 WARN_ON(iter
!= wlvif
);
2623 if (wl12xx_need_fw_change(wl
, vif_count
, false)) {
2624 wl12xx_force_active_psm(wl
);
2625 set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY
, &wl
->flags
);
2626 wl12xx_queue_recovery_work(wl
);
2629 mutex_unlock(&wl
->mutex
);
2632 static int wl12xx_op_change_interface(struct ieee80211_hw
*hw
,
2633 struct ieee80211_vif
*vif
,
2634 enum nl80211_iftype new_type
, bool p2p
)
2636 struct wl1271
*wl
= hw
->priv
;
2639 set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2640 wl1271_op_remove_interface(hw
, vif
);
2642 vif
->type
= new_type
;
2644 ret
= wl1271_op_add_interface(hw
, vif
);
2646 clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS
, &wl
->flags
);
2650 static int wlcore_join(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2653 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
2656 * One of the side effects of the JOIN command is that is clears
2657 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
2658 * to a WPA/WPA2 access point will therefore kill the data-path.
2659 * Currently the only valid scenario for JOIN during association
2660 * is on roaming, in which case we will also be given new keys.
2661 * Keep the below message for now, unless it starts bothering
2662 * users who really like to roam a lot :)
2664 if (test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2665 wl1271_info("JOIN while associated.");
2667 /* clear encryption type */
2668 wlvif
->encryption_type
= KEY_NONE
;
2671 ret
= wl12xx_cmd_role_start_ibss(wl
, wlvif
);
2673 if (wl
->quirks
& WLCORE_QUIRK_START_STA_FAILS
) {
2675 * TODO: this is an ugly workaround for wl12xx fw
2676 * bug - we are not able to tx/rx after the first
2677 * start_sta, so make dummy start+stop calls,
2678 * and then call start_sta again.
2679 * this should be fixed in the fw.
2681 wl12xx_cmd_role_start_sta(wl
, wlvif
);
2682 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
2685 ret
= wl12xx_cmd_role_start_sta(wl
, wlvif
);
2691 static int wl1271_ssid_set(struct wl12xx_vif
*wlvif
, struct sk_buff
*skb
,
2695 const u8
*ptr
= cfg80211_find_ie(WLAN_EID_SSID
, skb
->data
+ offset
,
2699 wl1271_error("No SSID in IEs!");
2704 if (ssid_len
> IEEE80211_MAX_SSID_LEN
) {
2705 wl1271_error("SSID is too long!");
2709 wlvif
->ssid_len
= ssid_len
;
2710 memcpy(wlvif
->ssid
, ptr
+2, ssid_len
);
2714 static int wlcore_set_ssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2716 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2717 struct sk_buff
*skb
;
2720 /* we currently only support setting the ssid from the ap probe req */
2721 if (wlvif
->bss_type
!= BSS_TYPE_STA_BSS
)
2724 skb
= ieee80211_ap_probereq_get(wl
->hw
, vif
);
2728 ieoffset
= offsetof(struct ieee80211_mgmt
,
2729 u
.probe_req
.variable
);
2730 wl1271_ssid_set(wlvif
, skb
, ieoffset
);
2736 static int wlcore_set_assoc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2737 struct ieee80211_bss_conf
*bss_conf
,
2743 wlvif
->aid
= bss_conf
->aid
;
2744 wlvif
->channel_type
= cfg80211_get_chandef_type(&bss_conf
->chandef
);
2745 wlvif
->beacon_int
= bss_conf
->beacon_int
;
2746 wlvif
->wmm_enabled
= bss_conf
->qos
;
2748 set_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
);
2751 * with wl1271, we don't need to update the
2752 * beacon_int and dtim_period, because the firmware
2753 * updates it by itself when the first beacon is
2754 * received after a join.
2756 ret
= wl1271_cmd_build_ps_poll(wl
, wlvif
, wlvif
->aid
);
2761 * Get a template for hardware connection maintenance
2763 dev_kfree_skb(wlvif
->probereq
);
2764 wlvif
->probereq
= wl1271_cmd_build_ap_probe_req(wl
,
2767 ieoffset
= offsetof(struct ieee80211_mgmt
,
2768 u
.probe_req
.variable
);
2769 wl1271_ssid_set(wlvif
, wlvif
->probereq
, ieoffset
);
2771 /* enable the connection monitoring feature */
2772 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, true);
2777 * The join command disable the keep-alive mode, shut down its process,
2778 * and also clear the template config, so we need to reset it all after
2779 * the join. The acx_aid starts the keep-alive process, and the order
2780 * of the commands below is relevant.
2782 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, true);
2786 ret
= wl1271_acx_aid(wl
, wlvif
, wlvif
->aid
);
2790 ret
= wl12xx_cmd_build_klv_null_data(wl
, wlvif
);
2794 ret
= wl1271_acx_keep_alive_config(wl
, wlvif
,
2795 wlvif
->sta
.klv_template_id
,
2796 ACX_KEEP_ALIVE_TPL_VALID
);
2801 * The default fw psm configuration is AUTO, while mac80211 default
2802 * setting is off (ACTIVE), so sync the fw with the correct value.
2804 ret
= wl1271_ps_set_mode(wl
, wlvif
, STATION_ACTIVE_MODE
);
2810 wl1271_tx_enabled_rates_get(wl
,
2813 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
2821 static int wlcore_unset_assoc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2824 bool sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
2826 /* make sure we are connected (sta) joined */
2828 !test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
))
2831 /* make sure we are joined (ibss) */
2833 test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
))
2837 /* use defaults when not associated */
2840 /* free probe-request template */
2841 dev_kfree_skb(wlvif
->probereq
);
2842 wlvif
->probereq
= NULL
;
2844 /* disable connection monitor features */
2845 ret
= wl1271_acx_conn_monit_params(wl
, wlvif
, false);
2849 /* Disable the keep-alive feature */
2850 ret
= wl1271_acx_keep_alive_mode(wl
, wlvif
, false);
2855 if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
)) {
2856 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
2858 wl12xx_cmd_stop_channel_switch(wl
, wlvif
);
2859 ieee80211_chswitch_done(vif
, false);
2860 cancel_delayed_work(&wlvif
->channel_switch_work
);
2863 /* invalidate keep-alive template */
2864 wl1271_acx_keep_alive_config(wl
, wlvif
,
2865 wlvif
->sta
.klv_template_id
,
2866 ACX_KEEP_ALIVE_TPL_INVALID
);
2868 /* reset TX security counters on a clean disconnect */
2869 wlvif
->tx_security_last_seq_lsb
= 0;
2870 wlvif
->tx_security_seq
= 0;
2875 static void wl1271_set_band_rate(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
2877 wlvif
->basic_rate_set
= wlvif
->bitrate_masks
[wlvif
->band
];
2878 wlvif
->rate_set
= wlvif
->basic_rate_set
;
2881 static int wl12xx_config_vif(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
2882 struct ieee80211_conf
*conf
, u32 changed
)
2886 if (conf
->power_level
!= wlvif
->power_level
) {
2887 ret
= wl1271_acx_tx_power(wl
, wlvif
, conf
->power_level
);
2891 wlvif
->power_level
= conf
->power_level
;
2897 static int wl1271_op_config(struct ieee80211_hw
*hw
, u32 changed
)
2899 struct wl1271
*wl
= hw
->priv
;
2900 struct wl12xx_vif
*wlvif
;
2901 struct ieee80211_conf
*conf
= &hw
->conf
;
2904 wl1271_debug(DEBUG_MAC80211
, "mac80211 config psm %s power %d %s"
2906 conf
->flags
& IEEE80211_CONF_PS
? "on" : "off",
2908 conf
->flags
& IEEE80211_CONF_IDLE
? "idle" : "in use",
2911 mutex_lock(&wl
->mutex
);
2913 if (changed
& IEEE80211_CONF_CHANGE_POWER
)
2914 wl
->power_level
= conf
->power_level
;
2916 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
2919 ret
= wl1271_ps_elp_wakeup(wl
);
2923 /* configure each interface */
2924 wl12xx_for_each_wlvif(wl
, wlvif
) {
2925 ret
= wl12xx_config_vif(wl
, wlvif
, conf
, changed
);
2931 wl1271_ps_elp_sleep(wl
);
2934 mutex_unlock(&wl
->mutex
);
2939 struct wl1271_filter_params
{
2942 u8 mc_list
[ACX_MC_ADDRESS_GROUP_MAX
][ETH_ALEN
];
2945 static u64
wl1271_op_prepare_multicast(struct ieee80211_hw
*hw
,
2946 struct netdev_hw_addr_list
*mc_list
)
2948 struct wl1271_filter_params
*fp
;
2949 struct netdev_hw_addr
*ha
;
2951 fp
= kzalloc(sizeof(*fp
), GFP_ATOMIC
);
2953 wl1271_error("Out of memory setting filters.");
2957 /* update multicast filtering parameters */
2958 fp
->mc_list_length
= 0;
2959 if (netdev_hw_addr_list_count(mc_list
) > ACX_MC_ADDRESS_GROUP_MAX
) {
2960 fp
->enabled
= false;
2963 netdev_hw_addr_list_for_each(ha
, mc_list
) {
2964 memcpy(fp
->mc_list
[fp
->mc_list_length
],
2965 ha
->addr
, ETH_ALEN
);
2966 fp
->mc_list_length
++;
2970 return (u64
)(unsigned long)fp
;
2973 #define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
2976 FIF_BCN_PRBRESP_PROMISC | \
2980 static void wl1271_op_configure_filter(struct ieee80211_hw
*hw
,
2981 unsigned int changed
,
2982 unsigned int *total
, u64 multicast
)
2984 struct wl1271_filter_params
*fp
= (void *)(unsigned long)multicast
;
2985 struct wl1271
*wl
= hw
->priv
;
2986 struct wl12xx_vif
*wlvif
;
2990 wl1271_debug(DEBUG_MAC80211
, "mac80211 configure filter changed %x"
2991 " total %x", changed
, *total
);
2993 mutex_lock(&wl
->mutex
);
2995 *total
&= WL1271_SUPPORTED_FILTERS
;
2996 changed
&= WL1271_SUPPORTED_FILTERS
;
2998 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3001 ret
= wl1271_ps_elp_wakeup(wl
);
3005 wl12xx_for_each_wlvif(wl
, wlvif
) {
3006 if (wlvif
->bss_type
!= BSS_TYPE_AP_BSS
) {
3007 if (*total
& FIF_ALLMULTI
)
3008 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3012 ret
= wl1271_acx_group_address_tbl(wl
, wlvif
,
3015 fp
->mc_list_length
);
3022 * the fw doesn't provide an api to configure the filters. instead,
3023 * the filters configuration is based on the active roles / ROC
3028 wl1271_ps_elp_sleep(wl
);
3031 mutex_unlock(&wl
->mutex
);
3035 static int wl1271_record_ap_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3036 u8 id
, u8 key_type
, u8 key_size
,
3037 const u8
*key
, u8 hlid
, u32 tx_seq_32
,
3040 struct wl1271_ap_key
*ap_key
;
3043 wl1271_debug(DEBUG_CRYPT
, "record ap key id %d", (int)id
);
3045 if (key_size
> MAX_KEY_SIZE
)
3049 * Find next free entry in ap_keys. Also check we are not replacing
3052 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3053 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
3056 if (wlvif
->ap
.recorded_keys
[i
]->id
== id
) {
3057 wl1271_warning("trying to record key replacement");
3062 if (i
== MAX_NUM_KEYS
)
3065 ap_key
= kzalloc(sizeof(*ap_key
), GFP_KERNEL
);
3070 ap_key
->key_type
= key_type
;
3071 ap_key
->key_size
= key_size
;
3072 memcpy(ap_key
->key
, key
, key_size
);
3073 ap_key
->hlid
= hlid
;
3074 ap_key
->tx_seq_32
= tx_seq_32
;
3075 ap_key
->tx_seq_16
= tx_seq_16
;
3077 wlvif
->ap
.recorded_keys
[i
] = ap_key
;
3081 static void wl1271_free_ap_keys(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3085 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3086 kfree(wlvif
->ap
.recorded_keys
[i
]);
3087 wlvif
->ap
.recorded_keys
[i
] = NULL
;
3091 static int wl1271_ap_init_hwenc(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
3094 struct wl1271_ap_key
*key
;
3095 bool wep_key_added
= false;
3097 for (i
= 0; i
< MAX_NUM_KEYS
; i
++) {
3099 if (wlvif
->ap
.recorded_keys
[i
] == NULL
)
3102 key
= wlvif
->ap
.recorded_keys
[i
];
3104 if (hlid
== WL12XX_INVALID_LINK_ID
)
3105 hlid
= wlvif
->ap
.bcast_hlid
;
3107 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3108 key
->id
, key
->key_type
,
3109 key
->key_size
, key
->key
,
3110 hlid
, key
->tx_seq_32
,
3115 if (key
->key_type
== KEY_WEP
)
3116 wep_key_added
= true;
3119 if (wep_key_added
) {
3120 ret
= wl12xx_cmd_set_default_wep_key(wl
, wlvif
->default_key
,
3121 wlvif
->ap
.bcast_hlid
);
3127 wl1271_free_ap_keys(wl
, wlvif
);
3131 static int wl1271_set_key(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3132 u16 action
, u8 id
, u8 key_type
,
3133 u8 key_size
, const u8
*key
, u32 tx_seq_32
,
3134 u16 tx_seq_16
, struct ieee80211_sta
*sta
)
3137 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3140 struct wl1271_station
*wl_sta
;
3144 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
3145 hlid
= wl_sta
->hlid
;
3147 hlid
= wlvif
->ap
.bcast_hlid
;
3150 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3152 * We do not support removing keys after AP shutdown.
3153 * Pretend we do to make mac80211 happy.
3155 if (action
!= KEY_ADD_OR_REPLACE
)
3158 ret
= wl1271_record_ap_key(wl
, wlvif
, id
,
3160 key
, hlid
, tx_seq_32
,
3163 ret
= wl1271_cmd_set_ap_key(wl
, wlvif
, action
,
3164 id
, key_type
, key_size
,
3165 key
, hlid
, tx_seq_32
,
3173 static const u8 bcast_addr
[ETH_ALEN
] = {
3174 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
3177 addr
= sta
? sta
->addr
: bcast_addr
;
3179 if (is_zero_ether_addr(addr
)) {
3180 /* We dont support TX only encryption */
3184 /* The wl1271 does not allow to remove unicast keys - they
3185 will be cleared automatically on next CMD_JOIN. Ignore the
3186 request silently, as we dont want the mac80211 to emit
3187 an error message. */
3188 if (action
== KEY_REMOVE
&& !is_broadcast_ether_addr(addr
))
3191 /* don't remove key if hlid was already deleted */
3192 if (action
== KEY_REMOVE
&&
3193 wlvif
->sta
.hlid
== WL12XX_INVALID_LINK_ID
)
3196 ret
= wl1271_cmd_set_sta_key(wl
, wlvif
, action
,
3197 id
, key_type
, key_size
,
3198 key
, addr
, tx_seq_32
,
3203 /* the default WEP key needs to be configured at least once */
3204 if (key_type
== KEY_WEP
) {
3205 ret
= wl12xx_cmd_set_default_wep_key(wl
,
3216 static int wlcore_op_set_key(struct ieee80211_hw
*hw
, enum set_key_cmd cmd
,
3217 struct ieee80211_vif
*vif
,
3218 struct ieee80211_sta
*sta
,
3219 struct ieee80211_key_conf
*key_conf
)
3221 struct wl1271
*wl
= hw
->priv
;
3223 bool might_change_spare
=
3224 key_conf
->cipher
== WL1271_CIPHER_SUITE_GEM
||
3225 key_conf
->cipher
== WLAN_CIPHER_SUITE_TKIP
;
3227 if (might_change_spare
) {
3229 * stop the queues and flush to ensure the next packets are
3230 * in sync with FW spare block accounting
3232 wlcore_stop_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3233 wl1271_tx_flush(wl
);
3236 mutex_lock(&wl
->mutex
);
3238 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3240 goto out_wake_queues
;
3243 ret
= wl1271_ps_elp_wakeup(wl
);
3245 goto out_wake_queues
;
3247 ret
= wlcore_hw_set_key(wl
, cmd
, vif
, sta
, key_conf
);
3249 wl1271_ps_elp_sleep(wl
);
3252 if (might_change_spare
)
3253 wlcore_wake_queues(wl
, WLCORE_QUEUE_STOP_REASON_SPARE_BLK
);
3255 mutex_unlock(&wl
->mutex
);
3260 int wlcore_set_key(struct wl1271
*wl
, enum set_key_cmd cmd
,
3261 struct ieee80211_vif
*vif
,
3262 struct ieee80211_sta
*sta
,
3263 struct ieee80211_key_conf
*key_conf
)
3265 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3271 wl1271_debug(DEBUG_MAC80211
, "mac80211 set key");
3273 wl1271_debug(DEBUG_CRYPT
, "CMD: 0x%x sta: %p", cmd
, sta
);
3274 wl1271_debug(DEBUG_CRYPT
, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
3275 key_conf
->cipher
, key_conf
->keyidx
,
3276 key_conf
->keylen
, key_conf
->flags
);
3277 wl1271_dump(DEBUG_CRYPT
, "KEY: ", key_conf
->key
, key_conf
->keylen
);
3279 switch (key_conf
->cipher
) {
3280 case WLAN_CIPHER_SUITE_WEP40
:
3281 case WLAN_CIPHER_SUITE_WEP104
:
3284 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3286 case WLAN_CIPHER_SUITE_TKIP
:
3287 key_type
= KEY_TKIP
;
3289 key_conf
->hw_key_idx
= key_conf
->keyidx
;
3290 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3291 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3293 case WLAN_CIPHER_SUITE_CCMP
:
3296 key_conf
->flags
|= IEEE80211_KEY_FLAG_PUT_IV_SPACE
;
3297 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3298 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3300 case WL1271_CIPHER_SUITE_GEM
:
3302 tx_seq_32
= WL1271_TX_SECURITY_HI32(wlvif
->tx_security_seq
);
3303 tx_seq_16
= WL1271_TX_SECURITY_LO16(wlvif
->tx_security_seq
);
3306 wl1271_error("Unknown key algo 0x%x", key_conf
->cipher
);
3313 ret
= wl1271_set_key(wl
, wlvif
, KEY_ADD_OR_REPLACE
,
3314 key_conf
->keyidx
, key_type
,
3315 key_conf
->keylen
, key_conf
->key
,
3316 tx_seq_32
, tx_seq_16
, sta
);
3318 wl1271_error("Could not add or replace key");
3323 * reconfiguring arp response if the unicast (or common)
3324 * encryption key type was changed
3326 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
3327 (sta
|| key_type
== KEY_WEP
) &&
3328 wlvif
->encryption_type
!= key_type
) {
3329 wlvif
->encryption_type
= key_type
;
3330 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
3332 wl1271_warning("build arp rsp failed: %d", ret
);
3339 ret
= wl1271_set_key(wl
, wlvif
, KEY_REMOVE
,
3340 key_conf
->keyidx
, key_type
,
3341 key_conf
->keylen
, key_conf
->key
,
3344 wl1271_error("Could not remove key");
3350 wl1271_error("Unsupported key cmd 0x%x", cmd
);
3356 EXPORT_SYMBOL_GPL(wlcore_set_key
);
3358 void wlcore_regdomain_config(struct wl1271
*wl
)
3362 if (!(wl
->quirks
& WLCORE_QUIRK_REGDOMAIN_CONF
))
3365 mutex_lock(&wl
->mutex
);
3366 ret
= wl1271_ps_elp_wakeup(wl
);
3370 ret
= wlcore_cmd_regdomain_config_locked(wl
);
3372 wl12xx_queue_recovery_work(wl
);
3376 wl1271_ps_elp_sleep(wl
);
3378 mutex_unlock(&wl
->mutex
);
3381 static int wl1271_op_hw_scan(struct ieee80211_hw
*hw
,
3382 struct ieee80211_vif
*vif
,
3383 struct cfg80211_scan_request
*req
)
3385 struct wl1271
*wl
= hw
->priv
;
3390 wl1271_debug(DEBUG_MAC80211
, "mac80211 hw scan");
3393 ssid
= req
->ssids
[0].ssid
;
3394 len
= req
->ssids
[0].ssid_len
;
3397 mutex_lock(&wl
->mutex
);
3399 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3401 * We cannot return -EBUSY here because cfg80211 will expect
3402 * a call to ieee80211_scan_completed if we do - in this case
3403 * there won't be any call.
3409 ret
= wl1271_ps_elp_wakeup(wl
);
3413 /* fail if there is any role in ROC */
3414 if (find_first_bit(wl
->roc_map
, WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
) {
3415 /* don't allow scanning right now */
3420 ret
= wlcore_scan(hw
->priv
, vif
, ssid
, len
, req
);
3422 wl1271_ps_elp_sleep(wl
);
3424 mutex_unlock(&wl
->mutex
);
3429 static void wl1271_op_cancel_hw_scan(struct ieee80211_hw
*hw
,
3430 struct ieee80211_vif
*vif
)
3432 struct wl1271
*wl
= hw
->priv
;
3433 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3436 wl1271_debug(DEBUG_MAC80211
, "mac80211 cancel hw scan");
3438 mutex_lock(&wl
->mutex
);
3440 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3443 if (wl
->scan
.state
== WL1271_SCAN_STATE_IDLE
)
3446 ret
= wl1271_ps_elp_wakeup(wl
);
3450 if (wl
->scan
.state
!= WL1271_SCAN_STATE_DONE
) {
3451 ret
= wl
->ops
->scan_stop(wl
, wlvif
);
3457 * Rearm the tx watchdog just before idling scan. This
3458 * prevents just-finished scans from triggering the watchdog
3460 wl12xx_rearm_tx_watchdog_locked(wl
);
3462 wl
->scan
.state
= WL1271_SCAN_STATE_IDLE
;
3463 memset(wl
->scan
.scanned_ch
, 0, sizeof(wl
->scan
.scanned_ch
));
3464 wl
->scan_wlvif
= NULL
;
3465 wl
->scan
.req
= NULL
;
3466 ieee80211_scan_completed(wl
->hw
, true);
3469 wl1271_ps_elp_sleep(wl
);
3471 mutex_unlock(&wl
->mutex
);
3473 cancel_delayed_work_sync(&wl
->scan_complete_work
);
3476 static int wl1271_op_sched_scan_start(struct ieee80211_hw
*hw
,
3477 struct ieee80211_vif
*vif
,
3478 struct cfg80211_sched_scan_request
*req
,
3479 struct ieee80211_sched_scan_ies
*ies
)
3481 struct wl1271
*wl
= hw
->priv
;
3482 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3485 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_start");
3487 mutex_lock(&wl
->mutex
);
3489 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3494 ret
= wl1271_ps_elp_wakeup(wl
);
3498 ret
= wl
->ops
->sched_scan_start(wl
, wlvif
, req
, ies
);
3502 wl
->sched_vif
= wlvif
;
3505 wl1271_ps_elp_sleep(wl
);
3507 mutex_unlock(&wl
->mutex
);
3511 static void wl1271_op_sched_scan_stop(struct ieee80211_hw
*hw
,
3512 struct ieee80211_vif
*vif
)
3514 struct wl1271
*wl
= hw
->priv
;
3515 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3518 wl1271_debug(DEBUG_MAC80211
, "wl1271_op_sched_scan_stop");
3520 mutex_lock(&wl
->mutex
);
3522 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
3525 ret
= wl1271_ps_elp_wakeup(wl
);
3529 wl
->ops
->sched_scan_stop(wl
, wlvif
);
3531 wl1271_ps_elp_sleep(wl
);
3533 mutex_unlock(&wl
->mutex
);
3536 static int wl1271_op_set_frag_threshold(struct ieee80211_hw
*hw
, u32 value
)
3538 struct wl1271
*wl
= hw
->priv
;
3541 mutex_lock(&wl
->mutex
);
3543 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3548 ret
= wl1271_ps_elp_wakeup(wl
);
3552 ret
= wl1271_acx_frag_threshold(wl
, value
);
3554 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret
);
3556 wl1271_ps_elp_sleep(wl
);
3559 mutex_unlock(&wl
->mutex
);
3564 static int wl1271_op_set_rts_threshold(struct ieee80211_hw
*hw
, u32 value
)
3566 struct wl1271
*wl
= hw
->priv
;
3567 struct wl12xx_vif
*wlvif
;
3570 mutex_lock(&wl
->mutex
);
3572 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
3577 ret
= wl1271_ps_elp_wakeup(wl
);
3581 wl12xx_for_each_wlvif(wl
, wlvif
) {
3582 ret
= wl1271_acx_rts_threshold(wl
, wlvif
, value
);
3584 wl1271_warning("set rts threshold failed: %d", ret
);
3586 wl1271_ps_elp_sleep(wl
);
3589 mutex_unlock(&wl
->mutex
);
3594 static void wl12xx_remove_ie(struct sk_buff
*skb
, u8 eid
, int ieoffset
)
3597 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3598 u8
*ie
= (u8
*)cfg80211_find_ie(eid
, skb
->data
+ ieoffset
,
3599 skb
->len
- ieoffset
);
3604 memmove(ie
, next
, end
- next
);
3605 skb_trim(skb
, skb
->len
- len
);
3608 static void wl12xx_remove_vendor_ie(struct sk_buff
*skb
,
3609 unsigned int oui
, u8 oui_type
,
3613 const u8
*next
, *end
= skb
->data
+ skb
->len
;
3614 u8
*ie
= (u8
*)cfg80211_find_vendor_ie(oui
, oui_type
,
3615 skb
->data
+ ieoffset
,
3616 skb
->len
- ieoffset
);
3621 memmove(ie
, next
, end
- next
);
3622 skb_trim(skb
, skb
->len
- len
);
3625 static int wl1271_ap_set_probe_resp_tmpl(struct wl1271
*wl
, u32 rates
,
3626 struct ieee80211_vif
*vif
)
3628 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3629 struct sk_buff
*skb
;
3632 skb
= ieee80211_proberesp_get(wl
->hw
, vif
);
3636 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3637 CMD_TEMPL_AP_PROBE_RESPONSE
,
3646 wl1271_debug(DEBUG_AP
, "probe response updated");
3647 set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
);
3653 static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271
*wl
,
3654 struct ieee80211_vif
*vif
,
3656 size_t probe_rsp_len
,
3659 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3660 struct ieee80211_bss_conf
*bss_conf
= &vif
->bss_conf
;
3661 u8 probe_rsp_templ
[WL1271_CMD_TEMPL_MAX_SIZE
];
3662 int ssid_ie_offset
, ie_offset
, templ_len
;
3665 /* no need to change probe response if the SSID is set correctly */
3666 if (wlvif
->ssid_len
> 0)
3667 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3668 CMD_TEMPL_AP_PROBE_RESPONSE
,
3673 if (probe_rsp_len
+ bss_conf
->ssid_len
> WL1271_CMD_TEMPL_MAX_SIZE
) {
3674 wl1271_error("probe_rsp template too big");
3678 /* start searching from IE offset */
3679 ie_offset
= offsetof(struct ieee80211_mgmt
, u
.probe_resp
.variable
);
3681 ptr
= cfg80211_find_ie(WLAN_EID_SSID
, probe_rsp_data
+ ie_offset
,
3682 probe_rsp_len
- ie_offset
);
3684 wl1271_error("No SSID in beacon!");
3688 ssid_ie_offset
= ptr
- probe_rsp_data
;
3689 ptr
+= (ptr
[1] + 2);
3691 memcpy(probe_rsp_templ
, probe_rsp_data
, ssid_ie_offset
);
3693 /* insert SSID from bss_conf */
3694 probe_rsp_templ
[ssid_ie_offset
] = WLAN_EID_SSID
;
3695 probe_rsp_templ
[ssid_ie_offset
+ 1] = bss_conf
->ssid_len
;
3696 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2,
3697 bss_conf
->ssid
, bss_conf
->ssid_len
);
3698 templ_len
= ssid_ie_offset
+ 2 + bss_conf
->ssid_len
;
3700 memcpy(probe_rsp_templ
+ ssid_ie_offset
+ 2 + bss_conf
->ssid_len
,
3701 ptr
, probe_rsp_len
- (ptr
- probe_rsp_data
));
3702 templ_len
+= probe_rsp_len
- (ptr
- probe_rsp_data
);
3704 return wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3705 CMD_TEMPL_AP_PROBE_RESPONSE
,
3711 static int wl1271_bss_erp_info_changed(struct wl1271
*wl
,
3712 struct ieee80211_vif
*vif
,
3713 struct ieee80211_bss_conf
*bss_conf
,
3716 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3719 if (changed
& BSS_CHANGED_ERP_SLOT
) {
3720 if (bss_conf
->use_short_slot
)
3721 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_SHORT
);
3723 ret
= wl1271_acx_slot(wl
, wlvif
, SLOT_TIME_LONG
);
3725 wl1271_warning("Set slot time failed %d", ret
);
3730 if (changed
& BSS_CHANGED_ERP_PREAMBLE
) {
3731 if (bss_conf
->use_short_preamble
)
3732 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_SHORT
);
3734 wl1271_acx_set_preamble(wl
, wlvif
, ACX_PREAMBLE_LONG
);
3737 if (changed
& BSS_CHANGED_ERP_CTS_PROT
) {
3738 if (bss_conf
->use_cts_prot
)
3739 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3742 ret
= wl1271_acx_cts_protect(wl
, wlvif
,
3743 CTSPROTECT_DISABLE
);
3745 wl1271_warning("Set ctsprotect failed %d", ret
);
3754 static int wlcore_set_beacon_template(struct wl1271
*wl
,
3755 struct ieee80211_vif
*vif
,
3758 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3759 struct ieee80211_hdr
*hdr
;
3762 int ieoffset
= offsetof(struct ieee80211_mgmt
,
3764 struct sk_buff
*beacon
= ieee80211_beacon_get(wl
->hw
, vif
);
3772 wl1271_debug(DEBUG_MASTER
, "beacon updated");
3774 ret
= wl1271_ssid_set(wlvif
, beacon
, ieoffset
);
3776 dev_kfree_skb(beacon
);
3779 min_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3780 tmpl_id
= is_ap
? CMD_TEMPL_AP_BEACON
:
3782 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
, tmpl_id
,
3787 dev_kfree_skb(beacon
);
3791 wlvif
->wmm_enabled
=
3792 cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT
,
3793 WLAN_OUI_TYPE_MICROSOFT_WMM
,
3794 beacon
->data
+ ieoffset
,
3795 beacon
->len
- ieoffset
);
3798 * In case we already have a probe-resp beacon set explicitly
3799 * by usermode, don't use the beacon data.
3801 if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
, &wlvif
->flags
))
3804 /* remove TIM ie from probe response */
3805 wl12xx_remove_ie(beacon
, WLAN_EID_TIM
, ieoffset
);
3808 * remove p2p ie from probe response.
3809 * the fw reponds to probe requests that don't include
3810 * the p2p ie. probe requests with p2p ie will be passed,
3811 * and will be responded by the supplicant (the spec
3812 * forbids including the p2p ie when responding to probe
3813 * requests that didn't include it).
3815 wl12xx_remove_vendor_ie(beacon
, WLAN_OUI_WFA
,
3816 WLAN_OUI_TYPE_WFA_P2P
, ieoffset
);
3818 hdr
= (struct ieee80211_hdr
*) beacon
->data
;
3819 hdr
->frame_control
= cpu_to_le16(IEEE80211_FTYPE_MGMT
|
3820 IEEE80211_STYPE_PROBE_RESP
);
3822 ret
= wl1271_ap_set_probe_resp_tmpl_legacy(wl
, vif
,
3827 ret
= wl1271_cmd_template_set(wl
, wlvif
->role_id
,
3828 CMD_TEMPL_PROBE_RESPONSE
,
3833 dev_kfree_skb(beacon
);
3841 static int wl1271_bss_beacon_info_changed(struct wl1271
*wl
,
3842 struct ieee80211_vif
*vif
,
3843 struct ieee80211_bss_conf
*bss_conf
,
3846 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3847 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
3850 if (changed
& BSS_CHANGED_BEACON_INT
) {
3851 wl1271_debug(DEBUG_MASTER
, "beacon interval updated: %d",
3852 bss_conf
->beacon_int
);
3854 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3857 if ((changed
& BSS_CHANGED_AP_PROBE_RESP
) && is_ap
) {
3858 u32 rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
3860 wl1271_ap_set_probe_resp_tmpl(wl
, rate
, vif
);
3863 if (changed
& BSS_CHANGED_BEACON
) {
3864 ret
= wlcore_set_beacon_template(wl
, vif
, is_ap
);
3871 wl1271_error("beacon info change failed: %d", ret
);
3875 /* AP mode changes */
3876 static void wl1271_bss_info_changed_ap(struct wl1271
*wl
,
3877 struct ieee80211_vif
*vif
,
3878 struct ieee80211_bss_conf
*bss_conf
,
3881 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
3884 if (changed
& BSS_CHANGED_BASIC_RATES
) {
3885 u32 rates
= bss_conf
->basic_rates
;
3887 wlvif
->basic_rate_set
= wl1271_tx_enabled_rates_get(wl
, rates
,
3889 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
,
3890 wlvif
->basic_rate_set
);
3892 ret
= wl1271_init_ap_rates(wl
, wlvif
);
3894 wl1271_error("AP rate policy change failed %d", ret
);
3898 ret
= wl1271_ap_init_templates(wl
, vif
);
3902 ret
= wl1271_ap_set_probe_resp_tmpl(wl
, wlvif
->basic_rate
, vif
);
3906 ret
= wlcore_set_beacon_template(wl
, vif
, true);
3911 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
, changed
);
3915 if (changed
& BSS_CHANGED_BEACON_ENABLED
) {
3916 if (bss_conf
->enable_beacon
) {
3917 if (!test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3918 ret
= wl12xx_cmd_role_start_ap(wl
, wlvif
);
3922 ret
= wl1271_ap_init_hwenc(wl
, wlvif
);
3926 set_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3927 wl1271_debug(DEBUG_AP
, "started AP");
3930 if (test_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
)) {
3931 ret
= wl12xx_cmd_role_stop_ap(wl
, wlvif
);
3935 clear_bit(WLVIF_FLAG_AP_STARTED
, &wlvif
->flags
);
3936 clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET
,
3938 wl1271_debug(DEBUG_AP
, "stopped AP");
3943 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
3947 /* Handle HT information change */
3948 if ((changed
& BSS_CHANGED_HT
) &&
3949 (bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
)) {
3950 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
3951 bss_conf
->ht_operation_mode
);
3953 wl1271_warning("Set ht information failed %d", ret
);
3962 static int wlcore_set_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
,
3963 struct ieee80211_bss_conf
*bss_conf
,
3969 wl1271_debug(DEBUG_MAC80211
,
3970 "changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
3971 bss_conf
->bssid
, bss_conf
->aid
,
3972 bss_conf
->beacon_int
,
3973 bss_conf
->basic_rates
, sta_rate_set
);
3975 wlvif
->beacon_int
= bss_conf
->beacon_int
;
3976 rates
= bss_conf
->basic_rates
;
3977 wlvif
->basic_rate_set
=
3978 wl1271_tx_enabled_rates_get(wl
, rates
,
3981 wl1271_tx_min_rate_get(wl
,
3982 wlvif
->basic_rate_set
);
3986 wl1271_tx_enabled_rates_get(wl
,
3990 /* we only support sched_scan while not connected */
3991 if (wl
->sched_vif
== wlvif
)
3992 wl
->ops
->sched_scan_stop(wl
, wlvif
);
3994 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
3998 ret
= wl12xx_cmd_build_null_data(wl
, wlvif
);
4002 ret
= wl1271_build_qos_null_data(wl
, wl12xx_wlvif_to_vif(wlvif
));
4006 wlcore_set_ssid(wl
, wlvif
);
4008 set_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4013 static int wlcore_clear_bssid(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
)
4017 /* revert back to minimum rates for the current band */
4018 wl1271_set_band_rate(wl
, wlvif
);
4019 wlvif
->basic_rate
= wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4021 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4025 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4026 test_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
)) {
4027 ret
= wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4032 clear_bit(WLVIF_FLAG_IN_USE
, &wlvif
->flags
);
4035 /* STA/IBSS mode changes */
4036 static void wl1271_bss_info_changed_sta(struct wl1271
*wl
,
4037 struct ieee80211_vif
*vif
,
4038 struct ieee80211_bss_conf
*bss_conf
,
4041 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4042 bool do_join
= false;
4043 bool is_ibss
= (wlvif
->bss_type
== BSS_TYPE_IBSS
);
4044 bool ibss_joined
= false;
4045 u32 sta_rate_set
= 0;
4047 struct ieee80211_sta
*sta
;
4048 bool sta_exists
= false;
4049 struct ieee80211_sta_ht_cap sta_ht_cap
;
4052 ret
= wl1271_bss_beacon_info_changed(wl
, vif
, bss_conf
,
4058 if (changed
& BSS_CHANGED_IBSS
) {
4059 if (bss_conf
->ibss_joined
) {
4060 set_bit(WLVIF_FLAG_IBSS_JOINED
, &wlvif
->flags
);
4063 wlcore_unset_assoc(wl
, wlvif
);
4064 wl12xx_cmd_role_stop_sta(wl
, wlvif
);
4068 if ((changed
& BSS_CHANGED_BEACON_INT
) && ibss_joined
)
4071 /* Need to update the SSID (for filtering etc) */
4072 if ((changed
& BSS_CHANGED_BEACON
) && ibss_joined
)
4075 if ((changed
& BSS_CHANGED_BEACON_ENABLED
) && ibss_joined
) {
4076 wl1271_debug(DEBUG_ADHOC
, "ad-hoc beaconing: %s",
4077 bss_conf
->enable_beacon
? "enabled" : "disabled");
4082 if (changed
& BSS_CHANGED_CQM
) {
4083 bool enable
= false;
4084 if (bss_conf
->cqm_rssi_thold
)
4086 ret
= wl1271_acx_rssi_snr_trigger(wl
, wlvif
, enable
,
4087 bss_conf
->cqm_rssi_thold
,
4088 bss_conf
->cqm_rssi_hyst
);
4091 wlvif
->rssi_thold
= bss_conf
->cqm_rssi_thold
;
4094 if (changed
& (BSS_CHANGED_BSSID
| BSS_CHANGED_HT
|
4095 BSS_CHANGED_ASSOC
)) {
4097 sta
= ieee80211_find_sta(vif
, bss_conf
->bssid
);
4099 u8
*rx_mask
= sta
->ht_cap
.mcs
.rx_mask
;
4101 /* save the supp_rates of the ap */
4102 sta_rate_set
= sta
->supp_rates
[wlvif
->band
];
4103 if (sta
->ht_cap
.ht_supported
)
4105 (rx_mask
[0] << HW_HT_RATES_OFFSET
) |
4106 (rx_mask
[1] << HW_MIMO_RATES_OFFSET
);
4107 sta_ht_cap
= sta
->ht_cap
;
4114 if (changed
& BSS_CHANGED_BSSID
) {
4115 if (!is_zero_ether_addr(bss_conf
->bssid
)) {
4116 ret
= wlcore_set_bssid(wl
, wlvif
, bss_conf
,
4121 /* Need to update the BSSID (for filtering etc) */
4124 ret
= wlcore_clear_bssid(wl
, wlvif
);
4130 if (changed
& BSS_CHANGED_IBSS
) {
4131 wl1271_debug(DEBUG_ADHOC
, "ibss_joined: %d",
4132 bss_conf
->ibss_joined
);
4134 if (bss_conf
->ibss_joined
) {
4135 u32 rates
= bss_conf
->basic_rates
;
4136 wlvif
->basic_rate_set
=
4137 wl1271_tx_enabled_rates_get(wl
, rates
,
4140 wl1271_tx_min_rate_get(wl
,
4141 wlvif
->basic_rate_set
);
4143 /* by default, use 11b + OFDM rates */
4144 wlvif
->rate_set
= CONF_TX_IBSS_DEFAULT_RATES
;
4145 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4151 ret
= wl1271_bss_erp_info_changed(wl
, vif
, bss_conf
, changed
);
4156 ret
= wlcore_join(wl
, wlvif
);
4158 wl1271_warning("cmd join failed %d", ret
);
4163 if (changed
& BSS_CHANGED_ASSOC
) {
4164 if (bss_conf
->assoc
) {
4165 ret
= wlcore_set_assoc(wl
, wlvif
, bss_conf
,
4170 if (test_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
))
4171 wl12xx_set_authorized(wl
, wlvif
);
4173 wlcore_unset_assoc(wl
, wlvif
);
4177 if (changed
& BSS_CHANGED_PS
) {
4178 if ((bss_conf
->ps
) &&
4179 test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
) &&
4180 !test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4184 if (wl
->conf
.conn
.forced_ps
) {
4185 ps_mode
= STATION_POWER_SAVE_MODE
;
4186 ps_mode_str
= "forced";
4188 ps_mode
= STATION_AUTO_PS_MODE
;
4189 ps_mode_str
= "auto";
4192 wl1271_debug(DEBUG_PSM
, "%s ps enabled", ps_mode_str
);
4194 ret
= wl1271_ps_set_mode(wl
, wlvif
, ps_mode
);
4196 wl1271_warning("enter %s ps failed %d",
4198 } else if (!bss_conf
->ps
&&
4199 test_bit(WLVIF_FLAG_IN_PS
, &wlvif
->flags
)) {
4200 wl1271_debug(DEBUG_PSM
, "auto ps disabled");
4202 ret
= wl1271_ps_set_mode(wl
, wlvif
,
4203 STATION_ACTIVE_MODE
);
4205 wl1271_warning("exit auto ps failed %d", ret
);
4209 /* Handle new association with HT. Do this after join. */
4211 (changed
& BSS_CHANGED_HT
)) {
4213 bss_conf
->chandef
.width
!= NL80211_CHAN_WIDTH_20_NOHT
;
4215 ret
= wlcore_hw_set_peer_cap(wl
,
4221 wl1271_warning("Set ht cap failed %d", ret
);
4227 ret
= wl1271_acx_set_ht_information(wl
, wlvif
,
4228 bss_conf
->ht_operation_mode
);
4230 wl1271_warning("Set ht information failed %d",
4237 /* Handle arp filtering. Done after join. */
4238 if ((changed
& BSS_CHANGED_ARP_FILTER
) ||
4239 (!is_ibss
&& (changed
& BSS_CHANGED_QOS
))) {
4240 __be32 addr
= bss_conf
->arp_addr_list
[0];
4241 wlvif
->sta
.qos
= bss_conf
->qos
;
4242 WARN_ON(wlvif
->bss_type
!= BSS_TYPE_STA_BSS
);
4244 if (bss_conf
->arp_addr_cnt
== 1 &&
4245 bss_conf
->arp_filter_enabled
) {
4246 wlvif
->ip_addr
= addr
;
4248 * The template should have been configured only upon
4249 * association. however, it seems that the correct ip
4250 * isn't being set (when sending), so we have to
4251 * reconfigure the template upon every ip change.
4253 ret
= wl1271_cmd_build_arp_rsp(wl
, wlvif
);
4255 wl1271_warning("build arp rsp failed: %d", ret
);
4259 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
,
4260 (ACX_ARP_FILTER_ARP_FILTERING
|
4261 ACX_ARP_FILTER_AUTO_ARP
),
4265 ret
= wl1271_acx_arp_ip_filter(wl
, wlvif
, 0, addr
);
4276 static void wl1271_op_bss_info_changed(struct ieee80211_hw
*hw
,
4277 struct ieee80211_vif
*vif
,
4278 struct ieee80211_bss_conf
*bss_conf
,
4281 struct wl1271
*wl
= hw
->priv
;
4282 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4283 bool is_ap
= (wlvif
->bss_type
== BSS_TYPE_AP_BSS
);
4286 wl1271_debug(DEBUG_MAC80211
, "mac80211 bss info role %d changed 0x%x",
4287 wlvif
->role_id
, (int)changed
);
4290 * make sure to cancel pending disconnections if our association
4293 if (!is_ap
&& (changed
& BSS_CHANGED_ASSOC
))
4294 cancel_delayed_work_sync(&wlvif
->connection_loss_work
);
4296 if (is_ap
&& (changed
& BSS_CHANGED_BEACON_ENABLED
) &&
4297 !bss_conf
->enable_beacon
)
4298 wl1271_tx_flush(wl
);
4300 mutex_lock(&wl
->mutex
);
4302 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4305 if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
)))
4308 ret
= wl1271_ps_elp_wakeup(wl
);
4313 wl1271_bss_info_changed_ap(wl
, vif
, bss_conf
, changed
);
4315 wl1271_bss_info_changed_sta(wl
, vif
, bss_conf
, changed
);
4317 wl1271_ps_elp_sleep(wl
);
4320 mutex_unlock(&wl
->mutex
);
4323 static int wlcore_op_add_chanctx(struct ieee80211_hw
*hw
,
4324 struct ieee80211_chanctx_conf
*ctx
)
4326 wl1271_debug(DEBUG_MAC80211
, "mac80211 add chanctx %d (type %d)",
4327 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4328 cfg80211_get_chandef_type(&ctx
->def
));
4332 static void wlcore_op_remove_chanctx(struct ieee80211_hw
*hw
,
4333 struct ieee80211_chanctx_conf
*ctx
)
4335 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove chanctx %d (type %d)",
4336 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4337 cfg80211_get_chandef_type(&ctx
->def
));
4340 static void wlcore_op_change_chanctx(struct ieee80211_hw
*hw
,
4341 struct ieee80211_chanctx_conf
*ctx
,
4344 wl1271_debug(DEBUG_MAC80211
,
4345 "mac80211 change chanctx %d (type %d) changed 0x%x",
4346 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4347 cfg80211_get_chandef_type(&ctx
->def
), changed
);
4350 static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw
*hw
,
4351 struct ieee80211_vif
*vif
,
4352 struct ieee80211_chanctx_conf
*ctx
)
4354 struct wl1271
*wl
= hw
->priv
;
4355 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4356 int channel
= ieee80211_frequency_to_channel(
4357 ctx
->def
.chan
->center_freq
);
4359 wl1271_debug(DEBUG_MAC80211
,
4360 "mac80211 assign chanctx (role %d) %d (type %d)",
4361 wlvif
->role_id
, channel
, cfg80211_get_chandef_type(&ctx
->def
));
4363 mutex_lock(&wl
->mutex
);
4365 wlvif
->band
= ctx
->def
.chan
->band
;
4366 wlvif
->channel
= channel
;
4367 wlvif
->channel_type
= cfg80211_get_chandef_type(&ctx
->def
);
4369 /* update default rates according to the band */
4370 wl1271_set_band_rate(wl
, wlvif
);
4372 mutex_unlock(&wl
->mutex
);
4377 static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw
*hw
,
4378 struct ieee80211_vif
*vif
,
4379 struct ieee80211_chanctx_conf
*ctx
)
4381 struct wl1271
*wl
= hw
->priv
;
4382 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4384 wl1271_debug(DEBUG_MAC80211
,
4385 "mac80211 unassign chanctx (role %d) %d (type %d)",
4387 ieee80211_frequency_to_channel(ctx
->def
.chan
->center_freq
),
4388 cfg80211_get_chandef_type(&ctx
->def
));
4390 wl1271_tx_flush(wl
);
4393 static int wl1271_op_conf_tx(struct ieee80211_hw
*hw
,
4394 struct ieee80211_vif
*vif
, u16 queue
,
4395 const struct ieee80211_tx_queue_params
*params
)
4397 struct wl1271
*wl
= hw
->priv
;
4398 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4402 mutex_lock(&wl
->mutex
);
4404 wl1271_debug(DEBUG_MAC80211
, "mac80211 conf tx %d", queue
);
4407 ps_scheme
= CONF_PS_SCHEME_UPSD_TRIGGER
;
4409 ps_scheme
= CONF_PS_SCHEME_LEGACY
;
4411 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
4414 ret
= wl1271_ps_elp_wakeup(wl
);
4419 * the txop is confed in units of 32us by the mac80211,
4422 ret
= wl1271_acx_ac_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4423 params
->cw_min
, params
->cw_max
,
4424 params
->aifs
, params
->txop
<< 5);
4428 ret
= wl1271_acx_tid_cfg(wl
, wlvif
, wl1271_tx_get_queue(queue
),
4429 CONF_CHANNEL_TYPE_EDCF
,
4430 wl1271_tx_get_queue(queue
),
4431 ps_scheme
, CONF_ACK_POLICY_LEGACY
,
4435 wl1271_ps_elp_sleep(wl
);
4438 mutex_unlock(&wl
->mutex
);
4443 static u64
wl1271_op_get_tsf(struct ieee80211_hw
*hw
,
4444 struct ieee80211_vif
*vif
)
4447 struct wl1271
*wl
= hw
->priv
;
4448 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4449 u64 mactime
= ULLONG_MAX
;
4452 wl1271_debug(DEBUG_MAC80211
, "mac80211 get tsf");
4454 mutex_lock(&wl
->mutex
);
4456 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4459 ret
= wl1271_ps_elp_wakeup(wl
);
4463 ret
= wl12xx_acx_tsf_info(wl
, wlvif
, &mactime
);
4468 wl1271_ps_elp_sleep(wl
);
4471 mutex_unlock(&wl
->mutex
);
4475 static int wl1271_op_get_survey(struct ieee80211_hw
*hw
, int idx
,
4476 struct survey_info
*survey
)
4478 struct ieee80211_conf
*conf
= &hw
->conf
;
4483 survey
->channel
= conf
->channel
;
4488 static int wl1271_allocate_sta(struct wl1271
*wl
,
4489 struct wl12xx_vif
*wlvif
,
4490 struct ieee80211_sta
*sta
)
4492 struct wl1271_station
*wl_sta
;
4496 if (wl
->active_sta_count
>= AP_MAX_STATIONS
) {
4497 wl1271_warning("could not allocate HLID - too much stations");
4501 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4502 ret
= wl12xx_allocate_link(wl
, wlvif
, &wl_sta
->hlid
);
4504 wl1271_warning("could not allocate HLID - too many links");
4508 set_bit(wl_sta
->hlid
, wlvif
->ap
.sta_hlid_map
);
4509 memcpy(wl
->links
[wl_sta
->hlid
].addr
, sta
->addr
, ETH_ALEN
);
4510 wl
->active_sta_count
++;
4514 void wl1271_free_sta(struct wl1271
*wl
, struct wl12xx_vif
*wlvif
, u8 hlid
)
4516 if (!test_bit(hlid
, wlvif
->ap
.sta_hlid_map
))
4519 clear_bit(hlid
, wlvif
->ap
.sta_hlid_map
);
4520 __clear_bit(hlid
, &wl
->ap_ps_map
);
4521 __clear_bit(hlid
, (unsigned long *)&wl
->ap_fw_ps_map
);
4522 wl12xx_free_link(wl
, wlvif
, &hlid
);
4523 wl
->active_sta_count
--;
4526 * rearm the tx watchdog when the last STA is freed - give the FW a
4527 * chance to return STA-buffered packets before complaining.
4529 if (wl
->active_sta_count
== 0)
4530 wl12xx_rearm_tx_watchdog_locked(wl
);
4533 static int wl12xx_sta_add(struct wl1271
*wl
,
4534 struct wl12xx_vif
*wlvif
,
4535 struct ieee80211_sta
*sta
)
4537 struct wl1271_station
*wl_sta
;
4541 wl1271_debug(DEBUG_MAC80211
, "mac80211 add sta %d", (int)sta
->aid
);
4543 ret
= wl1271_allocate_sta(wl
, wlvif
, sta
);
4547 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4548 hlid
= wl_sta
->hlid
;
4550 ret
= wl12xx_cmd_add_peer(wl
, wlvif
, sta
, hlid
);
4552 wl1271_free_sta(wl
, wlvif
, hlid
);
4557 static int wl12xx_sta_remove(struct wl1271
*wl
,
4558 struct wl12xx_vif
*wlvif
,
4559 struct ieee80211_sta
*sta
)
4561 struct wl1271_station
*wl_sta
;
4564 wl1271_debug(DEBUG_MAC80211
, "mac80211 remove sta %d", (int)sta
->aid
);
4566 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4568 if (WARN_ON(!test_bit(id
, wlvif
->ap
.sta_hlid_map
)))
4571 ret
= wl12xx_cmd_remove_peer(wl
, wl_sta
->hlid
);
4575 wl1271_free_sta(wl
, wlvif
, wl_sta
->hlid
);
4579 static void wlcore_roc_if_possible(struct wl1271
*wl
,
4580 struct wl12xx_vif
*wlvif
)
4582 if (find_first_bit(wl
->roc_map
,
4583 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)
4586 if (WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
))
4589 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
, wlvif
->band
, wlvif
->channel
);
4592 static void wlcore_update_inconn_sta(struct wl1271
*wl
,
4593 struct wl12xx_vif
*wlvif
,
4594 struct wl1271_station
*wl_sta
,
4597 if (in_connection
) {
4598 if (WARN_ON(wl_sta
->in_connection
))
4600 wl_sta
->in_connection
= true;
4601 if (!wlvif
->inconn_count
++)
4602 wlcore_roc_if_possible(wl
, wlvif
);
4604 if (!wl_sta
->in_connection
)
4607 wl_sta
->in_connection
= false;
4608 wlvif
->inconn_count
--;
4609 if (WARN_ON(wlvif
->inconn_count
< 0))
4612 if (!wlvif
->inconn_count
)
4613 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4614 wl12xx_croc(wl
, wlvif
->role_id
);
4618 static int wl12xx_update_sta_state(struct wl1271
*wl
,
4619 struct wl12xx_vif
*wlvif
,
4620 struct ieee80211_sta
*sta
,
4621 enum ieee80211_sta_state old_state
,
4622 enum ieee80211_sta_state new_state
)
4624 struct wl1271_station
*wl_sta
;
4626 bool is_ap
= wlvif
->bss_type
== BSS_TYPE_AP_BSS
;
4627 bool is_sta
= wlvif
->bss_type
== BSS_TYPE_STA_BSS
;
4630 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4631 hlid
= wl_sta
->hlid
;
4633 /* Add station (AP mode) */
4635 old_state
== IEEE80211_STA_NOTEXIST
&&
4636 new_state
== IEEE80211_STA_NONE
) {
4637 ret
= wl12xx_sta_add(wl
, wlvif
, sta
);
4641 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, true);
4644 /* Remove station (AP mode) */
4646 old_state
== IEEE80211_STA_NONE
&&
4647 new_state
== IEEE80211_STA_NOTEXIST
) {
4649 wl12xx_sta_remove(wl
, wlvif
, sta
);
4651 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4654 /* Authorize station (AP mode) */
4656 new_state
== IEEE80211_STA_AUTHORIZED
) {
4657 ret
= wl12xx_cmd_set_peer_state(wl
, wlvif
, hlid
);
4661 ret
= wl1271_acx_set_ht_capabilities(wl
, &sta
->ht_cap
, true,
4666 wlcore_update_inconn_sta(wl
, wlvif
, wl_sta
, false);
4669 /* Authorize station */
4671 new_state
== IEEE80211_STA_AUTHORIZED
) {
4672 set_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4673 ret
= wl12xx_set_authorized(wl
, wlvif
);
4679 old_state
== IEEE80211_STA_AUTHORIZED
&&
4680 new_state
== IEEE80211_STA_ASSOC
) {
4681 clear_bit(WLVIF_FLAG_STA_AUTHORIZED
, &wlvif
->flags
);
4682 clear_bit(WLVIF_FLAG_STA_STATE_SENT
, &wlvif
->flags
);
4685 /* clear ROCs on failure or authorization */
4687 (new_state
== IEEE80211_STA_AUTHORIZED
||
4688 new_state
== IEEE80211_STA_NOTEXIST
)) {
4689 if (test_bit(wlvif
->role_id
, wl
->roc_map
))
4690 wl12xx_croc(wl
, wlvif
->role_id
);
4694 old_state
== IEEE80211_STA_NOTEXIST
&&
4695 new_state
== IEEE80211_STA_NONE
) {
4696 if (find_first_bit(wl
->roc_map
,
4697 WL12XX_MAX_ROLES
) >= WL12XX_MAX_ROLES
) {
4698 WARN_ON(wlvif
->role_id
== WL12XX_INVALID_ROLE_ID
);
4699 wl12xx_roc(wl
, wlvif
, wlvif
->role_id
,
4700 wlvif
->band
, wlvif
->channel
);
4706 static int wl12xx_op_sta_state(struct ieee80211_hw
*hw
,
4707 struct ieee80211_vif
*vif
,
4708 struct ieee80211_sta
*sta
,
4709 enum ieee80211_sta_state old_state
,
4710 enum ieee80211_sta_state new_state
)
4712 struct wl1271
*wl
= hw
->priv
;
4713 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4716 wl1271_debug(DEBUG_MAC80211
, "mac80211 sta %d state=%d->%d",
4717 sta
->aid
, old_state
, new_state
);
4719 mutex_lock(&wl
->mutex
);
4721 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4726 ret
= wl1271_ps_elp_wakeup(wl
);
4730 ret
= wl12xx_update_sta_state(wl
, wlvif
, sta
, old_state
, new_state
);
4732 wl1271_ps_elp_sleep(wl
);
4734 mutex_unlock(&wl
->mutex
);
4735 if (new_state
< old_state
)
4740 static int wl1271_op_ampdu_action(struct ieee80211_hw
*hw
,
4741 struct ieee80211_vif
*vif
,
4742 enum ieee80211_ampdu_mlme_action action
,
4743 struct ieee80211_sta
*sta
, u16 tid
, u16
*ssn
,
4746 struct wl1271
*wl
= hw
->priv
;
4747 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4749 u8 hlid
, *ba_bitmap
;
4751 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu action %d tid %d", action
,
4754 /* sanity check - the fields in FW are only 8bits wide */
4755 if (WARN_ON(tid
> 0xFF))
4758 mutex_lock(&wl
->mutex
);
4760 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4765 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
) {
4766 hlid
= wlvif
->sta
.hlid
;
4767 } else if (wlvif
->bss_type
== BSS_TYPE_AP_BSS
) {
4768 struct wl1271_station
*wl_sta
;
4770 wl_sta
= (struct wl1271_station
*)sta
->drv_priv
;
4771 hlid
= wl_sta
->hlid
;
4777 ba_bitmap
= &wl
->links
[hlid
].ba_bitmap
;
4779 ret
= wl1271_ps_elp_wakeup(wl
);
4783 wl1271_debug(DEBUG_MAC80211
, "mac80211 ampdu: Rx tid %d action %d",
4787 case IEEE80211_AMPDU_RX_START
:
4788 if (!wlvif
->ba_support
|| !wlvif
->ba_allowed
) {
4793 if (wl
->ba_rx_session_count
>= RX_BA_MAX_SESSIONS
) {
4795 wl1271_error("exceeded max RX BA sessions");
4799 if (*ba_bitmap
& BIT(tid
)) {
4801 wl1271_error("cannot enable RX BA session on active "
4806 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, *ssn
, true,
4809 *ba_bitmap
|= BIT(tid
);
4810 wl
->ba_rx_session_count
++;
4814 case IEEE80211_AMPDU_RX_STOP
:
4815 if (!(*ba_bitmap
& BIT(tid
))) {
4817 * this happens on reconfig - so only output a debug
4818 * message for now, and don't fail the function.
4820 wl1271_debug(DEBUG_MAC80211
,
4821 "no active RX BA session on tid: %d",
4827 ret
= wl12xx_acx_set_ba_receiver_session(wl
, tid
, 0, false,
4830 *ba_bitmap
&= ~BIT(tid
);
4831 wl
->ba_rx_session_count
--;
4836 * The BA initiator session management in FW independently.
4837 * Falling break here on purpose for all TX APDU commands.
4839 case IEEE80211_AMPDU_TX_START
:
4840 case IEEE80211_AMPDU_TX_STOP_CONT
:
4841 case IEEE80211_AMPDU_TX_STOP_FLUSH
:
4842 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT
:
4843 case IEEE80211_AMPDU_TX_OPERATIONAL
:
4848 wl1271_error("Incorrect ampdu action id=%x\n", action
);
4852 wl1271_ps_elp_sleep(wl
);
4855 mutex_unlock(&wl
->mutex
);
4860 static int wl12xx_set_bitrate_mask(struct ieee80211_hw
*hw
,
4861 struct ieee80211_vif
*vif
,
4862 const struct cfg80211_bitrate_mask
*mask
)
4864 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4865 struct wl1271
*wl
= hw
->priv
;
4868 wl1271_debug(DEBUG_MAC80211
, "mac80211 set_bitrate_mask 0x%x 0x%x",
4869 mask
->control
[NL80211_BAND_2GHZ
].legacy
,
4870 mask
->control
[NL80211_BAND_5GHZ
].legacy
);
4872 mutex_lock(&wl
->mutex
);
4874 for (i
= 0; i
< WLCORE_NUM_BANDS
; i
++)
4875 wlvif
->bitrate_masks
[i
] =
4876 wl1271_tx_enabled_rates_get(wl
,
4877 mask
->control
[i
].legacy
,
4880 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4883 if (wlvif
->bss_type
== BSS_TYPE_STA_BSS
&&
4884 !test_bit(WLVIF_FLAG_STA_ASSOCIATED
, &wlvif
->flags
)) {
4886 ret
= wl1271_ps_elp_wakeup(wl
);
4890 wl1271_set_band_rate(wl
, wlvif
);
4892 wl1271_tx_min_rate_get(wl
, wlvif
->basic_rate_set
);
4893 ret
= wl1271_acx_sta_rate_policies(wl
, wlvif
);
4895 wl1271_ps_elp_sleep(wl
);
4898 mutex_unlock(&wl
->mutex
);
4903 static void wl12xx_op_channel_switch(struct ieee80211_hw
*hw
,
4904 struct ieee80211_channel_switch
*ch_switch
)
4906 struct wl1271
*wl
= hw
->priv
;
4907 struct wl12xx_vif
*wlvif
;
4910 wl1271_debug(DEBUG_MAC80211
, "mac80211 channel switch");
4912 wl1271_tx_flush(wl
);
4914 mutex_lock(&wl
->mutex
);
4916 if (unlikely(wl
->state
== WLCORE_STATE_OFF
)) {
4917 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4918 struct ieee80211_vif
*vif
= wl12xx_wlvif_to_vif(wlvif
);
4919 ieee80211_chswitch_done(vif
, false);
4922 } else if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
4926 ret
= wl1271_ps_elp_wakeup(wl
);
4930 /* TODO: change mac80211 to pass vif as param */
4931 wl12xx_for_each_wlvif_sta(wl
, wlvif
) {
4932 unsigned long delay_usec
;
4934 ret
= wl
->ops
->channel_switch(wl
, wlvif
, ch_switch
);
4938 set_bit(WLVIF_FLAG_CS_PROGRESS
, &wlvif
->flags
);
4940 /* indicate failure 5 seconds after channel switch time */
4941 delay_usec
= ieee80211_tu_to_usec(wlvif
->beacon_int
) *
4943 ieee80211_queue_delayed_work(hw
, &wlvif
->channel_switch_work
,
4944 usecs_to_jiffies(delay_usec
) +
4945 msecs_to_jiffies(5000));
4949 wl1271_ps_elp_sleep(wl
);
4952 mutex_unlock(&wl
->mutex
);
4955 static void wlcore_op_flush(struct ieee80211_hw
*hw
, bool drop
)
4957 struct wl1271
*wl
= hw
->priv
;
4959 wl1271_tx_flush(wl
);
4962 static int wlcore_op_remain_on_channel(struct ieee80211_hw
*hw
,
4963 struct ieee80211_vif
*vif
,
4964 struct ieee80211_channel
*chan
,
4967 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
4968 struct wl1271
*wl
= hw
->priv
;
4969 int channel
, ret
= 0;
4971 channel
= ieee80211_frequency_to_channel(chan
->center_freq
);
4973 wl1271_debug(DEBUG_MAC80211
, "mac80211 roc %d (%d)",
4974 channel
, wlvif
->role_id
);
4976 mutex_lock(&wl
->mutex
);
4978 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
4981 /* return EBUSY if we can't ROC right now */
4982 if (WARN_ON(wl
->roc_vif
||
4983 find_first_bit(wl
->roc_map
,
4984 WL12XX_MAX_ROLES
) < WL12XX_MAX_ROLES
)) {
4989 ret
= wl1271_ps_elp_wakeup(wl
);
4993 ret
= wl12xx_start_dev(wl
, wlvif
, chan
->band
, channel
);
4998 ieee80211_queue_delayed_work(hw
, &wl
->roc_complete_work
,
4999 msecs_to_jiffies(duration
));
5001 wl1271_ps_elp_sleep(wl
);
5003 mutex_unlock(&wl
->mutex
);
5007 static int __wlcore_roc_completed(struct wl1271
*wl
)
5009 struct wl12xx_vif
*wlvif
;
5012 /* already completed */
5013 if (unlikely(!wl
->roc_vif
))
5016 wlvif
= wl12xx_vif_to_data(wl
->roc_vif
);
5018 if (!test_bit(WLVIF_FLAG_INITIALIZED
, &wlvif
->flags
))
5021 ret
= wl12xx_stop_dev(wl
, wlvif
);
5030 static int wlcore_roc_completed(struct wl1271
*wl
)
5034 wl1271_debug(DEBUG_MAC80211
, "roc complete");
5036 mutex_lock(&wl
->mutex
);
5038 if (unlikely(wl
->state
!= WLCORE_STATE_ON
)) {
5043 ret
= wl1271_ps_elp_wakeup(wl
);
5047 ret
= __wlcore_roc_completed(wl
);
5049 wl1271_ps_elp_sleep(wl
);
5051 mutex_unlock(&wl
->mutex
);
5056 static void wlcore_roc_complete_work(struct work_struct
*work
)
5058 struct delayed_work
*dwork
;
5062 dwork
= container_of(work
, struct delayed_work
, work
);
5063 wl
= container_of(dwork
, struct wl1271
, roc_complete_work
);
5065 ret
= wlcore_roc_completed(wl
);
5067 ieee80211_remain_on_channel_expired(wl
->hw
);
5070 static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw
*hw
)
5072 struct wl1271
*wl
= hw
->priv
;
5074 wl1271_debug(DEBUG_MAC80211
, "mac80211 croc");
5077 wl1271_tx_flush(wl
);
5080 * we can't just flush_work here, because it might deadlock
5081 * (as we might get called from the same workqueue)
5083 cancel_delayed_work_sync(&wl
->roc_complete_work
);
5084 wlcore_roc_completed(wl
);
5089 static void wlcore_op_sta_rc_update(struct ieee80211_hw
*hw
,
5090 struct ieee80211_vif
*vif
,
5091 struct ieee80211_sta
*sta
,
5094 struct wl12xx_vif
*wlvif
= wl12xx_vif_to_data(vif
);
5095 struct wl1271
*wl
= hw
->priv
;
5097 wlcore_hw_sta_rc_update(wl
, wlvif
, sta
, changed
);
5100 static bool wl1271_tx_frames_pending(struct ieee80211_hw
*hw
)
5102 struct wl1271
*wl
= hw
->priv
;
5105 mutex_lock(&wl
->mutex
);
5107 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5110 /* packets are considered pending if in the TX queue or the FW */
5111 ret
= (wl1271_tx_total_queue_count(wl
) > 0) || (wl
->tx_frames_cnt
> 0);
5113 mutex_unlock(&wl
->mutex
);
5118 /* can't be const, mac80211 writes to this */
5119 static struct ieee80211_rate wl1271_rates
[] = {
5121 .hw_value
= CONF_HW_BIT_RATE_1MBPS
,
5122 .hw_value_short
= CONF_HW_BIT_RATE_1MBPS
, },
5124 .hw_value
= CONF_HW_BIT_RATE_2MBPS
,
5125 .hw_value_short
= CONF_HW_BIT_RATE_2MBPS
,
5126 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5128 .hw_value
= CONF_HW_BIT_RATE_5_5MBPS
,
5129 .hw_value_short
= CONF_HW_BIT_RATE_5_5MBPS
,
5130 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5132 .hw_value
= CONF_HW_BIT_RATE_11MBPS
,
5133 .hw_value_short
= CONF_HW_BIT_RATE_11MBPS
,
5134 .flags
= IEEE80211_RATE_SHORT_PREAMBLE
},
5136 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5137 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5139 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5140 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5142 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5143 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5145 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5146 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5148 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5149 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5151 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5152 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5154 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5155 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5157 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5158 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5161 /* can't be const, mac80211 writes to this */
5162 static struct ieee80211_channel wl1271_channels
[] = {
5163 { .hw_value
= 1, .center_freq
= 2412, .max_power
= WLCORE_MAX_TXPWR
},
5164 { .hw_value
= 2, .center_freq
= 2417, .max_power
= WLCORE_MAX_TXPWR
},
5165 { .hw_value
= 3, .center_freq
= 2422, .max_power
= WLCORE_MAX_TXPWR
},
5166 { .hw_value
= 4, .center_freq
= 2427, .max_power
= WLCORE_MAX_TXPWR
},
5167 { .hw_value
= 5, .center_freq
= 2432, .max_power
= WLCORE_MAX_TXPWR
},
5168 { .hw_value
= 6, .center_freq
= 2437, .max_power
= WLCORE_MAX_TXPWR
},
5169 { .hw_value
= 7, .center_freq
= 2442, .max_power
= WLCORE_MAX_TXPWR
},
5170 { .hw_value
= 8, .center_freq
= 2447, .max_power
= WLCORE_MAX_TXPWR
},
5171 { .hw_value
= 9, .center_freq
= 2452, .max_power
= WLCORE_MAX_TXPWR
},
5172 { .hw_value
= 10, .center_freq
= 2457, .max_power
= WLCORE_MAX_TXPWR
},
5173 { .hw_value
= 11, .center_freq
= 2462, .max_power
= WLCORE_MAX_TXPWR
},
5174 { .hw_value
= 12, .center_freq
= 2467, .max_power
= WLCORE_MAX_TXPWR
},
5175 { .hw_value
= 13, .center_freq
= 2472, .max_power
= WLCORE_MAX_TXPWR
},
5176 { .hw_value
= 14, .center_freq
= 2484, .max_power
= WLCORE_MAX_TXPWR
},
5179 /* can't be const, mac80211 writes to this */
5180 static struct ieee80211_supported_band wl1271_band_2ghz
= {
5181 .channels
= wl1271_channels
,
5182 .n_channels
= ARRAY_SIZE(wl1271_channels
),
5183 .bitrates
= wl1271_rates
,
5184 .n_bitrates
= ARRAY_SIZE(wl1271_rates
),
5187 /* 5 GHz data rates for WL1273 */
5188 static struct ieee80211_rate wl1271_rates_5ghz
[] = {
5190 .hw_value
= CONF_HW_BIT_RATE_6MBPS
,
5191 .hw_value_short
= CONF_HW_BIT_RATE_6MBPS
, },
5193 .hw_value
= CONF_HW_BIT_RATE_9MBPS
,
5194 .hw_value_short
= CONF_HW_BIT_RATE_9MBPS
, },
5196 .hw_value
= CONF_HW_BIT_RATE_12MBPS
,
5197 .hw_value_short
= CONF_HW_BIT_RATE_12MBPS
, },
5199 .hw_value
= CONF_HW_BIT_RATE_18MBPS
,
5200 .hw_value_short
= CONF_HW_BIT_RATE_18MBPS
, },
5202 .hw_value
= CONF_HW_BIT_RATE_24MBPS
,
5203 .hw_value_short
= CONF_HW_BIT_RATE_24MBPS
, },
5205 .hw_value
= CONF_HW_BIT_RATE_36MBPS
,
5206 .hw_value_short
= CONF_HW_BIT_RATE_36MBPS
, },
5208 .hw_value
= CONF_HW_BIT_RATE_48MBPS
,
5209 .hw_value_short
= CONF_HW_BIT_RATE_48MBPS
, },
5211 .hw_value
= CONF_HW_BIT_RATE_54MBPS
,
5212 .hw_value_short
= CONF_HW_BIT_RATE_54MBPS
, },
5215 /* 5 GHz band channels for WL1273 */
5216 static struct ieee80211_channel wl1271_channels_5ghz
[] = {
5217 { .hw_value
= 7, .center_freq
= 5035, .max_power
= WLCORE_MAX_TXPWR
},
5218 { .hw_value
= 8, .center_freq
= 5040, .max_power
= WLCORE_MAX_TXPWR
},
5219 { .hw_value
= 9, .center_freq
= 5045, .max_power
= WLCORE_MAX_TXPWR
},
5220 { .hw_value
= 11, .center_freq
= 5055, .max_power
= WLCORE_MAX_TXPWR
},
5221 { .hw_value
= 12, .center_freq
= 5060, .max_power
= WLCORE_MAX_TXPWR
},
5222 { .hw_value
= 16, .center_freq
= 5080, .max_power
= WLCORE_MAX_TXPWR
},
5223 { .hw_value
= 34, .center_freq
= 5170, .max_power
= WLCORE_MAX_TXPWR
},
5224 { .hw_value
= 36, .center_freq
= 5180, .max_power
= WLCORE_MAX_TXPWR
},
5225 { .hw_value
= 38, .center_freq
= 5190, .max_power
= WLCORE_MAX_TXPWR
},
5226 { .hw_value
= 40, .center_freq
= 5200, .max_power
= WLCORE_MAX_TXPWR
},
5227 { .hw_value
= 42, .center_freq
= 5210, .max_power
= WLCORE_MAX_TXPWR
},
5228 { .hw_value
= 44, .center_freq
= 5220, .max_power
= WLCORE_MAX_TXPWR
},
5229 { .hw_value
= 46, .center_freq
= 5230, .max_power
= WLCORE_MAX_TXPWR
},
5230 { .hw_value
= 48, .center_freq
= 5240, .max_power
= WLCORE_MAX_TXPWR
},
5231 { .hw_value
= 52, .center_freq
= 5260, .max_power
= WLCORE_MAX_TXPWR
},
5232 { .hw_value
= 56, .center_freq
= 5280, .max_power
= WLCORE_MAX_TXPWR
},
5233 { .hw_value
= 60, .center_freq
= 5300, .max_power
= WLCORE_MAX_TXPWR
},
5234 { .hw_value
= 64, .center_freq
= 5320, .max_power
= WLCORE_MAX_TXPWR
},
5235 { .hw_value
= 100, .center_freq
= 5500, .max_power
= WLCORE_MAX_TXPWR
},
5236 { .hw_value
= 104, .center_freq
= 5520, .max_power
= WLCORE_MAX_TXPWR
},
5237 { .hw_value
= 108, .center_freq
= 5540, .max_power
= WLCORE_MAX_TXPWR
},
5238 { .hw_value
= 112, .center_freq
= 5560, .max_power
= WLCORE_MAX_TXPWR
},
5239 { .hw_value
= 116, .center_freq
= 5580, .max_power
= WLCORE_MAX_TXPWR
},
5240 { .hw_value
= 120, .center_freq
= 5600, .max_power
= WLCORE_MAX_TXPWR
},
5241 { .hw_value
= 124, .center_freq
= 5620, .max_power
= WLCORE_MAX_TXPWR
},
5242 { .hw_value
= 128, .center_freq
= 5640, .max_power
= WLCORE_MAX_TXPWR
},
5243 { .hw_value
= 132, .center_freq
= 5660, .max_power
= WLCORE_MAX_TXPWR
},
5244 { .hw_value
= 136, .center_freq
= 5680, .max_power
= WLCORE_MAX_TXPWR
},
5245 { .hw_value
= 140, .center_freq
= 5700, .max_power
= WLCORE_MAX_TXPWR
},
5246 { .hw_value
= 149, .center_freq
= 5745, .max_power
= WLCORE_MAX_TXPWR
},
5247 { .hw_value
= 153, .center_freq
= 5765, .max_power
= WLCORE_MAX_TXPWR
},
5248 { .hw_value
= 157, .center_freq
= 5785, .max_power
= WLCORE_MAX_TXPWR
},
5249 { .hw_value
= 161, .center_freq
= 5805, .max_power
= WLCORE_MAX_TXPWR
},
5250 { .hw_value
= 165, .center_freq
= 5825, .max_power
= WLCORE_MAX_TXPWR
},
5253 static struct ieee80211_supported_band wl1271_band_5ghz
= {
5254 .channels
= wl1271_channels_5ghz
,
5255 .n_channels
= ARRAY_SIZE(wl1271_channels_5ghz
),
5256 .bitrates
= wl1271_rates_5ghz
,
5257 .n_bitrates
= ARRAY_SIZE(wl1271_rates_5ghz
),
5260 static const struct ieee80211_ops wl1271_ops
= {
5261 .start
= wl1271_op_start
,
5262 .stop
= wlcore_op_stop
,
5263 .add_interface
= wl1271_op_add_interface
,
5264 .remove_interface
= wl1271_op_remove_interface
,
5265 .change_interface
= wl12xx_op_change_interface
,
5267 .suspend
= wl1271_op_suspend
,
5268 .resume
= wl1271_op_resume
,
5270 .config
= wl1271_op_config
,
5271 .prepare_multicast
= wl1271_op_prepare_multicast
,
5272 .configure_filter
= wl1271_op_configure_filter
,
5274 .set_key
= wlcore_op_set_key
,
5275 .hw_scan
= wl1271_op_hw_scan
,
5276 .cancel_hw_scan
= wl1271_op_cancel_hw_scan
,
5277 .sched_scan_start
= wl1271_op_sched_scan_start
,
5278 .sched_scan_stop
= wl1271_op_sched_scan_stop
,
5279 .bss_info_changed
= wl1271_op_bss_info_changed
,
5280 .set_frag_threshold
= wl1271_op_set_frag_threshold
,
5281 .set_rts_threshold
= wl1271_op_set_rts_threshold
,
5282 .conf_tx
= wl1271_op_conf_tx
,
5283 .get_tsf
= wl1271_op_get_tsf
,
5284 .get_survey
= wl1271_op_get_survey
,
5285 .sta_state
= wl12xx_op_sta_state
,
5286 .ampdu_action
= wl1271_op_ampdu_action
,
5287 .tx_frames_pending
= wl1271_tx_frames_pending
,
5288 .set_bitrate_mask
= wl12xx_set_bitrate_mask
,
5289 .channel_switch
= wl12xx_op_channel_switch
,
5290 .flush
= wlcore_op_flush
,
5291 .remain_on_channel
= wlcore_op_remain_on_channel
,
5292 .cancel_remain_on_channel
= wlcore_op_cancel_remain_on_channel
,
5293 .add_chanctx
= wlcore_op_add_chanctx
,
5294 .remove_chanctx
= wlcore_op_remove_chanctx
,
5295 .change_chanctx
= wlcore_op_change_chanctx
,
5296 .assign_vif_chanctx
= wlcore_op_assign_vif_chanctx
,
5297 .unassign_vif_chanctx
= wlcore_op_unassign_vif_chanctx
,
5298 .sta_rc_update
= wlcore_op_sta_rc_update
,
5299 CFG80211_TESTMODE_CMD(wl1271_tm_cmd
)
5303 u8
wlcore_rate_to_idx(struct wl1271
*wl
, u8 rate
, enum ieee80211_band band
)
5309 if (unlikely(rate
>= wl
->hw_tx_rate_tbl_size
)) {
5310 wl1271_error("Illegal RX rate from HW: %d", rate
);
5314 idx
= wl
->band_rate_to_idx
[band
][rate
];
5315 if (unlikely(idx
== CONF_HW_RXTX_RATE_UNSUPPORTED
)) {
5316 wl1271_error("Unsupported RX rate from HW: %d", rate
);
5323 static ssize_t
wl1271_sysfs_show_bt_coex_state(struct device
*dev
,
5324 struct device_attribute
*attr
,
5327 struct wl1271
*wl
= dev_get_drvdata(dev
);
5332 mutex_lock(&wl
->mutex
);
5333 len
= snprintf(buf
, len
, "%d\n\n0 - off\n1 - on\n",
5335 mutex_unlock(&wl
->mutex
);
5341 static ssize_t
wl1271_sysfs_store_bt_coex_state(struct device
*dev
,
5342 struct device_attribute
*attr
,
5343 const char *buf
, size_t count
)
5345 struct wl1271
*wl
= dev_get_drvdata(dev
);
5349 ret
= kstrtoul(buf
, 10, &res
);
5351 wl1271_warning("incorrect value written to bt_coex_mode");
5355 mutex_lock(&wl
->mutex
);
5359 if (res
== wl
->sg_enabled
)
5362 wl
->sg_enabled
= res
;
5364 if (unlikely(wl
->state
!= WLCORE_STATE_ON
))
5367 ret
= wl1271_ps_elp_wakeup(wl
);
5371 wl1271_acx_sg_enable(wl
, wl
->sg_enabled
);
5372 wl1271_ps_elp_sleep(wl
);
5375 mutex_unlock(&wl
->mutex
);
5379 static DEVICE_ATTR(bt_coex_state
, S_IRUGO
| S_IWUSR
,
5380 wl1271_sysfs_show_bt_coex_state
,
5381 wl1271_sysfs_store_bt_coex_state
);
5383 static ssize_t
wl1271_sysfs_show_hw_pg_ver(struct device
*dev
,
5384 struct device_attribute
*attr
,
5387 struct wl1271
*wl
= dev_get_drvdata(dev
);
5392 mutex_lock(&wl
->mutex
);
5393 if (wl
->hw_pg_ver
>= 0)
5394 len
= snprintf(buf
, len
, "%d\n", wl
->hw_pg_ver
);
5396 len
= snprintf(buf
, len
, "n/a\n");
5397 mutex_unlock(&wl
->mutex
);
5402 static DEVICE_ATTR(hw_pg_ver
, S_IRUGO
,
5403 wl1271_sysfs_show_hw_pg_ver
, NULL
);
5405 static ssize_t
wl1271_sysfs_read_fwlog(struct file
*filp
, struct kobject
*kobj
,
5406 struct bin_attribute
*bin_attr
,
5407 char *buffer
, loff_t pos
, size_t count
)
5409 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
5410 struct wl1271
*wl
= dev_get_drvdata(dev
);
5414 ret
= mutex_lock_interruptible(&wl
->mutex
);
5416 return -ERESTARTSYS
;
5418 /* Let only one thread read the log at a time, blocking others */
5419 while (wl
->fwlog_size
== 0) {
5422 prepare_to_wait_exclusive(&wl
->fwlog_waitq
,
5424 TASK_INTERRUPTIBLE
);
5426 if (wl
->fwlog_size
!= 0) {
5427 finish_wait(&wl
->fwlog_waitq
, &wait
);
5431 mutex_unlock(&wl
->mutex
);
5434 finish_wait(&wl
->fwlog_waitq
, &wait
);
5436 if (signal_pending(current
))
5437 return -ERESTARTSYS
;
5439 ret
= mutex_lock_interruptible(&wl
->mutex
);
5441 return -ERESTARTSYS
;
5444 /* Check if the fwlog is still valid */
5445 if (wl
->fwlog_size
< 0) {
5446 mutex_unlock(&wl
->mutex
);
5450 /* Seeking is not supported - old logs are not kept. Disregard pos. */
5451 len
= min(count
, (size_t)wl
->fwlog_size
);
5452 wl
->fwlog_size
-= len
;
5453 memcpy(buffer
, wl
->fwlog
, len
);
5455 /* Make room for new messages */
5456 memmove(wl
->fwlog
, wl
->fwlog
+ len
, wl
->fwlog_size
);
5458 mutex_unlock(&wl
->mutex
);
5463 static struct bin_attribute fwlog_attr
= {
5464 .attr
= {.name
= "fwlog", .mode
= S_IRUSR
},
5465 .read
= wl1271_sysfs_read_fwlog
,
5468 static void wl12xx_derive_mac_addresses(struct wl1271
*wl
, u32 oui
, u32 nic
)
5472 wl1271_debug(DEBUG_PROBE
, "base address: oui %06x nic %06x",
5475 if (nic
+ WLCORE_NUM_MAC_ADDRESSES
- wl
->num_mac_addr
> 0xffffff)
5476 wl1271_warning("NIC part of the MAC address wraps around!");
5478 for (i
= 0; i
< wl
->num_mac_addr
; i
++) {
5479 wl
->addresses
[i
].addr
[0] = (u8
)(oui
>> 16);
5480 wl
->addresses
[i
].addr
[1] = (u8
)(oui
>> 8);
5481 wl
->addresses
[i
].addr
[2] = (u8
) oui
;
5482 wl
->addresses
[i
].addr
[3] = (u8
)(nic
>> 16);
5483 wl
->addresses
[i
].addr
[4] = (u8
)(nic
>> 8);
5484 wl
->addresses
[i
].addr
[5] = (u8
) nic
;
5488 /* we may be one address short at the most */
5489 WARN_ON(wl
->num_mac_addr
+ 1 < WLCORE_NUM_MAC_ADDRESSES
);
5492 * turn on the LAA bit in the first address and use it as
5495 if (wl
->num_mac_addr
< WLCORE_NUM_MAC_ADDRESSES
) {
5496 int idx
= WLCORE_NUM_MAC_ADDRESSES
- 1;
5497 memcpy(&wl
->addresses
[idx
], &wl
->addresses
[0],
5498 sizeof(wl
->addresses
[0]));
5500 wl
->addresses
[idx
].addr
[2] |= BIT(1);
5503 wl
->hw
->wiphy
->n_addresses
= WLCORE_NUM_MAC_ADDRESSES
;
5504 wl
->hw
->wiphy
->addresses
= wl
->addresses
;
5507 static int wl12xx_get_hw_info(struct wl1271
*wl
)
5511 ret
= wl12xx_set_power_on(wl
);
5515 ret
= wlcore_read_reg(wl
, REG_CHIP_ID_B
, &wl
->chip
.id
);
5519 wl
->fuse_oui_addr
= 0;
5520 wl
->fuse_nic_addr
= 0;
5522 ret
= wl
->ops
->get_pg_ver(wl
, &wl
->hw_pg_ver
);
5526 if (wl
->ops
->get_mac
)
5527 ret
= wl
->ops
->get_mac(wl
);
5530 wl1271_power_off(wl
);
5534 static int wl1271_register_hw(struct wl1271
*wl
)
5537 u32 oui_addr
= 0, nic_addr
= 0;
5539 if (wl
->mac80211_registered
)
5542 if (wl
->nvs_len
>= 12) {
5543 /* NOTE: The wl->nvs->nvs element must be first, in
5544 * order to simplify the casting, we assume it is at
5545 * the beginning of the wl->nvs structure.
5547 u8
*nvs_ptr
= (u8
*)wl
->nvs
;
5550 (nvs_ptr
[11] << 16) + (nvs_ptr
[10] << 8) + nvs_ptr
[6];
5552 (nvs_ptr
[5] << 16) + (nvs_ptr
[4] << 8) + nvs_ptr
[3];
5555 /* if the MAC address is zeroed in the NVS derive from fuse */
5556 if (oui_addr
== 0 && nic_addr
== 0) {
5557 oui_addr
= wl
->fuse_oui_addr
;
5558 /* fuse has the BD_ADDR, the WLAN addresses are the next two */
5559 nic_addr
= wl
->fuse_nic_addr
+ 1;
5562 wl12xx_derive_mac_addresses(wl
, oui_addr
, nic_addr
);
5564 ret
= ieee80211_register_hw(wl
->hw
);
5566 wl1271_error("unable to register mac80211 hw: %d", ret
);
5570 wl
->mac80211_registered
= true;
5572 wl1271_debugfs_init(wl
);
5574 wl1271_notice("loaded");
5580 static void wl1271_unregister_hw(struct wl1271
*wl
)
5583 wl1271_plt_stop(wl
);
5585 ieee80211_unregister_hw(wl
->hw
);
5586 wl
->mac80211_registered
= false;
5590 static const struct ieee80211_iface_limit wlcore_iface_limits
[] = {
5593 .types
= BIT(NL80211_IFTYPE_STATION
),
5597 .types
= BIT(NL80211_IFTYPE_AP
) |
5598 BIT(NL80211_IFTYPE_P2P_GO
) |
5599 BIT(NL80211_IFTYPE_P2P_CLIENT
),
5603 static struct ieee80211_iface_combination
5604 wlcore_iface_combinations
[] = {
5606 .max_interfaces
= 3,
5607 .limits
= wlcore_iface_limits
,
5608 .n_limits
= ARRAY_SIZE(wlcore_iface_limits
),
5612 static int wl1271_init_ieee80211(struct wl1271
*wl
)
5615 static const u32 cipher_suites
[] = {
5616 WLAN_CIPHER_SUITE_WEP40
,
5617 WLAN_CIPHER_SUITE_WEP104
,
5618 WLAN_CIPHER_SUITE_TKIP
,
5619 WLAN_CIPHER_SUITE_CCMP
,
5620 WL1271_CIPHER_SUITE_GEM
,
5623 /* The tx descriptor buffer */
5624 wl
->hw
->extra_tx_headroom
= sizeof(struct wl1271_tx_hw_descr
);
5626 if (wl
->quirks
& WLCORE_QUIRK_TKIP_HEADER_SPACE
)
5627 wl
->hw
->extra_tx_headroom
+= WL1271_EXTRA_SPACE_TKIP
;
5630 /* FIXME: find a proper value */
5631 wl
->hw
->channel_change_time
= 10000;
5632 wl
->hw
->max_listen_interval
= wl
->conf
.conn
.max_listen_interval
;
5634 wl
->hw
->flags
= IEEE80211_HW_SIGNAL_DBM
|
5635 IEEE80211_HW_SUPPORTS_PS
|
5636 IEEE80211_HW_SUPPORTS_DYNAMIC_PS
|
5637 IEEE80211_HW_SUPPORTS_UAPSD
|
5638 IEEE80211_HW_HAS_RATE_CONTROL
|
5639 IEEE80211_HW_CONNECTION_MONITOR
|
5640 IEEE80211_HW_REPORTS_TX_ACK_STATUS
|
5641 IEEE80211_HW_SPECTRUM_MGMT
|
5642 IEEE80211_HW_AP_LINK_PS
|
5643 IEEE80211_HW_AMPDU_AGGREGATION
|
5644 IEEE80211_HW_TX_AMPDU_SETUP_IN_HW
|
5645 IEEE80211_HW_SCAN_WHILE_IDLE
|
5646 IEEE80211_HW_QUEUE_CONTROL
;
5648 wl
->hw
->wiphy
->cipher_suites
= cipher_suites
;
5649 wl
->hw
->wiphy
->n_cipher_suites
= ARRAY_SIZE(cipher_suites
);
5651 wl
->hw
->wiphy
->interface_modes
= BIT(NL80211_IFTYPE_STATION
) |
5652 BIT(NL80211_IFTYPE_ADHOC
) | BIT(NL80211_IFTYPE_AP
) |
5653 BIT(NL80211_IFTYPE_P2P_CLIENT
) | BIT(NL80211_IFTYPE_P2P_GO
);
5654 wl
->hw
->wiphy
->max_scan_ssids
= 1;
5655 wl
->hw
->wiphy
->max_sched_scan_ssids
= 16;
5656 wl
->hw
->wiphy
->max_match_sets
= 16;
5658 * Maximum length of elements in scanning probe request templates
5659 * should be the maximum length possible for a template, without
5660 * the IEEE80211 header of the template
5662 wl
->hw
->wiphy
->max_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5663 sizeof(struct ieee80211_header
);
5665 wl
->hw
->wiphy
->max_sched_scan_ie_len
= WL1271_CMD_TEMPL_MAX_SIZE
-
5666 sizeof(struct ieee80211_header
);
5668 wl
->hw
->wiphy
->max_remain_on_channel_duration
= 5000;
5670 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_UAPSD
|
5671 WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL
;
5673 /* make sure all our channels fit in the scanned_ch bitmask */
5674 BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels
) +
5675 ARRAY_SIZE(wl1271_channels_5ghz
) >
5676 WL1271_MAX_CHANNELS
);
5678 * clear channel flags from the previous usage
5679 * and restore max_power & max_antenna_gain values.
5681 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels
); i
++) {
5682 wl1271_band_2ghz
.channels
[i
].flags
= 0;
5683 wl1271_band_2ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5684 wl1271_band_2ghz
.channels
[i
].max_antenna_gain
= 0;
5687 for (i
= 0; i
< ARRAY_SIZE(wl1271_channels_5ghz
); i
++) {
5688 wl1271_band_5ghz
.channels
[i
].flags
= 0;
5689 wl1271_band_5ghz
.channels
[i
].max_power
= WLCORE_MAX_TXPWR
;
5690 wl1271_band_5ghz
.channels
[i
].max_antenna_gain
= 0;
5694 * We keep local copies of the band structs because we need to
5695 * modify them on a per-device basis.
5697 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
], &wl1271_band_2ghz
,
5698 sizeof(wl1271_band_2ghz
));
5699 memcpy(&wl
->bands
[IEEE80211_BAND_2GHZ
].ht_cap
,
5700 &wl
->ht_cap
[IEEE80211_BAND_2GHZ
],
5701 sizeof(*wl
->ht_cap
));
5702 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
], &wl1271_band_5ghz
,
5703 sizeof(wl1271_band_5ghz
));
5704 memcpy(&wl
->bands
[IEEE80211_BAND_5GHZ
].ht_cap
,
5705 &wl
->ht_cap
[IEEE80211_BAND_5GHZ
],
5706 sizeof(*wl
->ht_cap
));
5708 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_2GHZ
] =
5709 &wl
->bands
[IEEE80211_BAND_2GHZ
];
5710 wl
->hw
->wiphy
->bands
[IEEE80211_BAND_5GHZ
] =
5711 &wl
->bands
[IEEE80211_BAND_5GHZ
];
5714 * allow 4 queues per mac address we support +
5715 * 1 cab queue per mac + one global offchannel Tx queue
5717 wl
->hw
->queues
= (NUM_TX_QUEUES
+ 1) * WLCORE_NUM_MAC_ADDRESSES
+ 1;
5719 /* the last queue is the offchannel queue */
5720 wl
->hw
->offchannel_tx_hw_queue
= wl
->hw
->queues
- 1;
5721 wl
->hw
->max_rates
= 1;
5723 wl
->hw
->wiphy
->reg_notifier
= wl1271_reg_notify
;
5725 /* the FW answers probe-requests in AP-mode */
5726 wl
->hw
->wiphy
->flags
|= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD
;
5727 wl
->hw
->wiphy
->probe_resp_offload
=
5728 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS
|
5729 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2
|
5730 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P
;
5732 /* allowed interface combinations */
5733 wlcore_iface_combinations
[0].num_different_channels
= wl
->num_channels
;
5734 wl
->hw
->wiphy
->iface_combinations
= wlcore_iface_combinations
;
5735 wl
->hw
->wiphy
->n_iface_combinations
=
5736 ARRAY_SIZE(wlcore_iface_combinations
);
5738 SET_IEEE80211_DEV(wl
->hw
, wl
->dev
);
5740 wl
->hw
->sta_data_size
= sizeof(struct wl1271_station
);
5741 wl
->hw
->vif_data_size
= sizeof(struct wl12xx_vif
);
5743 wl
->hw
->max_rx_aggregation_subframes
= wl
->conf
.ht
.rx_ba_win_size
;
5748 #define WL1271_DEFAULT_CHANNEL 0
5750 struct ieee80211_hw
*wlcore_alloc_hw(size_t priv_size
, u32 aggr_buf_size
,
5753 struct ieee80211_hw
*hw
;
5758 BUILD_BUG_ON(AP_MAX_STATIONS
> WL12XX_MAX_LINKS
);
5760 hw
= ieee80211_alloc_hw(sizeof(*wl
), &wl1271_ops
);
5762 wl1271_error("could not alloc ieee80211_hw");
5768 memset(wl
, 0, sizeof(*wl
));
5770 wl
->priv
= kzalloc(priv_size
, GFP_KERNEL
);
5772 wl1271_error("could not alloc wl priv");
5774 goto err_priv_alloc
;
5777 INIT_LIST_HEAD(&wl
->wlvif_list
);
5781 for (i
= 0; i
< NUM_TX_QUEUES
; i
++)
5782 for (j
= 0; j
< WL12XX_MAX_LINKS
; j
++)
5783 skb_queue_head_init(&wl
->links
[j
].tx_queue
[i
]);
5785 skb_queue_head_init(&wl
->deferred_rx_queue
);
5786 skb_queue_head_init(&wl
->deferred_tx_queue
);
5788 INIT_DELAYED_WORK(&wl
->elp_work
, wl1271_elp_work
);
5789 INIT_WORK(&wl
->netstack_work
, wl1271_netstack_work
);
5790 INIT_WORK(&wl
->tx_work
, wl1271_tx_work
);
5791 INIT_WORK(&wl
->recovery_work
, wl1271_recovery_work
);
5792 INIT_DELAYED_WORK(&wl
->scan_complete_work
, wl1271_scan_complete_work
);
5793 INIT_DELAYED_WORK(&wl
->roc_complete_work
, wlcore_roc_complete_work
);
5794 INIT_DELAYED_WORK(&wl
->tx_watchdog_work
, wl12xx_tx_watchdog_work
);
5796 wl
->freezable_wq
= create_freezable_workqueue("wl12xx_wq");
5797 if (!wl
->freezable_wq
) {
5802 wl
->channel
= WL1271_DEFAULT_CHANNEL
;
5804 wl
->power_level
= WL1271_DEFAULT_POWER_LEVEL
;
5805 wl
->band
= IEEE80211_BAND_2GHZ
;
5806 wl
->channel_type
= NL80211_CHAN_NO_HT
;
5808 wl
->sg_enabled
= true;
5809 wl
->sleep_auth
= WL1271_PSM_ILLEGAL
;
5810 wl
->recovery_count
= 0;
5813 wl
->ap_fw_ps_map
= 0;
5815 wl
->platform_quirks
= 0;
5816 wl
->system_hlid
= WL12XX_SYSTEM_HLID
;
5817 wl
->active_sta_count
= 0;
5818 wl
->active_link_count
= 0;
5820 init_waitqueue_head(&wl
->fwlog_waitq
);
5822 /* The system link is always allocated */
5823 __set_bit(WL12XX_SYSTEM_HLID
, wl
->links_map
);
5825 memset(wl
->tx_frames_map
, 0, sizeof(wl
->tx_frames_map
));
5826 for (i
= 0; i
< wl
->num_tx_desc
; i
++)
5827 wl
->tx_frames
[i
] = NULL
;
5829 spin_lock_init(&wl
->wl_lock
);
5831 wl
->state
= WLCORE_STATE_OFF
;
5832 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5833 mutex_init(&wl
->mutex
);
5834 mutex_init(&wl
->flush_mutex
);
5835 init_completion(&wl
->nvs_loading_complete
);
5837 order
= get_order(aggr_buf_size
);
5838 wl
->aggr_buf
= (u8
*)__get_free_pages(GFP_KERNEL
, order
);
5839 if (!wl
->aggr_buf
) {
5843 wl
->aggr_buf_size
= aggr_buf_size
;
5845 wl
->dummy_packet
= wl12xx_alloc_dummy_packet(wl
);
5846 if (!wl
->dummy_packet
) {
5851 /* Allocate one page for the FW log */
5852 wl
->fwlog
= (u8
*)get_zeroed_page(GFP_KERNEL
);
5855 goto err_dummy_packet
;
5858 wl
->mbox_size
= mbox_size
;
5859 wl
->mbox
= kmalloc(wl
->mbox_size
, GFP_KERNEL
| GFP_DMA
);
5865 wl
->buffer_32
= kmalloc(sizeof(*wl
->buffer_32
), GFP_KERNEL
);
5866 if (!wl
->buffer_32
) {
5877 free_page((unsigned long)wl
->fwlog
);
5880 dev_kfree_skb(wl
->dummy_packet
);
5883 free_pages((unsigned long)wl
->aggr_buf
, order
);
5886 destroy_workqueue(wl
->freezable_wq
);
5889 wl1271_debugfs_exit(wl
);
5893 ieee80211_free_hw(hw
);
5897 return ERR_PTR(ret
);
5899 EXPORT_SYMBOL_GPL(wlcore_alloc_hw
);
5901 int wlcore_free_hw(struct wl1271
*wl
)
5903 /* Unblock any fwlog readers */
5904 mutex_lock(&wl
->mutex
);
5905 wl
->fwlog_size
= -1;
5906 wake_up_interruptible_all(&wl
->fwlog_waitq
);
5907 mutex_unlock(&wl
->mutex
);
5909 device_remove_bin_file(wl
->dev
, &fwlog_attr
);
5911 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
5913 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
5914 kfree(wl
->buffer_32
);
5916 free_page((unsigned long)wl
->fwlog
);
5917 dev_kfree_skb(wl
->dummy_packet
);
5918 free_pages((unsigned long)wl
->aggr_buf
, get_order(wl
->aggr_buf_size
));
5920 wl1271_debugfs_exit(wl
);
5924 wl
->fw_type
= WL12XX_FW_TYPE_NONE
;
5928 kfree(wl
->fw_status_1
);
5929 kfree(wl
->tx_res_if
);
5930 destroy_workqueue(wl
->freezable_wq
);
5933 ieee80211_free_hw(wl
->hw
);
5937 EXPORT_SYMBOL_GPL(wlcore_free_hw
);
5939 static irqreturn_t
wl12xx_hardirq(int irq
, void *cookie
)
5941 struct wl1271
*wl
= cookie
;
5942 unsigned long flags
;
5944 wl1271_debug(DEBUG_IRQ
, "IRQ");
5946 /* complete the ELP completion */
5947 spin_lock_irqsave(&wl
->wl_lock
, flags
);
5948 set_bit(WL1271_FLAG_IRQ_RUNNING
, &wl
->flags
);
5949 if (wl
->elp_compl
) {
5950 complete(wl
->elp_compl
);
5951 wl
->elp_compl
= NULL
;
5954 if (test_bit(WL1271_FLAG_SUSPENDED
, &wl
->flags
)) {
5955 /* don't enqueue a work right now. mark it as pending */
5956 set_bit(WL1271_FLAG_PENDING_WORK
, &wl
->flags
);
5957 wl1271_debug(DEBUG_IRQ
, "should not enqueue work");
5958 disable_irq_nosync(wl
->irq
);
5959 pm_wakeup_event(wl
->dev
, 0);
5960 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5963 spin_unlock_irqrestore(&wl
->wl_lock
, flags
);
5965 return IRQ_WAKE_THREAD
;
5968 static void wlcore_nvs_cb(const struct firmware
*fw
, void *context
)
5970 struct wl1271
*wl
= context
;
5971 struct platform_device
*pdev
= wl
->pdev
;
5972 struct wl12xx_platform_data
*pdata
= pdev
->dev
.platform_data
;
5973 unsigned long irqflags
;
5977 wl
->nvs
= kmemdup(fw
->data
, fw
->size
, GFP_KERNEL
);
5979 wl1271_error("Could not allocate nvs data");
5982 wl
->nvs_len
= fw
->size
;
5984 wl1271_debug(DEBUG_BOOT
, "Could not get nvs file %s",
5990 ret
= wl
->ops
->setup(wl
);
5994 BUG_ON(wl
->num_tx_desc
> WLCORE_MAX_TX_DESCRIPTORS
);
5996 /* adjust some runtime configuration parameters */
5997 wlcore_adjust_conf(wl
);
5999 wl
->irq
= platform_get_irq(pdev
, 0);
6000 wl
->platform_quirks
= pdata
->platform_quirks
;
6001 wl
->set_power
= pdata
->set_power
;
6002 wl
->if_ops
= pdata
->ops
;
6004 if (wl
->platform_quirks
& WL12XX_PLATFORM_QUIRK_EDGE_IRQ
)
6005 irqflags
= IRQF_TRIGGER_RISING
;
6007 irqflags
= IRQF_TRIGGER_HIGH
| IRQF_ONESHOT
;
6009 ret
= request_threaded_irq(wl
->irq
, wl12xx_hardirq
, wlcore_irq
,
6013 wl1271_error("request_irq() failed: %d", ret
);
6018 ret
= enable_irq_wake(wl
->irq
);
6020 wl
->irq_wake_enabled
= true;
6021 device_init_wakeup(wl
->dev
, 1);
6022 if (pdata
->pwr_in_suspend
) {
6023 wl
->hw
->wiphy
->wowlan
.flags
= WIPHY_WOWLAN_ANY
;
6024 wl
->hw
->wiphy
->wowlan
.n_patterns
=
6025 WL1271_MAX_RX_FILTERS
;
6026 wl
->hw
->wiphy
->wowlan
.pattern_min_len
= 1;
6027 wl
->hw
->wiphy
->wowlan
.pattern_max_len
=
6028 WL1271_RX_FILTER_MAX_PATTERN_SIZE
;
6032 disable_irq(wl
->irq
);
6034 ret
= wl12xx_get_hw_info(wl
);
6036 wl1271_error("couldn't get hw info");
6040 ret
= wl
->ops
->identify_chip(wl
);
6044 ret
= wl1271_init_ieee80211(wl
);
6048 ret
= wl1271_register_hw(wl
);
6052 /* Create sysfs file to control bt coex state */
6053 ret
= device_create_file(wl
->dev
, &dev_attr_bt_coex_state
);
6055 wl1271_error("failed to create sysfs file bt_coex_state");
6059 /* Create sysfs file to get HW PG version */
6060 ret
= device_create_file(wl
->dev
, &dev_attr_hw_pg_ver
);
6062 wl1271_error("failed to create sysfs file hw_pg_ver");
6063 goto out_bt_coex_state
;
6066 /* Create sysfs file for the FW log */
6067 ret
= device_create_bin_file(wl
->dev
, &fwlog_attr
);
6069 wl1271_error("failed to create sysfs file fwlog");
6073 wl
->initialized
= true;
6077 device_remove_file(wl
->dev
, &dev_attr_hw_pg_ver
);
6080 device_remove_file(wl
->dev
, &dev_attr_bt_coex_state
);
6083 wl1271_unregister_hw(wl
);
6086 free_irq(wl
->irq
, wl
);
6092 release_firmware(fw
);
6093 complete_all(&wl
->nvs_loading_complete
);
6096 int wlcore_probe(struct wl1271
*wl
, struct platform_device
*pdev
)
6100 if (!wl
->ops
|| !wl
->ptable
)
6103 wl
->dev
= &pdev
->dev
;
6105 platform_set_drvdata(pdev
, wl
);
6107 ret
= request_firmware_nowait(THIS_MODULE
, FW_ACTION_HOTPLUG
,
6108 WL12XX_NVS_NAME
, &pdev
->dev
, GFP_KERNEL
,
6111 wl1271_error("request_firmware_nowait failed: %d", ret
);
6112 complete_all(&wl
->nvs_loading_complete
);
6117 EXPORT_SYMBOL_GPL(wlcore_probe
);
6119 int wlcore_remove(struct platform_device
*pdev
)
6121 struct wl1271
*wl
= platform_get_drvdata(pdev
);
6123 wait_for_completion(&wl
->nvs_loading_complete
);
6124 if (!wl
->initialized
)
6127 if (wl
->irq_wake_enabled
) {
6128 device_init_wakeup(wl
->dev
, 0);
6129 disable_irq_wake(wl
->irq
);
6131 wl1271_unregister_hw(wl
);
6132 free_irq(wl
->irq
, wl
);
6137 EXPORT_SYMBOL_GPL(wlcore_remove
);
6139 u32 wl12xx_debug_level
= DEBUG_NONE
;
6140 EXPORT_SYMBOL_GPL(wl12xx_debug_level
);
6141 module_param_named(debug_level
, wl12xx_debug_level
, uint
, S_IRUSR
| S_IWUSR
);
6142 MODULE_PARM_DESC(debug_level
, "wl12xx debugging level");
6144 module_param_named(fwlog
, fwlog_param
, charp
, 0);
6145 MODULE_PARM_DESC(fwlog
,
6146 "FW logger options: continuous, ondemand, dbgpins or disable");
6148 module_param(bug_on_recovery
, int, S_IRUSR
| S_IWUSR
);
6149 MODULE_PARM_DESC(bug_on_recovery
, "BUG() on fw recovery");
6151 module_param(no_recovery
, int, S_IRUSR
| S_IWUSR
);
6152 MODULE_PARM_DESC(no_recovery
, "Prevent HW recovery. FW will remain stuck.");
6154 MODULE_LICENSE("GPL");
6155 MODULE_AUTHOR("Luciano Coelho <coelho@ti.com>");
6156 MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
6157 MODULE_FIRMWARE(WL12XX_NVS_NAME
);