/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
24 static void ath10k_report_offchan_tx(struct ath10k
*ar
, struct sk_buff
*skb
)
26 struct ieee80211_tx_info
*info
= IEEE80211_SKB_CB(skb
);
28 if (likely(!(info
->flags
& IEEE80211_TX_CTL_TX_OFFCHAN
)))
31 if (ath10k_mac_tx_frm_has_freq(ar
))
34 /* If the original wait_for_completion() timed out before
35 * {data,mgmt}_tx_completed() was called then we could complete
36 * offchan_tx_completed for a different skb. Prevent this by using
38 spin_lock_bh(&ar
->data_lock
);
39 if (ar
->offchan_tx_skb
!= skb
) {
40 ath10k_warn(ar
, "completed old offchannel frame\n");
44 complete(&ar
->offchan_tx_completed
);
45 ar
->offchan_tx_skb
= NULL
; /* just for sanity */
47 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "completed offchannel skb %p\n", skb
);
49 spin_unlock_bh(&ar
->data_lock
);
52 void ath10k_txrx_tx_unref(struct ath10k_htt
*htt
,
53 const struct htt_tx_done
*tx_done
)
55 struct ath10k
*ar
= htt
->ar
;
56 struct device
*dev
= ar
->dev
;
57 struct ieee80211_tx_info
*info
;
58 struct ath10k_skb_cb
*skb_cb
;
60 bool limit_mgmt_desc
= false;
62 ath10k_dbg(ar
, ATH10K_DBG_HTT
,
63 "htt tx completion msdu_id %u discard %d no_ack %d success %d\n",
64 tx_done
->msdu_id
, !!tx_done
->discard
,
65 !!tx_done
->no_ack
, !!tx_done
->success
);
67 if (tx_done
->msdu_id
>= htt
->max_num_pending_tx
) {
68 ath10k_warn(ar
, "warning: msdu_id %d too big, ignoring\n",
73 spin_lock_bh(&htt
->tx_lock
);
74 msdu
= idr_find(&htt
->pending_tx
, tx_done
->msdu_id
);
76 ath10k_warn(ar
, "received tx completion for invalid msdu_id: %d\n",
78 spin_unlock_bh(&htt
->tx_lock
);
82 skb_cb
= ATH10K_SKB_CB(msdu
);
84 if (unlikely(skb_cb
->flags
& ATH10K_SKB_F_MGMT
) &&
85 ar
->hw_params
.max_probe_resp_desc_thres
)
86 limit_mgmt_desc
= true;
88 ath10k_htt_tx_free_msdu_id(htt
, tx_done
->msdu_id
);
89 __ath10k_htt_tx_dec_pending(htt
, limit_mgmt_desc
);
90 if (htt
->num_pending_tx
== 0)
91 wake_up(&htt
->empty_tx_wq
);
92 spin_unlock_bh(&htt
->tx_lock
);
94 dma_unmap_single(dev
, skb_cb
->paddr
, msdu
->len
, DMA_TO_DEVICE
);
96 ath10k_report_offchan_tx(htt
->ar
, msdu
);
98 info
= IEEE80211_SKB_CB(msdu
);
99 memset(&info
->status
, 0, sizeof(info
->status
));
100 trace_ath10k_txrx_tx_unref(ar
, tx_done
->msdu_id
);
102 if (tx_done
->discard
) {
103 ieee80211_free_txskb(htt
->ar
->hw
, msdu
);
107 if (!(info
->flags
& IEEE80211_TX_CTL_NO_ACK
))
108 info
->flags
|= IEEE80211_TX_STAT_ACK
;
111 info
->flags
&= ~IEEE80211_TX_STAT_ACK
;
113 if (tx_done
->success
&& (info
->flags
& IEEE80211_TX_CTL_NO_ACK
))
114 info
->flags
|= IEEE80211_TX_STAT_NOACK_TRANSMITTED
;
116 ieee80211_tx_status(htt
->ar
->hw
, msdu
);
117 /* we do not own the msdu anymore */
120 struct ath10k_peer
*ath10k_peer_find(struct ath10k
*ar
, int vdev_id
,
123 struct ath10k_peer
*peer
;
125 lockdep_assert_held(&ar
->data_lock
);
127 list_for_each_entry(peer
, &ar
->peers
, list
) {
128 if (peer
->vdev_id
!= vdev_id
)
130 if (memcmp(peer
->addr
, addr
, ETH_ALEN
))
139 struct ath10k_peer
*ath10k_peer_find_by_id(struct ath10k
*ar
, int peer_id
)
141 struct ath10k_peer
*peer
;
143 lockdep_assert_held(&ar
->data_lock
);
145 list_for_each_entry(peer
, &ar
->peers
, list
)
146 if (test_bit(peer_id
, peer
->peer_ids
))
152 static int ath10k_wait_for_peer_common(struct ath10k
*ar
, int vdev_id
,
153 const u8
*addr
, bool expect_mapped
)
157 time_left
= wait_event_timeout(ar
->peer_mapping_wq
, ({
160 spin_lock_bh(&ar
->data_lock
);
161 mapped
= !!ath10k_peer_find(ar
, vdev_id
, addr
);
162 spin_unlock_bh(&ar
->data_lock
);
164 (mapped
== expect_mapped
||
165 test_bit(ATH10K_FLAG_CRASH_FLUSH
, &ar
->dev_flags
));
174 int ath10k_wait_for_peer_created(struct ath10k
*ar
, int vdev_id
, const u8
*addr
)
176 return ath10k_wait_for_peer_common(ar
, vdev_id
, addr
, true);
179 int ath10k_wait_for_peer_deleted(struct ath10k
*ar
, int vdev_id
, const u8
*addr
)
181 return ath10k_wait_for_peer_common(ar
, vdev_id
, addr
, false);
184 void ath10k_peer_map_event(struct ath10k_htt
*htt
,
185 struct htt_peer_map_event
*ev
)
187 struct ath10k
*ar
= htt
->ar
;
188 struct ath10k_peer
*peer
;
190 spin_lock_bh(&ar
->data_lock
);
191 peer
= ath10k_peer_find(ar
, ev
->vdev_id
, ev
->addr
);
193 peer
= kzalloc(sizeof(*peer
), GFP_ATOMIC
);
197 peer
->vdev_id
= ev
->vdev_id
;
198 ether_addr_copy(peer
->addr
, ev
->addr
);
199 list_add(&peer
->list
, &ar
->peers
);
200 wake_up(&ar
->peer_mapping_wq
);
203 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt peer map vdev %d peer %pM id %d\n",
204 ev
->vdev_id
, ev
->addr
, ev
->peer_id
);
206 set_bit(ev
->peer_id
, peer
->peer_ids
);
208 spin_unlock_bh(&ar
->data_lock
);
211 void ath10k_peer_unmap_event(struct ath10k_htt
*htt
,
212 struct htt_peer_unmap_event
*ev
)
214 struct ath10k
*ar
= htt
->ar
;
215 struct ath10k_peer
*peer
;
217 spin_lock_bh(&ar
->data_lock
);
218 peer
= ath10k_peer_find_by_id(ar
, ev
->peer_id
);
220 ath10k_warn(ar
, "peer-unmap-event: unknown peer id %d\n",
225 ath10k_dbg(ar
, ATH10K_DBG_HTT
, "htt peer unmap vdev %d peer %pM id %d\n",
226 peer
->vdev_id
, peer
->addr
, ev
->peer_id
);
228 clear_bit(ev
->peer_id
, peer
->peer_ids
);
230 if (bitmap_empty(peer
->peer_ids
, ATH10K_MAX_NUM_PEER_IDS
)) {
231 list_del(&peer
->list
);
233 wake_up(&ar
->peer_mapping_wq
);
237 spin_unlock_bh(&ar
->data_lock
);