lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
+lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
lib-$(CONFIG_RETPOLINE) += retpoline.o
+OBJECT_FILES_NON_STANDARD_retpoline.o := y
obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
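
The new error-inject.o in arch/x86/lib/Makefile supplies the x86 support for CONFIG_FUNCTION_ERROR_INJECTION, which lets a kprobe-based injector force an error return from any function tagged with ALLOW_ERROR_INJECTION(); the OBJECT_FILES_NON_STANDARD line tells objtool to skip validating the hand-written retpoline thunks. As a minimal sketch of how a function opts in, assuming the ALLOW_ERROR_INJECTION() macro from <linux/error-injection.h> (the function itself is hypothetical):

#include <linux/error-injection.h>

/* hypothetical example: an int-returning function opts in here */
static int example_open_resource(void)
{
	return 0;	/* real work elided */
}
/* tag it so the injection framework may replace its return value
 * with an errno without executing the body */
ALLOW_ERROR_INJECTION(example_open_resource, ERRNO);
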
void free_tx_desc(struct adapter *adap, struct sge_txq *q,
unsigned int n, bool unmap);
void free_txq(struct adapter *adap, struct sge_txq *q);
+void cxgb4_reclaim_completed_tx(struct adapter *adap,
+ struct sge_txq *q, bool unmap);
+int cxgb4_map_skb(struct device *dev, const struct sk_buff *skb,
+ dma_addr_t *addr);
+void cxgb4_inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+ void *pos);
+void cxgb4_write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+ struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+ const dma_addr_t *addr);
+void cxgb4_ring_tx_db(struct adapter *adap, struct sge_txq *q, int n);
+int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
+ u16 vlan);
#endif /* __CXGB4_H__ */
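
These cxgb4.h declarations export helpers that previously lived as statics in sge.c, now carrying a cxgb4_ prefix so other parts of the driver can reuse the TX plumbing. Roughly, a transmit path strings them together as below; this is an illustrative sketch (queue and descriptor setup is assumed to have been done exactly as sge.c does, and error handling is elided), not code from the driver:

static netdev_tx_t example_xmit(struct adapter *adap, struct sge_eth_txq *txq,
				struct sk_buff *skb, struct ulptx_sgl *sgl,
				u64 *end, unsigned int ndesc)
{
	dma_addr_t addr[MAX_SKB_FRAGS + 1];

	cxgb4_reclaim_completed_tx(adap, &txq->q, true); /* recycle done descs */
	if (cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)
		return NETDEV_TX_BUSY;			/* DMA mapping failed */
	cxgb4_write_sgl(skb, &txq->q, sgl, end, 0, addr); /* emit gather list */
	cxgb4_ring_tx_db(adap, &txq->q, ndesc);		/* ring the doorbell */
	return NETDEV_TX_OK;
}
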
return NETDEV_TX_BUSY;
}
- if (is_eth_imm(skb))
+ if (is_eth_imm(skb, chip_ver))
immediate = true;
+ if (skb->encapsulation && chip_ver > CHELSIO_T5)
+ tnl_type = cxgb_encap_offload_supported(skb);
+
if (!immediate &&
- unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+ unlikely(cxgb4_map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
if (ptp_enabled)
spin_unlock(&adap->ptp_lock);
return rc;
}
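
On chips newer than T5 the transmit routine now asks cxgb_encap_offload_supported() whether an encapsulated skb qualifies for hardware tunnel offload. A hedged sketch of that kind of classification, matching the outer UDP destination port against the well-known IANA ports; the helper name and the TX_TNL_TYPE_* values are assumptions taken from the driver's context, and the port tests are illustrative, not the function's actual body:

static u8 example_encap_offload_supported(const struct sk_buff *skb)
{
	/* assumes an outer UDP header; the real driver checks the
	 * outer protocol chain before reading the port */
	if (!skb->encapsulation)
		return TX_TNL_TYPE_OPAQUE;	/* nothing to offload */
	if (udp_hdr(skb)->dest == htons(4789))	/* IANA VXLAN port */
		return TX_TNL_TYPE_VXLAN;
	if (udp_hdr(skb)->dest == htons(6081))	/* IANA GENEVE port */
		return TX_TNL_TYPE_GENEVE;
	return TX_TNL_TYPE_OPAQUE;
}
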
-static unsigned int smc_accept_poll(struct sock *parent)
+static __poll_t smc_accept_poll(struct sock *parent)
{
- struct smc_sock *isk;
- struct sock *sk;
-
- lock_sock(parent);
- list_for_each_entry(isk, &smc_sk(parent)->accept_q, accept_q) {
- sk = (struct sock *)isk;
+ struct smc_sock *isk = smc_sk(parent);
+ __poll_t mask = 0;
- if (sk->sk_state == SMC_ACTIVE) {
- release_sock(parent);
- return POLLIN | POLLRDNORM;
- }
- }
- release_sock(parent);
+ spin_lock(&isk->accept_q_lock);
+ if (!list_empty(&isk->accept_q))
+ mask = POLLIN | POLLRDNORM;
+ spin_unlock(&isk->accept_q_lock);
- return 0;
+ return mask;
}
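
The rewritten smc_accept_poll() in net/smc/af_smc.c no longer walks the accept queue under lock_sock(), which can sleep; it only tests queue emptiness under the accept_q_lock spinlock, which is safe from poll context and equivalent for the caller, since any queued child socket means the listener is readable. From userspace the observable behavior is the usual one:

#include <poll.h>

/* block until the listening SMC socket has a connection to accept();
 * the kernel side reports POLLIN | POLLRDNORM once accept_q is
 * non-empty */
int wait_for_connection(int listen_fd)
{
	struct pollfd pfd = { .fd = listen_fd, .events = POLLIN };

	return poll(&pfd, 1, -1 /* no timeout */);
}
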
-static unsigned int smc_poll(struct file *file, struct socket *sock,
+static __poll_t smc_poll(struct file *file, struct socket *sock,
poll_table *wait)
{
struct sock *sk = sock->sk;
}
/* receive the complete CLC message */
- vec.iov_base = buf;
- vec.iov_len = buflen;
memset(&msg, 0, sizeof(struct msghdr));
+ iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, buflen);
krflags = MSG_WAITALL;
smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
- len = kernel_recvmsg(smc->clcsock, &msg, &vec, 1, datlen, krflags);
+ len = sock_recvmsg(smc->clcsock, &msg, krflags);
- if (len < datlen) {
+ if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
smc->sk.sk_err = EPROTO;
reason_code = -EPROTO;
goto out;
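
Here the CLC receive path in net/smc/smc_clc.c switches from kernel_recvmsg() to sock_recvmsg(), describing the buffer once through the msghdr's iov_iter instead of passing the kvec on every call, and it now also rejects malformed replies via smc_clc_msg_hdr_valid(). A sketch of the checks such a validator plausibly performs; the field names follow struct smc_clc_msg_hdr, but the body below is an assumption, not the function's actual code:

static bool example_clc_hdr_valid(const struct smc_clc_msg_hdr *clcm)
{
	/* the wire header starts with the EBCDIC "SMC" eyecatcher */
	if (memcmp(clcm->eyecatcher, SMC_EYECATCHER, 4))
		return false;
	if (clcm->version != SMC_CLC_V1)	/* unsupported revision */
		return false;
	/* announced length must at least cover the fixed header */
	return ntohs(clcm->length) >= sizeof(*clcm);
}
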
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- struct tipc_group *grp = tsk->group;
- u32 revents = 0;
+ __poll_t revents = 0;
sock_poll_wait(file, sk_sleep(sk), wait);
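
Like the SMC hunks above, tipc_poll() in net/tipc/socket.c is converted to the sparse-checked __poll_t bitwise type (and drops the now-unused grp local). Mixing __poll_t with plain integers warns under make C=1; a minimal sketch of the shape a conforming ->poll() implementation takes:

static __poll_t example_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;	/* a plain int here would trip sparse */

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;
	return mask;
}
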