X-Git-Url: https://git.proxmox.com/?p=ceph.git;a=blobdiff_plain;f=ceph%2Fsrc%2Fspdk%2Fdpdk%2Fdrivers%2Fnet%2Fqede%2Fqede_filter.c;h=86a2e0dc9a6110a5e82aad0f09e7292813451500;hp=7aeef700dad493246a62c8d5aab998b4bec01380;hb=f67539c23b11f3b8a2ecaeeddf7a403ae1c442a8;hpb=64a4c04e6850c6d9086e4c37f57c4eada541b05e

diff --git a/ceph/src/spdk/dpdk/drivers/net/qede/qede_filter.c b/ceph/src/spdk/dpdk/drivers/net/qede/qede_filter.c
index 7aeef700d..86a2e0dc9 100644
--- a/ceph/src/spdk/dpdk/drivers/net/qede/qede_filter.c
+++ b/ceph/src/spdk/dpdk/drivers/net/qede/qede_filter.c
@@ -221,7 +221,7 @@ qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
 	case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
 	case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
 		/* fill the common ip header */
-		arfs->tuple.eth_proto = ETHER_TYPE_IPv4;
+		arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV4;
 		arfs->tuple.dst_ipv4 = input->flow.ip4_flow.dst_ip;
 		arfs->tuple.src_ipv4 = input->flow.ip4_flow.src_ip;
 		arfs->tuple.ip_proto = next_proto[input->flow_type];
@@ -237,7 +237,7 @@ qede_fdir_to_arfs_filter(struct rte_eth_dev *eth_dev,
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
-		arfs->tuple.eth_proto = ETHER_TYPE_IPv6;
+		arfs->tuple.eth_proto = RTE_ETHER_TYPE_IPV6;
 		arfs->tuple.ip_proto = next_proto[input->flow_type];
 		rte_memcpy(arfs->tuple.dst_ipv6,
 			   &input->flow.ipv6_flow.dst_ip,
@@ -272,6 +272,7 @@ qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ecore_ntuple_filter_params params;
 	char mz_name[RTE_MEMZONE_NAMESIZE] = {0};
 	struct qede_arfs_entry *tmp = NULL;
 	const struct rte_memzone *mz;
@@ -290,7 +291,7 @@ qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
 	/* soft_id could have been used as memzone string, but soft_id is
 	 * not currently used so it has no significance.
 	 */
-	snprintf(mz_name, sizeof(mz_name) - 1, "%lx",
+	snprintf(mz_name, sizeof(mz_name), "%lx",
 		 (unsigned long)rte_get_timer_cycles());
 	mz = rte_memzone_reserve_aligned(mz_name, QEDE_MAX_FDIR_PKT_LEN,
 					 SOCKET_ID_ANY, 0, RTE_CACHE_LINE_SIZE);
@@ -344,12 +345,18 @@ qede_config_arfs_filter(struct rte_eth_dev *eth_dev,
 		ecore_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
 					  &qdev->arfs_info.arfs);
 	}
+
+	memset(&params, 0, sizeof(params));
+	params.addr = (dma_addr_t)mz->iova;
+	params.length = pkt_len;
+	params.qid = arfs->rx_queue;
+	params.vport_id = 0;
+	params.b_is_add = add;
+	params.b_is_drop = arfs->is_drop;
+
 	/* configure filter with ECORE_SPQ_MODE_EBLOCK */
 	rc = ecore_configure_rfs_ntuple_filter(p_hwfn, NULL,
-					       (dma_addr_t)mz->iova,
-					       pkt_len,
-					       arfs->rx_queue,
-					       0, add);
+					       &params);
 	if (rc == ECORE_SUCCESS) {
 		if (add) {
 			arfs->pkt_len = pkt_len;
@@ -431,7 +438,7 @@ qede_fdir_filter_add(struct rte_eth_dev *eth_dev,
 		return -EINVAL;
 	}
 
-	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(qdev)) {
+	if (fdir->action.rx_queue >= QEDE_RSS_COUNT(eth_dev)) {
 		DP_ERR(edev, "invalid queue number %u\n",
 		       fdir->action.rx_queue);
 		return -EINVAL;
@@ -457,57 +464,57 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	uint16_t *ether_type;
 	uint8_t *raw_pkt;
-	struct ipv4_hdr *ip;
-	struct ipv6_hdr *ip6;
-	struct udp_hdr *udp;
-	struct tcp_hdr *tcp;
+	struct rte_ipv4_hdr *ip;
+	struct rte_ipv6_hdr *ip6;
+	struct rte_udp_hdr *udp;
+	struct rte_tcp_hdr *tcp;
 	uint16_t len;
 
 	raw_pkt = (uint8_t *)buff;
 
-	len = 2 * sizeof(struct ether_addr);
-	raw_pkt += 2 * sizeof(struct ether_addr);
+	len = 2 * sizeof(struct rte_ether_addr);
+	raw_pkt += 2 * sizeof(struct rte_ether_addr);
 	ether_type = (uint16_t *)raw_pkt;
 	raw_pkt += sizeof(uint16_t);
 	len += sizeof(uint16_t);
 
 	*ether_type = rte_cpu_to_be_16(arfs->tuple.eth_proto);
 	switch (arfs->tuple.eth_proto) {
-	case ETHER_TYPE_IPv4:
-		ip = (struct ipv4_hdr *)raw_pkt;
+	case RTE_ETHER_TYPE_IPV4:
+		ip = (struct rte_ipv4_hdr *)raw_pkt;
 		ip->version_ihl = QEDE_FDIR_IP_DEFAULT_VERSION_IHL;
-		ip->total_length = sizeof(struct ipv4_hdr);
+		ip->total_length = sizeof(struct rte_ipv4_hdr);
 		ip->next_proto_id = arfs->tuple.ip_proto;
 		ip->time_to_live = QEDE_FDIR_IPV4_DEF_TTL;
 		ip->dst_addr = arfs->tuple.dst_ipv4;
 		ip->src_addr = arfs->tuple.src_ipv4;
-		len += sizeof(struct ipv4_hdr);
+		len += sizeof(struct rte_ipv4_hdr);
 		params->ipv4 = true;
 
 		raw_pkt = (uint8_t *)buff;
 		/* UDP */
 		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
-			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp = (struct rte_udp_hdr *)(raw_pkt + len);
 			udp->dst_port = arfs->tuple.dst_port;
 			udp->src_port = arfs->tuple.src_port;
-			udp->dgram_len = sizeof(struct udp_hdr);
-			len += sizeof(struct udp_hdr);
+			udp->dgram_len = sizeof(struct rte_udp_hdr);
+			len += sizeof(struct rte_udp_hdr);
 			/* adjust ip total_length */
-			ip->total_length += sizeof(struct udp_hdr);
+			ip->total_length += sizeof(struct rte_udp_hdr);
 			params->udp = true;
 		} else { /* TCP */
-			tcp = (struct tcp_hdr *)(raw_pkt + len);
+			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
 			tcp->src_port = arfs->tuple.src_port;
 			tcp->dst_port = arfs->tuple.dst_port;
 			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
-			len += sizeof(struct tcp_hdr);
+			len += sizeof(struct rte_tcp_hdr);
 			/* adjust ip total_length */
-			ip->total_length += sizeof(struct tcp_hdr);
+			ip->total_length += sizeof(struct rte_tcp_hdr);
 			params->tcp = true;
 		}
 		break;
-	case ETHER_TYPE_IPv6:
-		ip6 = (struct ipv6_hdr *)raw_pkt;
+	case RTE_ETHER_TYPE_IPV6:
+		ip6 = (struct rte_ipv6_hdr *)raw_pkt;
 		ip6->proto = arfs->tuple.ip_proto;
 		ip6->vtc_flow =
 			rte_cpu_to_be_32(QEDE_FDIR_IPV6_DEFAULT_VTC_FLOW);
@@ -516,23 +523,23 @@ qede_arfs_construct_pkt(struct rte_eth_dev *eth_dev,
 			   IPV6_ADDR_LEN);
 		rte_memcpy(&ip6->dst_addr, arfs->tuple.dst_ipv6,
 			   IPV6_ADDR_LEN);
-		len += sizeof(struct ipv6_hdr);
+		len += sizeof(struct rte_ipv6_hdr);
 		params->ipv6 = true;
 
 		raw_pkt = (uint8_t *)buff;
 		/* UDP */
 		if (arfs->tuple.ip_proto == IPPROTO_UDP) {
-			udp = (struct udp_hdr *)(raw_pkt + len);
+			udp = (struct rte_udp_hdr *)(raw_pkt + len);
 			udp->src_port = arfs->tuple.src_port;
 			udp->dst_port = arfs->tuple.dst_port;
-			len += sizeof(struct udp_hdr);
+			len += sizeof(struct rte_udp_hdr);
 			params->udp = true;
 		} else { /* TCP */
-			tcp = (struct tcp_hdr *)(raw_pkt + len);
+			tcp = (struct rte_tcp_hdr *)(raw_pkt + len);
 			tcp->src_port = arfs->tuple.src_port;
 			tcp->dst_port = arfs->tuple.dst_port;
 			tcp->data_off = QEDE_FDIR_TCP_DEFAULT_DATAOFF;
-			len += sizeof(struct tcp_hdr);
+			len += sizeof(struct rte_tcp_hdr);
 			params->tcp = true;
 		}
 		break;
@@ -992,25 +999,25 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
 		break;
 	case ECORE_FILTER_MAC:
 		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
-		       ETHER_ADDR_LEN);
+		       RTE_ETHER_ADDR_LEN);
 		break;
 	case ECORE_FILTER_INNER_MAC:
 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-		       ETHER_ADDR_LEN);
+		       RTE_ETHER_ADDR_LEN);
 		break;
 	case ECORE_FILTER_MAC_VNI_PAIR:
 		memcpy(ucast->mac, conf->outer_mac.addr_bytes,
-		       ETHER_ADDR_LEN);
+		       RTE_ETHER_ADDR_LEN);
 		ucast->vni = conf->tenant_id;
 		break;
 	case ECORE_FILTER_INNER_MAC_VNI_PAIR:
 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-		       ETHER_ADDR_LEN);
+		       RTE_ETHER_ADDR_LEN);
 		ucast->vni = conf->tenant_id;
 		break;
 	case ECORE_FILTER_INNER_PAIR:
 		memcpy(ucast->mac, conf->inner_mac.addr_bytes,
-		       ETHER_ADDR_LEN);
+		       RTE_ETHER_ADDR_LEN);
 		ucast->vlan = conf->inner_vlan;
 		break;
 	default:
@@ -1023,7 +1030,7 @@ qede_set_ucast_tunn_cmn_param(struct ecore_filter_ucast *ucast,
 static int
 _qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
 			 const struct rte_eth_tunnel_filter_conf *conf,
-			 __attribute__((unused)) enum rte_filter_op filter_op,
+			 __rte_unused enum rte_filter_op filter_op,
 			 enum ecore_tunn_clss *clss,
 			 bool add)
 {
@@ -1161,7 +1168,7 @@ qede_tunn_filter_config(struct rte_eth_dev *eth_dev,
 }
 
 static int
-qede_flow_validate_attr(__attribute__((unused))struct rte_eth_dev *dev,
+qede_flow_validate_attr(__rte_unused struct rte_eth_dev *dev,
 			const struct rte_flow_attr *attr,
 			struct rte_flow_error *error)
 {
@@ -1211,7 +1218,7 @@ qede_flow_validate_attr(__attribute__((unused))struct rte_eth_dev *dev,
 }
 
 static int
-qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
+qede_flow_parse_pattern(__rte_unused struct rte_eth_dev *dev,
 			const struct rte_flow_item pattern[],
 			struct rte_flow_error *error,
 			struct rte_flow *flow)
@@ -1266,7 +1273,8 @@ qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
 			spec = pattern->spec;
 			flow->entry.tuple.src_ipv4 = spec->hdr.src_addr;
 			flow->entry.tuple.dst_ipv4 = spec->hdr.dst_addr;
-			flow->entry.tuple.eth_proto = ETHER_TYPE_IPv4;
+			flow->entry.tuple.eth_proto =
+				RTE_ETHER_TYPE_IPV4;
 		}
 		break;
 
@@ -1283,7 +1291,8 @@ qede_flow_parse_pattern(__attribute__((unused))struct rte_eth_dev *dev,
 			rte_memcpy(flow->entry.tuple.dst_ipv6,
 				   spec->hdr.dst_addr,
 				   IPV6_ADDR_LEN);
-			flow->entry.tuple.eth_proto = ETHER_TYPE_IPv6;
+			flow->entry.tuple.eth_proto =
+				RTE_ETHER_TYPE_IPV6;
 		}
 		break;
 
@@ -1343,7 +1352,6 @@ qede_flow_parse_actions(struct rte_eth_dev *dev,
 			struct rte_flow_error *error,
 			struct rte_flow *flow)
 {
-	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
 	const struct rte_flow_action_queue *queue;
 
 	if (actions == NULL) {
@@ -1358,7 +1366,7 @@ qede_flow_parse_actions(struct rte_eth_dev *dev,
 		case RTE_FLOW_ACTION_TYPE_QUEUE:
 			queue = actions->conf;
 
-			if (queue->index >= QEDE_RSS_COUNT(qdev)) {
+			if (queue->index >= QEDE_RSS_COUNT(dev)) {
 				rte_flow_error_set(error, EINVAL,
 						   RTE_FLOW_ERROR_TYPE_ACTION,
 						   actions,
@@ -1370,12 +1378,15 @@ qede_flow_parse_actions(struct rte_eth_dev *dev,
 				flow->entry.rx_queue = queue->index;
 
 			break;
-
+		case RTE_FLOW_ACTION_TYPE_DROP:
+			if (flow)
+				flow->entry.is_drop = true;
+			break;
 		default:
 			rte_flow_error_set(error, ENOTSUP,
 					   RTE_FLOW_ERROR_TYPE_ACTION,
 					   actions,
-					   "Action is not supported - only ACTION_TYPE_QUEUE supported");
+					   "Action is not supported - only ACTION_TYPE_QUEUE and ACTION_TYPE_DROP supported");
 			return -rte_errno;
 		}
 	}
@@ -1475,10 +1486,32 @@ qede_flow_destroy(struct rte_eth_dev *eth_dev,
 	return rc;
 }
 
+static int
+qede_flow_flush(struct rte_eth_dev *eth_dev,
+		struct rte_flow_error *error)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct qede_arfs_entry *tmp = NULL;
+	int rc = 0;
+
+	while (!SLIST_EMPTY(&qdev->arfs_info.arfs_list_head)) {
+		tmp = SLIST_FIRST(&qdev->arfs_info.arfs_list_head);
+
+		rc = qede_config_arfs_filter(eth_dev, tmp, false);
+		if (rc < 0)
+			rte_flow_error_set(error, rc,
+					   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+					   "Failed to flush flow filter");
+	}
+
+	return rc;
+}
+
 const struct rte_flow_ops qede_flow_ops = {
 	.validate = qede_flow_validate,
 	.create = qede_flow_create,
 	.destroy = qede_flow_destroy,
+	.flush = qede_flow_flush,
 };
 
 int qede_dev_filter_ctrl(struct rte_eth_dev *eth_dev,