Andrew Murray <amurray@thegoodpenguin.co.uk> <andrew.murray@arm.com>
Andrew Vasquez <andrew.vasquez@qlogic.com>
Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
+Andrey Ryabinin <ryabinin.a.a@gmail.com> <aryabinin@virtuozzo.com>
Andy Adamson <andros@citi.umich.edu>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@bootlin.com>
Antoine Tenart <atenart@kernel.org> <antoine.tenart@free-electrons.com>
- ``kasan=off`` or ``=on`` controls whether KASAN is enabled (default: ``on``).
- ``kasan.stacktrace=off`` or ``=on`` disables or enables alloc and free stack
- traces collection (default: ``on`` for ``CONFIG_DEBUG_KERNEL=y``, otherwise
- ``off``).
+ traces collection (default: ``on``).
- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN
report or also panic the kernel (default: ``report``).
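As an illustrative combination (example only, not part of this patch), a
debugging setup might boot with::

    kasan=on kasan.stacktrace=on kasan.fault=panic

so that stack traces are collected and every report panics the machine.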
F: drivers/hwmon/k8temp.c
KASAN
-M: Andrey Ryabinin <aryabinin@virtuozzo.com>
+M: Andrey Ryabinin <ryabinin.a.a@gmail.com>
R: Alexander Potapenko <glider@google.com>
R: Dmitry Vyukov <dvyukov@google.com>
L: kasan-dev@googlegroups.com
union acpi_operand_object **return_object_ptr)
{
union acpi_operand_object *return_object = *return_object_ptr;
- char *dest;
+ union acpi_operand_object *new_string;
char *source;
+ char *dest;
ACPI_FUNCTION_NAME(ns_repair_HID);
return_ACPI_STATUS(AE_OK);
}
+ /* It is simplest to always create a new string object */
+
+ new_string = acpi_ut_create_string_object(return_object->string.length);
+ if (!new_string) {
+ return_ACPI_STATUS(AE_NO_MEMORY);
+ }
+
/*
* Remove a leading asterisk if present. For some unknown reason, there
* are many machines in the field that contain IDs like this.
source = return_object->string.pointer;
if (*source == '*') {
source++;
- return_object->string.length--;
+ new_string->string.length--;
ACPI_DEBUG_PRINT((ACPI_DB_REPAIR,
"%s: Removed invalid leading asterisk\n",
* "NNNN####" where N is an uppercase letter or decimal digit, and
* # is a hex digit.
*/
- for (dest = return_object->string.pointer; *source; dest++, source++) {
+ for (dest = new_string->string.pointer; *source; dest++, source++) {
*dest = (char)toupper((int)*source);
}
- return_object->string.pointer[return_object->string.length] = 0;
+ acpi_ut_remove_reference(return_object);
+ *return_object_ptr = new_string;
return_ACPI_STATUS(AE_OK);
}
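A minimal standalone sketch of the repair performed above (illustrative
userspace C; the kernel version operates on ACPI string objects, and the
input value here is made up):

#include <ctype.h>
#include <stdio.h>

static void repair_hid(const char *src, char *dst)
{
	if (*src == '*')	/* drop the invalid leading asterisk */
		src++;
	while (*src)		/* force uppercase, as _HID requires */
		*dst++ = (char)toupper((unsigned char)*src++);
	*dst = '\0';
}

int main(void)
{
	char buf[16];

	repair_hid("*pnp0c0a", buf);
	printf("%s\n", buf);	/* prints "PNP0C0A" */
	return 0;
}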
"%s called while %d clients hold a reference\n",
__func__, chan->client_count);
mutex_lock(&dma_list_mutex);
- list_del(&chan->device_node);
device->chancnt--;
chan->dev->chan = NULL;
mutex_unlock(&dma_list_mutex);
dev_vdbg(chan2dev(chan), "%s\n", __func__);
- pm_runtime_get_sync(dw->dma.dev);
-
/* ASSERT: channel is idle */
if (dma_readl(dw, CH_EN) & dwc->mask) {
- pm_runtime_put_sync_suspend(dw->dma.dev);
dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
return -EIO;
}
* We need controller-specific data to set up slave transfers.
*/
if (chan->private && !dw_dma_filter(chan, chan->private)) {
- pm_runtime_put_sync_suspend(dw->dma.dev);
dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
return -EINVAL;
}
if (!dw->in_use)
do_dw_dma_off(dw);
- pm_runtime_put_sync_suspend(dw->dma.dev);
-
dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}
return false;
}
+static inline bool idxd_device_is_halted(struct idxd_device *idxd)
+{
+ union gensts_reg gensts;
+
+ gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET);
+
+ return (gensts.state == IDXD_DEVICE_STATE_HALT);
+}
+
/*
* This function is only used for reset during probe and will
* poll for completion. Once the device is set up with interrupts,
* all commands will be done via interrupt completion.
*/
-void idxd_device_init_reset(struct idxd_device *idxd)
+int idxd_device_init_reset(struct idxd_device *idxd)
{
struct device *dev = &idxd->pdev->dev;
union idxd_command_reg cmd;
unsigned long flags;
+ if (idxd_device_is_halted(idxd)) {
+ dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+ return -ENXIO;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = IDXD_CMD_RESET_DEVICE;
dev_dbg(dev, "%s: sending reset for init.\n", __func__);
IDXD_CMDSTS_ACTIVE)
cpu_relax();
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ return 0;
}
static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand,
DECLARE_COMPLETION_ONSTACK(done);
unsigned long flags;
+ if (idxd_device_is_halted(idxd)) {
+ dev_warn(&idxd->pdev->dev, "Device is HALTED!\n");
+ *status = IDXD_CMDSTS_HW_ERR;
+ return;
+ }
+
memset(&cmd, 0, sizeof(cmd));
cmd.cmd = cmd_code;
cmd.operand = operand;
void idxd_unregister_dma_channel(struct idxd_wq *wq)
{
- dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan);
+ struct dma_chan *chan = &wq->dma_chan;
+
+ dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan);
+ list_del(&chan->device_node);
}
void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id);
/* device control */
-void idxd_device_init_reset(struct idxd_device *idxd);
+int idxd_device_init_reset(struct idxd_device *idxd);
int idxd_device_enable(struct idxd_device *idxd);
int idxd_device_disable(struct idxd_device *idxd);
void idxd_device_reset(struct idxd_device *idxd);
int rc;
dev_dbg(dev, "%s entered and resetting device\n", __func__);
- idxd_device_init_reset(idxd);
+ rc = idxd_device_init_reset(idxd);
+ if (rc < 0)
+ return rc;
+
dev_dbg(dev, "IDXD reset complete\n");
if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) {
return IRQ_WAKE_THREAD;
}
-irqreturn_t idxd_misc_thread(int vec, void *data)
+static int process_misc_interrupts(struct idxd_device *idxd, u32 cause)
{
- struct idxd_irq_entry *irq_entry = data;
- struct idxd_device *idxd = irq_entry->idxd;
struct device *dev = &idxd->pdev->dev;
union gensts_reg gensts;
- u32 cause, val = 0;
+ u32 val = 0;
int i;
bool err = false;
- cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
- iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
-
if (cause & IDXD_INTC_ERR) {
spin_lock_bh(&idxd->dev_lock);
for (i = 0; i < 4; i++)
val);
if (!err)
- goto out;
+ return 0;
/*
* This case should rarely happen and typically is due to software
gensts.reset_type == IDXD_DEVICE_RESET_FLR ?
"FLR" : "system reset");
spin_unlock_bh(&idxd->dev_lock);
+ return -ENXIO;
}
}
- out:
+ return 0;
+}
+
+irqreturn_t idxd_misc_thread(int vec, void *data)
+{
+ struct idxd_irq_entry *irq_entry = data;
+ struct idxd_device *idxd = irq_entry->idxd;
+ int rc;
+ u32 cause;
+
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (cause)
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+
+ while (cause) {
+ rc = process_misc_interrupts(idxd, cause);
+ if (rc < 0)
+ break;
+ cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ if (cause)
+ iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET);
+ }
+
idxd_unmask_msix_vector(idxd, irq_entry->id);
return IRQ_HANDLED;
}
-static bool process_fault(struct idxd_desc *desc, u64 fault_addr)
+static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
{
/*
* Completion address can be bad as well. Check fault address match for descriptor
* and completion address.
*/
- if ((u64)desc->hw == fault_addr ||
- (u64)desc->completion == fault_addr) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL);
+ if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) {
+ struct idxd_device *idxd = desc->wq->idxd;
+ struct device *dev = &idxd->pdev->dev;
+
+ dev_warn(dev, "desc with fault address: %#llx\n", fault_addr);
return true;
}
return false;
}
-static bool complete_desc(struct idxd_desc *desc)
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
{
- if (desc->completion->status) {
- idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL);
- return true;
- }
-
- return false;
+ idxd_dma_complete_txd(desc, reason);
+ idxd_free_desc(desc->wq, desc);
}
static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
struct idxd_desc *desc, *t;
struct llist_node *head;
int queued = 0;
- bool completed = false;
unsigned long flags;
+ enum idxd_complete_type reason;
*processed = 0;
head = llist_del_all(&irq_entry->pending_llist);
if (!head)
goto out;
- llist_for_each_entry_safe(desc, t, head, llnode) {
- if (wtype == IRQ_WORK_NORMAL)
- completed = complete_desc(desc);
- else if (wtype == IRQ_WORK_PROCESS_FAULT)
- completed = process_fault(desc, data);
+ if (wtype == IRQ_WORK_NORMAL)
+ reason = IDXD_COMPLETE_NORMAL;
+ else
+ reason = IDXD_COMPLETE_DEV_FAIL;
- if (completed) {
- idxd_free_desc(desc->wq, desc);
+ llist_for_each_entry_safe(desc, t, head, llnode) {
+ if (desc->completion->status) {
+ if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ match_fault(desc, data);
+ complete_desc(desc, reason);
(*processed)++;
- if (wtype == IRQ_WORK_PROCESS_FAULT)
- break;
} else {
spin_lock_irqsave(&irq_entry->list_lock, flags);
list_add_tail(&desc->list,
enum irq_work_type wtype,
int *processed, u64 data)
{
- struct list_head *node, *next;
int queued = 0;
- bool completed = false;
unsigned long flags;
+ LIST_HEAD(flist);
+ struct idxd_desc *desc, *n;
+ enum idxd_complete_type reason;
*processed = 0;
- spin_lock_irqsave(&irq_entry->list_lock, flags);
- if (list_empty(&irq_entry->work_list))
- goto out;
-
- list_for_each_safe(node, next, &irq_entry->work_list) {
- struct idxd_desc *desc =
- container_of(node, struct idxd_desc, list);
+ if (wtype == IRQ_WORK_NORMAL)
+ reason = IDXD_COMPLETE_NORMAL;
+ else
+ reason = IDXD_COMPLETE_DEV_FAIL;
+ /*
+ * This lock protects the list from corruption by accesses outside of
+ * the irq handler thread.
+ */
+ spin_lock_irqsave(&irq_entry->list_lock, flags);
+ if (list_empty(&irq_entry->work_list)) {
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- if (wtype == IRQ_WORK_NORMAL)
- completed = complete_desc(desc);
- else if (wtype == IRQ_WORK_PROCESS_FAULT)
- completed = process_fault(desc, data);
+ return 0;
+ }
- if (completed) {
- spin_lock_irqsave(&irq_entry->list_lock, flags);
+ list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) {
+ if (desc->completion->status) {
list_del(&desc->list);
- spin_unlock_irqrestore(&irq_entry->list_lock, flags);
- idxd_free_desc(desc->wq, desc);
(*processed)++;
- if (wtype == IRQ_WORK_PROCESS_FAULT)
- return queued;
+ list_add_tail(&desc->list, &flist);
} else {
queued++;
}
- spin_lock_irqsave(&irq_entry->list_lock, flags);
}
- out:
spin_unlock_irqrestore(&irq_entry->list_lock, flags);
+
+ list_for_each_entry(desc, &flist, list) {
+ if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+ match_fault(desc, data);
+ complete_desc(desc, reason);
+ }
+
return queued;
}
dev_err(ud->ddev.dev,
"Descriptor pool allocation failed\n");
uc->use_dma_pool = false;
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_res_free;
}
uc->use_dma_pool = true;
return 0;
}
-static const struct __maybe_unused of_device_id i3c_hci_of_match[] = {
+static const __maybe_unused struct of_device_id i3c_hci_of_match[] = {
{ .compatible = "mipi-i3c-hci", },
{},
};
{
struct ocelot *ocelot = ds->priv;
struct ocelot_port *ocelot_port = ocelot->ports[port];
+ int err;
+
+ ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA,
+ DEV_MAC_ENA_CFG);
- ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG);
ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0);
+
+ err = ocelot_port_flush(ocelot, port);
+ if (err)
+ dev_err(ocelot->dev, "failed to flush port %d: %d\n",
+ port, err);
+
+ /* Put the port in reset. */
+ ocelot_port_writel(ocelot_port,
+ DEV_CLOCK_CFG_MAC_TX_RST |
+ DEV_CLOCK_CFG_MAC_RX_RST |
+ DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000),
+ DEV_CLOCK_CFG);
}
static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port,
if (unlikely(!xdpf)) {
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = XDP_ABORTED;
break;
}
xdp_stat = &rx_ring->rx_stats.xdp_redirect;
break;
}
- fallthrough;
+ trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
+ xdp_stat = &rx_ring->rx_stats.xdp_aborted;
+ verdict = XDP_ABORTED;
+ break;
case XDP_ABORTED:
trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
xdp_stat = &rx_ring->rx_stats.xdp_aborted;
CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */
CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */
CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x6092), /* Custom T62100-CR-LOM */
CH_PCI_DEVICE_ID_TABLE_DEFINE_END;
#endif /* __T4_PCI_ID_TBL_H__ */
struct xdp_frame **init_xdpf)
{
struct xdp_frame *new_xdpf, *xdpf = *init_xdpf;
- void *new_buff;
+ void *new_buff, *aligned_data;
struct page *p;
+ u32 data_shift;
+ int headroom;
/* Check the data alignment and make sure the headroom is large
* enough to store the xdpf backpointer. Use an aligned headroom
* byte frame headroom. If the XDP program uses all of it, copy the
* data to a new buffer and make room for storing the backpointer.
*/
- if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) &&
+ if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) &&
xdpf->headroom >= priv->tx_headroom) {
xdpf->headroom = priv->tx_headroom;
return 0;
}
+ /* Try to move the data inside the buffer just enough to align it and
+ * store the xdpf backpointer. If the available headroom isn't large
+ * enough, resort to allocating a new buffer and copying the data.
+ */
+ aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT);
+ data_shift = xdpf->data - aligned_data;
+
+ /* The XDP frame's headroom needs to be large enough to accommodate
+ * shifting the data as well as storing the xdpf backpointer.
+ */
+ if (xdpf->headroom >= data_shift + priv->tx_headroom) {
+ memmove(aligned_data, xdpf->data, xdpf->len);
+ xdpf->data = aligned_data;
+ xdpf->headroom = priv->tx_headroom;
+ return 0;
+ }
+
+ /* The new xdp_frame is stored in the new buffer. Reserve enough space
+ * in the headroom for storing it along with the driver's private
+ * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to
+ * guarantee the data's alignment in the buffer.
+ */
+ headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom,
+ DPAA_FD_DATA_ALIGNMENT);
+
+ /* Ensure the extended headroom and data don't overflow the buffer,
+ * while maintaining the mandatory tailroom.
+ */
+ if (headroom + xdpf->len > DPAA_BP_RAW_SIZE -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+ return -ENOMEM;
+
p = dev_alloc_pages(0);
if (unlikely(!p))
return -ENOMEM;
/* Copy the data to the new buffer at a properly aligned offset */
new_buff = page_address(p);
- memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len);
+ memcpy(new_buff + headroom, xdpf->data, xdpf->len);
/* Create an XDP frame around the new buffer in a similar fashion
* to xdp_convert_buff_to_frame.
*/
new_xdpf = new_buff;
- new_xdpf->data = new_buff + priv->tx_headroom;
+ new_xdpf->data = new_buff + headroom;
new_xdpf->len = xdpf->len;
new_xdpf->headroom = priv->tx_headroom;
new_xdpf->frame_sz = DPAA_BP_RAW_SIZE;
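A self-contained sketch of the shift computation above (illustrative
address, with a stand-in alignment value; the real constant comes from
the driver headers):

#include <stdint.h>
#include <stdio.h>

#define FD_ALIGN 64	/* stand-in for DPAA_FD_DATA_ALIGNMENT */

int main(void)
{
	uintptr_t data = 0x10046;	/* example xdpf->data address */
	/* equivalent of PTR_ALIGN_DOWN(data, FD_ALIGN) */
	uintptr_t aligned = data & ~(uintptr_t)(FD_ALIGN - 1);
	uintptr_t shift = data - aligned;

	/* the frame may be shifted in place only when the XDP headroom
	 * can absorb both the shift and the driver's tx_headroom
	 */
	printf("aligned %#lx, shift %lu bytes\n",
	       (unsigned long)aligned, (unsigned long)shift);
	return 0;
}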
#define ENETC_CBS_BW_MASK GENMASK(6, 0)
#define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/
#define ENETC_RSSHASH_KEY_SIZE 40
+#define ENETC_PRSSCAPR 0x1404
+#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32)
#define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */
#define ENETC_PSIVLANFMR 0x1700
#define ENETC_PSIVLANFMR_VS BIT(0)
phylink_destroy(priv->phylink);
}
+/* Initialize the entire shared memory for the flow steering entries
+ * of this port (PF + VFs)
+ */
+static int enetc_init_port_rfs_memory(struct enetc_si *si)
+{
+ struct enetc_cmd_rfse rfse = {0};
+ struct enetc_hw *hw = &si->hw;
+ int num_rfs, i, err = 0;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRFSCAPR);
+ num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val);
+
+ for (i = 0; i < num_rfs; i++) {
+ err = enetc_set_fs_entry(si, &rfse, i);
+ if (err)
+ break;
+ }
+
+ return err;
+}
+
+static int enetc_init_port_rss_memory(struct enetc_si *si)
+{
+ struct enetc_hw *hw = &si->hw;
+ int num_rss, err;
+ int *rss_table;
+ u32 val;
+
+ val = enetc_port_rd(hw, ENETC_PRSSCAPR);
+ num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val);
+ if (!num_rss)
+ return 0;
+
+ rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL);
+ if (!rss_table)
+ return -ENOMEM;
+
+ err = enetc_set_rss_table(si, rss_table, num_rss);
+
+ kfree(rss_table);
+
+ return err;
+}
+
static int enetc_pf_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
goto err_alloc_si_res;
}
+ err = enetc_init_port_rfs_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RFS memory\n");
+ goto err_init_port_rfs;
+ }
+
+ err = enetc_init_port_rss_memory(si);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize RSS memory\n");
+ goto err_init_port_rss;
+ }
+
err = enetc_alloc_msix(priv);
if (err) {
dev_err(&pdev->dev, "MSIX alloc failed\n");
enetc_mdiobus_destroy(pf);
err_mdiobus_create:
enetc_free_msix(priv);
+err_init_port_rss:
+err_init_port_rfs:
err_alloc_msix:
enetc_free_si_resources(priv);
err_alloc_si_res:
void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
{
+ struct hnae3_handle *handle = &vport->nic;
struct hclge_dev *hdev = vport->back;
int reset_try_times = 0;
int reset_status;
u16 queue_gid;
int ret;
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+ queue_id);
+ return;
+ }
+
queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
struct hclge_vport *vport)
{
struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ struct hclge_dev *hdev = vport->back;
int ring_num;
- int i = 0;
+ int i;
ring_num = req->msg.ring_num;
if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
return -ENOMEM;
+ for (i = 0; i < ring_num; i++) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+ req->msg.param[i].tqp_index,
+ vport->nic.kinfo.rss_size - 1);
+ return -EINVAL;
+ }
+ }
+
hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
- req->msg.param[i].ring_type);
+ req->msg.param[0].ring_type);
ring_chain->tqp_index =
hclge_get_queue_id(vport->nic.kinfo.tqp
- [req->msg.param[i].tqp_index]);
+ [req->msg.param[0].tqp_index]);
hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
- HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index);
+ HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
cur_chain = ring_chain;
index = mbx_req->msg.data[0];
+ /* Check the query index of rss_hash_key from the VF, and make sure it
+ * does not exceed the size of rss_hash_key.
+ */
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+ sizeof(vport[0].rss_hash_key)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to get the rss hash key, the index(%u) invalid !\n",
+ index);
+ return;
+ }
+
memcpy(resp_msg->data,
&hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
HCLGE_RSS_MBX_RESP_LEN);
complete(&adapter->init_done);
adapter->init_done_rc = -EIO;
}
- ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+ if (rc && rc != -EBUSY) {
+ /* We were unable to schedule the failover
+ * reset either because the adapter was still
+ * probing (eg: during kexec) or we could not
+ * allocate memory. Clear the failover_pending
+ * flag since no one else will. We ignore
+ * EBUSY because it means either FAILOVER reset
+ * is already scheduled or the adapter is
+ * being removed.
+ */
+ netdev_err(netdev,
+ "Error %ld scheduling failover reset\n",
+ rc);
+ adapter->failover_pending = false;
+ }
break;
case IBMVNIC_CRQ_INIT_COMPLETE:
dev_info(dev, "Partner initialization complete\n");
}
}
+static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port)
+{
+ return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port);
+}
+
+int ocelot_port_flush(struct ocelot *ocelot, int port)
+{
+ int err, val;
+
+ /* Disable dequeuing from the egress queues */
+ ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE_DEQUEUE_DIS,
+ QSYS_PORT_MODE, port);
+
+ /* Disable flow control */
+ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0);
+
+ /* Disable priority flow control */
+ ocelot_fields_write(ocelot, port,
+ QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0);
+
+ /* Wait at least the time it takes to receive a frame of maximum length
+ * at the port.
+ * Worst-case delays for 10 kilobyte jumbo frames are:
+ * 8 ms on a 10M port
+ * 800 μs on a 100M port
+ * 80 μs on a 1G port
+ * 32 μs on a 2.5G port
+ */
+ usleep_range(8000, 10000);
+
+ /* Disable half duplex backpressure. */
+ ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE,
+ SYS_FRONT_PORT_MODE, port);
+
+ /* Flush the queues associated with the port. */
+ ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA,
+ REW_PORT_CFG, port);
+
+ /* Enable dequeuing from the egress queues. */
+ ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE,
+ port);
+
+ /* Wait until flushing is complete. */
+ err = read_poll_timeout(ocelot_read_eq_avail, val, !val,
+ 100, 2000000, false, ocelot, port);
+
+ /* Clear flushing again. */
+ ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port);
+
+ return err;
+}
+EXPORT_SYMBOL(ocelot_port_flush);
+
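The worst-case delays quoted above follow directly from frame size over
line rate. A hypothetical standalone helper (not part of the patch) makes
the arithmetic explicit:

#include <stdio.h>

/* worst-case time to receive one frame, in microseconds */
static unsigned long wc_rx_time_us(unsigned long frame_bytes,
				   unsigned long rate_mbps)
{
	return frame_bytes * 8 / rate_mbps;
}

int main(void)
{
	/* a 10000-byte jumbo frame: 8000, 800, 80 and 32 us, matching
	 * the figures in the comment above
	 */
	printf("%lu %lu %lu %lu\n",
	       wc_rx_time_us(10000, 10), wc_rx_time_us(10000, 100),
	       wc_rx_time_us(10000, 1000), wc_rx_time_us(10000, 2500));
	return 0;
}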
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev)
{
}
EXPORT_SYMBOL(ocelot_port_writel);
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg)
+{
+ u32 cur = ocelot_port_readl(port, reg);
+
+ ocelot_port_writel(port, (cur & (~mask)) | val, reg);
+}
+EXPORT_SYMBOL(ocelot_port_rmwl);
+
u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target,
u32 reg, u32 offset)
{
priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
} else if (!qopt->enable) {
- return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB);
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+ MTL_QUEUE_DCB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
}
/* Port Transmit Rate and Speed Divider */
ret = rndis_filter_receive(ndev, net_device,
nvchan, data, buflen);
- if (unlikely(ret != NVSP_STAT_SUCCESS))
+ if (unlikely(ret != NVSP_STAT_SUCCESS)) {
+ /* Drop incomplete packet */
+ nvchan->rsc.cnt = 0;
status = NVSP_STAT_FAIL;
+ }
}
enq_receive_complete(ndev, net_device, q_idx,
return ret;
drop:
- /* Drop incomplete packet */
- nvchan->rsc.cnt = 0;
return NVSP_STAT_FAIL;
}
if (!channel->gsi)
continue; /* Ignore uninitialized channels */
+ ret = -EINVAL;
dev_err(gsi->dev, "channel %u not supported by hardware\n",
channel_id - 1);
channel_id = gsi->channel_count;
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
{QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */
+ {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */
{QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */
{QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
- return result;
+ return -ENOMEM;
result = lapb_getparms(dev, ¶ms);
if (result != LAPB_OK)
- return result;
+ return -EINVAL;
if (state(hdlc)->settings.dce)
params.mode = params.mode | LAPB_DCE;
result = lapb_setparms(dev, ¶ms);
if (result != LAPB_OK)
- return result;
+ return -EINVAL;
return 0;
}
config ATH9K
tristate "Atheros 802.11n wireless cards support"
depends on MAC80211 && HAS_DMA
+ select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH9K_HW
select ATH9K_COMMON
- imply NEW_LEDS
- imply LEDS_CLASS
- imply MAC80211_LEDS
help
This module adds support for wireless adapters based on
Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family
config ATH9K_HTC
tristate "Atheros HTC based wireless cards support"
depends on USB && MAC80211
+ select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211
select ATH9K_HW
select ATH9K_COMMON
- imply NEW_LEDS
- imply LEDS_CLASS
- imply MAC80211_LEDS
help
Support for Atheros HTC based cards.
Chipsets supported: AR9271
mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int len, bool more)
{
- struct page *page = virt_to_head_page(data);
- int offset = data - page_address(page);
struct sk_buff *skb = q->rx_head;
struct skb_shared_info *shinfo = skb_shinfo(skb);
if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) {
- offset += q->buf_offset;
+ struct page *page = virt_to_head_page(data);
+ int offset = data - page_address(page) + q->buf_offset;
+
skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len,
q->buf_size);
+ } else {
+ skb_free_frag(data);
}
if (more)
RING_IDX prod, cons;
struct sk_buff *skb;
int needed;
+ unsigned long flags;
+
+ spin_lock_irqsave(&queue->rx_queue.lock, flags);
skb = skb_peek(&queue->rx_queue);
- if (!skb)
+ if (!skb) {
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
return false;
+ }
needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
if (skb_is_gso(skb))
if (skb->sw_hash)
needed++;
+ spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
+
do {
prod = queue->rx.sring->req_prod;
cons = queue->rx.req_cons;
config TMPFS_INODE64
bool "Use 64-bit ino_t by default in tmpfs"
- depends on TMPFS && 64BIT
+ depends on TMPFS && 64BIT && !(S390 || ALPHA)
default n
help
tmpfs has historically used only inode numbers as wide as an unsigned
/* .release = nilfs_release_file, */
.fsync = nilfs_sync_file,
.splice_read = generic_file_splice_read,
+ .splice_write = iter_file_splice_write,
};
const struct inode_operations nilfs_file_inode_operations = {
length = SQUASHFS_COMPRESSED_SIZE(length);
index += 2;
- TRACE("Block @ 0x%llx, %scompressed size %d\n", index,
+ TRACE("Block @ 0x%llx, %scompressed size %d\n", index - 2,
compressed ? "" : "un", length);
}
+ if (length < 0 || length > output->length ||
+ (index + length) > msblk->bytes_used) {
+ res = -EIO;
+ goto out;
+ }
+
if (next_index)
*next_index = index + length;
struct squashfs_sb_info *msblk = sb->s_fs_info;
int blk = SQUASHFS_LOOKUP_BLOCK(ino_num - 1);
int offset = SQUASHFS_LOOKUP_BLOCK_OFFSET(ino_num - 1);
- u64 start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+ u64 start;
__le64 ino;
int err;
TRACE("Entered squashfs_inode_lookup, inode_number = %d\n", ino_num);
+ if (ino_num == 0 || (ino_num - 1) >= msblk->inodes)
+ return -EINVAL;
+
+ start = le64_to_cpu(msblk->inode_lookup_table[blk]);
+
err = squashfs_read_metadata(sb, &ino, &start, &offset, sizeof(ino));
if (err < 0)
return err;
u64 lookup_table_start, u64 next_table, unsigned int inodes)
{
unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes);
+ unsigned int indexes = SQUASHFS_LOOKUP_BLOCKS(inodes);
+ int n;
__le64 *table;
+ u64 start, end;
TRACE("In read_inode_lookup_table, length %d\n", length);
if (inodes == 0)
return ERR_PTR(-EINVAL);
- /* length bytes should not extend into the next table - this check
- * also traps instances where lookup_table_start is incorrectly larger
- * than the next table start
+ /*
+ * The computed size of the lookup table (length bytes) should exactly
+ * match the table start and end points
*/
- if (lookup_table_start + length > next_table)
+ if (length != (next_table - lookup_table_start))
return ERR_PTR(-EINVAL);
table = squashfs_read_table(sb, lookup_table_start, length);
+ if (IS_ERR(table))
+ return table;
/*
- * table[0] points to the first inode lookup table metadata block,
- * this should be less than lookup_table_start
+ * table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed inode lookup blocks. Each entry should be
+ * less than the next (i.e. table[0] < table[1]), and the difference
+ * between them should be SQUASHFS_METADATA_SIZE or less.
+ * table[indexes - 1] should be less than lookup_table_start, and
+ * again the difference should be SQUASHFS_METADATA_SIZE or less
*/
- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= lookup_table_start || (lookup_table_start - start) > SQUASHFS_METADATA_SIZE) {
kfree(table);
return ERR_PTR(-EINVAL);
}
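A hedged standalone illustration of the monotonicity check introduced
above (example offsets invented for this sketch; SQUASHFS_METADATA_SIZE
is 8192 in the on-disk format):

#include <stdio.h>

#define METADATA_SIZE 8192	/* SQUASHFS_METADATA_SIZE */

int main(void)
{
	/* example compressed-block locations, then the table start */
	unsigned long long table[] = { 0x1000, 0x2f00, 0x4e00 };
	unsigned long long lookup_table_start = 0x6d00;
	int n, indexes = 3;

	for (n = 0; n < indexes - 1; n++)
		if (table[n] >= table[n + 1] ||
		    table[n + 1] - table[n] > METADATA_SIZE)
			return 1;	/* corrupt image: reject */
	if (table[indexes - 1] >= lookup_table_start ||
	    lookup_table_start - table[indexes - 1] > METADATA_SIZE)
		return 1;
	printf("table layout accepted\n");	/* gaps of 0x1f00 pass */
	return 0;
}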
struct squashfs_sb_info *msblk = sb->s_fs_info;
int block = SQUASHFS_ID_BLOCK(index);
int offset = SQUASHFS_ID_BLOCK_OFFSET(index);
- u64 start_block = le64_to_cpu(msblk->id_table[block]);
+ u64 start_block;
__le32 disk_id;
int err;
+ if (index >= msblk->ids)
+ return -EINVAL;
+
+ start_block = le64_to_cpu(msblk->id_table[block]);
+
err = squashfs_read_metadata(sb, &disk_id, &start_block, &offset,
sizeof(disk_id));
if (err < 0)
u64 id_table_start, u64 next_table, unsigned short no_ids)
{
unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids);
+ unsigned int indexes = SQUASHFS_ID_BLOCKS(no_ids);
+ int n;
__le64 *table;
+ u64 start, end;
TRACE("In read_id_index_table, length %d\n", length);
return ERR_PTR(-EINVAL);
/*
- * length bytes should not extend into the next table - this check
- * also traps instances where id_table_start is incorrectly larger
- * than the next table start
+ * The computed size of the index table (length bytes) should exactly
+ * match the table start and end points
*/
- if (id_table_start + length > next_table)
+ if (length != (next_table - id_table_start))
return ERR_PTR(-EINVAL);
table = squashfs_read_table(sb, id_table_start, length);
+ if (IS_ERR(table))
+ return table;
/*
- * table[0] points to the first id lookup table metadata block, this
- * should be less than id_table_start
+ * table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed id blocks. Each entry should be less than
+ * the next (i.e. table[0] < table[1]), and the difference between them
+ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
+ * should be less than id_table_start, and again the difference
+ * should be SQUASHFS_METADATA_SIZE or less
*/
- if (!IS_ERR(table) && le64_to_cpu(table[0]) >= id_table_start) {
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= id_table_start || (id_table_start - start) > SQUASHFS_METADATA_SIZE) {
kfree(table);
return ERR_PTR(-EINVAL);
}
unsigned int inodes;
unsigned int fragments;
int xattr_ids;
+ unsigned int ids;
};
#endif
msblk->directory_table = le64_to_cpu(sblk->directory_table_start);
msblk->inodes = le32_to_cpu(sblk->inodes);
msblk->fragments = le32_to_cpu(sblk->fragments);
+ msblk->ids = le16_to_cpu(sblk->no_ids);
flags = le16_to_cpu(sblk->flags);
TRACE("Found valid superblock on %pg\n", sb->s_bdev);
TRACE("Block size %d\n", msblk->block_size);
TRACE("Number of inodes %d\n", msblk->inodes);
TRACE("Number of fragments %d\n", msblk->fragments);
- TRACE("Number of ids %d\n", le16_to_cpu(sblk->no_ids));
+ TRACE("Number of ids %d\n", msblk->ids);
TRACE("sblk->inode_table_start %llx\n", msblk->inode_table);
TRACE("sblk->directory_table_start %llx\n", msblk->directory_table);
TRACE("sblk->fragment_table_start %llx\n",
allocate_id_index_table:
/* Allocate and read id index table */
msblk->id_table = squashfs_read_id_index_table(sb,
- le64_to_cpu(sblk->id_table_start), next_table,
- le16_to_cpu(sblk->no_ids));
+ le64_to_cpu(sblk->id_table_start), next_table, msblk->ids);
if (IS_ERR(msblk->id_table)) {
errorf(fc, "unable to read id index table");
err = PTR_ERR(msblk->id_table);
static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb,
u64 start, u64 *xattr_table_start, int *xattr_ids)
{
+ struct squashfs_xattr_id_table *id_table;
+
+ id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+ if (IS_ERR(id_table))
+ return (__le64 *) id_table;
+
+ *xattr_table_start = le64_to_cpu(id_table->xattr_table_start);
+ kfree(id_table);
+
ERROR("Xattrs in filesystem, these will be ignored\n");
- *xattr_table_start = start;
return ERR_PTR(-ENOTSUPP);
}
struct squashfs_sb_info *msblk = sb->s_fs_info;
int block = SQUASHFS_XATTR_BLOCK(index);
int offset = SQUASHFS_XATTR_BLOCK_OFFSET(index);
- u64 start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+ u64 start_block;
struct squashfs_xattr_id id;
int err;
+ if (index >= msblk->xattr_ids)
+ return -EINVAL;
+
+ start_block = le64_to_cpu(msblk->xattr_id_table[block]);
+
err = squashfs_read_metadata(sb, &id, &start_block, &offset,
sizeof(id));
if (err < 0)
/*
* Read uncompressed xattr id lookup table indexes from disk into memory
*/
-__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start,
+__le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 table_start,
u64 *xattr_table_start, int *xattr_ids)
{
- unsigned int len;
+ struct squashfs_sb_info *msblk = sb->s_fs_info;
+ unsigned int len, indexes;
struct squashfs_xattr_id_table *id_table;
+ __le64 *table;
+ u64 start, end;
+ int n;
- id_table = squashfs_read_table(sb, start, sizeof(*id_table));
+ id_table = squashfs_read_table(sb, table_start, sizeof(*id_table));
if (IS_ERR(id_table))
return (__le64 *) id_table;
if (*xattr_ids == 0)
return ERR_PTR(-EINVAL);
- /* xattr_table should be less than start */
- if (*xattr_table_start >= start)
+ len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+ indexes = SQUASHFS_XATTR_BLOCKS(*xattr_ids);
+
+ /*
+ * The computed size of the index table (len bytes) should exactly
+ * match the table start and end points
+ */
+ start = table_start + sizeof(*id_table);
+ end = msblk->bytes_used;
+
+ if (len != (end - start))
return ERR_PTR(-EINVAL);
- len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids);
+ table = squashfs_read_table(sb, start, len);
+ if (IS_ERR(table))
+ return table;
+
+ /* table[0], table[1], ... table[indexes - 1] store the locations
+ * of the compressed xattr id blocks. Each entry should be less than
+ * the next (i.e. table[0] < table[1]), and the difference between them
+ * should be SQUASHFS_METADATA_SIZE or less. table[indexes - 1]
+ * should be less than table_start, and again the difference
+ * should be SQUASHFS_METADATA_SIZE or less.
+ *
+ * Finally xattr_table_start should be less than table[0].
+ */
+ for (n = 0; n < (indexes - 1); n++) {
+ start = le64_to_cpu(table[n]);
+ end = le64_to_cpu(table[n + 1]);
+
+ if (start >= end || (end - start) > SQUASHFS_METADATA_SIZE) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ start = le64_to_cpu(table[indexes - 1]);
+ if (start >= table_start || (table_start - start) > SQUASHFS_METADATA_SIZE) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
- TRACE("In read_xattr_index_table, length %d\n", len);
+ if (*xattr_table_start >= le64_to_cpu(table[0])) {
+ kfree(table);
+ return ERR_PTR(-EINVAL);
+ }
- return squashfs_read_table(sb, start + sizeof(*id_table), len);
+ return table;
}
} \
\
/* Built-in firmware blobs */ \
- .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) ALIGN(8) { \
__start_builtin_fw = .; \
KEEP(*(.builtin_fw)) \
__end_builtin_fw = .; \
local_bh_disable();
cpu = smp_processor_id();
+ spin_lock(&dev->tx_global_lock);
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
netif_tx_stop_queue(txq);
__netif_tx_unlock(txq);
}
+ spin_unlock(&dev->tx_global_lock);
local_bh_enable();
}
{
i->count = count;
}
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump, struct iov_iter *i);
+
+struct csum_state {
+ __wsum csum;
+ size_t off;
+};
+
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED,
SWITCHDEV_ATTR_ID_BRIDGE_MROUTER,
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
SWITCHDEV_ATTR_ID_MRP_PORT_ROLE,
#endif
};
u16 vlan_protocol; /* BRIDGE_VLAN_PROTOCOL */
bool mc_disabled; /* MC_DISABLED */
#if IS_ENABLED(CONFIG_BRIDGE_MRP)
- u8 mrp_port_state; /* MRP_PORT_STATE */
u8 mrp_port_role; /* MRP_PORT_ROLE */
#endif
} u;
/* I/O */
u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg);
u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
int ocelot_get_ts_info(struct ocelot *ocelot, int port,
struct ethtool_ts_info *info);
void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
+int ocelot_port_flush(struct ocelot *ocelot, int port);
void ocelot_adjust_link(struct ocelot *ocelot, int port,
struct phy_device *phydev);
int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled,
/* hash table size must be power of 2 */
n_buckets = roundup_pow_of_two(attr->max_entries);
+ if (!n_buckets)
+ return ERR_PTR(-E2BIG);
cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
case BPF_JSGT:
if (reg->s32_min_value > sval)
return 1;
- else if (reg->s32_max_value < sval)
+ else if (reg->s32_max_value <= sval)
return 0;
break;
case BPF_JLT:
case BPF_JSGT:
if (reg->smin_value > sval)
return 1;
- else if (reg->smax_value < sval)
+ else if (reg->smax_value <= sval)
return 0;
break;
case BPF_JLT:
return old->umin_value <= cur->umin_value &&
old->umax_value >= cur->umax_value &&
old->smin_value <= cur->smin_value &&
- old->smax_value >= cur->smax_value;
+ old->smax_value >= cur->smax_value &&
+ old->u32_min_value <= cur->u32_min_value &&
+ old->u32_max_value >= cur->u32_max_value &&
+ old->s32_min_value <= cur->s32_min_value &&
+ old->s32_max_value >= cur->s32_max_value;
}
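The '<=' changes above encode a simple fact about signed ranges: when a
register's signed maximum equals sval, 'reg > sval' can never hold, so a
BPF_JSGT branch is provably not taken. A minimal sketch, simplified from
is_branch_taken() (illustrative only):

#include <stdio.h>

/* 1 = always taken, 0 = never taken, -1 = unknown */
static int jsgt_taken(long long smin, long long smax, long long sval)
{
	if (smin > sval)
		return 1;
	if (smax <= sval)	/* was '<', missing the smax == sval case */
		return 0;
	return -1;
}

int main(void)
{
	printf("%d\n", jsgt_taken(0, 5, 5));	/* 0: 'reg > 5' impossible */
	return 0;
}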
/* Maximum number of register states that can exist at once */
insn->code == (BPF_ALU | BPF_MOD | BPF_X) ||
insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
- struct bpf_insn mask_and_div[] = {
- BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+ bool isdiv = BPF_OP(insn->code) == BPF_DIV;
+ struct bpf_insn *patchlet;
+ struct bpf_insn chk_and_div[] = {
/* Rx div 0 -> 0 */
- BPF_JMP_IMM(BPF_JNE, insn->src_reg, 0, 2),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JNE | BPF_K, insn->src_reg,
+ 0, 2, 0),
BPF_ALU32_REG(BPF_XOR, insn->dst_reg, insn->dst_reg),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
*insn,
};
- struct bpf_insn mask_and_mod[] = {
- BPF_MOV32_REG(insn->src_reg, insn->src_reg),
+ struct bpf_insn chk_and_mod[] = {
/* Rx mod 0 -> Rx */
- BPF_JMP_IMM(BPF_JEQ, insn->src_reg, 0, 1),
+ BPF_RAW_INSN((is64 ? BPF_JMP : BPF_JMP32) |
+ BPF_JEQ | BPF_K, insn->src_reg,
+ 0, 1, 0),
*insn,
};
- struct bpf_insn *patchlet;
- if (insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
- insn->code == (BPF_ALU | BPF_DIV | BPF_X)) {
- patchlet = mask_and_div + (is64 ? 1 : 0);
- cnt = ARRAY_SIZE(mask_and_div) - (is64 ? 1 : 0);
- } else {
- patchlet = mask_and_mod + (is64 ? 1 : 0);
- cnt = ARRAY_SIZE(mask_and_mod) - (is64 ? 1 : 0);
- }
+ patchlet = isdiv ? chk_and_div : chk_and_mod;
+ cnt = isdiv ? ARRAY_SIZE(chk_and_div) :
+ ARRAY_SIZE(chk_and_mod);
new_prog = bpf_patch_insn_data(env, i + delta, patchlet, cnt);
if (!new_prog)
{
unsigned int ret;
- if (in_nmi()) /* not supported yet */
- return 1;
-
cant_sleep();
if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
mutex_lock(&event_mutex);
list_for_each_entry(file, &tr->events, list) {
call = file->event_call;
- if (!trace_event_name(call) || !call->class || !call->class->reg)
+ if ((call->flags & TRACE_EVENT_FL_IGNORE_ENABLE) ||
+ !trace_event_name(call) || !call->class || !call->class->reg)
continue;
if (system && strcmp(call->class->system, system->name) != 0)
}
static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
+ struct csum_state *csstate,
+ struct iov_iter *i)
{
struct pipe_inode_info *pipe = i->pipe;
unsigned int p_mask = pipe->ring_size - 1;
+ __wsum sum = csstate->csum;
+ size_t off = csstate->off;
unsigned int i_head;
size_t n, r;
- size_t off = 0;
- __wsum sum = *csum;
if (!sanity(i))
return 0;
i_head++;
} while (n);
i->count -= bytes;
- *csum = sum;
+ csstate->csum = sum;
+ csstate->off = off;
return bytes;
}
}
EXPORT_SYMBOL(csum_and_copy_from_iter_full);
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csump,
+size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
struct iov_iter *i)
{
+ struct csum_state *csstate = _csstate;
const char *from = addr;
- __wsum *csum = csump;
__wsum sum, next;
- size_t off = 0;
+ size_t off;
if (unlikely(iov_iter_is_pipe(i)))
- return csum_and_copy_to_pipe_iter(addr, bytes, csum, i);
+ return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
- sum = *csum;
+ sum = csstate->csum;
+ off = csstate->off;
if (unlikely(iov_iter_is_discard(i))) {
WARN_ON(1); /* for now */
return 0;
off += v.iov_len;
})
)
- *csum = sum;
+ csstate->csum = sum;
+ csstate->off = off;
return bytes;
}
EXPORT_SYMBOL(csum_and_copy_to_iter);
switch (kasan_arg_stacktrace) {
case KASAN_ARG_STACKTRACE_DEFAULT:
- /*
- * Default to enabling stack trace collection for
- * debug kernels.
- */
- if (IS_ENABLED(CONFIG_DEBUG_KERNEL))
- static_branch_enable(&kasan_flag_stacktrace);
+ /* Default to enabling stack trace collection. */
+ static_branch_enable(&kasan_flag_stacktrace);
break;
case KASAN_ARG_STACKTRACE_OFF:
/* Do nothing, kasan_flag_stacktrace keeps its default value. */
if (err)
return err;
+ page_counter_set_high(&memcg->memory, high);
+
for (;;) {
unsigned long nr_pages = page_counter_read(&memcg->memory);
unsigned long reclaimed;
break;
}
- page_counter_set_high(&memcg->memory, high);
-
memcg_wb_domain_size_changed(memcg);
-
return nbytes;
}
* valid. Else returns a smaller extent bounded by the end of the source and
* destination pgt_entry.
*/
-static unsigned long get_extent(enum pgt_entry entry, unsigned long old_addr,
- unsigned long old_end, unsigned long new_addr)
+static __always_inline unsigned long get_extent(enum pgt_entry entry,
+ unsigned long old_addr, unsigned long old_end,
+ unsigned long new_addr)
{
unsigned long next, extent, mask, size;
unsigned int order;
unsigned int min_objects;
unsigned int max_objects;
+ unsigned int nr_cpus;
/*
* Attempt to find best configuration for a slab. This
* we reduce the minimum objects required in a slab.
*/
min_objects = slub_min_objects;
- if (!min_objects)
- min_objects = 4 * (fls(num_online_cpus()) + 1);
+ if (!min_objects) {
+ /*
+ * Some architectures will only update present cpus when
+ * onlining them, so don't trust the number if it's just 1. But
+ * we also don't want to use nr_cpu_ids always, as on some other
+ * architectures, there can be many possible cpus that are never
+ * onlined. Here we compromise between trying to avoid too high
+ * order on systems that appear larger than they are, and too
+ * low order on systems that appear smaller than they are.
+ */
+ nr_cpus = num_present_cpus();
+ if (nr_cpus <= 1)
+ nr_cpus = nr_cpu_ids;
+ min_objects = 4 * (fls(nr_cpus) + 1);
+ }
max_objects = order_objects(slub_max_order, size);
min_objects = min(min_objects, max_objects);
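As a worked example (CPU counts assumed for illustration): fls() returns
the position of the most significant set bit, so a machine reporting
nr_cpus = 16 yields min_objects = 4 * (fls(16) + 1) = 4 * 6 = 24, while a
firmware that marks only one CPU present now falls back to nr_cpu_ids
rather than settling on 4 * (fls(1) + 1) = 8.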
int br_mrp_set_port_state(struct net_bridge_port *p,
enum br_mrp_port_state_type state)
{
+ u32 port_state;
+
if (!p || !(p->flags & BR_MRP_AWARE))
return -EINVAL;
spin_lock_bh(&p->br->lock);
if (state == BR_MRP_PORT_STATE_FORWARDING)
- p->state = BR_STATE_FORWARDING;
+ port_state = BR_STATE_FORWARDING;
else
- p->state = BR_STATE_BLOCKING;
+ port_state = BR_STATE_BLOCKING;
+ p->state = port_state;
spin_unlock_bh(&p->br->lock);
- br_mrp_port_switchdev_set_state(p, state);
+ br_mrp_port_switchdev_set_state(p, port_state);
return 0;
}
return err;
}
-int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
- enum br_mrp_port_state_type state)
+int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state)
{
struct switchdev_attr attr = {
.orig_dev = p->dev,
- .id = SWITCHDEV_ATTR_ID_MRP_PORT_STATE,
- .u.mrp_port_state = state,
+ .id = SWITCHDEV_ATTR_ID_PORT_STP_STATE,
+ .u.stp_state = state,
};
int err;
int br_mrp_switchdev_send_ring_test(struct net_bridge *br, struct br_mrp *mrp,
u32 interval, u8 max_miss, u32 period,
bool monitor);
-int br_mrp_port_switchdev_set_state(struct net_bridge_port *p,
- enum br_mrp_port_state_type state);
+int br_mrp_port_switchdev_set_state(struct net_bridge_port *p, u32 state);
int br_mrp_port_switchdev_set_role(struct net_bridge_port *p,
enum br_mrp_port_role_type role);
int br_mrp_switchdev_set_in_role(struct net_bridge *br, struct br_mrp *mrp,
struct iov_iter *to, int len,
__wsum *csump)
{
- return __skb_datagram_iter(skb, offset, to, len, true,
- csum_and_copy_to_iter, csump);
+ struct csum_state csdata = { .csum = *csump };
+ int ret;
+
+ ret = __skb_datagram_iter(skb, offset, to, len, true,
+ csum_and_copy_to_iter, &csdata);
+ if (ret)
+ return ret;
+
+ *csump = csdata.csum;
+ return 0;
}
/**
/* Queue one GRO_NORMAL SKB up for list processing. If batch size exceeded,
* pass the whole batch up to the stack.
*/
-static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb)
+static void gro_normal_one(struct napi_struct *napi, struct sk_buff *skb, int segs)
{
list_add_tail(&skb->list, &napi->rx_list);
- if (++napi->rx_count >= gro_normal_batch)
+ napi->rx_count += segs;
+ if (napi->rx_count >= gro_normal_batch)
gro_normal_list(napi);
}
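Worked example of the new accounting (batch size assumed at its default
of 8): a single GRO super-SKB aggregating 17 segments now pushes
napi->rx_count straight past gro_normal_batch and flushes the list
immediately, whereas the old per-SKB count of 1 would have let several
such multi-segment SKBs sit in rx_list and add latency.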
}
out:
- gro_normal_one(napi, skb);
+ gro_normal_one(napi, skb, NAPI_GRO_CB(skb)->count);
return NET_RX_SUCCESS;
}
{
switch (ret) {
case GRO_NORMAL:
- gro_normal_one(napi, skb);
+ gro_normal_one(napi, skb, 1);
break;
case GRO_DROP:
__skb_push(skb, ETH_HLEN);
skb->protocol = eth_type_trans(skb, skb->dev);
if (ret == GRO_NORMAL)
- gro_normal_one(napi, skb);
+ gro_normal_one(napi, skb, 1);
break;
case GRO_DROP:
ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
if (!ds->slave_mii_bus) {
err = -ENOMEM;
- goto unregister_notifier;
+ goto teardown;
}
dsa_slave_mii_bus_init(ds);
err = mdiobus_register(ds->slave_mii_bus);
if (err < 0)
- goto unregister_notifier;
+ goto teardown;
}
ds->setup = true;
return 0;
+teardown:
+ if (ds->ops->teardown)
+ ds->ops->teardown(ds);
unregister_notifier:
dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
config MAC80211_LEDS
bool "Enable LED triggers"
depends on MAC80211
- depends on LEDS_CLASS
+ depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211
select LEDS_TRIGGERS
help
This option enables a few LED triggers for different
* Let nf_ct_resolve_clash() deal with this later.
*/
if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
- &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple) &&
+ nf_ct_zone_equal(ct, zone, IP_CT_DIR_ORIGINAL))
continue;
NF_CT_STAT_INC_ATOMIC(net, found);
return -1;
tcph = (void *)(skb_network_header(skb) + thoff);
- inet_proto_csum_replace2(&tcph->check, skb, port, new_port, true);
+ inet_proto_csum_replace2(&tcph->check, skb, port, new_port, false);
return 0;
}
udph = (void *)(skb_network_header(skb) + thoff);
if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
inet_proto_csum_replace2(&udph->check, skb, port,
- new_port, true);
+ new_port, false);
if (!udph->check)
udph->check = CSUM_MANGLED_0;
}
struct nft_expr *expr_array[NFT_SET_EXPR_MAX] = {};
struct nlattr *nla[NFTA_SET_ELEM_MAX + 1];
u8 genmask = nft_genmask_next(ctx->net);
+ u32 flags = 0, size = 0, num_exprs = 0;
struct nft_set_ext_tmpl tmpl;
struct nft_set_ext *ext, *ext2;
struct nft_set_elem elem;
struct nft_data_desc desc;
enum nft_registers dreg;
struct nft_trans *trans;
- u32 flags = 0, size = 0;
u64 timeout;
u64 expiration;
int err, i;
if (nla[NFTA_SET_ELEM_EXPR]) {
struct nft_expr *expr;
- if (set->num_exprs != 1)
+ if (set->num_exprs && set->num_exprs != 1)
return -EOPNOTSUPP;
expr = nft_set_elem_expr_alloc(ctx, set,
return PTR_ERR(expr);
expr_array[0] = expr;
+ num_exprs = 1;
- if (set->exprs[0] && set->exprs[0]->ops != expr->ops) {
+ if (set->num_exprs && set->exprs[0]->ops != expr->ops) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
struct nlattr *tmp;
int left;
- if (set->num_exprs == 0)
- return -EOPNOTSUPP;
-
i = 0;
nla_for_each_nested(tmp, nla[NFTA_SET_ELEM_EXPRESSIONS], left) {
- if (i == set->num_exprs) {
+ if (i == NFT_SET_EXPR_MAX ||
+ (set->num_exprs && set->num_exprs == i)) {
err = -E2BIG;
goto err_set_elem_expr;
}
goto err_set_elem_expr;
}
expr_array[i] = expr;
+ num_exprs++;
- if (expr->ops != set->exprs[i]->ops) {
+ if (set->num_exprs && expr->ops != set->exprs[i]->ops) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
i++;
}
- if (set->num_exprs != i) {
+ if (set->num_exprs && set->num_exprs != i) {
err = -EOPNOTSUPP;
goto err_set_elem_expr;
}
err = nft_set_elem_expr_clone(ctx, set, expr_array);
if (err < 0)
goto err_set_elem_expr_clone;
+
+ num_exprs = set->num_exprs;
}
err = nft_setelem_parse_key(ctx, set, &elem.key.val,
nft_set_ext_add(&tmpl, NFT_SET_EXT_TIMEOUT);
}
- if (set->num_exprs) {
- for (i = 0; i < set->num_exprs; i++)
+ if (num_exprs) {
+ for (i = 0; i < num_exprs; i++)
size += expr_array[i]->ops->size;
nft_set_ext_add_length(&tmpl, NFT_SET_EXT_EXPRESSIONS,
*nft_set_ext_obj(ext) = obj;
obj->use++;
}
- for (i = 0; i < set->num_exprs; i++)
+ for (i = 0; i < num_exprs; i++)
nft_set_elem_expr_setup(ext, i, expr_array);
trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
err_parse_key:
nft_data_release(&elem.key.val, NFT_DATA_VALUE);
err_set_elem_expr:
- for (i = 0; i < set->num_exprs && expr_array[i]; i++)
+ for (i = 0; i < num_exprs && expr_array[i]; i++)
nft_expr_destroy(ctx, expr_array[i]);
err_set_elem_expr_clone:
return err;
}
EXPORT_SYMBOL_GPL(__nft_release_basechain);
+static void __nft_release_hooks(struct net *net)
+{
+ struct nft_table *table;
+ struct nft_chain *chain;
+
+ list_for_each_entry(table, &net->nft.tables, list) {
+ list_for_each_entry(chain, &table->chains, list)
+ nf_tables_unregister_hook(net, table, chain);
+ }
+}
+
static void __nft_release_tables(struct net *net)
{
struct nft_flowtable *flowtable, *nf;
list_for_each_entry_safe(table, nt, &net->nft.tables, list) {
ctx.family = table->family;
-
- list_for_each_entry(chain, &table->chains, list)
- nf_tables_unregister_hook(net, table, chain);
- /* No packets are walking on these chains anymore. */
ctx.table = table;
list_for_each_entry(chain, &table->chains, list) {
ctx.chain = chain;
return 0;
}
+static void __net_exit nf_tables_pre_exit_net(struct net *net)
+{
+ __nft_release_hooks(net);
+}
+
static void __net_exit nf_tables_exit_net(struct net *net)
{
mutex_lock(&net->nft.commit_mutex);
}
static struct pernet_operations nf_tables_net_ops = {
- .init = nf_tables_init_net,
- .exit = nf_tables_exit_net,
+ .init = nf_tables_init_net,
+ .pre_exit = nf_tables_pre_exit_net,
+ .exit = nf_tables_exit_net,
};
static int __init nf_tables_module_init(void)
/*
* Drop entries with timestamps older than 'time'.
*/
-static void recent_entry_reap(struct recent_table *t, unsigned long time)
+static void recent_entry_reap(struct recent_table *t, unsigned long time,
+ struct recent_entry *working, bool update)
{
struct recent_entry *e;
*/
e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
+ /*
+ * Do not reap the entry which is going to be updated.
+ */
+ if (e == working && update)
+ return;
+
/*
* The last time stamp is the most recent.
*/
/* info->seconds must be non-zero */
if (info->check_set & XT_RECENT_REAP)
- recent_entry_reap(t, time);
+ recent_entry_reap(t, time, e,
+ info->check_set & XT_RECENT_UPDATE && ret);
}
if (info->check_set & XT_RECENT_SET ||
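A minimal sketch of the guard added above (hypothetical standalone
helper; in xt_recent, 'working' is the entry whose timestamp
XT_RECENT_UPDATE is about to refresh):

#include <stdbool.h>
#include <stdio.h>

/* reaping the entry we are about to update would drop it mid-update */
static bool may_reap(const void *oldest, const void *working, bool update)
{
	return !(update && oldest == working);
}

int main(void)
{
	int a, b;

	printf("%d %d\n", may_reap(&a, &a, true), may_reap(&a, &b, true));
	return 0;	/* prints "0 1" */
}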
ssize_t ret;
void *kbuf;
+ if (!len)
+ return -EINVAL;
+
+ if (len > KMALLOC_MAX_SIZE)
+ return -ENOMEM;
+
kbuf = kzalloc(len, GFP_KERNEL);
if (!kbuf)
return -ENOMEM;
rxrpc_disconnect_call(call);
if (call->security)
call->security->free_call_crypto(call);
-
- rxrpc_cleanup_ring(call);
_leave("");
}
{
struct sctp_ht_iter *iter = seq->private;
+ if (v && v != SEQ_START_TOKEN) {
+ struct sctp_transport *transport = v;
+
+ sctp_transport_put(transport);
+ }
+
sctp_transport_walk_stop(&iter->hti);
}
{
struct sctp_ht_iter *iter = seq->private;
+ if (v && v != SEQ_START_TOKEN) {
+ struct sctp_transport *transport = v;
+
+ sctp_transport_put(transport);
+ }
+
++*pos;
return sctp_transport_get_next(seq_file_net(seq), &iter->hti);
sk->sk_rcvbuf);
seq_printf(seq, "\n");
- sctp_transport_put(transport);
-
return 0;
}
seq_printf(seq, "\n");
}
- sctp_transport_put(transport);
-
return 0;
}
*/
sk = sock->sk;
+
+ lock_sock(sk);
if (sock->state == SS_UNCONNECTED) {
err = -ENOTCONN;
if (sk->sk_type == SOCK_STREAM)
- return err;
+ goto out;
} else {
sock->state = SS_DISCONNECTING;
err = 0;
/* Receive and send shutdowns are treated alike. */
mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
if (mode) {
- lock_sock(sk);
sk->sk_shutdown |= mode;
sk->sk_state_change(sk);
- release_sock(sk);
if (sk->sk_type == SOCK_STREAM) {
sock_reset_flag(sk, SOCK_DONE);
}
}
+out:
+ release_sock(sk);
return err;
}
{
const struct vsock_transport *transport = vsk->transport;
- if (!transport->cancel_pkt)
+ if (!transport || !transport->cancel_pkt)
return -EOPNOTSUPP;
return transport->cancel_pkt(vsk);
{
struct sock *sk;
struct vsock_sock *vsk;
- int cancel = 0;
vsk = container_of(work, struct vsock_sock, connect_work.work);
sk = sk_vsock(vsk);
sk->sk_state = TCP_CLOSE;
sk->sk_err = ETIMEDOUT;
sk->sk_error_report(sk);
- cancel = 1;
+ vsock_transport_cancel_pkt(vsk);
}
release_sock(sk);
- if (cancel)
- vsock_transport_cancel_pkt(vsk);
sock_put(sk);
}
static int hvs_shutdown(struct vsock_sock *vsk, int mode)
{
- struct sock *sk = sk_vsock(vsk);
-
if (!(mode & SEND_SHUTDOWN))
return 0;
- lock_sock(sk);
hvs_shutdown_lock_held(vsk->trans, mode);
- release_sock(sk);
return 0;
}
vsk = vsock_sk(sk);
- space_available = virtio_transport_space_update(sk, pkt);
-
lock_sock(sk);
/* Check if sk has been closed before lock_sock */
goto free_pkt;
}
+ space_available = virtio_transport_space_update(sk, pkt);
+
/* Update CID in case it has changed after a transport reset event */
vsk->local_addr.svm_cid = dst.svm_cid;
FIXTURE_SETUP(tls)
{
- union tls_crypto_context tls12;
+ union {
+ struct tls12_crypto_info_aes_gcm_128 aes128;
+ struct tls12_crypto_info_chacha20_poly1305 chacha20;
+ } tls12;
struct sockaddr_in addr;
socklen_t len;
int sfd, ret;
len = sizeof(addr);
memset(&tls12, 0, sizeof(tls12));
- tls12.info.version = variant->tls_version;
- tls12.info.cipher_type = variant->cipher_type;
switch (variant->cipher_type) {
case TLS_CIPHER_CHACHA20_POLY1305:
- tls12_sz = sizeof(tls12_crypto_info_chacha20_poly1305);
+ tls12_sz = sizeof(struct tls12_crypto_info_chacha20_poly1305);
+ tls12.chacha20.info.version = variant->tls_version;
+ tls12.chacha20.info.cipher_type = variant->cipher_type;
break;
case TLS_CIPHER_AES_GCM_128:
- tls12_sz = sizeof(tls12_crypto_info_aes_gcm_128);
+ tls12_sz = sizeof(struct tls12_crypto_info_aes_gcm_128);
+ tls12.aes128.info.version = variant->tls_version;
+ tls12.aes128.info.cipher_type = variant->cipher_type;
break;
default:
tls12_sz = 0;
#include <inttypes.h>
#include <linux/errqueue.h>
#include <linux/if_ether.h>
+#include <linux/if_packet.h>
#include <linux/ipv6.h>
#include <linux/net_tstamp.h>
#include <netdb.h>
#include <netinet/ip.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
-#include <netpacket/packet.h>
#include <poll.h>
#include <stdarg.h>
#include <stdbool.h>
total_len = cfg_payload_len;
if (cfg_use_pf_packet || cfg_proto == SOCK_RAW) {
total_len += sizeof(struct udphdr);
- if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW)
+ if (cfg_use_pf_packet || cfg_ipproto == IPPROTO_RAW) {
if (family == PF_INET)
total_len += sizeof(struct iphdr);
else
total_len += sizeof(struct ipv6hdr);
-
+ }
/* special case, only rawv6_sendmsg:
* pass proto in sin6_port if not connected
* also see ANK comment in net/ipv4/raw.c
trap cleanup EXIT
-currentyear=$(date +%G)
+currentyear=$(date +%Y)
lastyear=$((currentyear-1))
ip netns exec "$ns0" nft -f /dev/stdin <<EOF
table inet filter {
+++ /dev/null
-#!/bin/bash
-# SPDX-License-Identifier: GPL-2.0
-#please run as root
-
-# Kselftest framework requirement - SKIP code is 4.
-ksft_skip=4
-
-mnt=./huge
-exitcode=0
-
-#get huge pagesize and freepages from /proc/meminfo
-while read name size unit; do
- if [ "$name" = "HugePages_Free:" ]; then
- freepgs=$size
- fi
- if [ "$name" = "Hugepagesize:" ]; then
- hpgsize_KB=$size
- fi
-done < /proc/meminfo
-
-# Simple hugetlbfs tests have a hardcoded minimum requirement of
-# huge pages totaling 256MB (262144KB) in size. The userfaultfd
-# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
-# both of these requirements into account and attempt to increase
-# number of huge pages available.
-nr_cpus=$(nproc)
-hpgsize_MB=$((hpgsize_KB / 1024))
-half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
-needmem_KB=$((half_ufd_size_MB * 2 * 1024))
-
-#set proper nr_hugepages
-if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
- nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
- needpgs=$((needmem_KB / hpgsize_KB))
- tries=2
- while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
- lackpgs=$(( $needpgs - $freepgs ))
- echo 3 > /proc/sys/vm/drop_caches
- echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
- if [ $? -ne 0 ]; then
- echo "Please run this test as root"
- exit $ksft_skip
- fi
- while read name size unit; do
- if [ "$name" = "HugePages_Free:" ]; then
- freepgs=$size
- fi
- done < /proc/meminfo
- tries=$((tries - 1))
- done
- if [ $freepgs -lt $needpgs ]; then
- printf "Not enough huge pages available (%d < %d)\n" \
- $freepgs $needpgs
- exit 1
- fi
-else
- echo "no hugetlbfs support in kernel?"
- exit 1
-fi
-
-#filter 64bit architectures
-ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64"
-if [ -z $ARCH ]; then
- ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
-fi
-VADDR64=0
-echo "$ARCH64STR" | grep $ARCH && VADDR64=1
-
-mkdir $mnt
-mount -t hugetlbfs none $mnt
-
-echo "---------------------"
-echo "running hugepage-mmap"
-echo "---------------------"
-./hugepage-mmap
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-shmmax=`cat /proc/sys/kernel/shmmax`
-shmall=`cat /proc/sys/kernel/shmall`
-echo 268435456 > /proc/sys/kernel/shmmax
-echo 4194304 > /proc/sys/kernel/shmall
-echo "--------------------"
-echo "running hugepage-shm"
-echo "--------------------"
-./hugepage-shm
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-echo $shmmax > /proc/sys/kernel/shmmax
-echo $shmall > /proc/sys/kernel/shmall
-
-echo "-------------------"
-echo "running map_hugetlb"
-echo "-------------------"
-./map_hugetlb
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
-echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
-echo " hugetlb regression testing."
-
-echo "---------------------------"
-echo "running map_fixed_noreplace"
-echo "---------------------------"
-./map_fixed_noreplace
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "------------------------------------------------------"
-echo "running: gup_test -u # get_user_pages_fast() benchmark"
-echo "------------------------------------------------------"
-./gup_test -u
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "------------------------------------------------------"
-echo "running: gup_test -a # pin_user_pages_fast() benchmark"
-echo "------------------------------------------------------"
-./gup_test -a
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "------------------------------------------------------------"
-echo "# Dump pages 0, 19, and 4096, using pin_user_pages:"
-echo "running: gup_test -ct -F 0x1 0 19 0x1000 # dump_page() test"
-echo "------------------------------------------------------------"
-./gup_test -ct -F 0x1 0 19 0x1000
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "-------------------"
-echo "running userfaultfd"
-echo "-------------------"
-./userfaultfd anon 20 16
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "---------------------------"
-echo "running userfaultfd_hugetlb"
-echo "---------------------------"
-# Test requires source and destination huge pages. Size of source
-# (half_ufd_size_MB) is passed as argument to test.
-./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-rm -f $mnt/ufd_test_file
-
-echo "-------------------------"
-echo "running userfaultfd_shmem"
-echo "-------------------------"
-./userfaultfd shmem 20 16
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-#cleanup
-umount $mnt
-rm -rf $mnt
-echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
-
-echo "-----------------------"
-echo "running compaction_test"
-echo "-----------------------"
-./compaction_test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "----------------------"
-echo "running on-fault-limit"
-echo "----------------------"
-sudo -u nobody ./on-fault-limit
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "--------------------"
-echo "running map_populate"
-echo "--------------------"
-./map_populate
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "-------------------------"
-echo "running mlock-random-test"
-echo "-------------------------"
-./mlock-random-test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "--------------------"
-echo "running mlock2-tests"
-echo "--------------------"
-./mlock2-tests
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "-------------------"
-echo "running mremap_test"
-echo "-------------------"
-./mremap_test
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "-----------------"
-echo "running thuge-gen"
-echo "-----------------"
-./thuge-gen
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-if [ $VADDR64 -ne 0 ]; then
-echo "-----------------------------"
-echo "running virtual_address_range"
-echo "-----------------------------"
-./virtual_address_range
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-
-echo "-----------------------------"
-echo "running virtual address 128TB switch test"
-echo "-----------------------------"
-./va_128TBswitch
-if [ $? -ne 0 ]; then
- echo "[FAIL]"
- exitcode=1
-else
- echo "[PASS]"
-fi
-fi # VADDR64
-
-echo "------------------------------------"
-echo "running vmalloc stability smoke test"
-echo "------------------------------------"
-./test_vmalloc.sh smoke
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
-
-echo "------------------------------------"
-echo "running MREMAP_DONTUNMAP smoke test"
-echo "------------------------------------"
-./mremap_dontunmap
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
-
-echo "running HMM smoke test"
-echo "------------------------------------"
-./test_hmm.sh smoke
-ret_val=$?
-
-if [ $ret_val -eq 0 ]; then
- echo "[PASS]"
-elif [ $ret_val -eq $ksft_skip ]; then
- echo "[SKIP]"
- exitcode=$ksft_skip
-else
- echo "[FAIL]"
- exitcode=1
-fi
-
-exit $exitcode
--- /dev/null
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0
+#please run as root
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+mnt=./huge
+exitcode=0
+
+#get huge pagesize and freepages from /proc/meminfo
+while read name size unit; do
+ if [ "$name" = "HugePages_Free:" ]; then
+ freepgs=$size
+ fi
+ if [ "$name" = "Hugepagesize:" ]; then
+ hpgsize_KB=$size
+ fi
+done < /proc/meminfo
+
+# Simple hugetlbfs tests have a hardcoded minimum requirement of
+# huge pages totaling 256MB (262144KB) in size. The userfaultfd
+# hugetlb test requires a minimum of 2 * nr_cpus huge pages. Take
+# both of these requirements into account and attempt to increase
+# number of huge pages available.
+nr_cpus=$(nproc)
+hpgsize_MB=$((hpgsize_KB / 1024))
+half_ufd_size_MB=$((((nr_cpus * hpgsize_MB + 127) / 128) * 128))
+needmem_KB=$((half_ufd_size_MB * 2 * 1024))
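+# Worked example of the sizing above, assuming 8 CPUs and 2MB huge pages
+# (hpgsize_KB=2048):
+#   hpgsize_MB       = 2048 / 1024                 = 2
+#   half_ufd_size_MB = ((8 * 2 + 127) / 128) * 128 = 128    (rounded up to a
+#                                                            128MB multiple)
+#   needmem_KB       = 128 * 2 * 1024              = 262144 (the 256MB
+#                                                            minimum noted above)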
+
+#set proper nr_hugepages
+if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
+ nr_hugepgs=`cat /proc/sys/vm/nr_hugepages`
+ needpgs=$((needmem_KB / hpgsize_KB))
+ tries=2
+ while [ $tries -gt 0 ] && [ $freepgs -lt $needpgs ]; do
+ lackpgs=$(( $needpgs - $freepgs ))
+ echo 3 > /proc/sys/vm/drop_caches
+ echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
+ if [ $? -ne 0 ]; then
+ echo "Please run this test as root"
+ exit $ksft_skip
+ fi
+ while read name size unit; do
+ if [ "$name" = "HugePages_Free:" ]; then
+ freepgs=$size
+ fi
+ done < /proc/meminfo
+ tries=$((tries - 1))
+ done
+ if [ $freepgs -lt $needpgs ]; then
+ printf "Not enough huge pages available (%d < %d)\n" \
+ $freepgs $needpgs
+ exit 1
+ fi
+else
+ echo "no hugetlbfs support in kernel?"
+ exit 1
+fi
+
+#filter 64bit architectures
+ARCH64STR="arm64 ia64 mips64 parisc64 ppc64 ppc64le riscv64 s390x sh64 sparc64 x86_64"
+if [ -z "$ARCH" ]; then
+ ARCH=`uname -m 2>/dev/null | sed -e 's/aarch64.*/arm64/'`
+fi
+VADDR64=0
+echo "$ARCH64STR" | grep $ARCH && VADDR64=1
+
+mkdir $mnt
+mount -t hugetlbfs none $mnt
+
+echo "---------------------"
+echo "running hugepage-mmap"
+echo "---------------------"
+./hugepage-mmap
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+shmmax=`cat /proc/sys/kernel/shmmax`
+shmall=`cat /proc/sys/kernel/shmall`
+echo 268435456 > /proc/sys/kernel/shmmax
+echo 4194304 > /proc/sys/kernel/shmall
+echo "--------------------"
+echo "running hugepage-shm"
+echo "--------------------"
+./hugepage-shm
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+echo $shmmax > /proc/sys/kernel/shmmax
+echo $shmall > /proc/sys/kernel/shmall
+
+echo "-------------------"
+echo "running map_hugetlb"
+echo "-------------------"
+./map_hugetlb
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
+echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
+echo " hugetlb regression testing."
+
+echo "---------------------------"
+echo "running map_fixed_noreplace"
+echo "---------------------------"
+./map_fixed_noreplace
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "------------------------------------------------------"
+echo "running: gup_test -u # get_user_pages_fast() benchmark"
+echo "------------------------------------------------------"
+./gup_test -u
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "------------------------------------------------------"
+echo "running: gup_test -a # pin_user_pages_fast() benchmark"
+echo "------------------------------------------------------"
+./gup_test -a
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "------------------------------------------------------------"
+echo "# Dump pages 0, 19, and 4096, using pin_user_pages:"
+echo "running: gup_test -ct -F 0x1 0 19 0x1000 # dump_page() test"
+echo "------------------------------------------------------------"
+./gup_test -ct -F 0x1 0 19 0x1000
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "-------------------"
+echo "running userfaultfd"
+echo "-------------------"
+./userfaultfd anon 20 16
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "---------------------------"
+echo "running userfaultfd_hugetlb"
+echo "---------------------------"
+# Test requires source and destination huge pages. Size of source
+# (half_ufd_size_MB) is passed as argument to test.
+./userfaultfd hugetlb $half_ufd_size_MB 32 $mnt/ufd_test_file
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+rm -f $mnt/ufd_test_file
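+# With the worked example above (half_ufd_size_MB=128, mnt=./huge), the
+# test invocation expands to:
+#   ./userfaultfd hugetlb 128 32 ./huge/ufd_test_file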
+
+echo "-------------------------"
+echo "running userfaultfd_shmem"
+echo "-------------------------"
+./userfaultfd shmem 20 16
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+#cleanup
+umount $mnt
+rm -rf $mnt
+echo $nr_hugepgs > /proc/sys/vm/nr_hugepages
+
+echo "-----------------------"
+echo "running compaction_test"
+echo "-----------------------"
+./compaction_test
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "----------------------"
+echo "running on-fault-limit"
+echo "----------------------"
+sudo -u nobody ./on-fault-limit
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "--------------------"
+echo "running map_populate"
+echo "--------------------"
+./map_populate
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "-------------------------"
+echo "running mlock-random-test"
+echo "-------------------------"
+./mlock-random-test
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "--------------------"
+echo "running mlock2-tests"
+echo "--------------------"
+./mlock2-tests
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "-------------------"
+echo "running mremap_test"
+echo "-------------------"
+./mremap_test
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "-----------------"
+echo "running thuge-gen"
+echo "-----------------"
+./thuge-gen
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+if [ $VADDR64 -ne 0 ]; then
+echo "-----------------------------"
+echo "running virtual_address_range"
+echo "-----------------------------"
+./virtual_address_range
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
+echo "-----------------------------"
+echo "running virtual address 128TB switch test"
+echo "-----------------------------"
+./va_128TBswitch
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+fi # VADDR64
+
+echo "------------------------------------"
+echo "running vmalloc stability smoke test"
+echo "------------------------------------"
+./test_vmalloc.sh smoke
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
+
+echo "------------------------------------"
+echo "running MREMAP_DONTUNMAP smoke test"
+echo "------------------------------------"
+./mremap_dontunmap
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
+
+echo "running HMM smoke test"
+echo "------------------------------------"
+./test_hmm.sh smoke
+ret_val=$?
+
+if [ $ret_val -eq 0 ]; then
+ echo "[PASS]"
+elif [ $ret_val -eq $ksft_skip ]; then
+ echo "[SKIP]"
+ exitcode=$ksft_skip
+else
+ echo "[FAIL]"
+ exitcode=1
+fi
+
+exit $exitcode