return readl_relaxed(priv->swth_base[0] + offset);
}
-static inline u32 mvpp2_cpu_to_thread(int cpu)
+static inline u32 mvpp2_cpu_to_thread(struct mvpp2 *priv, int cpu)
{
- return cpu;
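+ /* With more present CPUs than s/w threads, several CPUs share a thread */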
+ return cpu % priv->nthreads;
}
/* These accessors should be used to access:
dma_addr_t *dma_addr,
phys_addr_t *phys_addr)
{
- unsigned int thread = mvpp2_cpu_to_thread(get_cpu());
+ unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
*dma_addr = mvpp2_percpu_read(priv, thread,
MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
dma_addr_t buf_dma_addr,
phys_addr_t buf_phys_addr)
{
- unsigned int thread = mvpp2_cpu_to_thread(get_cpu());
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
+ unsigned long flags = 0;
+
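+ /* Buffer release uses per-thread registers: when several CPUs map onto
+ * this thread, serialize the access with the per-thread BM lock.
+ */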
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_lock_irqsave(&port->bm_lock[thread], flags);
if (port->priv->hw_version == MVPP22) {
u32 val = 0;
mvpp2_percpu_write_relaxed(port->priv, thread,
MVPP2_BM_PHY_RLS_REG(pool), buf_dma_addr);
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_unlock_irqrestore(&port->bm_lock[thread], flags);
+
put_cpu();
}
{
struct mvpp2_port *port = arg;
- mvpp2_percpu_write(port->priv, mvpp2_cpu_to_thread(smp_processor_id()),
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() > port->priv->nthreads)
+ return;
+
+ mvpp2_percpu_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
}
struct mvpp2_port *port = arg;
u32 val;
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() > port->priv->nthreads)
+ return;
+
val = MVPP2_CAUSE_MISC_SUM_MASK |
MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK(port->priv->hw_version);
if (port->has_tx_irqs)
val |= MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
- mvpp2_percpu_write(port->priv, mvpp2_cpu_to_thread(smp_processor_id()),
+ mvpp2_percpu_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_ISR_RX_TX_MASK_REG(port->id), val);
}
static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
{
/* aggregated access - relevant TXQ number is written in TX desc */
- mvpp2_percpu_write(port->priv, mvpp2_cpu_to_thread(smp_processor_id()),
+ mvpp2_percpu_write(port->priv,
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_AGGR_TXQ_UPDATE_REG, pending);
}
* Called only from mvpp2_tx(), so migration is disabled, using
* smp_processor_id() is OK.
*/
-static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+static int mvpp2_aggr_desc_num_check(struct mvpp2_port *port,
struct mvpp2_tx_queue *aggr_txq, int num)
{
if ((aggr_txq->count + num) > MVPP2_AGGR_TXQ_SIZE) {
/* Update number of occupied aggregated Tx descriptors */
- unsigned int thread = mvpp2_cpu_to_thread(smp_processor_id());
- u32 val = mvpp2_read_relaxed(priv,
+ unsigned int thread =
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ u32 val = mvpp2_read_relaxed(port->priv,
MVPP2_AGGR_TXQ_STATUS_REG(thread));
aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
* only by mvpp2_tx(), so migration is disabled, using
* smp_processor_id() is OK.
*/
-static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq, int num)
{
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
+ struct mvpp2 *priv = port->priv;
u32 val;
- unsigned int thread = mvpp2_cpu_to_thread(smp_processor_id());
val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
mvpp2_percpu_write_relaxed(priv, thread, MVPP2_TXQ_RSVD_REQ_REG, val);
struct mvpp2_txq_pcpu *txq_pcpu,
int num)
{
- struct mvpp2 *priv = port->priv;
int req, desc_count;
unsigned int thread;
desc_count = 0;
/* Compute total of used descriptors */
- for (thread = 0; thread < port->nthreads; thread++) {
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
struct mvpp2_txq_pcpu *txq_pcpu_aux;
txq_pcpu_aux = per_cpu_ptr(txq->pcpu, thread);
(txq->size - (MVPP2_MAX_THREADS * MVPP2_CPU_DESC_CHUNK)))
return -ENOMEM;
- txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+ txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(port, txq, req);
/* OK, the descriptor could have been updated: check again. */
if (txq_pcpu->reserved_num < num)
/* Reading status reg resets transmitted descriptor counter */
val = mvpp2_percpu_read_relaxed(port->priv,
- mvpp2_cpu_to_thread(smp_processor_id()),
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_TXQ_SENT_REG(txq->id));
return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
struct mvpp2_port *port = arg;
int queue;
+ /* If the thread isn't used, don't do anything */
+ if (smp_processor_id() > port->priv->nthreads)
+ return;
+
for (queue = 0; queue < port->ntxqs; queue++) {
int id = port->txqs[queue]->id;
mvpp2_percpu_read(port->priv,
- mvpp2_cpu_to_thread(smp_processor_id()),
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()),
MVPP2_TXQ_SENT_REG(id));
}
}
static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_rx_queue *rxq)
{
- unsigned int thread = mvpp2_cpu_to_thread(get_cpu());
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
if (rxq->pkts_coal > MVPP2_OCCUPIED_THRESH_MASK)
rxq->pkts_coal = MVPP2_OCCUPIED_THRESH_MASK;
static void mvpp2_tx_pkts_coal_set(struct mvpp2_port *port,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(get_cpu());
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
u32 val;
if (txq->done_pkts_coal > MVPP2_TXQ_THRESH_MASK)
struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
int tx_done;
- if (txq_pcpu->thread != mvpp2_cpu_to_thread(smp_processor_id()))
+ if (txq_pcpu->thread != mvpp2_cpu_to_thread(port->priv, smp_processor_id()))
netdev_err(port->dev, "wrong cpu on the end of Tx processing\n");
tx_done = mvpp2_txq_sent_desc_proc(port, txq);
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
/* Set Rx descriptors queue starting address - indirect access */
- thread = mvpp2_cpu_to_thread(get_cpu());
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
mvpp2_percpu_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
if (port->priv->hw_version == MVPP21)
rxq_dma = rxq->descs_dma;
* free descriptor number
*/
mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
- thread = mvpp2_cpu_to_thread(get_cpu());
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
mvpp2_percpu_write(port->priv, thread, MVPP2_RXQ_NUM_REG, rxq->id);
mvpp2_percpu_write(port->priv, thread, MVPP2_RXQ_DESC_ADDR_REG, 0);
mvpp2_percpu_write(port->priv, thread, MVPP2_RXQ_DESC_SIZE_REG, 0);
txq->last_desc = txq->size - 1;
/* Set Tx descriptors queue starting address - indirect access */
- thread = mvpp2_cpu_to_thread(get_cpu());
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
mvpp2_percpu_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
mvpp2_percpu_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG,
txq->descs_dma);
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
val);
- for (thread = 0; thread < port->nthreads; thread++) {
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
txq_pcpu->size = txq->size;
txq_pcpu->buffs = kmalloc_array(txq_pcpu->size,
struct mvpp2_txq_pcpu *txq_pcpu;
unsigned int thread;
- for (thread = 0; thread < port->nthreads; thread++) {
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
kfree(txq_pcpu->buffs);
mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
/* Set Tx descriptors queue starting address and size */
- thread = mvpp2_cpu_to_thread(get_cpu());
+ thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
mvpp2_percpu_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
mvpp2_percpu_write(port->priv, thread, MVPP2_TXQ_DESC_ADDR_REG, 0);
mvpp2_percpu_write(port->priv, thread, MVPP2_TXQ_DESC_SIZE_REG, 0);
{
struct mvpp2_txq_pcpu *txq_pcpu;
int delay, pending;
- unsigned int thread = mvpp2_cpu_to_thread(get_cpu());
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, get_cpu());
u32 val;
mvpp2_percpu_write(port->priv, thread, MVPP2_TXQ_NUM_REG, txq->id);
mvpp2_percpu_write(port->priv, thread, MVPP2_TXQ_PREF_BUF_REG, val);
put_cpu();
- for (thread = 0; thread < port->nthreads; thread++) {
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
/* Release all packets */
unsigned int tx_todo, cause;
port_pcpu = per_cpu_ptr(port->pcpu,
- mvpp2_cpu_to_thread(smp_processor_id()));
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
if (!netif_running(dev))
return;
/* Process all the Tx queues */
cause = (1 << port->ntxqs) - 1;
tx_todo = mvpp2_tx_done(port, cause,
- mvpp2_cpu_to_thread(smp_processor_id()));
+ mvpp2_cpu_to_thread(port->priv, smp_processor_id()));
/* Set the timer in case not all the packets were processed */
if (tx_todo)
tx_desc_unmap_put(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
struct mvpp2_tx_desc *desc)
{
- unsigned int thread = mvpp2_cpu_to_thread(smp_processor_id());
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
dma_addr_t buf_dma_addr =
struct mvpp2_tx_queue *aggr_txq,
struct mvpp2_tx_queue *txq)
{
- unsigned int thread = mvpp2_cpu_to_thread(smp_processor_id());
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
struct mvpp2_txq_pcpu *txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
struct mvpp2_tx_desc *tx_desc;
int i;
int i, len, descs = 0;
/* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq,
- tso_count_descs(skb)) ||
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, tso_count_descs(skb)) ||
mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu,
tso_count_descs(skb)))
return 0;
struct mvpp2_txq_pcpu *txq_pcpu;
struct mvpp2_tx_desc *tx_desc;
dma_addr_t buf_dma_addr;
+ unsigned long flags = 0;
unsigned int thread;
int frags = 0;
u16 txq_id;
u32 tx_cmd;
- thread = mvpp2_cpu_to_thread(smp_processor_id());
+ thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
txq_id = skb_get_queue_mapping(skb);
txq = port->txqs[txq_id];
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
aggr_txq = &port->priv->aggr_txqs[thread];
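+ /* The aggregated TXQ is per s/w thread: if several CPUs share this
+ * thread, take the per-thread TX lock to serialize access.
+ */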
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_lock_irqsave(&port->tx_lock[thread], flags);
+
if (skb_is_gso(skb)) {
frags = mvpp2_tx_tso(skb, dev, txq, aggr_txq, txq_pcpu);
goto out;
frags = skb_shinfo(skb)->nr_frags + 1;
/* Check number of available descriptors */
- if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
+ if (mvpp2_aggr_desc_num_check(port, aggr_txq, frags) ||
mvpp2_txq_reserved_desc_num_proc(port, txq, txq_pcpu, frags)) {
frags = 0;
goto out;
mvpp2_timer_set(port_pcpu);
}
+ if (test_bit(thread, &port->priv->lock_map))
+ spin_unlock_irqrestore(&port->tx_lock[thread], flags);
+
return NETDEV_TX_OK;
}
int rx_done = 0;
struct mvpp2_port *port = netdev_priv(napi->dev);
struct mvpp2_queue_vector *qv;
- unsigned int thread = mvpp2_cpu_to_thread(smp_processor_id());
+ unsigned int thread = mvpp2_cpu_to_thread(port->priv, smp_processor_id());
qv = container_of(napi, struct mvpp2_queue_vector, napi);
if (err)
goto err;
- if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE)
- irq_set_affinity_hint(qv->irq,
- cpumask_of(qv->sw_thread_id));
+ if (qv->type == MVPP2_QUEUE_VECTOR_PRIVATE) {
+ unsigned long mask = 0;
+ unsigned int cpu;
+
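+ /* Hint the IRQ affinity towards every present CPU mapped onto
+ * this queue vector's s/w thread.
+ */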
+ for_each_present_cpu(cpu) {
+ if (mvpp2_cpu_to_thread(port->priv, cpu) ==
+ qv->sw_thread_id)
+ mask |= BIT(cpu);
+ }
+
+ irq_set_affinity_hint(qv->irq, to_cpumask(&mask));
+ }
}
return 0;
mvpp2_stop_dev(port);
- /* Mask interrupts on all CPUs */
+ /* Mask interrupts on all threads */
on_each_cpu(mvpp2_interrupts_mask, port, 1);
mvpp2_shared_interrupt_mask_unmask(port, true);
mvpp2_irqs_deinit(port);
if (!port->has_tx_irqs) {
- for (thread = 0; thread < port->nthreads; thread++) {
+ for (thread = 0; thread < port->priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_cancel(&port_pcpu->tx_done_timer);
static int mvpp2_multi_queue_vectors_init(struct mvpp2_port *port,
struct device_node *port_node)
{
+ struct mvpp2 *priv = port->priv;
struct mvpp2_queue_vector *v;
int i, ret;
- port->nqvecs = num_possible_cpus();
- if (queue_mode == MVPP2_QDIST_SINGLE_MODE)
- port->nqvecs += 1;
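+ /* Single queue mode needs one extra queue vector, used as the shared one */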
+ switch (queue_mode) {
+ case MVPP2_QDIST_SINGLE_MODE:
+ port->nqvecs = priv->nthreads + 1;
+ break;
+ case MVPP2_QDIST_MULTI_MODE:
+ port->nqvecs = priv->nthreads;
+ break;
+ }
for (i = 0; i < port->nqvecs; i++) {
char irqname[16];
txq->id = queue_phy_id;
txq->log_id = queue;
txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
- for (thread = 0; thread < port->nthreads; thread++) {
+ for (thread = 0; thread < priv->nthreads; thread++) {
txq_pcpu = per_cpu_ptr(txq->pcpu, thread);
txq_pcpu->thread = thread;
}
port->has_tx_irqs = has_tx_irqs;
port->flags = flags;
- port->nthreads = min_t(unsigned int, num_present_cpus(),
- MVPP2_MAX_THREADS);
-
err = mvpp2_queue_vectors_init(port, port_node);
if (err)
goto err_free_netdev;
}
if (!port->has_tx_irqs) {
- for (thread = 0; thread < port->nthreads; thread++) {
+ for (thread = 0; thread < priv->nthreads; thread++) {
port_pcpu = per_cpu_ptr(port->pcpu, thread);
hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
struct mvpp2 *priv;
struct resource *res;
void __iomem *base;
- int i;
+ int i, shared;
int err;
priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
mvpp2_setup_bm_pool();
+
+ priv->nthreads = min_t(unsigned int, num_present_cpus(),
+ MVPP2_MAX_THREADS);
+
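+ /* CPUs in excess of the number of s/w threads wrap onto the first
+ * threads, so accesses from those threads must be locked.
+ */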
+ shared = num_present_cpus() - priv->nthreads;
+ if (shared > 0)
+ bitmap_fill(&priv->lock_map,
+ min_t(int, shared, MVPP2_MAX_THREADS));
+
for (i = 0; i < MVPP2_MAX_THREADS; i++) {
u32 addr_space_sz;