select ARCH_HAS_SG_CHAIN
select CPU_NO_EFFICIENT_FFS
select HAVE_ARCH_HARDENED_USERCOPY
+ select PROVE_LOCKING_SMALL if PROVE_LOCKING
config SPARC32
def_bool !64BIT
config ARCH_PROC_KCORE_TEXT
def_bool y
+config ARCH_ATU
+ bool
+ default y if SPARC64
+
+config ARCH_DMA_ADDR_T_64BIT
+ bool
+ default y if ARCH_ATU
+
config IOMMU_HELPER
bool
default y if SPARC64
config ARCH_SPARSEMEM_DEFAULT
def_bool y if SPARC64
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "13"
+ help
+ The kernel memory allocator divides physically contiguous memory
+ blocks into "zones", where each zone is a power of two number of
+ pages. This option selects the largest power of two that the kernel
+ keeps in the memory allocator. If you need to allocate very large
+ blocks of physically contiguous memory, then you may need to
+ increase this value.
+
+ This config option is actually maximum order plus one. For example,
+ a value of 13 means that the largest free memory block is 2^12 pages.
+
source "mm/Kconfig"
if SPARC64
*/
#define HV_FAST_PCI_MSG_SETVALID 0xd3
+/* PCI IOMMU v2 definitions and services
+ *
+ * While the PCI IO definitions above are still valid, IOMMU v2 adds new
+ * PCI IO definitions and services.
+ *
+ * CTE Clump Table Entry. First level table entry in the ATU.
+ *
+ * pci_device_list
+ * A 32-bit aligned list of pci_devices.
+ *
+ * pci_device_listp
+ * real address of a pci_device_list. 32-bit aligned.
+ *
+ * iotte IOMMU translation table entry.
+ *
+ * iotte_attributes
+ * IO Attributes for IOMMU v2 mappings. In addition to
+ * read and write, IOMMU v2 supports relaxed ordering.
+ *
+ * io_page_list A 64-bit aligned list of real addresses. Each real
+ * address in an io_page_list must be properly aligned
+ * to the pagesize of the given IOTSB.
+ *
+ * io_page_list_p Real address of an io_page_list, 64-bit aligned.
+ *
+ * IOTSB IO Translation Storage Buffer. An aligned table of
+ * IOTTEs. Each IOTSB has a pagesize, table size, and
+ * virtual address associated with it that must match
+ * a pagesize and table size supported by the underlying
+ * hardware implementation. The alignment requirements
+ * for an IOTSB depend on the pagesize used for that IOTSB.
+ * Each IOTTE in an IOTSB maps one pagesize-sized page.
+ * The size of the IOTSB dictates how large of a virtual
+ * address space the IOTSB is capable of mapping.
+ *
+ * iotsb_handle An opaque identifier for an IOTSB. A devhandle plus
+ * iotsb_handle represents a binding of an IOTSB to a
+ * PCI root complex.
+ *
+ * iotsb_index Zero-based IOTTE number within an IOTSB.
+ */
+
+/* The index_count argument consists of two fields:
+ * bits 63:48 #iottes and bits 47:0 iotsb_index
+ */
+#define HV_PCI_IOTSB_INDEX_COUNT(__iottes, __iotsb_index) \
+ (((u64)(__iottes) << 48UL) | ((u64)(__iotsb_index)))
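+
+/* Illustrative example only (not part of the API): a request for 8 IOTTEs
+ * starting at IOTSB index 0x20 packs as
+ *
+ *   HV_PCI_IOTSB_INDEX_COUNT(8, 0x20) == ((u64)8 << 48) | 0x20
+ */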
+
+/* pci_iotsb_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_CONF
+ * ARG0: devhandle
+ * ARG1: r_addr
+ * ARG2: size
+ * ARG3: pagesize
+ * ARG4: iova
+ * RET0: status
+ * RET1: iotsb_handle
+ * ERRORS: EINVAL Invalid devhandle, size, iova, or pagesize
+ * EBADALIGN r_addr is not properly aligned
+ * ENORADDR r_addr is not a valid real address
+ * ETOOMANY No further IOTSBs may be configured
+ * EBUSY Duplicate devhandle, r_addr, iova combination
+ *
+ * Create an IOTSB suitable for the PCI root complex identified by devhandle,
+ * for the DMA virtual address defined by the argument iova.
+ *
+ * r_addr is the properly aligned base address of the IOTSB and size is the
+ * IOTSB (table) size in bytes. The IOTSB is required to be zeroed prior to
+ * being configured. If it contains any values other than zeros then the
+ * behavior is undefined.
+ *
+ * pagesize is the size of each page in the IOTSB. Note that the combination of
+ * size (table size) and pagesize must be valid.
+ *
+ * iova is the DMA virtual address this IOTSB will map.
+ *
+ * If successful, the opaque 64-bit handle iotsb_handle is returned in ret1.
+ * Once configured, privileged access to the IOTSB memory is prohibited and
+ * creates undefined behavior. The only permitted access is indirect via these
+ * services.
+ */
+#define HV_FAST_PCI_IOTSB_CONF 0x190
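+
+/* Sizing sketch (this mirrors how the OS side of this patch uses the service;
+ * it is not mandated by the API): an IOTSB covering a 32G DVMA window with
+ * 8K IO pages needs one 8-byte IOTTE per page, i.e.
+ *
+ *   size = (0x800000000UL / 0x2000UL) * 8;   (a 32M byte table)
+ */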
+
+/* pci_iotsb_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_INFO
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * RET0: status
+ * RET1: r_addr
+ * RET2: size
+ * RET3: pagesize
+ * RET4: iova
+ * RET5: #bound
+ * ERRORS: EINVAL Invalid devhandle or iotsb_handle
+ *
+ * This service returns configuration information about an IOTSB previously
+ * created with pci_iotsb_conf.
+ *
+ * iotsb_handle value 0 may be used with this service to inquire about the
+ * legacy IOTSB that may or may not exist. If the service succeeds, the return
+ * values describe the legacy IOTSB and I/O virtual addresses mapped by that
+ * table. However, the table base address r_addr may contain the value -1 which
+ * indicates a memory range that cannot be accessed or reclaimed.
+ *
+ * The return value #bound contains the number of PCI devices that iotsb_handle
+ * is currently bound to.
+ */
+#define HV_FAST_PCI_IOTSB_INFO 0x191
+
+/* pci_iotsb_unconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_UNCONF
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle or iotsb_handle
+ * EBUSY The IOTSB is bound and may not be unconfigured
+ *
+ * This service unconfigures the IOTSB identified by the devhandle and
+ * iotsb_handle arguments, previously created with pci_iotsb_conf.
+ * The IOTSB must not be currently bound to any device or the service will fail.
+ *
+ * If the call succeeds, iotsb_handle is no longer valid.
+ */
+#define HV_FAST_PCI_IOTSB_UNCONF 0x192
+
+/* pci_iotsb_bind()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_BIND
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: pci_device
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device
+ * EBUSY A PCI function is already bound to an IOTSB at the same
+ * address range as specified by devhandle, iotsb_handle.
+ *
+ * This service binds the PCI function specified by the argument pci_device to
+ * the IOTSB specified by the arguments devhandle and iotsb_handle.
+ *
+ * The PCI device function is bound to the specified IOTSB with the IOVA range
+ * specified when the IOTSB was configured via pci_iotsb_conf. If the function
+ * is already bound then it is unbound first.
+ */
+#define HV_FAST_PCI_IOTSB_BIND 0x193
+
+/* pci_iotsb_unbind()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_UNBIND
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: pci_device
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or pci_device
+ * ENOMAP The PCI function was not bound to the specified IOTSB
+ *
+ * This service unbinds the PCI device specified by the argument pci_device
+ * from the IOTSB identified by the arguments devhandle and iotsb_handle.
+ *
+ * If the PCI device is not bound to the specified IOTSB then this service will
+ * fail with status ENOMAP.
+ */
+#define HV_FAST_PCI_IOTSB_UNBIND 0x194
+
+/* pci_iotsb_get_binding()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_GET_BINDING
+ * ARG0: devhandle
+ * ARG1: pci_device
+ * ARG2: iova
+ * RET0: status
+ * RET1: iotsb_handle
+ * ERRORS: EINVAL Invalid devhandle, pci_device, or iova
+ * ENOMAP The PCI function is not bound to an IOTSB at iova
+ *
+ * This service returns the IOTSB binding, iotsb_handle, for a given pci_device
+ * and DMA virtual address, iova.
+ *
+ * iova must be the base address of a DMA virtual address range as defined by
+ * the iommu-address-ranges property in the root complex device node defined
+ * by the argument devhandle.
+ */
+#define HV_FAST_PCI_IOTSB_GET_BINDING 0x195
+
+/* pci_iotsb_map()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_MAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: index_count
+ * ARG3: iotte_attributes
+ * ARG4: io_page_list_p
+ * RET0: status
+ * RET1: #mapped
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, #iottes,
+ * iotsb_index or iotte_attributes
+ * EBADALIGN Improperly aligned io_page_list_p or I/O page
+ * address in the I/O page list.
+ * ENORADDR Invalid io_page_list_p or I/O page address in
+ * the I/O page list.
+ *
+ * This service creates and flushes mappings in the IOTSB defined by the
+ * arguments devhandle and iotsb_handle.
+ *
+ * The index_count argument consists of two fields. Bits 63:48 contain #iottes
+ * and bits 47:0 contain iotsb_index.
+ *
+ * The first mapping is created in the IOTSB index specified by iotsb_index.
+ * Subsequent mappings are created at iotsb_index+1 and so on.
+ *
+ * The attributes of each mapping are defined by the argument iotte_attributes.
+ *
+ * The io_page_list_p specifies the real address of the 64-bit-aligned list of
+ * #iottes I/O page addresses. Each page address must be a properly aligned
+ * real address of a page to be mapped in the IOTSB. The first entry in the I/O
+ * page list contains the real address of the first page, the 2nd entry for the
+ * 2nd page, and so on.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The return value #mapped is the actual number of mappings created, which may
+ * be less than or equal to the argument #iottes. If the function returns
+ * successfully with a #mapped value less than the requested #iottes then the
+ * caller should continue to invoke the service with updated iotsb_index,
+ * #iottes, and io_page_list_p arguments until all pages are mapped.
+ *
+ * This service must not be used to demap a mapping. In other words, all
+ * mappings must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at the time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP 0x196
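+
+/* Non-normative sketch of the retry contract described above, using the
+ * pci_sun4v_iotsb_map() wrapper added elsewhere in this patch (its last
+ * argument returns #mapped); all locals here are purely illustrative:
+ *
+ *   while (npages != 0) {
+ *       index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
+ *       status = pci_sun4v_iotsb_map(devhandle, iotsb_handle, index_count,
+ *                                    attrs, __pa(pglist), &mapped);
+ *       if (status != HV_EOK)
+ *           break;
+ *       entry += mapped;
+ *       pglist += mapped;
+ *       npages -= mapped;
+ *   }
+ */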
+
+/* pci_iotsb_map_one()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_MAP_ONE
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: iotte_attributes
+ * ARG4: r_addr
+ * RET0: status
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index
+ * or iotte_attributes
+ * EBADALIGN Improperly aligned r_addr
+ * ENORADDR Invalid r_addr
+ *
+ * This service creates and flushes a single mapping in the IOTSB defined by the
+ * arguments devhandle and iotsb_handle.
+ *
+ * The mapping for the page at r_addr is created at the IOTSB index specified by
+ * iotsb_index with the attributes iotte_attributes.
+ *
+ * This service must not be used to demap a mapping. In other words, the mapping
+ * must be valid and have one or both of the RW attribute bits set.
+ *
+ * Note:
+ * It is implementation-defined whether I/O page real address validity checking
+ * is done at the time mappings are established or deferred until they are
+ * accessed.
+ */
+#define HV_FAST_PCI_IOTSB_MAP_ONE 0x197
+
+/* pci_iotsb_demap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_DEMAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: #iottes
+ * RET0: status
+ * RET1: #unmapped
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index or #iottes
+ *
+ * This service unmaps and flushes up to #iottes mappings starting at index
+ * iotsb_index from the IOTSB defined by the arguments devhandle and
+ * iotsb_handle.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs unmapped is returned in #unmapped and may be less
+ * than or equal to the requested number of IOTTEs, #iottes.
+ *
+ * If #unmapped is less than #iottes, the caller should continue to invoke this
+ * service with updated iotsb_index and #iottes arguments until all pages are
+ * demapped.
+ */
+#define HV_FAST_PCI_IOTSB_DEMAP 0x198
+
+/* pci_iotsb_getmap()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_GETMAP
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * RET0: status
+ * RET1: r_addr
+ * RET2: iotte_attributes
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, or iotsb_index
+ * ENOMAP No mapping was found
+ *
+ * This service returns the mapping specified by index iotsb_index from the
+ * IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * Upon success, the real address of the mapping shall be returned in
+ * r_addr and the IOTTE mapping attributes shall be returned in
+ * iotte_attributes.
+ *
+ * The return value iotte_attributes may not include optional features used in
+ * the call to create the mapping.
+ */
+#define HV_FAST_PCI_IOTSB_GETMAP 0x199
+
+/* pci_iotsb_sync_mappings()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_PCI_IOTSB_SYNC_MAPPINGS
+ * ARG0: devhandle
+ * ARG1: iotsb_handle
+ * ARG2: iotsb_index
+ * ARG3: #iottes
+ * RET0: status
+ * RET1: #synced
+ * ERRORS: EINVAL Invalid devhandle, iotsb_handle, iotsb_index, or #iottes
+ *
+ * This service synchronizes #iottes mappings starting at index iotsb_index in
+ * the IOTSB defined by the arguments devhandle and iotsb_handle.
+ *
+ * #iottes must be greater than zero.
+ *
+ * The actual number of IOTTEs synchronized is returned in #synced, which may
+ * be less than or equal to the requested number, #iottes.
+ *
+ * Upon a successful return, if #synced is less than #iottes, the caller should
+ * continue to invoke this service with updated iotsb_index and #iottes
+ * arguments until all pages are synchronized.
+ */
+#define HV_FAST_PCI_IOTSB_SYNC_MAPPINGS 0x19a
+
/* Logical Domain Channel services. */
#define LDC_CHANNEL_DOWN 0
#define HV_GRP_SDIO 0x0108
#define HV_GRP_SDIO_ERR 0x0109
#define HV_GRP_REBOOT_DATA 0x0110
+#define HV_GRP_ATU 0x0111
#define HV_GRP_M7_PERF 0x0114
#define HV_GRP_NIAG_PERF 0x0200
#define HV_GRP_FIRE_PERF 0x0201
unsigned int limit;
};
+#define ATU_64_SPACE_SIZE 0x800000000 /* 32G */
+
+/* Data structures for SPARC ATU architecture */
+struct atu_iotsb {
+ void *table; /* IOTSB table base virtual addr */
+ u64 ra; /* IOTSB table real addr */
+ u64 dvma_size; /* ranges[3].size or OS selected 32G size */
+ u64 dvma_base; /* ranges[3].base */
+ u64 table_size; /* IOTSB table size */
+ u64 page_size; /* IO PAGE size for IOTSB */
+ u32 iotsb_num; /* tsbnum is the same as iotsb_handle */
+};
+
+struct atu_ranges {
+ u64 base;
+ u64 size;
+};
+
+struct atu {
+ struct atu_ranges *ranges;
+ struct atu_iotsb *iotsb;
+ struct iommu_map_table tbl;
+ u64 base;
+ u64 size;
+ u64 dma_addr_mask;
+};
+
struct iommu {
struct iommu_map_table tbl;
+ struct atu *atu;
spinlock_t lock;
u32 dma_addr_mask;
iopte_t *page_table;
{ .group = HV_GRP_SDIO, },
{ .group = HV_GRP_SDIO_ERR, },
{ .group = HV_GRP_REBOOT_DATA, },
+ { .group = HV_GRP_ATU, .flags = FLAG_PRE_API },
{ .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API },
{ .group = HV_GRP_FIRE_PERF, },
{ .group = HV_GRP_N2_CPU, },
struct iommu *iommu = dev->archdata.iommu;
u64 dma_addr_mask = iommu->dma_addr_mask;
- if (device_mask >= (1UL << 32UL))
- return 0;
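+ /* Masks wider than 32 bits can only be honoured when an ATU is
+ * present; in that case validate against the ATU's DMA address mask.
+ */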
+ if (device_mask > DMA_BIT_MASK(32)) {
+ if (iommu->atu)
+ dma_addr_mask = iommu->atu->dma_addr_mask;
+ else
+ return 0;
+ }
if ((device_mask & dma_addr_mask) == dma_addr_mask)
return 1;
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/iommu-helper.h>
-#include <linux/scatterlist.h>
#include <asm/iommu.h>
{ .major = 1, .minor = 1 },
};
+static unsigned long vatu_major = 1;
+static unsigned long vatu_minor = 1;
+
#define PGLIST_NENTS (PAGE_SIZE / sizeof(u64))
struct iommu_batch {
}
/* Interrupts must be disabled. */
-static long iommu_batch_flush(struct iommu_batch *p)
+static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
+ u64 *pglist = p->pglist;
+ u64 index_count;
unsigned long devhandle = pbm->devhandle;
unsigned long prot = p->prot;
unsigned long entry = p->entry;
- u64 *pglist = p->pglist;
unsigned long npages = p->npages;
+ unsigned long iotsb_num;
+ unsigned long ret;
+ long num;
/* VPCI maj=1, min=[0,1] only supports read and write */
if (vpci_major < 2)
prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);
while (npages != 0) {
- long num;
-
- num = pci_sun4v_iommu_map(devhandle, HV_PCI_TSBID(0, entry),
- npages, prot, __pa(pglist));
- if (unlikely(num < 0)) {
- if (printk_ratelimit())
- printk("iommu_batch_flush: IOMMU map of "
- "[%08lx:%08llx:%lx:%lx:%lx] failed with "
- "status %ld\n",
- devhandle, HV_PCI_TSBID(0, entry),
- npages, prot, __pa(pglist), num);
- return -1;
+ if (mask <= DMA_BIT_MASK(32)) {
+ num = pci_sun4v_iommu_map(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages,
+ prot,
+ __pa(pglist));
+ if (unlikely(num < 0)) {
+ pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
+ __func__,
+ devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages, prot, __pa(pglist),
+ num);
+ return -1;
+ }
+ } else {
+ index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
+ iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
+ ret = pci_sun4v_iotsb_map(devhandle,
+ iotsb_num,
+ index_count,
+ prot,
+ __pa(pglist),
+ &num);
+ if (unlikely(ret != HV_EOK)) {
+ pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
+ __func__,
+ devhandle, iotsb_num,
+ index_count, prot,
+ __pa(pglist), ret);
+ return -1;
+ }
}
-
entry += num;
npages -= num;
pglist += num;
return 0;
}
-static inline void iommu_batch_new_entry(unsigned long entry)
+static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
if (p->entry + p->npages == entry)
return;
if (p->entry != ~0UL)
- iommu_batch_flush(p);
+ iommu_batch_flush(p, mask);
p->entry = entry;
}
/* Interrupts must be disabled. */
-static inline long iommu_batch_add(u64 phys_page)
+static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
p->pglist[p->npages++] = phys_page;
if (p->npages == PGLIST_NENTS)
- return iommu_batch_flush(p);
+ return iommu_batch_flush(p, mask);
return 0;
}
/* Interrupts must be disabled. */
-static inline long iommu_batch_end(void)
+static inline long iommu_batch_end(u64 mask)
{
struct iommu_batch *p = this_cpu_ptr(&iommu_batch);
BUG_ON(p->npages >= PGLIST_NENTS);
- return iommu_batch_flush(p);
+ return iommu_batch_flush(p, mask);
}
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_addrp, gfp_t gfp,
unsigned long attrs)
{
+ u64 mask;
unsigned long flags, order, first_page, npages, n;
unsigned long prot = 0;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
struct page *page;
void *ret;
long entry;
memset((char *)first_page, 0, PAGE_SIZE << order);
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
+
+ mask = dev->coherent_dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto range_alloc_fail;
- *dma_addrp = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+ *dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = (void *) first_page;
first_page = __pa(first_page);
entry);
for (n = 0; n < npages; n++) {
- long err = iommu_batch_add(first_page + (n * PAGE_SIZE));
+ long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
return ret;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
+ iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);
range_alloc_fail:
free_pages(first_page, order);
return NULL;
}
-static void dma_4v_iommu_demap(void *demap_arg, unsigned long entry,
- unsigned long npages)
+unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
+ unsigned long iotsb_num,
+ struct pci_bus *bus_dev)
+{
+ struct pci_dev *pdev;
+ unsigned long err;
+ unsigned int bus;
+ unsigned int device;
+ unsigned int fun;
+
+ list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
+ if (pdev->subordinate) {
+ /* No need to bind pci bridge */
+ dma_4v_iotsb_bind(devhandle, iotsb_num,
+ pdev->subordinate);
+ } else {
+ bus = bus_dev->number;
+ device = PCI_SLOT(pdev->devfn);
+ fun = PCI_FUNC(pdev->devfn);
+ err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
+ HV_PCI_DEVICE_BUILD(bus,
+ device,
+ fun));
+
+ /* If bind fails for one device it is going to fail
+ * for the rest of the devices because we are sharing the
+ * IOTSB. So in case of failure simply return the error.
+ */
+ if (err)
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
+ dma_addr_t dvma, unsigned long iotsb_num,
+ unsigned long entry, unsigned long npages)
{
- u32 devhandle = *(u32 *)demap_arg;
unsigned long num, flags;
+ unsigned long ret;
local_irq_save(flags);
do {
- num = pci_sun4v_iommu_demap(devhandle,
- HV_PCI_TSBID(0, entry),
- npages);
-
+ if (dvma <= DMA_BIT_MASK(32)) {
+ num = pci_sun4v_iommu_demap(devhandle,
+ HV_PCI_TSBID(0, entry),
+ npages);
+ } else {
+ ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
+ entry, npages, &num);
+ if (unlikely(ret != HV_EOK)) {
+ pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
+ ret);
+ }
+ }
entry += num;
npages -= num;
} while (npages != 0);
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
unsigned long order, npages, entry;
+ unsigned long iotsb_num;
u32 devhandle;
npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
- entry = ((dvma - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
+
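+ /* DVMA addresses at or below 4GB come from the legacy IOMMU table;
+ * anything above was allocated from the ATU's 64-bit range.
+ */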
+ if (dvma <= DMA_BIT_MASK(32)) {
+ tbl = &iommu->tbl;
+ iotsb_num = 0; /* not used for the legacy IOMMU */
+ } else {
+ tbl = &atu->tbl;
+ iotsb_num = atu->iotsb->iotsb_num;
+ }
+ entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
+ dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
+ iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
order = get_order(size);
if (order < 10)
free_pages((unsigned long)cpu, order);
unsigned long attrs)
{
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ u64 mask;
unsigned long flags, npages, oaddr;
unsigned long i, base_paddr;
- u32 bus_addr, ret;
unsigned long prot;
+ dma_addr_t bus_addr, ret;
long entry;
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
if (unlikely(direction == DMA_NONE))
goto bad;
npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
+ mask = *dev->dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
(unsigned long)(-1), 0);
if (unlikely(entry == IOMMU_ERROR_CODE))
goto bad;
- bus_addr = (iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT));
+ bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
base_paddr = __pa(oaddr & IO_PAGE_MASK);
prot = HV_PCI_MAP_ATTR_READ;
iommu_batch_start(dev, prot, entry);
for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
- long err = iommu_batch_add(base_paddr);
+ long err = iommu_batch_add(base_paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_fail;
}
- if (unlikely(iommu_batch_end() < 0L))
+ if (unlikely(iommu_batch_end(mask) < 0L))
goto iommu_map_fail;
local_irq_restore(flags);
return DMA_ERROR_CODE;
iommu_map_fail:
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+ iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
return DMA_ERROR_CODE;
}
{
struct pci_pbm_info *pbm;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
unsigned long npages;
+ unsigned long iotsb_num;
long entry;
u32 devhandle;
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
npages >>= IO_PAGE_SHIFT;
bus_addr &= IO_PAGE_MASK;
- entry = (bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT;
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
+
+ if (bus_addr <= DMA_BIT_MASK(32)) {
+ iotsb_num = 0; /* not used for the legacy IOMMU */
+ tbl = &iommu->tbl;
+ } else {
+ iotsb_num = atu->iotsb->iotsb_num;
+ tbl = &atu->tbl;
+ }
+ entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
+ dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
+ iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
unsigned long seg_boundary_size;
int outcount, incount, i;
struct iommu *iommu;
+ struct atu *atu;
+ struct iommu_map_table *tbl;
+ u64 mask;
unsigned long base_shift;
long err;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
+ atu = iommu->atu;
+
if (nelems == 0 || !iommu)
return 0;
max_seg_size = dma_get_max_seg_size(dev);
seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
- base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
+
+ mask = *dev->dma_mask;
+ if (mask <= DMA_BIT_MASK(32))
+ tbl = &iommu->tbl;
+ else
+ tbl = &atu->tbl;
+
+ base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;
+
for_each_sg(sglist, s, nelems, i) {
unsigned long paddr, npages, entry, out_entry = 0, slen;
/* Allocate iommu entries for that segment */
paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
- entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
+ entry = iommu_tbl_range_alloc(dev, tbl, npages,
&handle, (unsigned long)(-1), 0);
/* Handle failure */
if (unlikely(entry == IOMMU_ERROR_CODE)) {
- if (printk_ratelimit())
- printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
- " npages %lx\n", iommu, paddr, npages);
+ pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
+ tbl, paddr, npages);
goto iommu_map_failed;
}
- iommu_batch_new_entry(entry);
+ iommu_batch_new_entry(entry, mask);
/* Convert entry to a dma_addr_t */
- dma_addr = iommu->tbl.table_map_base + (entry << IO_PAGE_SHIFT);
+ dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
dma_addr |= (s->offset & ~IO_PAGE_MASK);
/* Insert into HW table */
paddr &= IO_PAGE_MASK;
while (npages--) {
- err = iommu_batch_add(paddr);
+ err = iommu_batch_add(paddr, mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
paddr += IO_PAGE_SIZE;
dma_next = dma_addr + slen;
}
- err = iommu_batch_end();
+ err = iommu_batch_end(mask);
if (unlikely(err < 0L))
goto iommu_map_failed;
vaddr = s->dma_address & IO_PAGE_MASK;
npages = iommu_num_pages(s->dma_address, s->dma_length,
IO_PAGE_SIZE);
- iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
+ iommu_tbl_range_free(tbl, vaddr, npages,
IOMMU_ERROR_CODE);
/* XXX demap? XXX */
s->dma_address = DMA_ERROR_CODE;
struct pci_pbm_info *pbm;
struct scatterlist *sg;
struct iommu *iommu;
+ struct atu *atu;
unsigned long flags, entry;
+ unsigned long iotsb_num;
u32 devhandle;
BUG_ON(direction == DMA_NONE);
iommu = dev->archdata.iommu;
pbm = dev->archdata.host_controller;
+ atu = iommu->atu;
devhandle = pbm->devhandle;
local_irq_save(flags);
dma_addr_t dma_handle = sg->dma_address;
unsigned int len = sg->dma_length;
unsigned long npages;
- struct iommu_map_table *tbl = &iommu->tbl;
+ struct iommu_map_table *tbl;
unsigned long shift = IO_PAGE_SHIFT;
if (!len)
break;
npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
+
+ if (dma_handle <= DMA_BIT_MASK(32)) {
+ iotsb_num = 0; /* not used for the legacy IOMMU */
+ tbl = &iommu->tbl;
+ } else {
+ iotsb_num = atu->iotsb->iotsb_num;
+ tbl = &atu->tbl;
+ }
entry = ((dma_handle - tbl->table_map_base) >> shift);
- dma_4v_iommu_demap(&devhandle, entry, npages);
- iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
+ dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
+ entry, npages);
+ iommu_tbl_range_free(tbl, dma_handle, npages,
IOMMU_ERROR_CODE);
sg = sg_next(sg);
}
return cnt;
}
+static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
+{
+ struct atu *atu = pbm->iommu->atu;
+ struct atu_iotsb *iotsb;
+ void *table;
+ u64 table_size;
+ u64 iotsb_num;
+ unsigned long order;
+ unsigned long err;
+
+ iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
+ if (!iotsb) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+ atu->iotsb = iotsb;
+
+ /* calculate size of IOTSB: one 8-byte IOTTE per IO page */
+ table_size = (atu->size / IO_PAGE_SIZE) * 8;
+ order = get_order(table_size);
+ table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
+ if (!table) {
+ err = -ENOMEM;
+ goto table_failed;
+ }
+ iotsb->table = table;
+ iotsb->ra = __pa(table);
+ iotsb->dvma_size = atu->size;
+ iotsb->dvma_base = atu->base;
+ iotsb->table_size = table_size;
+ iotsb->page_size = IO_PAGE_SIZE;
+
+ /* configure and register IOTSB with HV */
+ err = pci_sun4v_iotsb_conf(pbm->devhandle,
+ iotsb->ra,
+ iotsb->table_size,
+ iotsb->page_size,
+ iotsb->dvma_base,
+ &iotsb_num);
+ if (err) {
+ pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
+ goto iotsb_conf_failed;
+ }
+ iotsb->iotsb_num = iotsb_num;
+
+ err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
+ if (err) {
+ pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
+ goto iotsb_conf_failed;
+ }
+
+ return 0;
+
+iotsb_conf_failed:
+ free_pages((unsigned long)table, order);
+table_failed:
+ kfree(iotsb);
+out_err:
+ return err;
+}
+
+static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
+{
+ struct atu *atu = pbm->iommu->atu;
+ unsigned long err;
+ const u64 *ranges;
+ u64 map_size, num_iotte;
+ u64 dma_mask;
+ const u32 *page_size;
+ int len;
+
+ ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
+ &len);
+ if (!ranges) {
+ pr_err(PFX "No iommu-address-ranges\n");
+ return -EINVAL;
+ }
+
+ page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
+ NULL);
+ if (!page_size) {
+ pr_err(PFX "No iommu-pagesizes\n");
+ return -EINVAL;
+ }
+
+ /* There are 4 iommu-address-ranges supported. Each range is a pair of
+ * {base, size}. The ranges[0] and ranges[1] cover the 32bit address
+ * space while ranges[2] and ranges[3] cover the 64bit space. We want
+ * to use the 64bit address ranges to support 64bit addressing. Because
+ * the 'size' of ranges[2] and ranges[3] is the same, we can select
+ * either of them for mapping. However, since that 'size' is too large
+ * for the OS to allocate an IOTSB, we use a fixed 32G size
+ * (ATU_64_SPACE_SIZE), which is more than enough for all PCIe devices
+ * to share.
+ */
+ atu->ranges = (struct atu_ranges *)ranges;
+ atu->base = atu->ranges[3].base;
+ atu->size = ATU_64_SPACE_SIZE;
+
+ /* Create IOTSB */
+ err = pci_sun4v_atu_alloc_iotsb(pbm);
+ if (err) {
+ pr_err(PFX "Error creating ATU IOTSB\n");
+ return err;
+ }
+
+ /* Create ATU iommu map.
+ * One bit in the map represents one IOTTE in the IOTSB table.
+ */
+ dma_mask = (roundup_pow_of_two(atu->size) - 1UL) | atu->base;
+ num_iotte = atu->size / IO_PAGE_SIZE;
+ map_size = num_iotte / 8;
+ atu->tbl.table_map_base = atu->base;
+ atu->dma_addr_mask = dma_mask;
+ atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
+ if (!atu->tbl.map)
+ return -ENOMEM;
+
+ iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
+ NULL, false /* no large_pool */,
+ 0 /* default npools */,
+ false /* want span boundary checking */);
+
+ return 0;
+}
+
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
pci_sun4v_scan_bus(pbm, &op->dev);
+ /* If atu_init fails it is not a complete failure;
+ * we can still continue using the legacy iommu.
+ */
+ if (pbm->iommu->atu) {
+ err = pci_sun4v_atu_init(pbm);
+ if (err) {
+ kfree(pbm->iommu->atu);
+ pbm->iommu->atu = NULL;
+ pr_err(PFX "ATU init failed, err=%d\n", err);
+ }
+ }
+
pbm->next = pci_pbm_root;
pci_pbm_root = pbm;
struct pci_pbm_info *pbm;
struct device_node *dp;
struct iommu *iommu;
+ struct atu *atu;
u32 devhandle;
int i, err = -ENODEV;
+ static bool hv_atu = true;
dp = op->dev.of_node;
pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
vpci_major, vpci_minor);
+ err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
+ if (err) {
+ /* don't return an error if we fail to register the
+ * ATU group, but ATU hcalls won't be available.
+ */
+ hv_atu = false;
+ pr_err(PFX "Could not register hvapi ATU err=%d\n",
+ err);
+ } else {
+ pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
+ vatu_major, vatu_minor);
+ }
+
dma_ops = &sun4v_dma_ops;
}
}
pbm->iommu = iommu;
+ iommu->atu = NULL;
+ if (hv_atu) {
+ atu = kzalloc(sizeof(*atu), GFP_KERNEL);
+ if (!atu)
+ pr_err(PFX "Could not allocate atu\n");
+ else
+ iommu->atu = atu;
+ }
err = pci_sun4v_pbm_init(pbm, op, devhandle);
if (err)
return 0;
out_free_iommu:
+ kfree(iommu->atu);
kfree(pbm->iommu);
out_free_controller:
unsigned long msinum,
unsigned long valid);
+/* Sun4v HV IOMMU v2 APIs */
+unsigned long pci_sun4v_iotsb_conf(unsigned long devhandle,
+ unsigned long ra,
+ unsigned long table_size,
+ unsigned long page_size,
+ unsigned long dvma_base,
+ u64 *iotsb_num);
+unsigned long pci_sun4v_iotsb_bind(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned int pci_device);
+unsigned long pci_sun4v_iotsb_map(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned long iotsb_index_iottes,
+ unsigned long io_attributes,
+ unsigned long io_page_list_pa,
+ long *mapped);
+unsigned long pci_sun4v_iotsb_demap(unsigned long devhandle,
+ unsigned long iotsb_num,
+ unsigned long iotsb_index,
+ unsigned long iottes,
+ unsigned long *demapped);
#endif /* !(_PCI_SUN4V_H) */
mov %o0, %o0
ENDPROC(pci_sun4v_msg_setvalid)
+ /*
+ * %o0: devhandle
+ * %o1: r_addr
+ * %o2: size
+ * %o3: pagesize
+ * %o4: virt
+ * %o5: &iotsb_num/&iotsb_handle
+ *
+ * returns %o0: status
+ * %o1: iotsb_num/iotsb_handle
+ */
+ENTRY(pci_sun4v_iotsb_conf)
+ mov %o5, %g1
+ mov HV_FAST_PCI_IOTSB_CONF, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_conf)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: pci_device
+ *
+ * returns %o0: status
+ */
+ENTRY(pci_sun4v_iotsb_bind)
+ mov HV_FAST_PCI_IOTSB_BIND, %o5
+ ta HV_FAST_TRAP
+ retl
+ nop
+ENDPROC(pci_sun4v_iotsb_bind)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: index_count
+ * %o3: iotte_attributes
+ * %o4: io_page_list_p
+ * %o5: &mapped
+ *
+ * returns %o0: status
+ * %o1: #mapped
+ */
+ENTRY(pci_sun4v_iotsb_map)
+ mov %o5, %g1
+ mov HV_FAST_PCI_IOTSB_MAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%g1]
+ENDPROC(pci_sun4v_iotsb_map)
+
+ /*
+ * %o0: devhandle
+ * %o1: iotsb_num/iotsb_handle
+ * %o2: iotsb_index
+ * %o3: #iottes
+ * %o4: &demapped
+ *
+ * returns %o0: status
+ * %o1: #demapped
+ */
+ENTRY(pci_sun4v_iotsb_demap)
+ mov HV_FAST_PCI_IOTSB_DEMAP, %o5
+ ta HV_FAST_TRAP
+ retl
+ stx %o1, [%o4]
+ENDPROC(pci_sun4v_iotsb_demap)
sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
/* 1. Make sure we are not getting garbage from the user */
- if (!invalid_frame_pointer(sf, sizeof(*sf)))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv_and_exit;
if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
synchronize_user_stack();
sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
- if (!invalid_frame_pointer(sf, sizeof(*sf)))
+ if (invalid_frame_pointer(sf, sizeof(*sf)))
goto segv;
if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
};
static struct mdesc_mblock *mblocks;
static int num_mblocks;
+static int find_numa_node_for_addr(unsigned long pa,
+ struct node_mem_mask *pnode_mask);
-static unsigned long ra_to_pa(unsigned long addr)
+static unsigned long __init ra_to_pa(unsigned long addr)
{
int i;
return addr;
}
-static int find_node(unsigned long addr)
+static int __init find_node(unsigned long addr)
{
+ static bool search_mdesc = true;
+ static struct node_mem_mask last_mem_mask = { ~0UL, ~0UL };
+ static int last_index;
int i;
addr = ra_to_pa(addr);
if ((addr & p->mask) == p->val)
return i;
}
- /* The following condition has been observed on LDOM guests.*/
- WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node"
- " rule. Some physical memory will be owned by node 0.");
- return 0;
+ /* The following condition has been observed on LDOM guests because
+ * node_masks only contains the best latency mask and value.
+ * An LDOM guest's mdesc can contain a single latency group that
+ * covers multiple address ranges. Print the warning message only if
+ * the address cannot be found in node_masks nor in the mdesc.
+ */
+ if ((search_mdesc) &&
+ ((addr & last_mem_mask.mask) != last_mem_mask.val)) {
+ /* find the available node in the mdesc */
+ last_index = find_numa_node_for_addr(addr, &last_mem_mask);
+ numadbg("find_node: latency group for address 0x%lx is %d\n",
+ addr, last_index);
+ if ((last_index < 0) || (last_index >= num_node_masks)) {
+ /* WARN_ONCE() and use default group 0 */
+ WARN_ONCE(1, "find_node: A physical address doesn't match a NUMA node rule. Some physical memory will be owned by node 0.");
+ search_mdesc = false;
+ last_index = 0;
+ }
+ }
+
+ return last_index;
}
-static u64 memblock_nid_range(u64 start, u64 end, int *nid)
+static u64 __init memblock_nid_range(u64 start, u64 end, int *nid)
{
*nid = find_node(start);
start += PAGE_SIZE;
return numa_latency[from][to];
}
+static int find_numa_node_for_addr(unsigned long pa,
+ struct node_mem_mask *pnode_mask)
+{
+ struct mdesc_handle *md = mdesc_grab();
+ u64 node, arc;
+ int i = 0;
+
+ node = mdesc_node_by_name(md, MDESC_NODE_NULL, "latency-groups");
+ if (node == MDESC_NODE_NULL)
+ goto out;
+
+ mdesc_for_each_node_by_name(md, node, "group") {
+ mdesc_for_each_arc(arc, md, node, MDESC_ARC_TYPE_FWD) {
+ u64 target = mdesc_arc_target(md, arc);
+ struct mdesc_mlgroup *m = find_mlgroup(target);
+
+ if (!m)
+ continue;
+ if ((pa & m->mask) == m->match) {
+ if (pnode_mask) {
+ pnode_mask->mask = m->mask;
+ pnode_mask->val = m->match;
+ }
+ mdesc_release(md);
+ return i;
+ }
+ }
+ i++;
+ }
+
+out:
+ mdesc_release(md);
+ return -1;
+}
+
static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
{
int i;
*/
unsigned long long sched_clock(void)
{
- return clocksource_cyc2ns(get_cycles(),
- sched_clock_mult, SCHED_CLOCK_SHIFT);
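+ /* mult_frac() scales the full 64-bit cycle count without the
+ * intermediate overflow that clocksource_cyc2ns(), which expects
+ * small cycle deltas, would hit for large absolute counts.
+ */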
+ return mult_frac(get_cycles(),
+ sched_clock_mult, 1ULL << SCHED_CLOCK_SHIFT);
}
int setup_profiling_timer(unsigned int multiplier)
UBSAN_SANITIZE :=n
LDFLAGS := -m elf_$(UTS_MACHINE)
-ifeq ($(CONFIG_RELOCATABLE),y)
-# If kernel is relocatable, build compressed kernel as PIE.
+# Compressed kernel should be built as PIE since it may be loaded at any
+# address by the bootloader.
ifeq ($(CONFIG_X86_32),y)
LDFLAGS += $(call ld-option, -pie) $(call ld-option, --no-dynamic-linker)
else
LDFLAGS += $(shell $(LD) --help 2>&1 | grep -q "\-z noreloc-overflow" \
&& echo "-z noreloc-overflow -pie --no-dynamic-linker")
endif
-endif
LDFLAGS_vmlinux := -T
hostprogs-y := mkpiggy
return -1;
}
+ if (CONFIG_X86_MINIMUM_CPU_FAMILY <= 4 && !IS_ENABLED(CONFIG_M486) &&
+ !has_eflag(X86_EFLAGS_ID)) {
+ printf("This kernel requires a CPU with the CPUID instruction. Build with CONFIG_M486=y to run on this CPU.\n");
+ return -1;
+ }
+
if (err_flags) {
puts("This kernel requires the following features "
"not present on the CPU:\n");
pr_cont("Fam15h ");
x86_pmu.get_event_constraints = amd_get_event_constraints_f15h;
break;
-
+ case 0x17:
+ pr_cont("Fam17h ");
+ /*
+ * In family 17h, there are no event constraints in the PMC hardware.
+ * We fallback to using default amd_get_event_constraints.
+ */
+ break;
default:
pr_err("core perfctr but no constraints; unknown hardware!\n");
return -ENODEV;
frame.next_frame = 0;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, 8))
+ if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
if (bytes != 0)
break;
- if (!valid_user_frame(fp, sizeof(frame)))
- break;
-
perf_callchain_store(entry, cs_base + frame.return_address);
fp = compat_ptr(ss_base + frame.next_frame);
}
frame.next_frame = NULL;
frame.return_address = 0;
- if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
+ if (!valid_user_frame(fp, sizeof(frame)))
break;
bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
if (bytes != 0)
break;
- if (!valid_user_frame(fp, sizeof(frame)))
- break;
-
perf_callchain_store(entry, frame.return_address);
fp = (void __user *)frame.next_frame;
}
}
/*
- * We use the interrupt regs as a base because the PEBS record
- * does not contain a full regs set, specifically it seems to
- * lack segment descriptors, which get used by things like
- * user_mode().
+ * We use the interrupt regs as a base because the PEBS record does not
+ * contain a full regs set, specifically it seems to lack segment
+ * descriptors, which get used by things like user_mode().
*
- * In the simple case fix up only the IP and BP,SP regs, for
- * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
- * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+ * In the simple case fix up only the IP for PERF_SAMPLE_IP.
+ *
+ * We must however always use BP,SP from iregs for the unwinder to stay
+ * sane; the record BP,SP can point into thin air when the record is
+ * from a previous PMI context or an (I)RET happened between the record
+ * and PMI.
*/
*regs = *iregs;
regs->flags = pebs->flags;
set_linear_ip(regs, pebs->ip);
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;
if (sample_type & PERF_SAMPLE_REGS_INTR) {
regs->ax = pebs->ax;
regs->dx = pebs->dx;
regs->si = pebs->si;
regs->di = pebs->di;
- regs->bp = pebs->bp;
- regs->sp = pebs->sp;
- regs->flags = pebs->flags;
+ /*
+ * Per the above; only set BP,SP if we don't need callchains.
+ *
+ * XXX: does this make sense?
+ */
+ if (!(sample_type & PERF_SAMPLE_CALLCHAIN)) {
+ regs->bp = pebs->bp;
+ regs->sp = pebs->sp;
+ }
+
+ /*
+ * Preserve PERF_EFLAGS_VM from set_linear_ip().
+ */
+ regs->flags = pebs->flags | (regs->flags & PERF_EFLAGS_VM);
#ifndef CONFIG_X86_32
regs->r8 = pebs->r8;
regs->r9 = pebs->r9;
*/
static int uncore_pmu_event_init(struct perf_event *event);
-static bool is_uncore_event(struct perf_event *event)
+static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
{
- return event->pmu->event_init == uncore_pmu_event_init;
+ return &box->pmu->pmu == event->pmu;
}
static int
n = box->n_events;
- if (is_uncore_event(leader)) {
+ if (is_box_event(box, leader)) {
box->event_list[n] = leader;
n++;
}
return n;
list_for_each_entry(event, &leader->sibling_list, group_entry) {
- if (!is_uncore_event(event) ||
+ if (!is_box_event(box, event) ||
event->state <= PERF_EVENT_STATE_OFF)
continue;
snb_uncore_imc_event_start(event, 0);
- box->n_events++;
-
return 0;
}
static void snb_uncore_imc_event_del(struct perf_event *event, int flags)
{
- struct intel_uncore_box *box = uncore_event_to_box(event);
- int i;
-
snb_uncore_imc_event_stop(event, PERF_EF_UPDATE);
-
- for (i = 0; i < box->n_events; i++) {
- if (event == box->event_list[i]) {
- --box->n_events;
- break;
- }
- }
}
int snb_pci2phy_map_init(int devid)
* Per register state.
*/
struct er_account {
- raw_spinlock_t lock; /* per-core: protect structure */
+ raw_spinlock_t lock; /* per-core: protect structure */
u64 config; /* extra MSR config */
u64 reg; /* extra MSR number */
atomic_t ref; /* reference count */
for (; stack < stack_info.end; stack++) {
unsigned long real_addr;
int reliable = 0;
- unsigned long addr = *stack;
+ unsigned long addr = READ_ONCE_NOCHECK(*stack);
unsigned long *ret_addr_p =
unwind_get_return_address_ptr(&state);
{
WARN_ON_FPU(fpu != ¤t->thread.fpu); /* Almost certainly an anomaly */
- if (!use_eager_fpu() || !static_cpu_has(X86_FEATURE_FPU)) {
- /* FPU state will be reallocated lazily at the first use. */
- fpu__drop(fpu);
- } else {
- if (!fpu->fpstate_active) {
- fpu__activate_curr(fpu);
- user_fpu_begin();
- }
+ fpu__drop(fpu);
+
+ /*
+ * Make sure fpstate is cleared and initialized.
+ */
+ if (static_cpu_has(X86_FEATURE_FPU)) {
+ fpu__activate_curr(fpu);
+ user_fpu_begin();
copy_init_fpstate_to_fpregs();
}
}
initial_pg_pmd:
.fill 1024*KPMDS,4,0
#else
-ENTRY(initial_page_table)
+.globl initial_page_table
+initial_page_table:
.fill 1024,4,0
#endif
initial_pg_fixmap:
.fill 1024,4,0
-ENTRY(empty_zero_page)
+.globl empty_zero_page
+empty_zero_page:
.fill 4096,1,0
-ENTRY(swapper_pg_dir)
+.globl swapper_pg_dir
+swapper_pg_dir:
.fill 1024,4,0
EXPORT_SYMBOL(empty_zero_page)
{
struct platform_device *pd;
struct resource res;
- unsigned long len;
+ u64 base, size;
+ u32 length;
- /* don't use lfb_size as it may contain the whole VMEM instead of only
- * the part that is occupied by the framebuffer */
- len = mode->height * mode->stride;
- len = PAGE_ALIGN(len);
- if (len > (u64)si->lfb_size << 16) {
+ /*
+ * If the 64BIT_BASE capability is set, ext_lfb_base will contain the
+ * upper half of the base address. Assemble the address, then make sure
+ * it is valid and we can actually access it.
+ */
+ base = si->lfb_base;
+ if (si->capabilities & VIDEO_CAPABILITY_64BIT_BASE)
+ base |= (u64)si->ext_lfb_base << 32;
+ if (!base || (u64)(resource_size_t)base != base) {
+ printk(KERN_DEBUG "sysfb: inaccessible VRAM base\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Don't use lfb_size as IORESOURCE size, since it may contain the
+ * entire VMEM, and thus require huge mappings. Use just the part we
+ * need, that is, the part where the framebuffer is located. But verify
+ * that it does not exceed the advertised VMEM.
+ * Note that in case of VBE, the lfb_size is shifted by 16 bits for
+ * historical reasons.
+ */
+ size = si->lfb_size;
+ if (si->orig_video_isVGA == VIDEO_TYPE_VLFB)
+ size <<= 16;
+ length = mode->height * mode->stride;
+ length = PAGE_ALIGN(length);
+ if (length > size) {
printk(KERN_WARNING "sysfb: VRAM smaller than advertised\n");
return -EINVAL;
}
memset(&res, 0, sizeof(res));
res.flags = IORESOURCE_MEM | IORESOURCE_BUSY;
res.name = simplefb_resname;
- res.start = si->lfb_base;
- res.end = si->lfb_base + len - 1;
+ res.start = base;
+ res.end = res.start + length - 1;
if (res.end <= res.start)
return -EINVAL;
unsigned long unwind_get_return_address(struct unwind_state *state)
{
+ unsigned long addr;
+
 if (unwind_done(state))
 return 0;
+
+ addr = READ_ONCE_NOCHECK(*state->sp);
+
 return ftrace_graph_ret_addr(state->task, &state->graph_idx,
- *state->sp, state->sp);
+ addr, state->sp);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);
return false;
do {
- for (state->sp++; state->sp < info->end; state->sp++)
- if (__kernel_text_address(*state->sp))
+ for (state->sp++; state->sp < info->end; state->sp++) {
+ unsigned long addr = READ_ONCE_NOCHECK(*state->sp);
+
+ if (__kernel_text_address(addr))
 return true;
+ }
state->sp = info->next_sp;
if (early_recursion_flag > 2)
goto halt_loop;
- if (regs->cs != __KERNEL_CS)
+ /*
+ * Old CPUs leave the high bits of CS on the stack
+ * undefined. I'm not sure which CPUs do this, but at least
+ * the 486 DX works this way.
+ */
+ if ((regs->cs & 0xFFFF) != __KERNEL_CS)
goto fail;
/*
obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o
# MISC Devices
obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o
-obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_wdt.o
+obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o
--- /dev/null
+/*
+ * Intel Merrifield watchdog platform device library file
+ *
+ * (C) Copyright 2014 Intel Corporation
+ * Author: David Cohen <david.a.cohen@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; version 2
+ * of the License.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/platform_data/intel-mid_wdt.h>
+
+#include <asm/intel-mid.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/io_apic.h>
+
+#define TANGIER_EXT_TIMER0_MSI 15
+
+static struct platform_device wdt_dev = {
+ .name = "intel_mid_wdt",
+ .id = -1,
+};
+
+static int tangier_probe(struct platform_device *pdev)
+{
+ int gsi;
+ struct irq_alloc_info info;
+ struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
+
+ if (!pdata)
+ return -EINVAL;
+
+ /* IOAPIC builds identity mapping between GSI and IRQ on MID */
+ gsi = pdata->irq;
+ ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
+ if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) {
+ dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
+ gsi);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct intel_mid_wdt_pdata tangier_pdata = {
+ .irq = TANGIER_EXT_TIMER0_MSI,
+ .probe = tangier_probe,
+};
+
+static int wdt_scu_status_change(struct notifier_block *nb,
+ unsigned long code, void *data)
+{
+ if (code == SCU_DOWN) {
+ platform_device_unregister(&wdt_dev);
+ return 0;
+ }
+
+ return platform_device_register(&wdt_dev);
+}
+
+static struct notifier_block wdt_scu_notifier = {
+ .notifier_call = wdt_scu_status_change,
+};
+
+static int __init register_mid_wdt(void)
+{
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return -ENODEV;
+
+ wdt_dev.dev.platform_data = &tangier_pdata;
+
+ /*
+ * We need to be sure that the SCU IPC is ready before the watchdog
+ * device can be registered:
+ */
+ intel_scu_notifier_add(&wdt_scu_notifier);
+
+ return 0;
+}
+rootfs_initcall(register_mid_wdt);
+++ /dev/null
-/*
- * platform_wdt.c: Watchdog platform library file
- *
- * (C) Copyright 2014 Intel Corporation
- * Author: David Cohen <david.a.cohen@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/platform_data/intel-mid_wdt.h>
-#include <asm/intel-mid.h>
-#include <asm/io_apic.h>
-
-#define TANGIER_EXT_TIMER0_MSI 15
-
-static struct platform_device wdt_dev = {
- .name = "intel_mid_wdt",
- .id = -1,
-};
-
-static int tangier_probe(struct platform_device *pdev)
-{
- int gsi;
- struct irq_alloc_info info;
- struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
-
- if (!pdata)
- return -EINVAL;
-
- /* IOAPIC builds identity mapping between GSI and IRQ on MID */
- gsi = pdata->irq;
- ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
- if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) {
- dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
- gsi);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static struct intel_mid_wdt_pdata tangier_pdata = {
- .irq = TANGIER_EXT_TIMER0_MSI,
- .probe = tangier_probe,
-};
-
-static int __init register_mid_wdt(void)
-{
- if (intel_mid_identify_cpu() == INTEL_MID_CPU_CHIP_TANGIER) {
- wdt_dev.dev.platform_data = &tangier_pdata;
- return platform_device_register(&wdt_dev);
- }
-
- return -ENODEV;
-}
-
-rootfs_initcall(register_mid_wdt);
ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0);
- if (!result) {
+ if (!result && !ctx->more) {
err = af_alg_wait_for_completion(
crypto_ahash_init(&ctx->req),
&ctx->completion);
sg = scatterwalk_ffwd(tmp, sg, start);
- if (sg_page(sg) == virt_to_page(buf) &&
- sg->offset == offset_in_page(buf))
- return;
-
scatterwalk_start(&walk, sg);
scatterwalk_copychunks(buf, &walk, nbytes, out);
scatterwalk_done(&walk, out, 0);
}
/* register clk-provider */
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
}
/* register clk-provider */
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
return;
hws[clk_HFPERCLKDAC0] = clk_hw_register_gate(NULL, "HFPERCLK.DAC0",
"HFXO", 0, base + CMU_HFPERCLKEN0, 17, 0, NULL);
- of_clk_add_hw_provider(np, of_clk_hw_onecell_get, &clk_data);
+ of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}
CLK_OF_DECLARE(efm32ggcmu, "efm32gg,cmu", efm32gg_cmu_init);
static SUNXI_CCU_DIV_TABLE(axi_clk, "axi", "cpu",
0x050, 0, 3, axi_div_table, 0);
+#define SUN6I_A31_AHB1_REG 0x054
+
static const char * const ahb1_parents[] = { "osc32k", "osc24M",
"axi", "pll-periph" };
val &= BIT(16);
writel(val, reg + SUN6I_A31_PLL_MIPI_REG);
+ /* Force AHB1 to PLL6 / 3 */
+ val = readl(reg + SUN6I_A31_AHB1_REG);
+ /* set PLL6 pre-div = 3 */
+ val &= ~GENMASK(7, 6);
+ val |= 0x2 << 6;
+ /* select PLL6 / pre-div */
+ val &= ~GENMASK(13, 12);
+ val |= 0x3 << 12;
+ writel(val, reg + SUN6I_A31_AHB1_REG);
+
sunxi_ccu_probe(node, reg, &sun6i_a31_ccu_desc);
ccu_mux_notifier_register(pll_cpu_clk.common.hw.clk,
else
calcp = 3;
- calcm = (req->parent_rate >> calcp) - 1;
+ calcm = (div >> calcp) - 1;
req->rate = (req->parent_rate >> calcp) / (calcm + 1);
req->m = calcm;
static struct amdgpu_atpx_priv {
bool atpx_detected;
+ bool bridge_pm_usable;
/* handle for device - and atpx */
acpi_handle dhandle;
acpi_handle other_handle;
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
- atpx->functions.power_cntl = false;
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !amdgpu_atpx_priv.bridge_pm_usable;
atpx->is_hybrid = true;
}
*/
static bool amdgpu_atpx_pci_probe_handle(struct pci_dev *pdev)
{
+ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
acpi_handle dhandle, atpx_handle;
acpi_status status;
}
amdgpu_atpx_priv.dhandle = dhandle;
amdgpu_atpx_priv.atpx.handle = atpx_handle;
+ amdgpu_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
return true;
}
if (!(data->mc_micro_code_feature & DISABLE_MC_LOADMICROCODE) && memory_clock > data->highest_mclk)
data->highest_mclk = memory_clock;
- performance_level = &(ps->performance_levels
- [ps->performance_level_count++]);
-
PP_ASSERT_WITH_CODE(
(ps->performance_level_count < smum_get_mac_definition(hwmgr->smumgr, SMU_MAX_LEVELS_GRAPHICS)),
"Performance levels exceeds SMC limit!",
return -EINVAL);
PP_ASSERT_WITH_CODE(
- (ps->performance_level_count <=
+ (ps->performance_level_count <
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels),
- "Performance levels exceeds Driver limit!",
- return -EINVAL);
+ "Performance levels exceeds Driver limit, Skip!",
+ return 0);
+
+ performance_level = &(ps->performance_levels
+ [ps->performance_level_count++]);
/* Performance levels are arranged from low to high. */
performance_level->memory_clock = memory_clock;
clk_prepare_enable(hdlcd->clk);
hdlcd_crtc_mode_set_nofb(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 1);
+ drm_crtc_vblank_on(crtc);
}
static void hdlcd_crtc_disable(struct drm_crtc *crtc)
{
struct hdlcd_drm_private *hdlcd = crtc_to_hdlcd_priv(crtc);
- if (!crtc->state->active)
- return;
-
+ drm_crtc_vblank_off(crtc);
hdlcd_write(hdlcd, HDLCD_REG_COMMAND, 0);
clk_disable_unprepare(hdlcd->clk);
}
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->regs_hdmiphy)
+ iounmap(hdata->regs_hdmiphy);
err_ddc:
put_device(&hdata->ddc_adpt->dev);
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
+ if (hdata->regs_hdmiphy)
+ iounmap(hdata->regs_hdmiphy);
+
put_device(&hdata->ddc_adpt->dev);
return 0;
if (irq < 0)
return irq;
- ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
- IRQF_TRIGGER_NONE, dev_name(dev), priv);
- if (ret < 0) {
- dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
- return ret;
- }
-
comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL);
if (comp_id < 0) {
dev_err(dev, "Failed to identify by alias: %d\n", comp_id);
platform_set_drvdata(pdev, priv);
+ ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler,
+ IRQF_TRIGGER_NONE, dev_name(dev), priv);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq %d: %d\n", irq, ret);
+ return ret;
+ }
+
ret = component_add(dev, &mtk_disp_ovl_component_ops);
if (ret)
dev_err(dev, "Failed to add component: %d\n", ret);
unsigned int bpc)
{
writel(w << 16 | h, comp->regs + DISP_OD_SIZE);
- writel(OD_RELAYMODE, comp->regs + OD_RELAYMODE);
+ writel(OD_RELAYMODE, comp->regs + DISP_OD_CFG);
mtk_dither_set(comp, bpc, DISP_OD_CFG);
}
#define DSI_PHY_TIMECON0 0x110
#define LPX (0xff << 0)
-#define HS_PRPR (0xff << 8)
+#define HS_PREP (0xff << 8)
#define HS_ZERO (0xff << 16)
#define HS_TRAIL (0xff << 24)
#define CLK_TRAIL (0xff << 24)
#define DSI_PHY_TIMECON3 0x11c
-#define CLK_HS_PRPR (0xff << 0)
+#define CLK_HS_PREP (0xff << 0)
#define CLK_HS_POST (0xff << 8)
#define CLK_HS_EXIT (0xff << 16)
+#define T_LPX 5
+#define T_HS_PREP 6
+#define T_HS_TRAIL 8
+#define T_HS_EXIT 7
+#define T_HS_ZERO 10
+
#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))
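NS_TO_CYCLE() above is plain ceiling division. A tiny standalone check (hypothetical numbers, ordinary userspace C, not driver code) showing it agrees with the usual round-up formula:

#include <stdio.h>

#define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 1 : 0))

int main(void)
{
	/* example: 80 ns at a 7 ns cycle time rounds up to 12 cycles */
	unsigned int n = 80, c = 7;

	printf("NS_TO_CYCLE(%u, %u) = %u\n", n, c, NS_TO_CYCLE(n, c));
	printf("round-up check      = %u\n", (n + c - 1) / c);
	return 0;
}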
struct phy;
static void dsi_phy_timconfig(struct mtk_dsi *dsi)
{
u32 timcon0, timcon1, timcon2, timcon3;
- unsigned int ui, cycle_time;
- unsigned int lpx;
+ u32 ui, cycle_time;
ui = 1000 / dsi->data_rate + 0x01;
cycle_time = 8000 / dsi->data_rate + 0x01;
- lpx = 5;
- timcon0 = (8 << 24) | (0xa << 16) | (0x6 << 8) | lpx;
- timcon1 = (7 << 24) | (5 * lpx << 16) | ((3 * lpx) / 2) << 8 |
- (4 * lpx);
+ timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24;
+ timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 |
+ T_HS_EXIT << 24;
timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) |
(NS_TO_CYCLE(0x150, cycle_time) << 16);
- timcon3 = (2 * lpx) << 16 | NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8 |
- NS_TO_CYCLE(0x40, cycle_time);
+ timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 |
+ NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8;
writel(timcon0, dsi->regs + DSI_PHY_TIMECON0);
writel(timcon1, dsi->regs + DSI_PHY_TIMECON1);
{
struct device *dev = dsi->dev;
int ret;
+ u64 pixel_clock, total_bits;
+ u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits;
if (++dsi->refcount != 1)
return 0;
+ switch (dsi->format) {
+ case MIPI_DSI_FMT_RGB565:
+ bit_per_pixel = 16;
+ break;
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ bit_per_pixel = 18;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ case MIPI_DSI_FMT_RGB888:
+ default:
+ bit_per_pixel = 24;
+ break;
+ }
+
/**
- * data_rate = (pixel_clock / 1000) * pixel_dipth * mipi_ratio;
- * pixel_clock unit is Khz, data_rata unit is MHz, so need divide 1000.
- * mipi_ratio is mipi clk coefficient for balance the pixel clk in mipi.
- * we set mipi_ratio is 1.05.
+ * vm.pixelclock is in kHz, pixel_clock unit is Hz, so multiply by 1000
+ * htotal_time = htotal * byte_per_pixel / num_lanes
+ * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit
+ * mipi_ratio = (htotal_time + overhead_time) / htotal_time
+ * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes;
*/
- dsi->data_rate = dsi->vm.pixelclock * 3 * 21 / (1 * 1000 * 10);
+ pixel_clock = dsi->vm.pixelclock * 1000;
+ htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch +
+ dsi->vm.hsync_len;
+ htotal_bits = htotal * bit_per_pixel;
+
+ overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL +
+ T_HS_EXIT;
+ overhead_bits = overhead_cycles * dsi->lanes * 8;
+ total_bits = htotal_bits + overhead_bits;
+
+ dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits,
+ htotal * dsi->lanes);
- ret = clk_set_rate(dsi->hs_clk, dsi->data_rate * 1000000);
+ ret = clk_set_rate(dsi->hs_clk, dsi->data_rate);
if (ret < 0) {
dev_err(dev, "Failed to set data rate: %d\n", ret);
goto err_refcount;
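The comment block above gives the HS data-rate formula; the standalone sketch below recomputes it for a hypothetical 2200-pixel total line at 148.5 MHz, RGB888 over four lanes. All mode numbers are invented for illustration; only the T_* overhead constants come from the driver.

#include <stdio.h>
#include <stdint.h>

#define T_LPX		5
#define T_HS_PREP	6
#define T_HS_TRAIL	8
#define T_HS_EXIT	7
#define T_HS_ZERO	10

int main(void)
{
	/* hypothetical mode: htotal 2200, RGB888, 4 lanes, 148.5 MHz */
	uint64_t pixel_clock = 148500000ULL;		/* Hz */
	uint32_t htotal = 2200, bit_per_pixel = 24, lanes = 4;

	uint64_t htotal_bits = (uint64_t)htotal * bit_per_pixel;
	uint32_t overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO +
				   T_HS_TRAIL + T_HS_EXIT;
	uint64_t overhead_bits = (uint64_t)overhead_cycles * lanes * 8;
	uint64_t total_bits = htotal_bits + overhead_bits;

	/* same rounding as DIV_ROUND_UP_ULL(pixel_clock * total_bits, den) */
	uint64_t den = (uint64_t)htotal * lanes;
	uint64_t data_rate = (pixel_clock * total_bits + den - 1) / den;

	printf("per-lane HS bit rate: %llu bit/s\n",
	       (unsigned long long)data_rate);	/* ~907 Mbit/s per lane */
	return 0;
}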
static struct radeon_atpx_priv {
bool atpx_detected;
+ bool bridge_pm_usable;
/* handle for device - and atpx */
acpi_handle dhandle;
struct radeon_atpx atpx;
atpx->is_hybrid = false;
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
printk("ATPX Hybrid Graphics\n");
- atpx->functions.power_cntl = false;
+ /*
+ * Disable legacy PM methods only when pcie port PM is usable,
+ * otherwise the device might fail to power off or power on.
+ */
+ atpx->functions.power_cntl = !radeon_atpx_priv.bridge_pm_usable;
atpx->is_hybrid = true;
}
*/
static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
{
+ struct pci_dev *parent_pdev = pci_upstream_bridge(pdev);
acpi_handle dhandle, atpx_handle;
acpi_status status;
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
+ radeon_atpx_priv.bridge_pm_usable = parent_pdev && parent_pdev->bridge_d3;
return true;
}
#include <linux/usb/ch9.h>
#include "hid-ids.h"
+#define CP2112_REPORT_MAX_LENGTH 64
+#define CP2112_GPIO_CONFIG_LENGTH 5
+#define CP2112_GPIO_GET_LENGTH 2
+#define CP2112_GPIO_SET_LENGTH 3
+
enum {
CP2112_GPIO_CONFIG = 0x02,
CP2112_GPIO_GET = 0x03,
atomic_t read_avail;
atomic_t xfer_avail;
struct gpio_chip gc;
+ u8 *in_out_buffer;
+ spinlock_t lock;
};
static int gpio_push_pull = 0xFF;
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[5];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
- sizeof(buf), HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- return ret;
+ goto exit;
}
buf[1] &= ~(1 << offset);
buf[2] = gpio_push_pull;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- return ret;
+ goto exit;
}
- return 0;
+ ret = 0;
+
+exit:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret <= 0 ? ret : -EIO;
}
static void cp2112_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[3];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
buf[0] = CP2112_GPIO_SET;
buf[1] = value ? 0xff : 0;
buf[2] = 1 << offset;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_SET, buf,
+ CP2112_GPIO_SET_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0)
hid_err(hdev, "error setting GPIO values: %d\n", ret);
+
+ spin_unlock_irqrestore(&dev->lock, flags);
}
static int cp2112_gpio_get(struct gpio_chip *chip, unsigned offset)
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[2];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ spin_lock_irqsave(&dev->lock, flags);
+
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_GET, buf,
+ CP2112_GPIO_GET_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_GET_LENGTH) {
hid_err(hdev, "error requesting GPIO values: %d\n", ret);
- return ret;
+ ret = ret < 0 ? ret : -EIO;
+ goto exit;
}
- return (buf[1] >> offset) & 1;
+ ret = (buf[1] >> offset) & 1;
+
+exit:
+ spin_unlock_irqrestore(&dev->lock, flags);
+
+ return ret;
}
static int cp2112_gpio_direction_output(struct gpio_chip *chip,
{
struct cp2112_device *dev = gpiochip_get_data(chip);
struct hid_device *hdev = dev->hdev;
- u8 buf[5];
+ u8 *buf = dev->in_out_buffer;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&dev->lock, flags);
+
ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
- sizeof(buf), HID_FEATURE_REPORT,
- HID_REQ_GET_REPORT);
- if (ret != sizeof(buf)) {
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_GET_REPORT);
+ if (ret != CP2112_GPIO_CONFIG_LENGTH) {
hid_err(hdev, "error requesting GPIO config: %d\n", ret);
- return ret;
+ goto fail;
}
buf[1] |= 1 << offset;
buf[2] = gpio_push_pull;
- ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ ret = hid_hw_raw_request(hdev, CP2112_GPIO_CONFIG, buf,
+ CP2112_GPIO_CONFIG_LENGTH, HID_FEATURE_REPORT,
+ HID_REQ_SET_REPORT);
if (ret < 0) {
hid_err(hdev, "error setting GPIO config: %d\n", ret);
- return ret;
+ goto fail;
}
+ spin_unlock_irqrestore(&dev->lock, flags);
+
/*
* Set gpio value when output direction is already set,
* as specified in AN495, Rev. 0.2, cpt. 4.4
cp2112_gpio_set(chip, offset, value);
return 0;
+
+fail:
+ spin_unlock_irqrestore(&dev->lock, flags);
+ return ret < 0 ? ret : -EIO;
}
static int cp2112_hid_get(struct hid_device *hdev, unsigned char report_number,
struct cp2112_smbus_config_report config;
int ret;
+ dev = devm_kzalloc(&hdev->dev, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return -ENOMEM;
+
+ dev->in_out_buffer = devm_kzalloc(&hdev->dev, CP2112_REPORT_MAX_LENGTH,
+ GFP_KERNEL);
+ if (!dev->in_out_buffer)
+ return -ENOMEM;
+
+ spin_lock_init(&dev->lock);
+
ret = hid_parse(hdev);
if (ret) {
hid_err(hdev, "parse failed\n");
goto err_power_normal;
}
- dev = kzalloc(sizeof(*dev), GFP_KERNEL);
- if (!dev) {
- ret = -ENOMEM;
- goto err_power_normal;
- }
-
hid_set_drvdata(hdev, (void *)dev);
dev->hdev = hdev;
dev->adap.owner = THIS_MODULE;
if (ret) {
hid_err(hdev, "error registering i2c adapter\n");
- goto err_free_dev;
+ goto err_power_normal;
}
hid_dbg(hdev, "adapter registered\n");
gpiochip_remove(&dev->gc);
err_free_i2c:
i2c_del_adapter(&dev->adap);
-err_free_dev:
- kfree(dev);
err_power_normal:
hid_hw_power(hdev, PM_HINT_NORMAL);
err_hid_close:
*/
hid_hw_close(hdev);
hid_hw_stop(hdev);
- kfree(dev);
}
static int cp2112_raw_event(struct hid_device *hdev, struct hid_report *report,
/* Setup wireless link with Logitech Wii wheel */
if (hdev->product == USB_DEVICE_ID_LOGITECH_WII_WHEEL) {
- unsigned char buf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ const unsigned char cbuf[] = { 0x00, 0xAF, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
+ u8 *buf = kmemdup(cbuf, sizeof(cbuf), GFP_KERNEL);
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
- HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_free;
+ }
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
+ HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
if (ret >= 0) {
/* insert a little delay of 10 jiffies ~ 40ms */
wait_queue_head_t wait;
buf[1] = 0xB2;
get_random_bytes(&buf[2], 2);
- ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(cbuf),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
}
+ kfree(buf);
}
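This hunk, and the magicmouse and rmi hunks below, apply the same rule: buffers handed to hid_hw_raw_request() must come from the heap, since stack memory may not be DMA-capable on all architectures. A minimal sketch of the pattern, assuming <linux/hid.h> and <linux/slab.h>; the helper name and report template are hypothetical, not from any of these drivers:

/* sketch: send a feature report from a const template via a heap copy */
static int send_feature_report(struct hid_device *hdev,
			       const u8 *tmpl, size_t len)
{
	u8 *buf = kmemdup(tmpl, len, GFP_KERNEL);
	int ret;

	if (!buf)
		return -ENOMEM;

	/* the heap copy is safe for the transport driver to DMA from */
	ret = hid_hw_raw_request(hdev, buf[0], buf, len,
				 HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
	kfree(buf);
	return ret;
}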
if (drv_data->quirks & LG_FF)
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
- __u8 feature[] = { 0xd7, 0x01 };
+ const u8 feature[] = { 0xd7, 0x01 };
+ u8 *buf;
struct magicmouse_sc *msc;
struct hid_report *report;
int ret;
}
report->size = 6;
+ buf = kmemdup(feature, sizeof(feature), GFP_KERNEL);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_stop_hw;
+ }
+
/*
* Some devices respond with 'invalid report id' when the feature
* report that switches them into multitouch mode is sent to them,
* but there seems to be no other way of switching the mode.
* Thus the super-ugly hacky success check below.
*/
- ret = hid_hw_raw_request(hdev, feature[0], feature, sizeof(feature),
+ ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(feature),
HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
if (ret != -EIO && ret != sizeof(feature)) {
hid_err(hdev, "unable to request touch data (%d)\n", ret);
goto err_stop_hw;
static int rmi_set_mode(struct hid_device *hdev, u8 mode)
{
int ret;
- u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+ const u8 txbuf[2] = {RMI_SET_RMI_MODE_REPORT_ID, mode};
+ u8 *buf;
- ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, txbuf,
+ buf = kmemdup(txbuf, sizeof(txbuf), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hid_hw_raw_request(hdev, RMI_SET_RMI_MODE_REPORT_ID, buf,
sizeof(txbuf), HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+ kfree(buf);
if (ret < 0) {
dev_err(&hdev->dev, "unable to set rmi mode to %d (%d)\n", mode,
ret);
__s32 value;
int ret = 0;
+ memset(buffer, 0, buffer_size);
mutex_lock(&data->mutex);
report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
if (!report || (field_index >= report->maxfield)) {
int i;
tuner_dbg("%s called\n", __func__);
+ /* free allocated f/w string */
+ if (priv->fname != firmware_name)
+ kfree(priv->fname);
+ priv->fname = NULL;
+
+ priv->state = XC2028_NO_FIRMWARE;
+ memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
+
if (!priv->firm)
return;
priv->firm = NULL;
priv->firm_size = 0;
- priv->state = XC2028_NO_FIRMWARE;
-
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
}
static int load_all_firmwares(struct dvb_frontend *fe,
return 0;
fail:
- priv->state = XC2028_NO_FIRMWARE;
+ free_firmware(priv);
- memset(&priv->cur_fw, 0, sizeof(priv->cur_fw));
if (retry_count < 8) {
msleep(50);
retry_count++;
mutex_lock(&xc2028_list_mutex);
/* only perform final cleanup if this is the last instance */
- if (hybrid_tuner_report_instance_count(priv) == 1) {
+ if (hybrid_tuner_report_instance_count(priv) == 1)
free_firmware(priv);
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
- }
if (priv)
hybrid_tuner_release_state(priv);
/*
* Copy the config data.
- * For the firmware name, keep a local copy of the string,
- * in order to avoid troubles during device release.
*/
- kfree(priv->ctrl.fname);
- priv->ctrl.fname = NULL;
memcpy(&priv->ctrl, p, sizeof(priv->ctrl));
- if (p->fname) {
- priv->ctrl.fname = kstrdup(p->fname, GFP_KERNEL);
- if (priv->ctrl.fname == NULL) {
- rc = -ENOMEM;
- goto unlock;
- }
- }
/*
* If firmware name changed, frees firmware. As free_firmware will
if (priv->state == XC2028_NO_FIRMWARE) {
if (!firmware_name[0])
- priv->fname = priv->ctrl.fname;
+ priv->fname = kstrdup(p->fname, GFP_KERNEL);
else
priv->fname = firmware_name;
+ if (!priv->fname) {
+ rc = -ENOMEM;
+ goto unlock;
+ }
+
rc = request_firmware_nowait(THIS_MODULE, 1,
priv->fname,
priv->i2c_props.adap->dev.parent,
spin_unlock_irqrestore(&host->irq_lock, irqflags);
if (host->dma_ops->start(host, sg_len)) {
+ host->dma_ops->stop(host);
/* We can't do DMA, try PIO for this one */
dev_dbg(host->dev,
"%s: fall back to PIO mode for current transfer\n",
return ret;
}
}
+ /*
+ * The DAT[3:0] line signal levels and the CMD line signal level are
+ * not compatible with standard SDHC register. The line signal levels
+ * DAT[3:0] are at bits 27:24 and the command line signal level is at
+ * bit 23. All other bits are the same as in the standard SDHC
+ * register.
+ */
+ if (spec_reg == SDHCI_PRESENT_STATE) {
+ ret = value & 0x000fffff;
+ ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;
+ ret |= (value << 1) & SDHCI_CMD_LVL;
+ return ret;
+ }
+
ret = value;
return ret;
}
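As a sanity check of the remap above, the standalone program below feeds in a hypothetical raw eSDHC present-state value with all DAT[3:0] lines high (bits 27:24) and CMD high (bit 23), and prints the standard-layout result (DAT[3:0] at 23:20, CMD at bit 24). The input value is made up for illustration.

#include <stdio.h>
#include <stdint.h>

#define SDHCI_DATA_LVL_MASK	0x00F00000	/* standard DAT[3:0] at 23:20 */
#define SDHCI_CMD_LVL		0x01000000	/* standard CMD level at bit 24 */

static uint32_t esdhc_fixup_present_state(uint32_t value)
{
	uint32_t ret = value & 0x000fffff;		/* low bits unchanged */

	ret |= (value >> 4) & SDHCI_DATA_LVL_MASK;	/* bits 27:24 -> 23:20 */
	ret |= (value << 1) & SDHCI_CMD_LVL;		/* bit 23 -> bit 24 */
	return ret;
}

int main(void)
{
	/* hypothetical raw value: DAT[3:0] = 0xF at 27:24, CMD = 1 at bit 23 */
	uint32_t raw = (0xFu << 24) | (1u << 23);

	printf("raw 0x%08x -> fixed 0x%08x\n",
	       (unsigned int)raw, (unsigned int)esdhc_fixup_present_state(raw));
	return 0;
}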
#define SDHCI_DATA_LVL_MASK 0x00F00000
#define SDHCI_DATA_LVL_SHIFT 20
#define SDHCI_DATA_0_LVL_MASK 0x00100000
+#define SDHCI_CMD_LVL 0x01000000
#define SDHCI_HOST_CONTROL 0x28
#define SDHCI_CTRL_LED 0x01
vl->members |= BIT(port) | BIT(cpu_port);
if (untagged)
- vl->untag |= BIT(port) | BIT(cpu_port);
+ vl->untag |= BIT(port);
else
- vl->untag &= ~(BIT(port) | BIT(cpu_port));
+ vl->untag &= ~BIT(port);
+ vl->untag &= ~BIT(cpu_port);
b53_set_vlan_entry(dev, vid, vl);
b53_fast_age_vlan(dev, vid);
if (pvid) {
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port),
vlan->vid_end);
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port),
- vlan->vid_end);
b53_fast_age_vlan(dev, vid);
}
}
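The membership/untag handling above keeps the CPU port a tagged member of every VLAN while only the user port toggles between tagged and untagged. A standalone sketch of the bit manipulation with hypothetical port numbers:

#include <stdio.h>
#include <stdint.h>

#define BIT(n)	(1U << (n))

int main(void)
{
	/* hypothetical: user port 2, CPU port 8, untagged VLAN add */
	unsigned int port = 2, cpu_port = 8;
	int untagged = 1;
	uint16_t members = 0, untag = 0;

	members |= BIT(port) | BIT(cpu_port);
	if (untagged)
		untag |= BIT(port);
	else
		untag &= ~BIT(port);
	untag &= ~BIT(cpu_port);	/* frames towards the CPU stay tagged */

	printf("members=0x%04x untag=0x%04x\n", members, untag);
	return 0;
}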
{
struct b53_device *dev = ds->priv;
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
- unsigned int cpu_port = dev->cpu_port;
struct b53_vlan *vl;
u16 vid;
u16 pvid;
b53_get_vlan_entry(dev, vid, vl);
vl->members &= ~BIT(port);
- if ((vl->members & BIT(cpu_port)) == BIT(cpu_port))
- vl->members = 0;
if (pvid == vid) {
if (is5325(dev) || is5365(dev))
pvid = 0;
}
- if (untagged) {
+ if (untagged)
vl->untag &= ~(BIT(port));
- if ((vl->untag & BIT(cpu_port)) == BIT(cpu_port))
- vl->untag = 0;
- }
b53_set_vlan_entry(dev, vid, vl);
b53_fast_age_vlan(dev, vid);
}
b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(port), pvid);
- b53_write16(dev, B53_VLAN_PAGE, B53_VLAN_PORT_DEF_TAG(cpu_port), pvid);
b53_fast_age_vlan(dev, pvid);
return 0;
if (ndev->flags & IFF_ALLMULTI) {
arc_reg_set(priv, R_LAFL, ~0);
arc_reg_set(priv, R_LAFH, ~0);
- } else {
+ } else if (ndev->flags & IFF_MULTICAST) {
struct netdev_hw_addr *ha;
unsigned int filter[2] = { 0, 0 };
int bit;
arc_reg_set(priv, R_LAFL, filter[0]);
arc_reg_set(priv, R_LAFH, filter[1]);
+ } else {
+ arc_reg_set(priv, R_LAFL, 0);
+ arc_reg_set(priv, R_LAFH, 0);
}
}
}
ndev->netdev_ops = &arc_emac_netdev_ops;
ndev->ethtool_ops = &arc_emac_ethtool_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
- /* FIXME :: no multicast support yet */
- ndev->flags &= ~IFF_MULTICAST;
priv = netdev_priv(ndev);
priv->dev = dev;
napi_hash_del(&bnapi->napi);
netif_napi_del(&bnapi->napi);
}
+ /* We called napi_hash_del() before netif_napi_del(), so we need
+ * to respect an RCU grace period before freeing napi structures.
+ */
+ synchronize_net();
}
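synchronize_net() here supplies the RCU grace period between unhashing the napi instances and freeing the memory that contains them. A generic sketch of that ordering, where priv is a hypothetical container holding a napi_struct:

	napi_hash_del(&priv->napi);	/* unhash: RCU readers may still see it */
	netif_napi_del(&priv->napi);
	synchronize_net();		/* wait for all RCU readers to drain */
	kfree(priv);			/* only now is the container safe to free */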
static void bnxt_init_napi(struct bnxt *bp)
lp->skb_length = skb->len;
lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
DMA_TO_DEVICE);
+ if (dma_mapping_error(NULL, lp->skb_physaddr)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ netdev_err(dev, "%s: DMA mapping error\n", __func__);
+ return NETDEV_TX_OK;
+ }
/* Set address of the data in the Transmit Address register */
macb_writel(lp, TAR, lp->skb_physaddr);
/* Min/Max packet size */
#define NIC_HW_MIN_FRS 64
-#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+#define NIC_HW_MAX_FRS 9190 /* Excluding L2 header and FCS */
/* Max pkinds */
#define NIC_MAX_PKIND 16
struct nicvf_hw_stats {
u64 rx_bytes;
+ u64 rx_frames;
u64 rx_ucast_frames;
u64 rx_bcast_frames;
u64 rx_mcast_frames;
- u64 rx_fcs_errors;
- u64 rx_l2_errors;
+ u64 rx_drops;
u64 rx_drop_red;
u64 rx_drop_red_bytes;
u64 rx_drop_overrun;
u64 rx_drop_mcast;
u64 rx_drop_l3_bcast;
u64 rx_drop_l3_mcast;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+
+ u64 tx_bytes;
+ u64 tx_frames;
+ u64 tx_ucast_frames;
+ u64 tx_bcast_frames;
+ u64 tx_mcast_frames;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* CQE Rx errs */
u64 rx_bgx_truncated_pkts;
u64 rx_jabber_errs;
u64 rx_fcs_errs;
u64 rx_l4_pclp;
u64 rx_truncated_pkts;
- u64 tx_bytes_ok;
- u64 tx_ucast_frames_ok;
- u64 tx_bcast_frames_ok;
- u64 tx_mcast_frames_ok;
- u64 tx_drops;
-};
-
-struct nicvf_drv_stats {
- /* Rx */
- u64 rx_frames_ok;
- u64 rx_frames_64;
- u64 rx_frames_127;
- u64 rx_frames_255;
- u64 rx_frames_511;
- u64 rx_frames_1023;
- u64 rx_frames_1518;
- u64 rx_frames_jumbo;
- u64 rx_drops;
-
+ /* CQE Tx errs */
+ u64 tx_desc_fault;
+ u64 tx_hdr_cons_err;
+ u64 tx_subdesc_err;
+ u64 tx_max_size_exceeded;
+ u64 tx_imm_size_oflow;
+ u64 tx_data_seq_err;
+ u64 tx_mem_seq_err;
+ u64 tx_lock_viol;
+ u64 tx_data_fault;
+ u64 tx_tstmp_conflict;
+ u64 tx_tstmp_timeout;
+ u64 tx_mem_fault;
+ u64 tx_csum_overlap;
+ u64 tx_csum_overflow;
+
+ /* driver debug stats */
u64 rcv_buffer_alloc_failures;
-
- /* Tx */
- u64 tx_frames_ok;
- u64 tx_drops;
u64 tx_tso;
u64 tx_timeout;
u64 txq_stop;
u64 txq_wake;
+
+ struct u64_stats_sync syncp;
};
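The move to a __percpu nicvf_drv_stats means hot-path counters are bumped lock-free on the local CPU and only folded together on the slow path (ethtool and ndo_get_stats). A sketch of that lifecycle using the real per-cpu helpers; error handling and surrounding function context are trimmed:

	/* sketch: per-cpu driver stats, allocated once and summed on read */
	struct nicvf_drv_stats __percpu *stats;
	u64 total = 0;
	int cpu;

	stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
	if (!stats)
		return -ENOMEM;

	this_cpu_inc(stats->tx_tso);		/* lock-free update, hot path */

	for_each_possible_cpu(cpu)		/* slow path: fold all CPUs */
		total += per_cpu_ptr(stats, cpu)->tx_tso;

	free_percpu(stats);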
struct nicvf {
u8 node;
u8 cpi_alg;
- u16 mtu;
bool link_up;
u8 duplex;
u32 speed;
/* Stats */
struct nicvf_hw_stats hw_stats;
- struct nicvf_drv_stats drv_stats;
+ struct nicvf_drv_stats __percpu *drv_stats;
struct bgx_stats bgx_stats;
/* MSI-X */
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>
+#include <linux/if_vlan.h>
#include "nic_reg.h"
#include "nic.h"
/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
- if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
- dev_err(&nic->pdev->dev,
- "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
- vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+ int bgx, lmac, lmac_cnt;
+ u64 lmac_credits;
+
+ if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS))
return 1;
- }
- new_frs += ETH_HLEN;
- if (new_frs <= nic->pkind.maxlen)
- return 0;
- nic->pkind.maxlen = new_frs;
- nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac += bgx * MAX_LMAC_PER_BGX;
+
+ new_frs += VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
+
+ /* Update corresponding LMAC credits */
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ lmac_credits = nic_reg_read(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8));
+ lmac_credits &= ~(0xFFFFFULL << 12);
+ lmac_credits |= (((((48 * 1024) / lmac_cnt) - new_frs) / 16) << 12);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8), lmac_credits);
+
+ /* Enforce MTU in HW
+ * This config is supported only from 88xx pass 2.0 onwards.
+ */
+ if (!pass1_silicon(nic->pdev))
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CFG2 + (lmac * 8), new_frs);
return 0;
}
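The credit update above divides 48 KB of buffering across the LMACs on the BGX and expresses the per-LMAC credit in 16-byte units in bits 31:12 of NIC_PF_LMAC_0_7_CREDIT (per the 0xFFFFF << 12 mask in the code). A standalone recomputation for a hypothetical 4-LMAC BGX at the 9190-byte MTU ceiling:

#include <stdio.h>
#include <stdint.h>

#define VLAN_ETH_HLEN	18	/* MAC header including one VLAN tag */
#define ETH_FCS_LEN	4

int main(void)
{
	/* hypothetical: 4 LMACs on the BGX, MTU at the 9190-byte maximum */
	int lmac_cnt = 4, mtu = 9190;
	int new_frs = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;	/* 9216 */
	uint64_t credits = (((48 * 1024) / lmac_cnt) - new_frs) / 16;

	printf("frame size %d -> credit field %llu (bits 31:12 = 0x%llx)\n",
	       new_frs, (unsigned long long)credits,
	       (unsigned long long)(credits << 12));	/* 192, 0xC0000 */
	return 0;
}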
/* PKIND configuration */
nic->pkind.minlen = 0;
- nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + VLAN_ETH_HLEN + ETH_FCS_LEN + 4;
nic->pkind.lenerr_en = 1;
nic->pkind.rx_hdr = 0;
nic->pkind.hdr_sl = 0;
nic_reg_write(nic, reg_addr, 0);
}
}
+
return 0;
}
#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_CFG2 (0x240100)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
static const struct nicvf_stat nicvf_hw_stats[] = {
NICVF_HW_STAT(rx_bytes),
+ NICVF_HW_STAT(rx_frames),
NICVF_HW_STAT(rx_ucast_frames),
NICVF_HW_STAT(rx_bcast_frames),
NICVF_HW_STAT(rx_mcast_frames),
- NICVF_HW_STAT(rx_fcs_errors),
- NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(rx_drops),
NICVF_HW_STAT(rx_drop_red),
NICVF_HW_STAT(rx_drop_red_bytes),
NICVF_HW_STAT(rx_drop_overrun),
NICVF_HW_STAT(rx_drop_mcast),
NICVF_HW_STAT(rx_drop_l3_bcast),
NICVF_HW_STAT(rx_drop_l3_mcast),
- NICVF_HW_STAT(rx_bgx_truncated_pkts),
- NICVF_HW_STAT(rx_jabber_errs),
- NICVF_HW_STAT(rx_fcs_errs),
- NICVF_HW_STAT(rx_bgx_errs),
- NICVF_HW_STAT(rx_prel2_errs),
- NICVF_HW_STAT(rx_l2_hdr_malformed),
- NICVF_HW_STAT(rx_oversize),
- NICVF_HW_STAT(rx_undersize),
- NICVF_HW_STAT(rx_l2_len_mismatch),
- NICVF_HW_STAT(rx_l2_pclp),
- NICVF_HW_STAT(rx_ip_ver_errs),
- NICVF_HW_STAT(rx_ip_csum_errs),
- NICVF_HW_STAT(rx_ip_hdr_malformed),
- NICVF_HW_STAT(rx_ip_payload_malformed),
- NICVF_HW_STAT(rx_ip_ttl_errs),
- NICVF_HW_STAT(rx_l3_pclp),
- NICVF_HW_STAT(rx_l4_malformed),
- NICVF_HW_STAT(rx_l4_csum_errs),
- NICVF_HW_STAT(rx_udp_len_errs),
- NICVF_HW_STAT(rx_l4_port_errs),
- NICVF_HW_STAT(rx_tcp_flag_errs),
- NICVF_HW_STAT(rx_tcp_offset_errs),
- NICVF_HW_STAT(rx_l4_pclp),
- NICVF_HW_STAT(rx_truncated_pkts),
- NICVF_HW_STAT(tx_bytes_ok),
- NICVF_HW_STAT(tx_ucast_frames_ok),
- NICVF_HW_STAT(tx_bcast_frames_ok),
- NICVF_HW_STAT(tx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_fcs_errors),
+ NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(tx_bytes),
+ NICVF_HW_STAT(tx_frames),
+ NICVF_HW_STAT(tx_ucast_frames),
+ NICVF_HW_STAT(tx_bcast_frames),
+ NICVF_HW_STAT(tx_mcast_frames),
+ NICVF_HW_STAT(tx_drops),
};
static const struct nicvf_stat nicvf_drv_stats[] = {
- NICVF_DRV_STAT(rx_frames_ok),
- NICVF_DRV_STAT(rx_frames_64),
- NICVF_DRV_STAT(rx_frames_127),
- NICVF_DRV_STAT(rx_frames_255),
- NICVF_DRV_STAT(rx_frames_511),
- NICVF_DRV_STAT(rx_frames_1023),
- NICVF_DRV_STAT(rx_frames_1518),
- NICVF_DRV_STAT(rx_frames_jumbo),
- NICVF_DRV_STAT(rx_drops),
+ NICVF_DRV_STAT(rx_bgx_truncated_pkts),
+ NICVF_DRV_STAT(rx_jabber_errs),
+ NICVF_DRV_STAT(rx_fcs_errs),
+ NICVF_DRV_STAT(rx_bgx_errs),
+ NICVF_DRV_STAT(rx_prel2_errs),
+ NICVF_DRV_STAT(rx_l2_hdr_malformed),
+ NICVF_DRV_STAT(rx_oversize),
+ NICVF_DRV_STAT(rx_undersize),
+ NICVF_DRV_STAT(rx_l2_len_mismatch),
+ NICVF_DRV_STAT(rx_l2_pclp),
+ NICVF_DRV_STAT(rx_ip_ver_errs),
+ NICVF_DRV_STAT(rx_ip_csum_errs),
+ NICVF_DRV_STAT(rx_ip_hdr_malformed),
+ NICVF_DRV_STAT(rx_ip_payload_malformed),
+ NICVF_DRV_STAT(rx_ip_ttl_errs),
+ NICVF_DRV_STAT(rx_l3_pclp),
+ NICVF_DRV_STAT(rx_l4_malformed),
+ NICVF_DRV_STAT(rx_l4_csum_errs),
+ NICVF_DRV_STAT(rx_udp_len_errs),
+ NICVF_DRV_STAT(rx_l4_port_errs),
+ NICVF_DRV_STAT(rx_tcp_flag_errs),
+ NICVF_DRV_STAT(rx_tcp_offset_errs),
+ NICVF_DRV_STAT(rx_l4_pclp),
+ NICVF_DRV_STAT(rx_truncated_pkts),
+
+ NICVF_DRV_STAT(tx_desc_fault),
+ NICVF_DRV_STAT(tx_hdr_cons_err),
+ NICVF_DRV_STAT(tx_subdesc_err),
+ NICVF_DRV_STAT(tx_max_size_exceeded),
+ NICVF_DRV_STAT(tx_imm_size_oflow),
+ NICVF_DRV_STAT(tx_data_seq_err),
+ NICVF_DRV_STAT(tx_mem_seq_err),
+ NICVF_DRV_STAT(tx_lock_viol),
+ NICVF_DRV_STAT(tx_data_fault),
+ NICVF_DRV_STAT(tx_tstmp_conflict),
+ NICVF_DRV_STAT(tx_tstmp_timeout),
+ NICVF_DRV_STAT(tx_mem_fault),
+ NICVF_DRV_STAT(tx_csum_overlap),
+ NICVF_DRV_STAT(tx_csum_overflow),
+
NICVF_DRV_STAT(rcv_buffer_alloc_failures),
- NICVF_DRV_STAT(tx_frames_ok),
NICVF_DRV_STAT(tx_tso),
- NICVF_DRV_STAT(tx_drops),
NICVF_DRV_STAT(tx_timeout),
NICVF_DRV_STAT(txq_stop),
NICVF_DRV_STAT(txq_wake),
struct ethtool_stats *stats, u64 *data)
{
struct nicvf *nic = netdev_priv(netdev);
- int stat;
- int sqs;
+ int stat, tmp_stats;
+ int sqs, cpu;
nicvf_update_stats(nic);
for (stat = 0; stat < nicvf_n_hw_stats; stat++)
*(data++) = ((u64 *)&nic->hw_stats)
[nicvf_hw_stats[stat].index];
- for (stat = 0; stat < nicvf_n_drv_stats; stat++)
- *(data++) = ((u64 *)&nic->drv_stats)
- [nicvf_drv_stats[stat].index];
+ for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
+ tmp_stats = 0;
+ for_each_possible_cpu(cpu)
+ tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
+ [nicvf_drv_stats[stat].index];
+ *(data++) = tmp_stats;
+ }
nicvf_get_qset_stats(nic, stats, &data);
return qidx;
}
-static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
- struct sk_buff *skb)
-{
- if (skb->len <= 64)
- nic->drv_stats.rx_frames_64++;
- else if (skb->len <= 127)
- nic->drv_stats.rx_frames_127++;
- else if (skb->len <= 255)
- nic->drv_stats.rx_frames_255++;
- else if (skb->len <= 511)
- nic->drv_stats.rx_frames_511++;
- else if (skb->len <= 1023)
- nic->drv_stats.rx_frames_1023++;
- else if (skb->len <= 1518)
- nic->drv_stats.rx_frames_1518++;
- else
- nic->drv_stats.rx_frames_jumbo++;
-}
-
/* The Cavium ThunderX network controller can *only* be found in SoCs
* containing the ThunderX ARM64 CPU implementation. All accesses to the device
* registers on this platform are implicitly strongly ordered with respect
static int nicvf_init_resources(struct nicvf *nic)
{
int err;
- union nic_mbx mbx = {};
-
- mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
/* Enable Qset */
nicvf_qset_config(nic, true);
return err;
}
- /* Send VF config done msg to PF */
- nicvf_write_to_mbx(nic, &mbx);
-
return 0;
}
static void nicvf_snd_pkt_handler(struct net_device *netdev,
- struct cmp_queue *cq,
struct cqe_send_t *cqe_tx,
int cqe_type, int budget,
unsigned int *tx_pkts, unsigned int *tx_bytes)
__func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
cqe_tx->sqe_ptr, hdr->subdesc_cnt);
- nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+ nicvf_check_cqe_tx_errs(nic, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
if (skb) {
/* Check for dummy descriptor used for HW TSO offload on 88xx */
return;
}
- nicvf_set_rx_frame_cnt(nic, skb);
-
nicvf_set_rxhash(netdev, cqe_rx, skb);
skb_record_rx_queue(skb, rq_idx);
work_done++;
break;
case CQE_TYPE_SEND:
- nicvf_snd_pkt_handler(netdev, cq,
+ nicvf_snd_pkt_handler(netdev,
(void *)cq_desc, CQE_TYPE_SEND,
budget, &tx_pkts, &tx_bytes);
tx_done++;
nic = nic->pnicvf;
if (netif_tx_queue_stopped(txq) && netif_carrier_ok(netdev)) {
netif_tx_start_queue(txq);
- nic->drv_stats.txq_wake++;
+ this_cpu_inc(nic->drv_stats->txq_wake);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit queue wakeup SQ%d\n",
if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
netif_tx_stop_queue(txq);
- nic->drv_stats.txq_stop++;
+ this_cpu_inc(nic->drv_stats->txq_stop);
if (netif_msg_tx_err(nic))
netdev_warn(netdev,
"%s: Transmit ring full, stopping SQ%d\n",
return 0;
}
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+ union nic_mbx mbx = {};
+
+ mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
int nicvf_open(struct net_device *netdev)
{
- int err, qidx;
+ int cpu, err, qidx;
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
struct nicvf_cq_poll *cq_poll = NULL;
-
- nic->mtu = netdev->mtu;
+ union nic_mbx mbx = {};
netif_carrier_off(netdev);
if (nic->sqs_mode)
nicvf_get_primary_vf_struct(nic);
- /* Configure receive side scaling */
- if (!nic->sqs_mode)
+ /* Configure receive side scaling and MTU */
+ if (!nic->sqs_mode) {
nicvf_rss_init(nic);
+ if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+ goto cleanup;
+
+ /* Clear percpu stats */
+ for_each_possible_cpu(cpu)
+ memset(per_cpu_ptr(nic->drv_stats, cpu), 0,
+ sizeof(struct nicvf_drv_stats));
+ }
err = nicvf_register_interrupts(nic);
if (err)
for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
- nic->drv_stats.txq_stop = 0;
- nic->drv_stats.txq_wake = 0;
+ /* Send VF config done msg to PF */
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+ nicvf_write_to_mbx(nic, &mbx);
return 0;
cleanup:
return err;
}
-static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
-{
- union nic_mbx mbx = {};
-
- mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
- mbx.frs.max_frs = mtu;
- mbx.frs.vf_id = nic->vf_id;
-
- return nicvf_send_msg_to_pf(nic, &mbx);
-}
-
static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nicvf *nic = netdev_priv(netdev);
if (new_mtu < NIC_HW_MIN_FRS)
return -EINVAL;
+ netdev->mtu = new_mtu;
+
+ if (!netif_running(netdev))
+ return 0;
+
if (nicvf_update_hw_max_frs(nic, new_mtu))
return -EINVAL;
- netdev->mtu = new_mtu;
- nic->mtu = new_mtu;
return 0;
}
void nicvf_update_stats(struct nicvf *nic)
{
- int qidx;
+ int qidx, cpu;
+ u64 tmp_stats = 0;
struct nicvf_hw_stats *stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+ struct nicvf_drv_stats *drv_stats;
struct queue_set *qs = nic->qs;
#define GET_RX_STATS(reg) \
stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
- stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
- stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
- stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
- stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+ stats->tx_bytes = GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames = GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames = GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames = GET_TX_STATS(TX_MCAST);
stats->tx_drops = GET_TX_STATS(TX_DROP);
- drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
- stats->tx_bcast_frames_ok +
- stats->tx_mcast_frames_ok;
- drv_stats->rx_frames_ok = stats->rx_ucast_frames +
- stats->rx_bcast_frames +
- stats->rx_mcast_frames;
- drv_stats->rx_drops = stats->rx_drop_red +
- stats->rx_drop_overrun;
- drv_stats->tx_drops = stats->tx_drops;
+ /* On T88 pass 2.0, the dummy SQE added for TSO notification
+ * via CQE has 'dont_send' set. Hence HW drops the pkt pointed to
+ * by the dummy SQE and results in the tx_drops counter being
+ * incremented. Subtracting it from tx_tso counter will give
+ * exact tx_drops counter.
+ */
+ if (nic->t88 && nic->hw_tso) {
+ for_each_possible_cpu(cpu) {
+ drv_stats = per_cpu_ptr(nic->drv_stats, cpu);
+ tmp_stats += drv_stats->tx_tso;
+ }
+ stats->tx_drops = tmp_stats - stats->tx_drops;
+ }
+ stats->tx_frames = stats->tx_ucast_frames +
+ stats->tx_bcast_frames +
+ stats->tx_mcast_frames;
+ stats->rx_frames = stats->rx_ucast_frames +
+ stats->rx_bcast_frames +
+ stats->rx_mcast_frames;
+ stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
/* Update RQ and SQ stats */
for (qidx = 0; qidx < qs->rq_cnt; qidx++)
{
struct nicvf *nic = netdev_priv(netdev);
struct nicvf_hw_stats *hw_stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
nicvf_update_stats(nic);
stats->rx_bytes = hw_stats->rx_bytes;
- stats->rx_packets = drv_stats->rx_frames_ok;
- stats->rx_dropped = drv_stats->rx_drops;
+ stats->rx_packets = hw_stats->rx_frames;
+ stats->rx_dropped = hw_stats->rx_drops;
stats->multicast = hw_stats->rx_mcast_frames;
- stats->tx_bytes = hw_stats->tx_bytes_ok;
- stats->tx_packets = drv_stats->tx_frames_ok;
- stats->tx_dropped = drv_stats->tx_drops;
+ stats->tx_bytes = hw_stats->tx_bytes;
+ stats->tx_packets = hw_stats->tx_frames;
+ stats->tx_dropped = hw_stats->tx_drops;
return stats;
}
netdev_warn(dev, "%s: Transmit timed out, resetting\n",
dev->name);
- nic->drv_stats.tx_timeout++;
+ this_cpu_inc(nic->drv_stats->tx_timeout);
schedule_work(&nic->reset_task);
}
goto err_free_netdev;
}
+ nic->drv_stats = netdev_alloc_pcpu_stats(struct nicvf_drv_stats);
+ if (!nic->drv_stats) {
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
err = nicvf_set_qset_resources(nic);
if (err)
goto err_free_netdev;
nicvf_unregister_interrupts(nic);
err_free_netdev:
pci_set_drvdata(pdev, NULL);
+ if (nic->drv_stats)
+ free_percpu(nic->drv_stats);
free_netdev(netdev);
err_release_regions:
pci_release_regions(pdev);
unregister_netdev(pnetdev);
nicvf_unregister_interrupts(nic);
pci_set_drvdata(pdev, NULL);
+ if (nic->drv_stats)
+ free_percpu(nic->drv_stats);
free_netdev(netdev);
pci_release_regions(pdev);
pci_disable_device(pdev);
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
- nic->drv_stats.rcv_buffer_alloc_failures++;
+ this_cpu_inc(nic->pnicvf->drv_stats->
+ rcv_buffer_alloc_failures);
return -ENOMEM;
}
nic->rb_page_offset = 0;
rbdr_idx, new_rb);
next_rbdr:
/* Re-enable RBDR interrupts only if buffer allocation is success */
- if (!nic->rb_alloc_fail && rbdr->enable)
+ if (!nic->rb_alloc_fail && rbdr->enable &&
+ netif_running(nic->pnicvf->netdev))
nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
if (rbdr_idx)
static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
+ struct sk_buff *skb;
+
if (!sq)
return;
if (!sq->dmem.base)
sq->dmem.q_len * TSO_HEADER_SIZE,
sq->tso_hdrs, sq->tso_hdrs_phys);
+ /* Free pending skbs in the queue */
+ smp_rmb();
+ while (sq->head != sq->tail) {
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ if (skb)
+ dev_kfree_skb_any(skb);
+ sq->head++;
+ sq->head &= (sq->dmem.q_len - 1);
+ }
kfree(sq->skbuff);
nicvf_free_q_desc_mem(nic, &sq->dmem);
}
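The drain loop above walks head toward tail with a mask-based wrap, which only works because q_len is a power of two. A standalone illustration with a hypothetical 8-entry ring:

#include <stdio.h>

int main(void)
{
	/* hypothetical ring: 8 entries, head at 6, tail at 2 */
	unsigned int q_len = 8, head = 6, tail = 2, freed = 0;

	/* walk from head to tail the way the SQ drain loop does */
	while (head != tail) {
		freed++;				/* "free the skb at head" */
		head = (head + 1) & (q_len - 1);	/* wraps 6, 7, 0, 1 */
	}
	printf("freed %u pending entries\n", freed);	/* 4 */
	return 0;
}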
{
union nic_mbx mbx = {};
- /* Reset all RXQ's stats */
+ /* Reset all RQ/SQ and VF stats */
mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER;
+ mbx.reset_stat.rx_stat_mask = 0x3FFF;
+ mbx.reset_stat.tx_stat_mask = 0x1F;
mbx.reset_stat.rq_stat_mask = 0xFFFF;
+ mbx.reset_stat.sq_stat_mask = 0xFFFF;
nicvf_send_msg_to_pf(nic, &mbx);
}
mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
nicvf_send_msg_to_pf(nic, &mbx);
- nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00);
- if (!nic->sqs_mode)
+ if (!nic->sqs_mode && (qidx == 0)) {
+ /* Enable checking L3/L4 length and TCP/UDP checksums */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
+ (BIT(24) | BIT(23) | BIT(21)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
+ }
/* Enable Receive queue */
memset(&rq_cfg, 0, sizeof(struct rq_cfg));
hdr->tso_max_paysize = skb_shinfo(skb)->gso_size;
/* For non-tunneled pkts, point this to L2 ethertype */
hdr->inner_l3_offset = skb_network_offset(skb) - 2;
- nic->drv_stats.tx_tso++;
+ this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
}
}
nicvf_sq_doorbell(nic, skb, sq_num, desc_cnt);
- nic->drv_stats.tx_tso++;
+ this_cpu_inc(nic->pnicvf->drv_stats->tx_tso);
return 1;
}
/* Check for errors in the receive cmp.queue entry */
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
- struct nicvf_hw_stats *stats = &nic->hw_stats;
-
if (!cqe_rx->err_level && !cqe_rx->err_opcode)
return 0;
switch (cqe_rx->err_opcode) {
case CQ_RX_ERROP_RE_PARTIAL:
- stats->rx_bgx_truncated_pkts++;
+ this_cpu_inc(nic->drv_stats->rx_bgx_truncated_pkts);
break;
case CQ_RX_ERROP_RE_JABBER:
- stats->rx_jabber_errs++;
+ this_cpu_inc(nic->drv_stats->rx_jabber_errs);
break;
case CQ_RX_ERROP_RE_FCS:
- stats->rx_fcs_errs++;
+ this_cpu_inc(nic->drv_stats->rx_fcs_errs);
break;
case CQ_RX_ERROP_RE_RX_CTL:
- stats->rx_bgx_errs++;
+ this_cpu_inc(nic->drv_stats->rx_bgx_errs);
break;
case CQ_RX_ERROP_PREL2_ERR:
- stats->rx_prel2_errs++;
+ this_cpu_inc(nic->drv_stats->rx_prel2_errs);
break;
case CQ_RX_ERROP_L2_MAL:
- stats->rx_l2_hdr_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_l2_hdr_malformed);
break;
case CQ_RX_ERROP_L2_OVERSIZE:
- stats->rx_oversize++;
+ this_cpu_inc(nic->drv_stats->rx_oversize);
break;
case CQ_RX_ERROP_L2_UNDERSIZE:
- stats->rx_undersize++;
+ this_cpu_inc(nic->drv_stats->rx_undersize);
break;
case CQ_RX_ERROP_L2_LENMISM:
- stats->rx_l2_len_mismatch++;
+ this_cpu_inc(nic->drv_stats->rx_l2_len_mismatch);
break;
case CQ_RX_ERROP_L2_PCLP:
- stats->rx_l2_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l2_pclp);
break;
case CQ_RX_ERROP_IP_NOT:
- stats->rx_ip_ver_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_ver_errs);
break;
case CQ_RX_ERROP_IP_CSUM_ERR:
- stats->rx_ip_csum_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_csum_errs);
break;
case CQ_RX_ERROP_IP_MAL:
- stats->rx_ip_hdr_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_ip_hdr_malformed);
break;
case CQ_RX_ERROP_IP_MALD:
- stats->rx_ip_payload_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_ip_payload_malformed);
break;
case CQ_RX_ERROP_IP_HOP:
- stats->rx_ip_ttl_errs++;
+ this_cpu_inc(nic->drv_stats->rx_ip_ttl_errs);
break;
case CQ_RX_ERROP_L3_PCLP:
- stats->rx_l3_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l3_pclp);
break;
case CQ_RX_ERROP_L4_MAL:
- stats->rx_l4_malformed++;
+ this_cpu_inc(nic->drv_stats->rx_l4_malformed);
break;
case CQ_RX_ERROP_L4_CHK:
- stats->rx_l4_csum_errs++;
+ this_cpu_inc(nic->drv_stats->rx_l4_csum_errs);
break;
case CQ_RX_ERROP_UDP_LEN:
- stats->rx_udp_len_errs++;
+ this_cpu_inc(nic->drv_stats->rx_udp_len_errs);
break;
case CQ_RX_ERROP_L4_PORT:
- stats->rx_l4_port_errs++;
+ this_cpu_inc(nic->drv_stats->rx_l4_port_errs);
break;
case CQ_RX_ERROP_TCP_FLAG:
- stats->rx_tcp_flag_errs++;
+ this_cpu_inc(nic->drv_stats->rx_tcp_flag_errs);
break;
case CQ_RX_ERROP_TCP_OFFSET:
- stats->rx_tcp_offset_errs++;
+ this_cpu_inc(nic->drv_stats->rx_tcp_offset_errs);
break;
case CQ_RX_ERROP_L4_PCLP:
- stats->rx_l4_pclp++;
+ this_cpu_inc(nic->drv_stats->rx_l4_pclp);
break;
case CQ_RX_ERROP_RBDR_TRUNC:
- stats->rx_truncated_pkts++;
+ this_cpu_inc(nic->drv_stats->rx_truncated_pkts);
break;
}
}
/* Check for errors in the send cmp.queue entry */
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx)
{
- struct cmp_queue_stats *stats = &cq->stats;
-
switch (cqe_tx->send_status) {
case CQ_TX_ERROP_GOOD:
- stats->tx.good++;
return 0;
case CQ_TX_ERROP_DESC_FAULT:
- stats->tx.desc_fault++;
+ this_cpu_inc(nic->drv_stats->tx_desc_fault);
break;
case CQ_TX_ERROP_HDR_CONS_ERR:
- stats->tx.hdr_cons_err++;
+ this_cpu_inc(nic->drv_stats->tx_hdr_cons_err);
break;
case CQ_TX_ERROP_SUBDC_ERR:
- stats->tx.subdesc_err++;
+ this_cpu_inc(nic->drv_stats->tx_subdesc_err);
+ break;
+ case CQ_TX_ERROP_MAX_SIZE_VIOL:
+ this_cpu_inc(nic->drv_stats->tx_max_size_exceeded);
break;
case CQ_TX_ERROP_IMM_SIZE_OFLOW:
- stats->tx.imm_size_oflow++;
+ this_cpu_inc(nic->drv_stats->tx_imm_size_oflow);
break;
case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
- stats->tx.data_seq_err++;
+ this_cpu_inc(nic->drv_stats->tx_data_seq_err);
break;
case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
- stats->tx.mem_seq_err++;
+ this_cpu_inc(nic->drv_stats->tx_mem_seq_err);
break;
case CQ_TX_ERROP_LOCK_VIOL:
- stats->tx.lock_viol++;
+ this_cpu_inc(nic->drv_stats->tx_lock_viol);
break;
case CQ_TX_ERROP_DATA_FAULT:
- stats->tx.data_fault++;
+ this_cpu_inc(nic->drv_stats->tx_data_fault);
break;
case CQ_TX_ERROP_TSTMP_CONFLICT:
- stats->tx.tstmp_conflict++;
+ this_cpu_inc(nic->drv_stats->tx_tstmp_conflict);
break;
case CQ_TX_ERROP_TSTMP_TIMEOUT:
- stats->tx.tstmp_timeout++;
+ this_cpu_inc(nic->drv_stats->tx_tstmp_timeout);
break;
case CQ_TX_ERROP_MEM_FAULT:
- stats->tx.mem_fault++;
+ this_cpu_inc(nic->drv_stats->tx_mem_fault);
break;
case CQ_TX_ERROP_CK_OVERLAP:
- stats->tx.csum_overlap++;
+ this_cpu_inc(nic->drv_stats->tx_csum_overlap);
break;
case CQ_TX_ERROP_CK_OFLOW:
- stats->tx.csum_overflow++;
+ this_cpu_inc(nic->drv_stats->tx_csum_overflow);
break;
}
CQ_TX_ERROP_DESC_FAULT = 0x10,
CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_MAX_SIZE_VIOL = 0x13,
CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
CQ_TX_ERROP_ENUM_LAST = 0x8a,
};
-struct cmp_queue_stats {
- struct tx_stats {
- u64 good;
- u64 desc_fault;
- u64 hdr_cons_err;
- u64 subdesc_err;
- u64 imm_size_oflow;
- u64 data_seq_err;
- u64 mem_seq_err;
- u64 lock_viol;
- u64 data_fault;
- u64 tstmp_conflict;
- u64 tstmp_timeout;
- u64 mem_fault;
- u64 csum_overlap;
- u64 csum_overflow;
- } tx;
-} ____cacheline_aligned_in_smp;
-
enum RQ_SQ_STATS {
RQ_SQ_STATS_OCTS,
RQ_SQ_STATS_PKTS,
spinlock_t lock; /* lock to serialize processing CQEs */
void *desc;
struct q_desc_mem dmem;
- struct cmp_queue_stats stats;
int irq;
} ____cacheline_aligned_in_smp;
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
-int nicvf_check_cqe_tx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
- bgx->bgx_id =
- (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id = (pci_resource_start(pdev,
+ PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
#define MAX_DMAC_PER_LMAC 8
#define MAX_FRAME_SIZE 9216
+#define BGX_ID_MASK 0x3
+
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
/* Registers */
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
- napi_hash_del(&rq->napi);
netif_napi_del(&rq->napi);
rq->netdev = NULL;
rq->cntxt_id = rq->abs_id = 0;
if (eqo->q.created) {
be_eq_clean(eqo);
be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
- napi_hash_del(&eqo->napi);
netif_napi_del(&eqo->napi);
free_cpumask_var(eqo->affinity_mask);
}
static void sky2_shutdown(struct pci_dev *pdev)
{
+ struct sky2_hw *hw = pci_get_drvdata(pdev);
+ int port;
+
+ for (port = 0; port < hw->ports; port++) {
+ struct net_device *ndev = hw->dev[port];
+
+ rtnl_lock();
+ if (netif_running(ndev)) {
+ dev_close(ndev);
+ netif_device_detach(ndev);
+ }
+ rtnl_unlock();
+ }
sky2_suspend(&pdev->dev);
pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
pci_set_power_state(pdev, PCI_D3hot);
config DWMAC_STM32
tristate "STM32 DWMAC support"
default ARCH_STM32
- depends on OF && HAS_IOMEM
+ depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST)
select MFD_SYSCON
---help---
Support for ethernet controller on STM32 SOCs.
#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40
#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003
#define TSE_PCS_SW_RESET_TIMEOUT 100
-#define TSE_PCS_USE_SGMII_AN_MASK BIT(2)
-#define TSE_PCS_USE_SGMII_ENA BIT(1)
+#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
+#define TSE_PCS_USE_SGMII_ENA BIT(0)
#define SGMII_ADAPTER_CTRL_REG 0x00
#define SGMII_ADAPTER_DISABLE 0x0001
unsigned long ip_csum_bypassed;
unsigned long ipv4_pkt_rcvd;
unsigned long ipv6_pkt_rcvd;
- unsigned long rx_msg_type_ext_no_ptp;
- unsigned long rx_msg_type_sync;
- unsigned long rx_msg_type_follow_up;
- unsigned long rx_msg_type_delay_req;
- unsigned long rx_msg_type_delay_resp;
- unsigned long rx_msg_type_pdelay_req;
- unsigned long rx_msg_type_pdelay_resp;
- unsigned long rx_msg_type_pdelay_follow_up;
+ unsigned long no_ptp_rx_msg_type_ext;
+ unsigned long ptp_rx_msg_type_sync;
+ unsigned long ptp_rx_msg_type_follow_up;
+ unsigned long ptp_rx_msg_type_delay_req;
+ unsigned long ptp_rx_msg_type_delay_resp;
+ unsigned long ptp_rx_msg_type_pdelay_req;
+ unsigned long ptp_rx_msg_type_pdelay_resp;
+ unsigned long ptp_rx_msg_type_pdelay_follow_up;
+ unsigned long ptp_rx_msg_type_announce;
+ unsigned long ptp_rx_msg_type_management;
+ unsigned long ptp_rx_msg_pkt_reserved_type;
unsigned long ptp_frame_type;
unsigned long ptp_ver;
unsigned long timestamp_dropped;
/* PTP and HW Timer helpers */
struct stmmac_hwtimestamp {
void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
- u32 (*config_sub_second_increment) (void __iomem *ioaddr, u32 clk_rate);
+ u32 (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+ int gmac4);
int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
int (*config_addend) (void __iomem *ioaddr, u32 addend);
int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
- int add_sub);
+ int add_sub, int gmac4);
u64(*get_systime) (void __iomem *ioaddr);
};
#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
/* Extended RDES4 message type definitions */
-#define RDES_EXT_NO_PTP 0
-#define RDES_EXT_SYNC 1
-#define RDES_EXT_FOLLOW_UP 2
-#define RDES_EXT_DELAY_REQ 3
-#define RDES_EXT_DELAY_RESP 4
-#define RDES_EXT_PDELAY_REQ 5
-#define RDES_EXT_PDELAY_RESP 6
-#define RDES_EXT_PDELAY_FOLLOW_UP 7
+#define RDES_EXT_NO_PTP 0x0
+#define RDES_EXT_SYNC 0x1
+#define RDES_EXT_FOLLOW_UP 0x2
+#define RDES_EXT_DELAY_REQ 0x3
+#define RDES_EXT_DELAY_RESP 0x4
+#define RDES_EXT_PDELAY_REQ 0x5
+#define RDES_EXT_PDELAY_RESP 0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+#define RDES_PTP_ANNOUNCE 0x8
+#define RDES_PTP_MANAGEMENT 0x9
+#define RDES_PTP_SIGNALING 0xa
+#define RDES_PTP_PKT_RESERVED_TYPE 0xf
/* Basic descriptor structure for normal and alternate descriptors */
struct dma_desc {
x->ipv4_pkt_rcvd++;
if (rdes1 & RDES1_IPV6_HEADER)
x->ipv6_pkt_rcvd++;
- if (message_type == RDES_EXT_SYNC)
- x->rx_msg_type_sync++;
+
+ if (message_type == RDES_EXT_NO_PTP)
+ x->no_ptp_rx_msg_type_ext++;
+ else if (message_type == RDES_EXT_SYNC)
+ x->ptp_rx_msg_type_sync++;
else if (message_type == RDES_EXT_FOLLOW_UP)
- x->rx_msg_type_follow_up++;
+ x->ptp_rx_msg_type_follow_up++;
else if (message_type == RDES_EXT_DELAY_REQ)
- x->rx_msg_type_delay_req++;
+ x->ptp_rx_msg_type_delay_req++;
else if (message_type == RDES_EXT_DELAY_RESP)
- x->rx_msg_type_delay_resp++;
+ x->ptp_rx_msg_type_delay_resp++;
else if (message_type == RDES_EXT_PDELAY_REQ)
- x->rx_msg_type_pdelay_req++;
+ x->ptp_rx_msg_type_pdelay_req++;
else if (message_type == RDES_EXT_PDELAY_RESP)
- x->rx_msg_type_pdelay_resp++;
+ x->ptp_rx_msg_type_pdelay_resp++;
else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
- x->rx_msg_type_pdelay_follow_up++;
- else
- x->rx_msg_type_ext_no_ptp++;
+ x->ptp_rx_msg_type_pdelay_follow_up++;
+ else if (message_type == RDES_PTP_ANNOUNCE)
+ x->ptp_rx_msg_type_announce++;
+ else if (message_type == RDES_PTP_MANAGEMENT)
+ x->ptp_rx_msg_type_management++;
+ else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+ x->ptp_rx_msg_pkt_reserved_type++;
if (rdes1 & RDES1_PTP_PACKET_TYPE)
x->ptp_frame_type++;
static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
{
- return (p->des3 & TDES3_TIMESTAMP_STATUS)
- >> TDES3_TIMESTAMP_STATUS_SHIFT;
+ /* Context type from W/B descriptor must be zero */
+ if (p->des3 & TDES3_CONTEXT_TYPE)
+ return -EINVAL;
+
+ /* Tx Timestamp Status is 1, so des0 and des1 hold valid values */
+ if (p->des3 & TDES3_TIMESTAMP_STATUS)
+ return 0;
+
+ return 1;
}
-/* NOTE: For RX CTX bit has to be checked before
- * HAVE a specific function for TX and another one for RX
- */
-static u64 dwmac4_wrback_get_timestamp(void *desc, u32 ats)
+static inline u64 dwmac4_get_timestamp(void *desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
u64 ns;
return ns;
}
-static int dwmac4_context_get_rx_timestamp_status(void *desc, u32 ats)
+static int dwmac4_rx_check_timestamp(void *desc)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u32 own, ctxt;
+ int ret = 1;
+
+ own = p->des3 & RDES3_OWN;
+ ctxt = ((p->des3 & RDES3_CONTEXT_DESCRIPTOR)
+ >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+ if (likely(!own && ctxt)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ /* Corrupted value */
+ ret = -EINVAL;
+ else
+ /* A valid Timestamp is ready to be read */
+ ret = 0;
+ }
+
+ /* Timestamp not ready */
+ return ret;
+}
+
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, u32 ats)
{
struct dma_desc *p = (struct dma_desc *)desc;
+ int ret = -EINVAL;
+
+ /* Get the status from normal w/b descriptor */
+ if (likely(p->des3 & TDES3_RS1V)) {
+ if (likely(p->des1 & RDES1_TIMESTAMP_AVAILABLE)) {
+ int i = 0;
+
+ /* Check if timestamp is OK from context descriptor */
+ do {
+ ret = dwmac4_rx_check_timestamp(desc);
+ if (ret < 0)
+ goto exit;
+ i++;
- return (p->des1 & RDES1_TIMESTAMP_AVAILABLE)
- >> RDES1_TIMESTAMP_AVAILABLE_SHIFT;
+ } while ((ret == 1) || (i < 10));
+
+ if (i == 10)
+ ret = -EBUSY;
+ }
+ }
+exit:
+ return ret;
}
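The new write-back status routine polls the following context descriptor until the timestamp snapshot is ready or a retry budget runs out. A generic bounded-poll sketch of that idea; the helper names are hypothetical and this is an illustration of the technique, not the driver's exact loop:

/* sketch: poll a "ready" predicate with a small retry budget */
static int poll_ready(void *desc, int (*check)(void *desc), int budget)
{
	int ret;

	do {
		ret = check(desc);	/* < 0 error, 0 ready, 1 not yet */
		if (ret < 0)
			return ret;
	} while (ret == 1 && --budget > 0);

	return ret ? -EBUSY : 0;
}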
static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
.get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
.enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
.get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
- .get_timestamp = dwmac4_wrback_get_timestamp,
- .get_rx_timestamp_status = dwmac4_context_get_rx_timestamp_status,
+ .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
+ .get_timestamp = dwmac4_get_timestamp,
.set_tx_ic = dwmac4_rd_set_tx_ic,
.prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
.prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
#define TDES3_CTXT_TCMSSV BIT(26)
/* TDES3 Common */
+#define TDES3_RS1V BIT(26)
+#define TDES3_RS1V_SHIFT 26
#define TDES3_LAST_DESCRIPTOR BIT(28)
#define TDES3_LAST_DESCRIPTOR_SHIFT 28
#define TDES3_FIRST_DESCRIPTOR BIT(29)
#define TDES3_CONTEXT_TYPE BIT(30)
+#define TDES3_CONTEXT_TYPE_SHIFT 30
/* TDS3 use for both format (read and write back) */
#define TDES3_OWN BIT(31)
#define RDES3_LAST_DESCRIPTOR BIT(28)
#define RDES3_FIRST_DESCRIPTOR BIT(29)
#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
+#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30
/* RDES3 (read format) */
#define RDES3_BUFFER1_VALID_ADDR BIT(24)
x->ipv4_pkt_rcvd++;
if (rdes4 & ERDES4_IPV6_PKT_RCVD)
x->ipv6_pkt_rcvd++;
- if (message_type == RDES_EXT_SYNC)
- x->rx_msg_type_sync++;
+
+ if (message_type == RDES_EXT_NO_PTP)
+ x->no_ptp_rx_msg_type_ext++;
+ else if (message_type == RDES_EXT_SYNC)
+ x->ptp_rx_msg_type_sync++;
else if (message_type == RDES_EXT_FOLLOW_UP)
- x->rx_msg_type_follow_up++;
+ x->ptp_rx_msg_type_follow_up++;
else if (message_type == RDES_EXT_DELAY_REQ)
- x->rx_msg_type_delay_req++;
+ x->ptp_rx_msg_type_delay_req++;
else if (message_type == RDES_EXT_DELAY_RESP)
- x->rx_msg_type_delay_resp++;
+ x->ptp_rx_msg_type_delay_resp++;
else if (message_type == RDES_EXT_PDELAY_REQ)
- x->rx_msg_type_pdelay_req++;
+ x->ptp_rx_msg_type_pdelay_req++;
else if (message_type == RDES_EXT_PDELAY_RESP)
- x->rx_msg_type_pdelay_resp++;
+ x->ptp_rx_msg_type_pdelay_resp++;
else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
- x->rx_msg_type_pdelay_follow_up++;
- else
- x->rx_msg_type_ext_no_ptp++;
+ x->ptp_rx_msg_type_pdelay_follow_up++;
+ else if (message_type == RDES_PTP_ANNOUNCE)
+ x->ptp_rx_msg_type_announce++;
+ else if (message_type == RDES_PTP_MANAGEMENT)
+ x->ptp_rx_msg_type_management++;
+ else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+ x->ptp_rx_msg_pkt_reserved_type++;
+
if (rdes4 & ERDES4_PTP_FRAME_TYPE)
x->ptp_frame_type++;
if (rdes4 & ERDES4_PTP_VER)
int irq_wake;
spinlock_t ptp_lock;
void __iomem *mmcaddr;
+ void __iomem *ptpaddr;
u32 rx_tail_addr;
u32 tx_tail_addr;
u32 mss;
STMMAC_STAT(ip_csum_bypassed),
STMMAC_STAT(ipv4_pkt_rcvd),
STMMAC_STAT(ipv6_pkt_rcvd),
- STMMAC_STAT(rx_msg_type_ext_no_ptp),
- STMMAC_STAT(rx_msg_type_sync),
- STMMAC_STAT(rx_msg_type_follow_up),
- STMMAC_STAT(rx_msg_type_delay_req),
- STMMAC_STAT(rx_msg_type_delay_resp),
- STMMAC_STAT(rx_msg_type_pdelay_req),
- STMMAC_STAT(rx_msg_type_pdelay_resp),
- STMMAC_STAT(rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(no_ptp_rx_msg_type_ext),
+ STMMAC_STAT(ptp_rx_msg_type_sync),
+ STMMAC_STAT(ptp_rx_msg_type_follow_up),
+ STMMAC_STAT(ptp_rx_msg_type_delay_req),
+ STMMAC_STAT(ptp_rx_msg_type_delay_resp),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(ptp_rx_msg_type_announce),
+ STMMAC_STAT(ptp_rx_msg_type_management),
+ STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
STMMAC_STAT(ptp_frame_type),
STMMAC_STAT(ptp_ver),
STMMAC_STAT(timestamp_dropped),
}
static u32 stmmac_config_sub_second_increment(void __iomem *ioaddr,
- u32 ptp_clock)
+ u32 ptp_clock, int gmac4)
{
u32 value = readl(ioaddr + PTP_TCR);
unsigned long data;
- /* Convert the ptp_clock to nano second
- * formula = (2/ptp_clock) * 1000000000
- * where, ptp_clock = 50MHz.
+ /* For GMAC3.x, 4.x versions, convert the ptp_clock to nano second
+ * formula = (1/ptp_clock) * 1000000000
+ * where ptp_clock is 50MHz if the fine correction method is used to
+ * update the system time
*/
- data = (2000000000ULL / ptp_clock);
+ if (value & PTP_TCR_TSCFUPDT)
+ data = (1000000000ULL / 50000000);
+ else
+ data = (1000000000ULL / ptp_clock);
/* 0.465ns accuracy */
if (!(value & PTP_TCR_TSCTRLSSR))
data = (data * 1000) / 465;
+ data &= PTP_SSIR_SSINC_MASK;
+
+ if (gmac4)
+ data = data << GMAC4_PTP_SSIR_SSINC_SHIFT;
+
writel(data, ioaddr + PTP_SSIR);
return data;
}
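With the fine correction method the sub-second increment is fixed by the 50 MHz PTP reference (20 ns); with the coarse method it follows the actual ptp_clock, and in binary-rollover mode the value is rescaled to roughly 0.465 ns units. A standalone recomputation of those numbers (the clock value is hypothetical):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical input: a 50 MHz PTP clock, both update methods */
	uint32_t ptp_clock = 50000000;
	uint64_t fine   = 1000000000ULL / 50000000;	/* fine: fixed 50 MHz */
	uint64_t coarse = 1000000000ULL / ptp_clock;	/* coarse: real clock */

	printf("fine   sub-second increment: %llu ns\n",
	       (unsigned long long)fine);		/* 20 ns */
	printf("coarse sub-second increment: %llu ns\n",
	       (unsigned long long)coarse);

	/* binary (non-digital) rollover counts in ~0.465 ns units */
	printf("fine, binary rollover: %llu units\n",
	       (unsigned long long)(fine * 1000 / 465));	/* 43 */
	return 0;
}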
static int stmmac_adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
- int add_sub)
+ int add_sub, int gmac4)
{
u32 value;
int limit;
+ if (add_sub) {
+ /* If the new sec value needs to be subtracted with
+ * the system time, then MAC_STSUR reg should be
+ * programmed with (2^32 - <new_sec_value>)
+ */
+ if (gmac4)
+ sec = (100000000ULL - sec);
+
+ value = readl(ioaddr + PTP_TCR);
+ if (value & PTP_TCR_TSCTRLSSR)
+ nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec);
+ else
+ nsec = (PTP_BINARY_ROLLOVER_MODE - nsec);
+ }
+
writel(sec, ioaddr + PTP_STSUR);
- writel(((add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec),
- ioaddr + PTP_STNSUR);
+ value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+ writel(value, ioaddr + PTP_STNSUR);
+
/* issue command to initialize the system time value */
value = readl(ioaddr + PTP_TCR);
value |= PTP_TCR_TSUPDT;
{
u64 ns;
+ /* Get the TSSS value */
ns = readl(ioaddr + PTP_STNSR);
- /* convert sec time value to nanosecond */
+ /* Get the TSS and convert sec time value to nanosecond */
ns += readl(ioaddr + PTP_STSR) * 1000000000ULL;
return ns;
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
* @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
* @skb : the socket buffer
* Description :
* This function will read timestamp from the descriptor & pass it to stack.
* and also perform some sanity checks.
*/
static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
- unsigned int entry, struct sk_buff *skb)
+ struct dma_desc *p, struct sk_buff *skb)
{
struct skb_shared_hwtstamps shhwtstamp;
u64 ns;
- void *desc = NULL;
if (!priv->hwts_tx_en)
return;
if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
return;
- if (priv->adv_ts)
- desc = (priv->dma_etx + entry);
- else
- desc = (priv->dma_tx + entry);
-
/* check tx tstamp status */
- if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
- return;
+ if (!priv->hw->desc->get_tx_timestamp_status(p)) {
+ /* get the valid tstamp */
+ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- /* get the valid tstamp */
- ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
- memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamp.hwtstamp = ns_to_ktime(ns);
- /* pass tstamp to stack */
- skb_tstamp_tx(skb, &shhwtstamp);
+ netdev_info(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+ }
return;
}
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
* @priv: driver private structure
- * @entry : descriptor index to be used.
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
* @skb : the socket buffer
* Description :
* This function will read received packet's timestamp from the descriptor
* and pass it to stack. It also perform some sanity checks.
*/
-static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
- unsigned int entry, struct sk_buff *skb)
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+ struct dma_desc *np, struct sk_buff *skb)
{
struct skb_shared_hwtstamps *shhwtstamp = NULL;
u64 ns;
- void *desc = NULL;
if (!priv->hwts_rx_en)
return;
- if (priv->adv_ts)
- desc = (priv->dma_erx + entry);
- else
- desc = (priv->dma_rx + entry);
-
- /* exit if rx tstamp is not valid */
- if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
- return;
+ /* Check if timestamp is available */
+ if (!priv->hw->desc->get_rx_timestamp_status(p, priv->adv_ts)) {
+ /* For GMAC4, the valid timestamp is from CTX next desc. */
+ if (priv->plat->has_gmac4)
+ ns = priv->hw->desc->get_timestamp(np, priv->adv_ts);
+ else
+ ns = priv->hw->desc->get_timestamp(p, priv->adv_ts);
- /* get valid tstamp */
- ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
- shhwtstamp = skb_hwtstamps(skb);
- memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
- shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ netdev_info(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ } else {
+ netdev_err(priv->dev, "cannot get RX hw timestamp\n");
+ }
}
/**
priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
if (!priv->hwts_tx_en && !priv->hwts_rx_en)
- priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
+ priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, 0);
else {
value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
tstamp_all | ptp_v2 | ptp_over_ethernet |
ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
ts_master_en | snap_type_sel);
- priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);
+ priv->hw->ptp->config_hw_tstamping(priv->ptpaddr, value);
/* program Sub Second Increment reg */
sec_inc = priv->hw->ptp->config_sub_second_increment(
- priv->ioaddr, priv->clk_ptp_rate);
+ priv->ptpaddr, priv->clk_ptp_rate,
+ priv->plat->has_gmac4);
temp = div_u64(1000000000ULL, sec_inc);
/* calculate default added value:
*/
temp = (u64)(temp << 32);
priv->default_addend = div_u64(temp, priv->clk_ptp_rate);
- priv->hw->ptp->config_addend(priv->ioaddr,
+ priv->hw->ptp->config_addend(priv->ptpaddr,
priv->default_addend);
/* initialize system time */
ktime_get_real_ts64(&now);
/* lower 32 bits of tv_sec are safe until y2106 */
- priv->hw->ptp->init_systime(priv->ioaddr, (u32)now.tv_sec,
+ priv->hw->ptp->init_systime(priv->ptpaddr, (u32)now.tv_sec,
now.tv_nsec);
}
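The default addend programmed above follows directly from the arithmetic in this hunk: temp = 10^9 / sec_inc is the target counter rate, and default_addend = (temp << 32) / clk_ptp_rate scales that against the real PTP clock. A minimal standalone sketch of the same computation, using made-up numbers (a hypothetical 250 MHz clk_ptp_rate and a 20 ns sub-second increment):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t clk_ptp_rate = 250000000ULL;	/* hypothetical PTP clock, Hz */
	uint64_t sec_inc = 20;			/* hypothetical sub-second increment, ns */
	uint64_t temp, addend;

	temp = 1000000000ULL / sec_inc;		/* 50,000,000: target counter rate */
	addend = (temp << 32) / clk_ptp_rate;	/* 2^32 * 50e6 / 250e6 = 858993459 */

	printf("default_addend = %llu\n", (unsigned long long)addend);
	return 0;
}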
priv->dev->stats.tx_packets++;
priv->xstats.tx_pkt_n++;
}
- stmmac_get_tx_hwtstamp(priv, entry, skb);
+ stmmac_get_tx_hwtstamp(priv, p, skb);
}
if (likely(priv->tx_skbuff_dma[entry].buf)) {
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- if (priv->synopsys_id >= DWMAC_CORE_4_00)
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC4_OFFSET;
priv->mmcaddr = priv->ioaddr + MMC_GMAC4_OFFSET;
- else
+ } else {
+ priv->ptpaddr = priv->ioaddr + PTP_GMAC3_X_OFFSET;
priv->mmcaddr = priv->ioaddr + MMC_GMAC3_X_OFFSET;
+ }
dwmac_mmc_intr_all_mask(priv->mmcaddr);
if (netif_msg_rx_status(priv)) {
void *rx_head;
- pr_debug("%s: descriptor ring:\n", __func__);
+ pr_info(">>>>>> %s: descriptor ring:\n", __func__);
if (priv->extend_desc)
rx_head = (void *)priv->dma_erx;
else
while (count < limit) {
int status;
struct dma_desc *p;
+ struct dma_desc *np;
if (priv->extend_desc)
p = (struct dma_desc *)(priv->dma_erx + entry);
next_entry = priv->cur_rx;
if (priv->extend_desc)
- prefetch(priv->dma_erx + next_entry);
+ np = (struct dma_desc *)(priv->dma_erx + next_entry);
else
- prefetch(priv->dma_rx + next_entry);
+ np = priv->dma_rx + next_entry;
+
+ prefetch(np);
if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
priv->hw->desc->rx_extended_status(&priv->dev->stats,
frame_len -= ETH_FCS_LEN;
if (netif_msg_rx_status(priv)) {
- pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
+ pr_info("\tdesc: %p [entry %d] buff=0x%x\n",
p, entry, des);
if (frame_len > ETH_FRAME_LEN)
pr_debug("\tframe size %d, COE: %d\n",
DMA_FROM_DEVICE);
}
- stmmac_get_rx_hwtstamp(priv, entry, skb);
-
if (netif_msg_pktdata(priv)) {
pr_debug("frame received (%dbytes)", frame_len);
print_pkt(skb->data, frame_len);
}
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+
stmmac_rx_vlan(priv->dev, skb);
skb->protocol = eth_type_trans(skb, priv->dev);
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->config_addend(priv->ioaddr, addend);
+ priv->hw->ptp->config_addend(priv->ptpaddr, addend);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
+ priv->hw->ptp->adjust_systime(priv->ptpaddr, sec, nsec, neg_adj,
+ priv->plat->has_gmac4);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
spin_lock_irqsave(&priv->ptp_lock, flags);
- ns = priv->hw->ptp->get_systime(priv->ioaddr);
+ ns = priv->hw->ptp->get_systime(priv->ptpaddr);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
spin_lock_irqsave(&priv->ptp_lock, flags);
- priv->hw->ptp->init_systime(priv->ioaddr, ts->tv_sec, ts->tv_nsec);
+ priv->hw->ptp->init_systime(priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
spin_unlock_irqrestore(&priv->ptp_lock, flags);
Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
******************************************************************************/
-#ifndef __STMMAC_PTP_H__
-#define __STMMAC_PTP_H__
+#ifndef __STMMAC_PTP_H__
+#define __STMMAC_PTP_H__
-/* IEEE 1588 PTP register offsets */
-#define PTP_TCR 0x0700 /* Timestamp Control Reg */
-#define PTP_SSIR 0x0704 /* Sub-Second Increment Reg */
-#define PTP_STSR 0x0708 /* System Time – Seconds Regr */
-#define PTP_STNSR 0x070C /* System Time – Nanoseconds Reg */
-#define PTP_STSUR 0x0710 /* System Time – Seconds Update Reg */
-#define PTP_STNSUR 0x0714 /* System Time – Nanoseconds Update Reg */
-#define PTP_TAR 0x0718 /* Timestamp Addend Reg */
-#define PTP_TTSR 0x071C /* Target Time Seconds Reg */
-#define PTP_TTNSR 0x0720 /* Target Time Nanoseconds Reg */
-#define PTP_STHWSR 0x0724 /* System Time - Higher Word Seconds Reg */
-#define PTP_TSR 0x0728 /* Timestamp Status */
+#define PTP_GMAC4_OFFSET 0xb00
+#define PTP_GMAC3_X_OFFSET 0x700
-#define PTP_STNSUR_ADDSUB_SHIFT 31
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x00 /* Timestamp Control Reg */
+#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x08 /* System Time – Seconds Reg */
+#define PTP_STNSR 0x0c /* System Time – Nanoseconds Reg */
+#define PTP_STSUR 0x10 /* System Time – Seconds Update Reg */
+#define PTP_STNSUR 0x14 /* System Time – Nanoseconds Update Reg */
+#define PTP_TAR 0x18 /* Timestamp Addend Reg */
-/* PTP TCR defines */
-#define PTP_TCR_TSENA 0x00000001 /* Timestamp Enable */
-#define PTP_TCR_TSCFUPDT 0x00000002 /* Timestamp Fine/Coarse Update */
-#define PTP_TCR_TSINIT 0x00000004 /* Timestamp Initialize */
-#define PTP_TCR_TSUPDT 0x00000008 /* Timestamp Update */
-/* Timestamp Interrupt Trigger Enable */
-#define PTP_TCR_TSTRIG 0x00000010
-#define PTP_TCR_TSADDREG 0x00000020 /* Addend Reg Update */
-#define PTP_TCR_TSENALL 0x00000100 /* Enable Timestamp for All Frames */
-/* Timestamp Digital or Binary Rollover Control */
-#define PTP_TCR_TSCTRLSSR 0x00000200
+#define PTP_STNSUR_ADDSUB_SHIFT 31
+#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 1e9 ns */
+#define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */
+/* PTP Timestamp control register defines */
+#define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */
+#define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */
+#define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */
+#define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */
+#define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */
/* Enable PTP packet Processing for Version 2 Format */
-#define PTP_TCR_TSVER2ENA 0x00000400
+#define PTP_TCR_TSVER2ENA BIT(10)
/* Enable Processing of PTP over Ethernet Frames */
-#define PTP_TCR_TSIPENA 0x00000800
+#define PTP_TCR_TSIPENA BIT(11)
/* Enable Processing of PTP Frames Sent over IPv6-UDP */
-#define PTP_TCR_TSIPV6ENA 0x00001000
+#define PTP_TCR_TSIPV6ENA BIT(12)
/* Enable Processing of PTP Frames Sent over IPv4-UDP */
-#define PTP_TCR_TSIPV4ENA 0x00002000
+#define PTP_TCR_TSIPV4ENA BIT(13)
/* Enable Timestamp Snapshot for Event Messages */
-#define PTP_TCR_TSEVNTENA 0x00004000
+#define PTP_TCR_TSEVNTENA BIT(14)
/* Enable Snapshot for Messages Relevant to Master */
-#define PTP_TCR_TSMSTRENA 0x00008000
+#define PTP_TCR_TSMSTRENA BIT(15)
/* Select PTP packets for Taking Snapshots */
-#define PTP_TCR_SNAPTYPSEL_1 0x00010000
+#define PTP_TCR_SNAPTYPSEL_1 GENMASK(17, 16)
/* Enable MAC address for PTP Frame Filtering */
-#define PTP_TCR_TSENMACADDR 0x00040000
+#define PTP_TCR_TSENMACADDR BIT(18)
+
+/* SSIR defines */
+#define PTP_SSIR_SSINC_MASK 0xff
+#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
-#endif /* __STMMAC_PTP_H__ */
+#endif /* __STMMAC_PTP_H__ */
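For reference on the two rollover constants added above: in digital rollover mode the nanoseconds counter wraps at 10^9, which is exactly 0x3B9ACA00, so the register holds 0..999999999 ns; in binary rollover mode it wraps at 2^31, so one LSB corresponds to roughly 10^9 / 2^31 ≈ 0.4657 ns, which is where the "~0.466 ns" comment comes from.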
void __iomem *gregs = bp->gregs;
void __iomem *cregs = bp->creg;
void __iomem *bregs = bp->bregs;
+ __u32 bblk_dvma = (__u32)bp->bblock_dvma;
unsigned char *e = &bp->dev->dev_addr[0];
/* Latch current counters into statistics. */
bregs + BMAC_XIFCFG);
/* Tell the QEC where the ring descriptors are. */
- sbus_writel(bp->bblock_dvma + bib_offset(be_rxd, 0),
+ sbus_writel(bblk_dvma + bib_offset(be_rxd, 0),
cregs + CREG_RXDS);
- sbus_writel(bp->bblock_dvma + bib_offset(be_txd, 0),
+ sbus_writel(bblk_dvma + bib_offset(be_txd, 0),
cregs + CREG_TXDS);
/* Setup the FIFO pointers into QEC local memory. */
void __iomem *bregs; /* BigMAC Registers */
void __iomem *tregs; /* BigMAC Transceiver */
struct bmac_init_block *bmac_block; /* RX and TX descriptors */
- __u32 bblock_dvma; /* RX and TX descriptors */
+ dma_addr_t bblock_dvma; /* RX and TX descriptors */
spinlock_t lock;
{
struct qe_init_block *qb = qep->qe_block;
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 qbufs_dvma = qep->buffers_dvma;
+ __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
int i;
qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
void __iomem *mregs = qep->mregs;
void __iomem *gregs = qecp->gregs;
unsigned char *e = &qep->dev->dev_addr[0];
+ __u32 qblk_dvma = (__u32)qep->qblock_dvma;
u32 tmp;
int i;
return -EAGAIN;
/* Setup initial rx/tx init block pointers. */
- sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
- sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
+ sbus_writel(qblk_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
+ sbus_writel(qblk_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
/* Enable/mask the various irq's. */
sbus_writel(0, cregs + CREG_RIMASK);
struct net_device *dev = qep->dev;
struct qe_rxd *this;
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 qbufs_dvma = qep->buffers_dvma;
+ __u32 qbufs_dvma = (__u32)qep->buffers_dvma;
int elem = qep->rx_new;
u32 flags;
{
struct sunqe *qep = netdev_priv(dev);
struct sunqe_buffers *qbufs = qep->buffers;
- __u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
+ __u32 txbuf_dvma, qbufs_dvma = (__u32)qep->buffers_dvma;
unsigned char *txbuf;
int len, entry;
void __iomem *qcregs; /* QEC per-channel Registers */
void __iomem *mregs; /* Per-channel MACE Registers */
struct qe_init_block *qe_block; /* RX and TX descriptors */
- __u32 qblock_dvma; /* RX and TX descriptors */
+ dma_addr_t qblock_dvma; /* RX and TX descriptors */
spinlock_t lock; /* Protects txfull state */
int rx_new, rx_old; /* RX ring extents */
int tx_new, tx_old; /* TX ring extents */
struct sunqe_buffers *buffers; /* CPU visible address. */
- __u32 buffers_dvma; /* DVMA visible address. */
+ dma_addr_t buffers_dvma; /* DVMA visible address. */
struct sunqec *parent;
u8 mconfig; /* Base MACE mconfig value */
struct platform_device *op; /* QE's OF device struct */
* to the PHY is the Ethernet MAC DT node.
*/
ret = of_phy_register_fixed_link(slave_node);
- if (ret)
+ if (ret) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
return ret;
+ }
slave_data->phy_node = of_node_get(slave_node);
} else if (parp) {
u32 phyid;
}
snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
PHY_ID_FMT, mdio->name, phyid);
+ put_device(&mdio->dev);
} else {
dev_err(&pdev->dev,
"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
return 0;
}
+static void cpsw_remove_dt(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *slave_node;
+ int i = 0;
+
+ for_each_available_child_of_node(node, slave_node) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+ if (strcmp(slave_node->name, "slave"))
+ continue;
+
+ if (of_phy_is_fixed_link(slave_node)) {
+ struct phy_device *phydev;
+
+ phydev = of_phy_find_device(slave_node);
+ if (phydev) {
+ fixed_phy_unregister(phydev);
+ /* Put references taken by
+ * of_phy_find_device() and
+ * of_phy_register_fixed_link().
+ */
+ phy_device_free(phydev);
+ phy_device_free(phydev);
+ }
+ }
+
+ of_node_put(slave_data->phy_node);
+
+ i++;
+ if (i == data->slaves)
+ break;
+ }
+
+ of_platform_depopulate(&pdev->dev);
+}
+
static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
struct cpsw_common *cpsw = priv->cpsw;
int irq;
cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+
cpsw->dev = &pdev->dev;
ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
/* Select default pin state */
pinctrl_pm_select_default_state(&pdev->dev);
- if (cpsw_probe_dt(&cpsw->data, pdev)) {
- dev_err(&pdev->dev, "cpsw: platform data missing\n");
- ret = -ENODEV;
+ /* Need to enable clocks with runtime PM api to access module
+ * registers
+ */
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
goto clean_runtime_disable_ret;
}
+
+ ret = cpsw_probe_dt(&cpsw->data, pdev);
+ if (ret)
+ goto clean_dt_ret;
+
data = &cpsw->data;
cpsw->rx_ch_num = 1;
cpsw->tx_ch_num = 1;
GFP_KERNEL);
if (!cpsw->slaves) {
ret = -ENOMEM;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
for (i = 0; i < data->slaves; i++)
cpsw->slaves[i].slave_num = i;
if (IS_ERR(clk)) {
dev_err(priv->dev, "fck is not found\n");
ret = -ENODEV;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
if (IS_ERR(ss_regs)) {
ret = PTR_ERR(ss_regs);
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->regs = ss_regs;
- /* Need to enable clocks with runtime PM api to access module
- * registers
- */
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- pm_runtime_put_noidle(&pdev->dev);
- goto clean_runtime_disable_ret;
- }
cpsw->version = readl(&cpsw->regs->id_ver);
- pm_runtime_put_sync(&pdev->dev);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(cpsw->wr_regs)) {
ret = PTR_ERR(cpsw->wr_regs);
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
memset(&dma_params, 0, sizeof(dma_params));
default:
dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
ret = -ENODEV;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
for (i = 0; i < cpsw->data.slaves; i++) {
struct cpsw_slave *slave = &cpsw->slaves[i];
if (!cpsw->dma) {
dev_err(priv->dev, "error initializing dma\n");
ret = -ENOMEM;
- goto clean_runtime_disable_ret;
+ goto clean_dt_ret;
}
cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
ret = cpsw_probe_dual_emac(priv);
if (ret) {
cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
- goto clean_ale_ret;
+ goto clean_unregister_netdev_ret;
}
}
+ pm_runtime_put(&pdev->dev);
+
return 0;
+clean_unregister_netdev_ret:
+ unregister_netdev(ndev);
clean_ale_ret:
cpsw_ale_destroy(cpsw->ale);
clean_dma_ret:
cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+ cpsw_remove_dt(pdev);
+ pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
pm_runtime_disable(&pdev->dev);
clean_ndev_ret:
cpsw_ale_destroy(cpsw->ale);
cpdma_ctlr_destroy(cpsw->dma);
- of_platform_depopulate(&pdev->dev);
+ cpsw_remove_dt(pdev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
if (cpsw->data.dual_emac)
void fixed_phy_unregister(struct phy_device *phy)
{
phy_device_remove(phy);
-
+ of_node_put(phy->mdio.dev.of_node);
fixed_phy_del(phy->mdio.addr);
}
EXPORT_SYMBOL_GPL(fixed_phy_unregister);
/* Vitesse Extended Page Access Register */
#define MII_VSC82X4_EXT_PAGE_ACCESS 0x1f
+/* Vitesse VSC8601 Extended PHY Control Register 1 */
+#define MII_VSC8601_EPHY_CTL 0x17
+#define MII_VSC8601_EPHY_CTL_RGMII_SKEW (1 << 8)
+
#define PHY_ID_VSC8234 0x000fc620
#define PHY_ID_VSC8244 0x000fc6c0
#define PHY_ID_VSC8514 0x00070670
return err;
}
+/* This adds a skew for both TX and RX clocks, so the skew should only be
+ * applied to "rgmii-id" interfaces. It may not work as expected
+ * on "rgmii-txid", "rgmii-rxid" or "rgmii" interfaces. */
+static int vsc8601_add_skew(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_read(phydev, MII_VSC8601_EPHY_CTL);
+ if (ret < 0)
+ return ret;
+
+ ret |= MII_VSC8601_EPHY_CTL_RGMII_SKEW;
+ return phy_write(phydev, MII_VSC8601_EPHY_CTL, ret);
+}
+
+static int vsc8601_config_init(struct phy_device *phydev)
+{
+ int ret = 0;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ ret = vsc8601_add_skew(phydev);
+
+ if (ret < 0)
+ return ret;
+
+ return genphy_config_init(phydev);
+}
+
static int vsc824x_ack_interrupt(struct phy_device *phydev)
{
int err = 0;
.phy_id_mask = 0x000ffff0,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_init = &genphy_config_init,
+ .config_init = &vsc8601_config_init,
.config_aneg = &genphy_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &vsc824x_ack_interrupt,
netif_napi_del(&vi->rq[i].napi);
}
+ /* We called napi_hash_del() before netif_napi_del(),
+ * we need to respect an RCU grace period before freeing vi->rq
+ */
+ synchronize_net();
+
kfree(vi->rq);
kfree(vi->sq);
}
data->bcn_delta = do_div(delta, bcn_int);
} else {
data->tsf_offset -= delta;
- data->bcn_delta = -do_div(delta, bcn_int);
+ data->bcn_delta = -(s64)do_div(delta, bcn_int);
}
}
mdiodev = to_mdio_device(d);
if (mdiodev->flags & MDIO_DEVICE_FLAG_PHY)
return to_phy_device(d);
+ put_device(d);
}
return NULL;
status.link = 1;
status.duplex = of_property_read_bool(fixed_link_node,
"full-duplex");
- if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
+ if (of_property_read_u32(fixed_link_node, "speed",
+ &status.speed)) {
+ of_node_put(fixed_link_node);
return -EINVAL;
+ }
status.pause = of_property_read_bool(fixed_link_node, "pause");
status.asym_pause = of_property_read_bool(fixed_link_node,
"asym-pause");
struct twl4030_usb *twl = phy_get_drvdata(phy);
dev_dbg(twl->dev, "%s\n", __func__);
- pm_runtime_mark_last_busy(twl->dev);
- pm_runtime_put_autosuspend(twl->dev);
return 0;
}
dev_dbg(twl->dev, "%s\n", __func__);
pm_runtime_get_sync(twl->dev);
schedule_delayed_work(&twl->id_workaround_work, HZ);
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put_autosuspend(twl->dev);
return 0;
}
SAM_STAT_CHECK_CONDITION;
}
-
+static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+{
+ return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+}
/**
* scsih_qcmd - main scsi request entry point
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
+ /*
+ * Lock the device for any subsequent command until command is
+ * done.
+ */
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_block(scmd->device);
+
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
if (scmd == NULL)
return 1;
+ if (ata_12_16_cmd(scmd))
+ scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
if (mpi_reply == NULL) {
for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) {
sp = req->outstanding_cmds[cnt];
if (sp) {
- /* Get a reference to the sp and drop the lock.
- * The reference ensures this sp->done() call
- * - and not the call in qla2xxx_eh_abort() -
- * ends the SCSI command (with result 'res').
+ /* Don't abort commands in the adapter during EEH
+ * recovery, as it is not accessible/responding.
*/
- sp_get(sp);
- spin_unlock_irqrestore(&ha->hardware_lock, flags);
- qla2xxx_eh_abort(GET_CMD_SP(sp));
- spin_lock_irqsave(&ha->hardware_lock, flags);
+ if (!ha->flags.eeh_busy) {
+ /* Get a reference to the sp and drop the lock.
+ * The reference ensures this sp->done() call
+ * - and not the call in qla2xxx_eh_abort() -
+ * ends the SCSI command (with result 'res').
+ */
+ sp_get(sp);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ qla2xxx_eh_abort(GET_CMD_SP(sp));
+ spin_lock_irqsave(&ha->hardware_lock, flags);
+ }
req->outstanding_cmds[cnt] = NULL;
sp->done(vha, sp, res);
}
.set_cur_state = powerclamp_set_cur_state,
};
+static const struct x86_cpu_id __initconst intel_powerclamp_ids[] = {
+ { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_MWAIT },
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
+
static int __init powerclamp_probe(void)
{
- if (!boot_cpu_has(X86_FEATURE_MWAIT)) {
+
+ if (!x86_match_cpu(intel_powerclamp_ids)) {
pr_err("CPU does not support MWAIT");
return -ENODEV;
}
if (!ci)
return -ENOMEM;
+ spin_lock_init(&ci->lock);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
struct usb_otg_caps *otg_caps = &ci->platdata->ci_otg_caps;
int retval = 0;
- spin_lock_init(&ci->lock);
-
ci->gadget.ops = &usb_gadget_ops;
ci->gadget.speed = USB_SPEED_UNKNOWN;
ci->gadget.max_speed = USB_SPEED_HIGH;
switch (creq->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_INTERFACE:
- return ffs_func_revmap_intf(func,
- le16_to_cpu(creq->wIndex) >= 0);
+ return (ffs_func_revmap_intf(func,
+ le16_to_cpu(creq->wIndex)) >= 0);
case USB_RECIP_ENDPOINT:
- return ffs_func_revmap_ep(func,
- le16_to_cpu(creq->wIndex) >= 0);
+ return (ffs_func_revmap_ep(func,
+ le16_to_cpu(creq->wIndex)) >= 0);
default:
return (bool) (func->ffs->user_flags &
FUNCTIONFS_ALL_CTRL_RECIP);
}
#endif
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
return handled;
}
MUSB_DEVCTL_HR;
switch (devctl & ~s) {
case MUSB_QUIRK_B_INVALID_VBUS_91:
- if (!musb->session && !musb->quirk_invalid_vbus) {
- musb->quirk_invalid_vbus = true;
+ if (musb->quirk_retries--) {
musb_dbg(musb,
- "First invalid vbus, assume no session");
+ "Poll devctl on invalid vbus, assume no session");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+
return;
}
- break;
case MUSB_QUIRK_A_DISCONNECT_19:
+ if (musb->quirk_retries--) {
+ musb_dbg(musb,
+ "Poll devctl on possible host mode disconnect");
+ schedule_delayed_work(&musb->irq_work,
+ msecs_to_jiffies(1000));
+
+ return;
+ }
if (!musb->session)
break;
musb_dbg(musb, "Allow PM on possible host mode disconnect");
if (error < 0)
dev_err(musb->controller, "Could not enable: %i\n",
error);
+ musb->quirk_retries = 3;
} else {
musb_dbg(musb, "Allow PM with no session: %02x", devctl);
- musb->quirk_invalid_vbus = false;
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
/* Only used to provide driver mode change events */
static void musb_irq_work(struct work_struct *data)
{
- struct musb *musb = container_of(data, struct musb, irq_work);
+ struct musb *musb = container_of(data, struct musb, irq_work.work);
musb_pm_runtime_check_session(musb);
INIT_LIST_HEAD(&musb->control);
INIT_LIST_HEAD(&musb->in_bulk);
INIT_LIST_HEAD(&musb->out_bulk);
+ INIT_LIST_HEAD(&musb->pending_list);
musb->vbuserr_retry = VBUSERR_RETRY_COUNT;
musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON;
musb_host_free(musb);
}
+struct musb_pending_work {
+ int (*callback)(struct musb *musb, void *data);
+ void *data;
+ struct list_head node;
+};
+
+/*
+ * Called from musb_runtime_resume(), musb_resume(), and
+ * musb_queue_resume_work(). Callers must take musb->lock.
+ */
+static int musb_run_resume_work(struct musb *musb)
+{
+ struct musb_pending_work *w, *_w;
+ unsigned long flags;
+ int error = 0;
+
+ spin_lock_irqsave(&musb->list_lock, flags);
+ list_for_each_entry_safe(w, _w, &musb->pending_list, node) {
+ if (w->callback) {
+ error = w->callback(musb, w->data);
+ if (error < 0) {
+ dev_err(musb->controller,
+ "resume callback %p failed: %i\n",
+ w->callback, error);
+ }
+ }
+ list_del(&w->node);
+ devm_kfree(musb->controller, w);
+ }
+ spin_unlock_irqrestore(&musb->list_lock, flags);
+
+ return error;
+}
+
+/*
+ * Called to run work if device is active or else queue the work to happen
+ * on resume. Caller must take musb->lock and must hold an RPM reference.
+ *
+ * Note that we cowardly refuse queuing work after musb PM runtime
+ * resume is done calling musb_run_resume_work() and return -EINPROGRESS
+ * instead.
+ */
+int musb_queue_resume_work(struct musb *musb,
+ int (*callback)(struct musb *musb, void *data),
+ void *data)
+{
+ struct musb_pending_work *w;
+ unsigned long flags;
+ int error;
+
+ if (WARN_ON(!callback))
+ return -EINVAL;
+
+ if (pm_runtime_active(musb->controller))
+ return callback(musb, data);
+
+ w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+ if (!w)
+ return -ENOMEM;
+
+ w->callback = callback;
+ w->data = data;
+ spin_lock_irqsave(&musb->list_lock, flags);
+ if (musb->is_runtime_suspended) {
+ list_add_tail(&w->node, &musb->pending_list);
+ error = 0;
+ } else {
+ dev_err(musb->controller, "could not add resume work %p\n",
+ callback);
+ devm_kfree(musb->controller, w);
+ error = -EINPROGRESS;
+ }
+ spin_unlock_irqrestore(&musb->list_lock, flags);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(musb_queue_resume_work);
+
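A minimal sketch of how a glue layer is expected to use the new helper, mirroring the otg_timer() rework in the dsps glue later in this series; my_poll() and my_check_status() are placeholder names, not part of the patch:

/* Hypothetical glue-layer poll routine; my_check_status() stands in for a
 * real callback with the musb_queue_resume_work() signature.
 */
static int my_check_status(struct musb *musb, void *data);

static void my_poll(struct musb *musb, struct device *dev)
{
	unsigned long flags;
	int err;

	/* Hold an RPM reference; -EINPROGRESS just means resume is under way. */
	err = pm_runtime_get(dev);
	if ((err != -EINPROGRESS) && err < 0) {
		pm_runtime_put_noidle(dev);
		return;
	}

	/* The helper requires musb->lock to be held by the caller. */
	spin_lock_irqsave(&musb->lock, flags);
	err = musb_queue_resume_work(musb, my_check_status, NULL);
	if (err < 0)
		dev_err(dev, "could not queue resume work: %i\n", err);
	spin_unlock_irqrestore(&musb->lock, flags);

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
}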
static void musb_deassert_reset(struct work_struct *work)
{
struct musb *musb;
}
spin_lock_init(&musb->lock);
+ spin_lock_init(&musb->list_lock);
musb->board_set_power = plat->set_power;
musb->min_power = plat->min_power;
musb->ops = plat->platform_ops;
musb_generic_disable(musb);
/* Init IRQ workqueue before request_irq */
- INIT_WORK(&musb->irq_work, musb_irq_work);
+ INIT_DELAYED_WORK(&musb->irq_work, musb_irq_work);
INIT_DELAYED_WORK(&musb->deassert_reset_work, musb_deassert_reset);
INIT_DELAYED_WORK(&musb->finish_resume_work, musb_host_finish_resume);
if (status)
goto fail5;
+ musb->is_initialized = 1;
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
musb_host_cleanup(musb);
fail3:
- cancel_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
if (musb->dma_controller)
*/
musb_exit_debugfs(musb);
- cancel_work_sync(&musb->irq_work);
+ cancel_delayed_work_sync(&musb->irq_work);
cancel_delayed_work_sync(&musb->finish_resume_work);
cancel_delayed_work_sync(&musb->deassert_reset_work);
pm_runtime_get_sync(musb->controller);
musb_platform_disable(musb);
musb_generic_disable(musb);
+ WARN_ON(!list_empty(&musb->pending_list));
spin_lock_irqsave(&musb->lock, flags);
static int musb_resume(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
- u8 devctl;
- u8 mask;
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int error;
+ u8 devctl;
+ u8 mask;
/*
* For static cmos like DaVinci, register values were preserved
musb_start(musb);
+ spin_lock_irqsave(&musb->lock, flags);
+ error = musb_run_resume_work(musb);
+ if (error)
+ dev_err(musb->controller, "resume work failed with %i\n",
+ error);
+ spin_unlock_irqrestore(&musb->lock, flags);
+
return 0;
}
struct musb *musb = dev_to_musb(dev);
musb_save_context(musb);
+ musb->is_runtime_suspended = 1;
return 0;
}
static int musb_runtime_resume(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
- static int first = 1;
+ struct musb *musb = dev_to_musb(dev);
+ unsigned long flags;
+ int error;
/*
* When pm_runtime_get_sync called for the first time in driver
* Also context restore without save does not make
* any sense
*/
- if (!first)
- musb_restore_context(musb);
- first = 0;
+ if (!musb->is_initialized)
+ return 0;
+
+ musb_restore_context(musb);
if (musb->need_finish_resume) {
musb->need_finish_resume = 0;
msecs_to_jiffies(USB_RESUME_TIMEOUT));
}
+ spin_lock_irqsave(&musb->lock, flags);
+ error = musb_run_resume_work(musb);
+ if (error)
+ dev_err(musb->controller, "resume work failed with %i\n",
+ error);
+ musb->is_runtime_suspended = 0;
+ spin_unlock_irqrestore(&musb->lock, flags);
+
return 0;
}
struct musb {
/* device lock */
spinlock_t lock;
+ spinlock_t list_lock; /* resume work list lock */
struct musb_io io;
const struct musb_platform_ops *ops;
struct musb_context_registers context;
irqreturn_t (*isr)(int, void *);
- struct work_struct irq_work;
+ struct delayed_work irq_work;
struct delayed_work deassert_reset_work;
struct delayed_work finish_resume_work;
struct delayed_work gadget_work;
struct list_head control; /* of musb_qh */
struct list_head in_bulk; /* of musb_qh */
struct list_head out_bulk; /* of musb_qh */
+ struct list_head pending_list; /* pending work list */
struct timer_list otg_timer;
struct notifier_block nb;
int port_mode; /* MUSB_PORT_MODE_* */
bool session;
- bool quirk_invalid_vbus;
+ unsigned long quirk_retries;
bool is_host;
int a_wait_bcon; /* VBUS timeout in msecs */
unsigned long idle_timeout; /* Next timeout in jiffies */
+ unsigned is_initialized:1;
+ unsigned is_runtime_suspended:1;
+
/* active means connected and not suspended */
unsigned is_active:1;
extern void musb_hnp_stop(struct musb *musb);
+int musb_queue_resume_work(struct musb *musb,
+ int (*callback)(struct musb *musb, void *data),
+ void *data);
+
static inline void musb_platform_set_vbus(struct musb *musb, int is_on)
{
if (musb->ops->set_vbus)
musb_writel(reg_base, wrp->coreintr_clear, wrp->usb_bitmap);
musb_writel(reg_base, wrp->epintr_clear,
wrp->txep_bitmap | wrp->rxep_bitmap);
+ del_timer_sync(&glue->timer);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
}
-static void otg_timer(unsigned long _musb)
+/* Caller must take musb->lock */
+static int dsps_check_status(struct musb *musb, void *unused)
{
- struct musb *musb = (void *)_musb;
void __iomem *mregs = musb->mregs;
struct device *dev = musb->controller;
struct dsps_glue *glue = dev_get_drvdata(dev->parent);
const struct dsps_musb_wrapper *wrp = glue->wrp;
u8 devctl;
- unsigned long flags;
int skip_session = 0;
- int err;
-
- err = pm_runtime_get_sync(dev);
- if (err < 0)
- dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
/*
* We poll because DSPS IP's won't expose several OTG-critical
dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
usb_otg_state_string(musb->xceiv->otg->state));
- spin_lock_irqsave(&musb->lock, flags);
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_WAIT_VRISE:
mod_timer(&glue->timer, jiffies +
default:
break;
}
- spin_unlock_irqrestore(&musb->lock, flags);
+ return 0;
+}
+
+static void otg_timer(unsigned long _musb)
+{
+ struct musb *musb = (void *)_musb;
+ struct device *dev = musb->controller;
+ unsigned long flags;
+ int err;
+
+ err = pm_runtime_get(dev);
+ if ((err != -EINPROGRESS) && err < 0) {
+ dev_err(dev, "Poll could not pm_runtime_get: %i\n", err);
+ pm_runtime_put_noidle(dev);
+
+ return;
+ }
+
+ spin_lock_irqsave(&musb->lock, flags);
+ err = musb_queue_resume_work(musb, dsps_check_status, NULL);
+ if (err < 0)
+ dev_err(dev, "%s resume work: %i\n", __func__, err);
+ spin_unlock_irqrestore(&musb->lock, flags);
pm_runtime_mark_last_busy(dev);
pm_runtime_put_autosuspend(dev);
}
platform_set_drvdata(pdev, glue);
pm_runtime_enable(&pdev->dev);
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, 200);
-
- ret = pm_runtime_get_sync(&pdev->dev);
- if (ret < 0) {
- dev_err(&pdev->dev, "pm_runtime_get_sync FAILED");
- goto err2;
- }
-
ret = dsps_create_musb_pdev(glue, pdev);
if (ret)
- goto err3;
-
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_put_autosuspend(&pdev->dev);
+ goto err;
return 0;
-err3:
- pm_runtime_put_sync(&pdev->dev);
-err2:
- pm_runtime_dont_use_autosuspend(&pdev->dev);
+err:
pm_runtime_disable(&pdev->dev);
return ret;
}
platform_device_unregister(glue->musb);
- /* disable usbss clocks */
- pm_runtime_dont_use_autosuspend(&pdev->dev);
- pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
musb_ep->dma ? "dma, " : "",
musb_ep->packet_sz);
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
fail:
spin_unlock_irqrestore(&musb->lock, flags);
musb_ep->desc = NULL;
musb_ep->end_point.desc = NULL;
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
spin_unlock_irqrestore(&(musb->lock), flags);
rxstate(musb, req);
}
+static int musb_ep_restart_resume_work(struct musb *musb, void *data)
+{
+ struct musb_request *req = data;
+
+ musb_ep_restart(musb, req);
+
+ return 0;
+}
+
static int musb_gadget_queue(struct usb_ep *ep, struct usb_request *req,
gfp_t gfp_flags)
{
struct musb_ep *musb_ep;
struct musb_request *request;
struct musb *musb;
- int status = 0;
+ int status;
unsigned long lockflags;
if (!ep || !req)
if (request->ep != musb_ep)
return -EINVAL;
+ status = pm_runtime_get(musb->controller);
+ if ((status != -EINPROGRESS) && status < 0) {
+ dev_err(musb->controller,
+ "pm runtime get failed in %s\n",
+ __func__);
+ pm_runtime_put_noidle(musb->controller);
+
+ return status;
+ }
+ status = 0;
+
trace_musb_req_enq(request);
/* request is mine now... */
map_dma_buffer(request, musb, musb_ep);
- pm_runtime_get_sync(musb->controller);
spin_lock_irqsave(&musb->lock, lockflags);
/* don't queue if the ep is down */
list_add_tail(&request->list, &musb_ep->req_list);
/* it this is the head of the queue, start i/o ... */
- if (!musb_ep->busy && &request->list == musb_ep->req_list.next)
- musb_ep_restart(musb, request);
+ if (!musb_ep->busy && &request->list == musb_ep->req_list.next) {
+ status = musb_queue_resume_work(musb,
+ musb_ep_restart_resume_work,
+ request);
+ if (status < 0)
+ dev_err(musb->controller, "%s resume work: %i\n",
+ __func__, status);
+ }
unlock:
spin_unlock_irqrestore(&musb->lock, lockflags);
*/
/* Force check of devctl register for PM runtime */
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
pm_runtime_mark_last_busy(musb->controller);
pm_runtime_put_autosuspend(musb->controller);
}
pm_runtime_enable(glue->dev);
- pm_runtime_use_autosuspend(glue->dev);
- pm_runtime_set_autosuspend_delay(glue->dev, 100);
ret = platform_device_add(musb);
if (ret) {
dev_err(&pdev->dev, "failed to register musb device\n");
- goto err2;
+ goto err3;
}
return 0;
+err3:
+ pm_runtime_disable(glue->dev);
+
err2:
platform_device_put(musb);
{
struct omap2430_glue *glue = platform_get_drvdata(pdev);
- pm_runtime_get_sync(glue->dev);
platform_device_unregister(glue->musb);
- pm_runtime_put_sync(glue->dev);
- pm_runtime_dont_use_autosuspend(glue->dev);
pm_runtime_disable(glue->dev);
return 0;
dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
usb_otg_state_string(musb->xceiv->otg->state), otg_stat);
idle_timeout = jiffies + (1 * HZ);
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
} else /* A-dev state machine */ {
dev_dbg(musb->controller, "vbus change, %s, otg %03x\n",
break;
}
}
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
return idle_timeout;
}
musb_writel(tbase, TUSB_PRCM_WAKEUP_CLEAR, reg);
if (reg & ~TUSB_PRCM_WNORCS) {
musb->is_active = 1;
- schedule_work(&musb->irq_work);
+ schedule_delayed_work(&musb->irq_work, 0);
}
dev_dbg(musb->controller, "wake %sactive %02x\n",
musb->is_active ? "" : "in", reg);
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7561U_PID) },
{ USB_DEVICE(ICPDAS_VID, ICPDAS_I7563U_PID) },
{ USB_DEVICE(WICED_VID, WICED_USB20706V2_PID) },
+ { USB_DEVICE(TI_VID, TI_CC3200_LAUNCHPAD_PID),
+ .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ } /* Terminating entry */
};
#define ATMEL_VID 0x03eb /* Vendor ID */
#define STK541_PID 0x2109 /* Zigbee Controller */
+/*
+ * Texas Instruments
+ */
+#define TI_VID 0x0451
+#define TI_CC3200_LAUNCHPAD_PID 0xC32A /* SimpleLink Wi-Fi CC3200 LaunchPad */
+
/*
* Blackfin gnICE JTAG
* http://docs.blackfin.uclinux.org/doku.php?id=hw:jtag:gnice
/* COMMAND STAGE */
/* let's send the command via the control pipe */
+ /*
+ * The command is sometimes (e.g. after scsi_eh_prep_cmnd) on the stack,
+ * and the stack may be vmalloc'ed. So no DMA for us. Make a copy.
+ */
+ memcpy(us->iobuf, srb->cmnd, srb->cmd_len);
result = usb_stor_ctrl_transfer(us, us->send_ctrl_pipe,
US_CBI_ADSC,
USB_TYPE_CLASS | USB_RECIP_INTERFACE, 0,
- us->ifnum, srb->cmnd, srb->cmd_len);
+ us->ifnum, us->iobuf, srb->cmd_len);
/* check the return code for the command */
usb_stor_dbg(us, "Call to usb_stor_ctrl_transfer() returned %d\n",
}
ret = -EPROTONOSUPPORT;
- if (minorversion == 0)
+ if (!IS_ENABLED(CONFIG_NFS_V4_1) || minorversion == 0)
ret = nfs4_callback_up_net(serv, net);
else if (xprt->ops->bc_up)
ret = xprt->ops->bc_up(serv, net);
return test_bit(NFS_STATE_RECOVERY_FAILED, &state->flags) == 0;
}
+static inline bool nfs4_state_match_open_stateid_other(const struct nfs4_state *state,
+ const nfs4_stateid *stateid)
+{
+ return test_bit(NFS_OPEN_STATE, &state->flags) &&
+ nfs4_stateid_match_other(&state->open_stateid, stateid);
+}
+
#else
#define nfs4_close_state(a, b) do { } while (0)
}
static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
- nfs4_stateid *arg_stateid,
nfs4_stateid *stateid, fmode_t fmode)
{
clear_bit(NFS_O_RDWR_STATE, &state->flags);
}
if (stateid == NULL)
return;
- /* Handle races with OPEN */
- if (!nfs4_stateid_match_other(arg_stateid, &state->open_stateid) ||
- (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
- !nfs4_stateid_is_newer(stateid, &state->open_stateid))) {
+ /* Handle OPEN+OPEN_DOWNGRADE races */
+ if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
+ !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
nfs_resync_open_stateid_locked(state);
return;
}
nfs4_stateid *stateid, fmode_t fmode)
{
write_seqlock(&state->seqlock);
- nfs_clear_open_stateid_locked(state, arg_stateid, stateid, fmode);
+ /* Ignore if the CLOSE argument doesn't match the current stateid */
+ if (nfs4_state_match_open_stateid_other(state, arg_stateid))
+ nfs_clear_open_stateid_locked(state, stateid, fmode);
write_sequnlock(&state->seqlock);
if (test_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags))
nfs4_schedule_state_manager(state->owner->so_server->nfs_client);
static int nfs41_check_expired_locks(struct nfs4_state *state)
{
int status, ret = NFS_OK;
- struct nfs4_lock_state *lsp;
+ struct nfs4_lock_state *lsp, *prev = NULL;
struct nfs_server *server = NFS_SERVER(state->inode);
if (!test_bit(LK_STATE_IN_USE, &state->flags))
goto out;
+
+ spin_lock(&state->state_lock);
list_for_each_entry(lsp, &state->lock_states, ls_locks) {
if (test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags)) {
struct rpc_cred *cred = lsp->ls_state->owner->so_cred;
+ atomic_inc(&lsp->ls_count);
+ spin_unlock(&state->state_lock);
+
+ nfs4_put_lock_state(prev);
+ prev = lsp;
+
status = nfs41_test_and_free_expired_stateid(server,
&lsp->ls_stateid,
cred);
set_bit(NFS_LOCK_LOST, &lsp->ls_flags);
} else if (status != NFS_OK) {
ret = status;
- break;
+ nfs4_put_lock_state(prev);
+ goto out;
}
+ spin_lock(&state->state_lock);
}
- };
+ }
+ spin_unlock(&state->state_lock);
+ nfs4_put_lock_state(prev);
out:
return ret;
}
} else if (is_rdwr)
calldata->arg.fmode |= FMODE_READ|FMODE_WRITE;
- if (!nfs4_valid_open_stateid(state))
+ if (!nfs4_valid_open_stateid(state) ||
+ test_bit(NFS_OPEN_STATE, &state->flags) == 0)
call_close = 0;
spin_unlock(&state->owner->so_lock);
switch (task->tk_status) {
case 0:
renew_lease(data->res.server, data->timestamp);
+ break;
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_DELEG_REVOKED:
case -NFS4ERR_EXPIRED:
case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_STALE_STATEID:
task->tk_status = 0;
- if (data->roc)
- pnfs_roc_set_barrier(data->inode, data->roc_barrier);
break;
default:
if (nfs4_async_handle_error(task, data->res.server,
}
}
data->rpc_status = task->tk_status;
+ if (data->roc && data->rpc_status == 0)
+ pnfs_roc_set_barrier(data->inode, data->roc_barrier);
}
static void nfs4_delegreturn_release(void *calldata)
ssleep(1);
case -NFS4ERR_ADMIN_REVOKED:
case -NFS4ERR_STALE_STATEID:
+ case -NFS4ERR_OLD_STATEID:
case -NFS4ERR_BAD_STATEID:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
* are obviously wrong for any sort of memory access.
*/
#define BPF_REGISTER_MAX_RANGE (1024 * 1024 * 1024)
-#define BPF_REGISTER_MIN_RANGE -(1024 * 1024 * 1024)
+#define BPF_REGISTER_MIN_RANGE -1
struct bpf_reg_state {
enum bpf_reg_type type;
* Used to determine if any memory access using this register will
* result in a bad access.
*/
- u64 min_value, max_value;
+ s64 min_value;
+ u64 max_value;
union {
/* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */
s64 imm;
extern void sched_autogroup_detach(struct task_struct *p);
extern void sched_autogroup_fork(struct signal_struct *sig);
extern void sched_autogroup_exit(struct signal_struct *sig);
+extern void sched_autogroup_exit_task(struct task_struct *p);
#ifdef CONFIG_PROC_FS
extern void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m);
extern int proc_sched_autogroup_set_nice(struct task_struct *p, int nice);
static inline void sched_autogroup_detach(struct task_struct *p) { }
static inline void sched_autogroup_fork(struct signal_struct *sig) { }
static inline void sched_autogroup_exit(struct signal_struct *sig) { }
+static inline void sched_autogroup_exit_task(struct task_struct *p) { }
#endif
extern int yield_to(struct task_struct *p, bool preempt);
struct gro_cell *cell = per_cpu_ptr(gcells->cells, i);
__skb_queue_head_init(&cell->napi_skbs);
+
+ set_bit(NAPI_STATE_NO_BUSY_POLL, &cell->napi.state);
+
netif_napi_add(dev, &cell->napi, gro_cell_poll, 64);
napi_enable(&cell->napi);
}
struct netlink_callback *cb);
int fib_table_flush(struct net *net, struct fib_table *table);
struct fib_table *fib_trie_unmerge(struct fib_table *main_tb);
+void fib_table_flush_external(struct fib_table *table);
void fib_free_table(struct fib_table *tb);
#ifndef CONFIG_IP_MULTIPLE_TABLES
extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
-struct net *get_net_ns_by_fd(int pid);
+struct net *get_net_ns_by_fd(int fd);
#ifdef CONFIG_SYSCTL
void ipx_register_sysctl(void);
sys_write(out_fd, buf, BLOCK_SIZE);
#if !defined(CONFIG_S390)
if (!(i % 16)) {
- printk("%c\b", rotator[rotate & 0x3]);
+ pr_cont("%c\b", rotator[rotate & 0x3]);
rotate++;
}
#endif
reg->map_ptr->key_size,
reg->map_ptr->value_size);
if (reg->min_value != BPF_REGISTER_MIN_RANGE)
- verbose(",min_value=%llu",
- (unsigned long long)reg->min_value);
+ verbose(",min_value=%lld",
+ (long long)reg->min_value);
if (reg->max_value != BPF_REGISTER_MAX_RANGE)
verbose(",max_value=%llu",
(unsigned long long)reg->max_value);
* index'es we need to make sure that whatever we use
* will have a set floor within our range.
*/
- if ((s64)reg->min_value < 0) {
+ if (reg->min_value < 0) {
verbose("R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
regno);
return -EACCES;
{
if (reg->max_value > BPF_REGISTER_MAX_RANGE)
reg->max_value = BPF_REGISTER_MAX_RANGE;
- if ((s64)reg->min_value < BPF_REGISTER_MIN_RANGE)
+ if (reg->min_value < BPF_REGISTER_MIN_RANGE ||
+ reg->min_value > BPF_REGISTER_MAX_RANGE)
reg->min_value = BPF_REGISTER_MIN_RANGE;
}
struct bpf_insn *insn)
{
struct bpf_reg_state *regs = env->cur_state.regs, *dst_reg;
- u64 min_val = BPF_REGISTER_MIN_RANGE, max_val = BPF_REGISTER_MAX_RANGE;
+ s64 min_val = BPF_REGISTER_MIN_RANGE;
+ u64 max_val = BPF_REGISTER_MAX_RANGE;
bool min_set = false, max_set = false;
u8 opcode = BPF_OP(insn->code);
return;
}
+ /* If one of our values was at the end of our ranges then we can't just
+ * do our normal operations to the register, we need to set the values
+ * to the min/max since they are undefined.
+ */
+ if (min_val == BPF_REGISTER_MIN_RANGE)
+ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ if (max_val == BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
+
switch (opcode) {
case BPF_ADD:
- dst_reg->min_value += min_val;
- dst_reg->max_value += max_val;
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+ dst_reg->min_value += min_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value += max_val;
break;
case BPF_SUB:
- dst_reg->min_value -= min_val;
- dst_reg->max_value -= max_val;
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+ dst_reg->min_value -= min_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value -= max_val;
break;
case BPF_MUL:
- dst_reg->min_value *= min_val;
- dst_reg->max_value *= max_val;
+ if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
+ dst_reg->min_value *= min_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value *= max_val;
break;
case BPF_AND:
- /* & is special since it could end up with 0 bits set. */
- dst_reg->min_value &= min_val;
+ /* Disallow AND'ing of negative numbers, ain't nobody got time
+ * for that. Otherwise the minimum is 0 and the max is the max
+ * value we could AND against.
+ */
+ if (min_val < 0)
+ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ else
+ dst_reg->min_value = 0;
dst_reg->max_value = max_val;
break;
case BPF_LSH:
*/
if (min_val > ilog2(BPF_REGISTER_MAX_RANGE))
dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
- else
+ else if (dst_reg->min_value != BPF_REGISTER_MIN_RANGE)
dst_reg->min_value <<= min_val;
if (max_val > ilog2(BPF_REGISTER_MAX_RANGE))
dst_reg->max_value = BPF_REGISTER_MAX_RANGE;
- else
+ else if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
dst_reg->max_value <<= max_val;
break;
case BPF_RSH:
- dst_reg->min_value >>= min_val;
- dst_reg->max_value >>= max_val;
- break;
- case BPF_MOD:
- /* % is special since it is an unsigned modulus, so the floor
- * will always be 0.
+ /* RSH by a negative number is undefined, and the BPF_RSH is an
+ * unsigned shift, so make the appropriate casts.
*/
- dst_reg->min_value = 0;
- dst_reg->max_value = max_val - 1;
+ if (min_val < 0 || dst_reg->min_value < 0)
+ dst_reg->min_value = BPF_REGISTER_MIN_RANGE;
+ else
+ dst_reg->min_value =
+ (u64)(dst_reg->min_value) >> min_val;
+ if (dst_reg->max_value != BPF_REGISTER_MAX_RANGE)
+ dst_reg->max_value >>= max_val;
break;
default:
reset_reg_range_values(regs, insn->dst_reg);
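The sentinel handling added above — skip the arithmetic entirely whenever a bound is already pinned at BPF_REGISTER_MIN_RANGE/BPF_REGISTER_MAX_RANGE — can be seen in isolation in the following standalone sketch (not verifier code; the names, values and helper are made up for illustration):

#include <stdint.h>

#define MIN_UNKNOWN	INT64_C(-1)		/* plays the role of BPF_REGISTER_MIN_RANGE */
#define MAX_UNKNOWN	UINT64_C(1073741824)	/* plays the role of BPF_REGISTER_MAX_RANGE */

/* Add a constant to a tracked [min, max] range, leaving "unknown" bounds
 * untouched, as the BPF_ADD case above now does.
 */
static void range_add(int64_t *min, uint64_t *max, int64_t val)
{
	if (*min != MIN_UNKNOWN)
		*min += val;
	if (*max != MAX_UNKNOWN)
		*max += (uint64_t)val;
}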
* this will always be called from the right CPU.
*/
cpuctx = __get_cpu_context(ctx);
+
+ /* Only set/clear cpuctx->cgrp if current task uses event->cgrp. */
+ if (perf_cgroup_from_task(current, ctx) != event->cgrp) {
+ /*
+ * We are removing the last cpu event in this context.
+ * If that event is not active in this cpu, cpuctx->cgrp
+ * should've been cleared by perf_cgroup_switch.
+ */
+ WARN_ON_ONCE(!add && cpuctx->cgrp);
+ return;
+ }
cpuctx->cgrp = add ? event->cgrp : NULL;
}
* if <size> is not specified, the range is treated as a single address.
*/
enum {
+ IF_ACT_NONE = -1,
IF_ACT_FILTER,
IF_ACT_START,
IF_ACT_STOP,
{ IF_SRC_KERNEL, "%u/%u" },
{ IF_SRC_FILEADDR, "%u@%s" },
{ IF_SRC_KERNELADDR, "%u" },
+ { IF_ACT_NONE, NULL },
};
/*
*/
perf_event_exit_task(tsk);
+ sched_autogroup_exit_task(tsk);
cgroup_exit(tsk);
/*
#define LOCKF_USED_IN_IRQ_READ \
(LOCKF_USED_IN_HARDIRQ_READ | LOCKF_USED_IN_SOFTIRQ_READ)
+/*
+ * CONFIG_PROVE_LOCKING_SMALL is defined for sparc. Sparc requires .text,
+ * .data and .bss to fit in required 32MB limit for the kernel. With
+ * PROVE_LOCKING we could go over this limit and cause system boot-up problems.
+ * So, reduce the static allocations for lockdeps related structures so that
+ * everything fits in current required size limit.
+ */
+#ifdef CONFIG_PROVE_LOCKING_SMALL
/*
* MAX_LOCKDEP_ENTRIES is the maximum number of lock dependencies
* we track.
* table (if it's not there yet), and we check it for lock order
* conflicts and deadlocks.
*/
+#define MAX_LOCKDEP_ENTRIES 16384UL
+#define MAX_LOCKDEP_CHAINS_BITS 15
+#define MAX_STACK_TRACE_ENTRIES 262144UL
+#else
#define MAX_LOCKDEP_ENTRIES 32768UL
#define MAX_LOCKDEP_CHAINS_BITS 16
-#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
-
-#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
/*
* Stack-trace: tightly packed array of stack backtrace
* addresses. Protected by the hash_lock.
*/
#define MAX_STACK_TRACE_ENTRIES 524288UL
+#endif
+
+#define MAX_LOCKDEP_CHAINS (1UL << MAX_LOCKDEP_CHAINS_BITS)
+
+#define MAX_LOCKDEP_CHAIN_HLOCKS (MAX_LOCKDEP_CHAINS*5)
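Rough numbers for the reduction above, assuming the usual static arrays in lockdep.c: stack_trace[] holds MAX_STACK_TRACE_ENTRIES unsigned longs, so going from 524288 to 262144 entries saves about 2 MiB on 64-bit, and chain_hlocks[] holds MAX_LOCKDEP_CHAINS*5 u16 entries, so dropping MAX_LOCKDEP_CHAINS_BITS from 16 to 15 saves roughly another 320 KiB; together with the halved list_entries[] table this is what keeps a PROVE_LOCKING sparc image inside its 32 MB limit.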
extern struct list_head all_lock_classes;
extern struct lock_chain lock_chains[];
{
if (tg != &root_task_group)
return false;
-
/*
- * We can only assume the task group can't go away on us if
- * autogroup_move_group() can see us on ->thread_group list.
+ * If we race with autogroup_move_group() the caller can use the old
+ * value of signal->autogroup but in this case sched_move_task() will
+ * be called again before autogroup_kref_put().
+ *
+ * However, there is no way sched_autogroup_exit_task() could tell us
+ * to avoid autogroup->tg, so we abuse PF_EXITING flag for this case.
*/
if (p->flags & PF_EXITING)
return false;
return true;
}
+void sched_autogroup_exit_task(struct task_struct *p)
+{
+ /*
+ * We are going to call exit_notify() and autogroup_move_group() can't
+ * see this thread after that: we can no longer use signal->autogroup.
+ * See the PF_EXITING check in task_wants_autogroup().
+ */
+ sched_move_task(p);
+}
+
static void
autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
}
p->signal->autogroup = autogroup_kref_get(ag);
-
- if (!READ_ONCE(sysctl_sched_autogroup_enabled))
- goto out;
-
+ /*
+ * We can't avoid sched_move_task() after we changed signal->autogroup,
+ * this process can already run with task_group() == prev->tg or we can
+ * race with cgroup code which can read autogroup = prev under rq->lock.
+ * In the latter case for_each_thread() can not miss a migrating thread,
+ * cpu_cgroup_attach() must not be possible after cgroup_exit() and it
+ * can't be removed from thread list, we hold ->siglock.
+ *
+ * If an exiting thread was already removed from thread list we rely on
+ * sched_autogroup_exit_task().
+ */
for_each_thread(p, t)
sched_move_task(t);
-out:
+
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
}
For more details, see Documentation/locking/lockdep-design.txt.
+config PROVE_LOCKING_SMALL
+ bool
+
config LOCKDEP
bool
depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
batadv_softif_destroy_sysfs(hard_iface->soft_iface);
}
+ hard_iface->soft_iface = NULL;
batadv_hardif_put(hard_iface);
out:
primary_if = batadv_primary_if_get_selected(bat_priv);
if (unlikely(!primary_if)) {
err = BATADV_TP_REASON_DST_UNREACHABLE;
+ tp_vars->reason = err;
goto out;
}
bool alloc;
int id;
+ if (atomic_read(&net->count) == 0)
+ return NETNSA_NSID_NOT_ASSIGNED;
spin_lock_irqsave(&net->nsid_lock, flags);
alloc = atomic_read(&peer->count) == 0 ? false : true;
id = __peernet2id_alloc(net, peer, &alloc);
if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
(ext_filter_mask & RTEXT_FILTER_VF)) {
int num_vfs = dev_num_vf(dev->dev.parent);
- size_t size = nla_total_size(sizeof(struct nlattr));
- size += nla_total_size(num_vfs * sizeof(struct nlattr));
+ size_t size = nla_total_size(0);
size += num_vfs *
- (nla_total_size(sizeof(struct ifla_vf_mac)) +
- nla_total_size(MAX_VLAN_LIST_LEN *
- sizeof(struct nlattr)) +
+ (nla_total_size(0) +
+ nla_total_size(sizeof(struct ifla_vf_mac)) +
+ nla_total_size(sizeof(struct ifla_vf_vlan)) +
+ nla_total_size(0) + /* nest IFLA_VF_VLAN_LIST */
nla_total_size(MAX_VLAN_LIST_LEN *
sizeof(struct ifla_vf_vlan_info)) +
nla_total_size(sizeof(struct ifla_vf_spoofchk)) +
+ nla_total_size(sizeof(struct ifla_vf_tx_rate)) +
nla_total_size(sizeof(struct ifla_vf_rate)) +
nla_total_size(sizeof(struct ifla_vf_link_state)) +
nla_total_size(sizeof(struct ifla_vf_rss_query_en)) +
+ nla_total_size(0) + /* nest IFLA_VF_STATS */
/* IFLA_VF_STATS_RX_PACKETS */
nla_total_size_64bit(sizeof(__u64)) +
/* IFLA_VF_STATS_TX_PACKETS */
static size_t rtnl_xdp_size(const struct net_device *dev)
{
- size_t xdp_size = nla_total_size(1); /* XDP_ATTACHED */
+ size_t xdp_size = nla_total_size(0) + /* nest IFLA_XDP */
+ nla_total_size(1); /* XDP_ATTACHED */
if (!dev->netdev_ops->ndo_xdp)
return 0;
head = &net->dev_index_head[h];
hlist_for_each_entry(dev, head, index_hlist) {
if (link_dump_filtered(dev, master_idx, kind_ops))
- continue;
+ goto cont;
if (idx < s_idx)
goto cont;
err = rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
static inline size_t rtnl_fdb_nlmsg_size(void)
{
- return NLMSG_ALIGN(sizeof(struct ndmsg)) + nla_total_size(ETH_ALEN);
+ return NLMSG_ALIGN(sizeof(struct ndmsg)) +
+ nla_total_size(ETH_ALEN) + /* NDA_LLADDR */
+ nla_total_size(sizeof(u16)) + /* NDA_VLAN */
+ 0;
}
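For context on the sizing fix above: nla_total_size(payload) is NLA_ALIGN(NLA_HDRLEN + payload) with NLA_HDRLEN being 4 bytes (as I recall from include/net/netlink.h), so on common architectures the notification now reserves NLMSG_ALIGN(sizeof(struct ndmsg)) = 12 bytes for the header, NLA_ALIGN(4 + ETH_ALEN) = 12 bytes for NDA_LLADDR and NLA_ALIGN(4 + 2) = 8 bytes for NDA_VLAN, i.e. 32 bytes in total rather than the 24 the old expression allowed for.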
static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, u16 vid, int type,
int fib_unmerge(struct net *net)
{
- struct fib_table *old, *new;
+ struct fib_table *old, *new, *main_table;
/* attempt to fetch local table if it has been allocated */
old = fib_get_table(net, RT_TABLE_LOCAL);
if (!new)
return -ENOMEM;
+ /* table is already unmerged */
+ if (new == old)
+ return 0;
+
/* replace merged table with clean table */
- if (new != old) {
- fib_replace_table(net, old, new);
- fib_free_table(old);
- }
+ fib_replace_table(net, old, new);
+ fib_free_table(old);
+
+ /* attempt to fetch main table if it has been allocated */
+ main_table = fib_get_table(net, RT_TABLE_MAIN);
+ if (!main_table)
+ return 0;
+
+ /* flush local entries from main table */
+ fib_table_flush_external(main_table);
return 0;
}
local_l = fib_find_node(lt, &local_tp, l->key);
if (fib_insert_alias(lt, local_tp, local_l, new_fa,
- NULL, l->key))
+ NULL, l->key)) {
+ kmem_cache_free(fn_alias_kmem, new_fa);
goto out;
+ }
}
/* stop loop if key wrapped back to 0 */
return NULL;
}
+/* Caller must hold RTNL */
+void fib_table_flush_external(struct fib_table *tb)
+{
+ struct trie *t = (struct trie *)tb->tb_data;
+ struct key_vector *pn = t->kv;
+ unsigned long cindex = 1;
+ struct hlist_node *tmp;
+ struct fib_alias *fa;
+
+ /* walk trie in reverse order */
+ for (;;) {
+ unsigned char slen = 0;
+ struct key_vector *n;
+
+ if (!(cindex--)) {
+ t_key pkey = pn->key;
+
+ /* cannot resize the trie vector */
+ if (IS_TRIE(pn))
+ break;
+
+ /* resize completed node */
+ pn = resize(t, pn);
+ cindex = get_index(pkey, pn);
+
+ continue;
+ }
+
+ /* grab the next available node */
+ n = get_child(pn, cindex);
+ if (!n)
+ continue;
+
+ if (IS_TNODE(n)) {
+ /* record pn and cindex for leaf walking */
+ pn = n;
+ cindex = 1ul << n->bits;
+
+ continue;
+ }
+
+ hlist_for_each_entry_safe(fa, tmp, &n->leaf, fa_list) {
+ /* if alias was cloned to local then we just
+ * need to remove the local copy from main
+ */
+ if (tb->tb_id != fa->tb_id) {
+ hlist_del_rcu(&fa->fa_list);
+ alias_free_mem_rcu(fa);
+ continue;
+ }
+
+ /* record local slen */
+ slen = fa->fa_slen;
+ }
+
+ /* update leaf slen */
+ n->slen = slen;
+
+ if (hlist_empty(&n->leaf)) {
+ put_child_root(pn, n->key, NULL);
+ node_free(n);
+ }
+ }
+}
+
/* Caller must hold RTNL. */
int fib_table_flush(struct net *net, struct fib_table *tb)
{
}
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
spin_unlock_bh(&in_dev->mc_tomb_lock);
}
-static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
+/*
+ * restore ip_mc_list deleted records
+ */
+static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
struct ip_mc_list *pmc, *pmc_prev;
- struct ip_sf_list *psf, *psf_next;
+ struct ip_sf_list *psf;
+ struct net *net = dev_net(in_dev->dev);
+ __be32 multiaddr = im->multiaddr;
spin_lock_bh(&in_dev->mc_tomb_lock);
pmc_prev = NULL;
in_dev->mc_tomb = pmc->next;
}
spin_unlock_bh(&in_dev->mc_tomb_lock);
+
+ spin_lock_bh(&im->lock);
if (pmc) {
- for (psf = pmc->tomb; psf; psf = psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
+ im->interface = pmc->interface;
+ im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ im->sfmode = pmc->sfmode;
+ if (pmc->sfmode == MCAST_INCLUDE) {
+ im->tomb = pmc->tomb;
+ im->sources = pmc->sources;
+ for (psf = im->sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->crcount;
}
in_dev_put(pmc->interface);
- kfree(pmc);
}
+ spin_unlock_bh(&im->lock);
}
+/*
+ * flush ip_mc_list deleted records
+ */
static void igmpv3_clear_delrec(struct in_device *in_dev)
{
struct ip_mc_list *pmc, *nextpmc;
ip_mc_hash_add(in_dev, im);
#ifdef CONFIG_IP_MULTICAST
- igmpv3_del_delrec(in_dev, im->multiaddr);
+ igmpv3_del_delrec(in_dev, im);
#endif
igmp_group_added(im);
if (!in_dev->dead)
ASSERT_RTNL();
- for_each_pmc_rtnl(in_dev, pmc)
+ for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_del_delrec(in_dev, pmc);
+#endif
igmp_group_added(pmc);
+ }
}
/* Device going down */
in_dev->mr_gq_running = 0;
if (del_timer(&in_dev->mr_gq_timer))
__in_dev_put(in_dev);
- igmpv3_clear_delrec(in_dev);
#endif
ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
#endif
ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
- for_each_pmc_rtnl(in_dev, pmc)
+ for_each_pmc_rtnl(in_dev, pmc) {
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_del_delrec(in_dev, pmc);
+#endif
igmp_group_added(pmc);
+ }
}
/*
/* Deactivate timers */
ip_mc_down(in_dev);
+#ifdef CONFIG_IP_MULTICAST
+ igmpv3_clear_delrec(in_dev);
+#endif
while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
in_dev->mc_list = i->next_rcu;
in_dev->mc_count--;
-
- /* We've dropped the groups in ip_mc_down already */
- ip_mc_clear_src(i);
ip_ma_put(i);
}
}
icsk->icsk_ca_ops = ca;
icsk->icsk_ca_setsockopt = 1;
- if (sk->sk_state != TCP_CLOSE)
+ if (sk->sk_state != TCP_CLOSE) {
+ memset(icsk->icsk_ca_priv, 0, sizeof(icsk->icsk_ca_priv));
tcp_init_congestion_control(sk);
+ }
}
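For reference, the code path touched here is reached through the standard TCP_CONGESTION socket option. A minimal userspace sketch (not part of the patch; assumes an already-established socket descriptor and uses "cubic" purely as an example name) showing the case the added memset() now covers, i.e. switching congestion control on a live connection:

    #include <string.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Ask the kernel to switch an established TCP socket to another
     * congestion control module. On this path the old ca_ops is released
     * and the new one initialized; zeroing icsk_ca_priv first keeps the
     * new module from inheriting stale private state. */
    static int switch_congestion_control(int fd, const char *name)
    {
            return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                              name, strlen(name));
    }

    /* usage: switch_congestion_control(sockfd, "cubic"); */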
/* Manage refcounts on socket close. */
if (use_hash2) {
hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
- udp_table.mask;
- hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+ udptable->mask;
+ hash2 = udp4_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
- hslot = &udp_table.hash2[hash2];
+ hslot = &udptable->hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
int mtu;
unsigned int psh_hlen = sizeof(struct ipv6hdr) + t->encap_hlen;
unsigned int max_headroom = psh_hlen;
+ bool use_cache = false;
u8 hop_limit;
int err = -1;
memcpy(&fl6->daddr, addr6, sizeof(fl6->daddr));
neigh_release(neigh);
- } else if (!fl6->flowi6_mark)
+ } else if (!(t->parms.flags &
+ (IP6_TNL_F_USE_ORIG_TCLASS | IP6_TNL_F_USE_ORIG_FWMARK))) {
+ /* enable the cache only if the routing decision does
+ * not depend on the current inner header value
+ */
+ use_cache = true;
+ }
+
+ if (use_cache)
dst = dst_cache_get(&t->dst_cache);
if (!ip6_tnl_xmit_ctl(t, &fl6->saddr, &fl6->daddr))
if (t->encap.type != TUNNEL_ENCAP_NONE)
goto tx_err_dst_release;
} else {
- if (!fl6->flowi6_mark && ndst)
+ if (use_cache && ndst)
dst_cache_set_ip6(&t->dst_cache, ndst, &fl6->saddr);
}
skb_dst_set(skb, dst);
if (use_hash2) {
hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
- udp_table.mask;
- hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+ udptable->mask;
+ hash2 = udp6_portaddr_hash(net, daddr, hnum) & udptable->mask;
start_lookup:
- hslot = &udp_table.hash2[hash2];
+ hslot = &udptable->hash2[hash2];
offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
}
unsigned int len = skb->len;
int ret = l2tp_xmit_skb(session, skb, session->hdr_len);
- if (likely(ret == NET_XMIT_SUCCESS)) {
+ if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
atomic_long_add(len, &priv->tx_bytes);
atomic_long_inc(&priv->tx_packets);
} else {
int ret;
int chk_addr_ret;
- if (!sock_flag(sk, SOCK_ZAPPED))
- return -EINVAL;
if (addr_len < sizeof(struct sockaddr_l2tpip))
return -EINVAL;
if (addr->l2tp_family != AF_INET)
read_unlock_bh(&l2tp_ip_lock);
lock_sock(sk);
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out;
+
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_l2tpip))
goto out;
int addr_type;
int err;
- if (!sock_flag(sk, SOCK_ZAPPED))
- return -EINVAL;
if (addr->l2tp_family != AF_INET6)
return -EINVAL;
if (addr_len < sizeof(*addr))
lock_sock(sk);
err = -EINVAL;
+ if (!sock_flag(sk, SOCK_ZAPPED))
+ goto out_unlock;
+
if (sk->sk_state != TCP_CLOSE)
goto out_unlock;
}
/* No need to do anything if the driver does all */
- if (!local->ops->set_tim)
+ if (ieee80211_hw_check(&local->hw, AP_LINK_PS))
return;
if (sta->dead)
struct sta_info *sta,
struct sk_buff *skb)
{
- struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct fq *fq = &local->fq;
struct ieee80211_vif *vif;
struct txq_info *txqi;
if (!txqi)
return false;
- info->control.vif = vif;
-
spin_lock_bh(&fq->lock);
ieee80211_txq_enqueue(local, txqi, skb);
spin_unlock_bh(&fq->lock);
if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
- *ieee80211_get_qos_ctl(hdr) = tid;
hdr->seq_ctrl = ieee80211_tx_next_seq(sta, tid);
} else {
info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ;
(tid_tx ? IEEE80211_TX_CTL_AMPDU : 0);
info->control.flags = IEEE80211_TX_CTRL_FAST_XMIT;
+ if (hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_QOS_DATA)) {
+ tid = skb->priority & IEEE80211_QOS_CTL_TAG1D_MASK;
+ *ieee80211_get_qos_ctl(hdr) = tid;
+ }
+
__skb_queue_head_init(&tx.skbs);
tx.flags = IEEE80211_TX_UNICAST;
goto begin;
}
+ if (test_bit(IEEE80211_TXQ_AMPDU, &txqi->flags))
+ info->flags |= IEEE80211_TX_CTL_AMPDU;
+ else
+ info->flags &= ~IEEE80211_TX_CTL_AMPDU;
+
if (info->control.flags & IEEE80211_TX_CTRL_FAST_XMIT) {
struct sta_info *sta = container_of(txq->sta, struct sta_info,
sta);
vht_cap->vht_mcs.tx_mcs_map |= cpu_to_le16(peer_tx << i * 2);
}
+ /*
+ * This is a workaround for VHT-enabled STAs which break the spec
+ * and have the VHT-MCS Rx map filled in with value 3 for all eight
+ * spatial streams, an example is AR9462.
+ *
+ * As per spec, in section 22.1.1 Introduction to the VHT PHY
+ * A VHT STA shall support at least single spatial stream VHT-MCSs
+ * 0 to 7 (transmit and receive) in all supported channel widths.
+ */
+ if (vht_cap->vht_mcs.rx_mcs_map == cpu_to_le16(0xFFFF)) {
+ vht_cap->vht_supported = false;
+ sdata_info(sdata, "Ignoring VHT IE from %pM due to invalid rx_mcs_map\n",
+ sta->addr);
+ return;
+ }
+
/* finally set up the bandwidth */
switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
for (it_chain = chain; (tp = rtnl_dereference(*it_chain)) != NULL;
it_chain = &tp->next)
- tfilter_notify(net, oskb, n, tp, 0, event, false);
+ tfilter_notify(net, oskb, n, tp, n->nlmsg_flags, event, false);
}
/* Select new prio value from the range, managed by kernel. */
if (!skb)
return -ENOBUFS;
- if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq, 0, event) <= 0) {
+ if (tcf_fill_node(net, skb, tp, fh, portid, n->nlmsg_seq,
+ n->nlmsg_flags, event) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
/*
* net/tipc/socket.c: TIPC socket API
*
- * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
+ * Copyright (c) 2001-2007, 2012-2016, Ericsson AB
* Copyright (c) 2004-2008, 2010-2013, Wind River Systems
* All rights reserved.
*
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
-
static const struct rhashtable_params tsk_rht_params;
-/*
- * Revised TIPC socket locking policy:
- *
- * Most socket operations take the standard socket lock when they start
- * and hold it until they finish (or until they need to sleep). Acquiring
- * this lock grants the owner exclusive access to the fields of the socket
- * data structures, with the exception of the backlog queue. A few socket
- * operations can be done without taking the socket lock because they only
- * read socket information that never changes during the life of the socket.
- *
- * Socket operations may acquire the lock for the associated TIPC port if they
- * need to perform an operation on the port. If any routine needs to acquire
- * both the socket lock and the port lock it must take the socket lock first
- * to avoid the risk of deadlock.
- *
- * The dispatcher handling incoming messages cannot grab the socket lock in
- * the standard fashion, since invoked it runs at the BH level and cannot block.
- * Instead, it checks to see if the socket lock is currently owned by someone,
- * and either handles the message itself or adds it to the socket's backlog
- * queue; in the latter case the queued message is processed once the process
- * owning the socket lock releases it.
- *
- * NOTE: Releasing the socket lock while an operation is sleeping overcomes
- * the problem of a blocked socket operation preventing any other operations
- * from occurring. However, applications must be careful if they have
- * multiple threads trying to send (or receive) on the same socket, as these
- * operations might interfere with each other. For example, doing a connect
- * and a receive at the same time might allow the receive to consume the
- * ACK message meant for the connect. While additional work could be done
- * to try and overcome this, it doesn't seem to be worthwhile at the present.
- *
- * NOTE: Releasing the socket lock while an operation is sleeping also ensures
- * that another operation that must be performed in a non-blocking manner is
- * not delayed for very long because the lock has already been taken.
- *
- * NOTE: This code assumes that certain fields of a port/socket pair are
- * constant over its lifetime; such fields can be examined without taking
- * the socket lock and/or port lock, and do not need to be re-read even
- * after resuming processing after waiting. These fields include:
- * - socket type
- * - pointer to socket sk structure (aka tipc_sock structure)
- * - pointer to port structure
- * - port reference
- */
-
static u32 tsk_own_node(struct tipc_sock *tsk)
{
return msg_prevnode(&tsk->phdr);
* Sleep until more data has arrived. But check for races..
*/
static long unix_stream_data_wait(struct sock *sk, long timeo,
- struct sk_buff *last, unsigned int last_len)
+ struct sk_buff *last, unsigned int last_len,
+ bool freezable)
{
struct sk_buff *tail;
DEFINE_WAIT(wait);
sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);
unix_state_unlock(sk);
- timeo = freezable_schedule_timeout(timeo);
+ if (freezable)
+ timeo = freezable_schedule_timeout(timeo);
+ else
+ timeo = schedule_timeout(timeo);
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD))
unsigned int splice_flags;
};
-static int unix_stream_read_generic(struct unix_stream_read_state *state)
+static int unix_stream_read_generic(struct unix_stream_read_state *state,
+ bool freezable)
{
struct scm_cookie scm;
struct socket *sock = state->socket;
mutex_unlock(&u->iolock);
timeo = unix_stream_data_wait(sk, timeo, last,
- last_len);
+ last_len, freezable);
if (signal_pending(current)) {
err = sock_intr_errno(timeo);
.flags = flags
};
- return unix_stream_read_generic(&state);
+ return unix_stream_read_generic(&state, true);
}
static int unix_stream_splice_actor(struct sk_buff *skb,
flags & SPLICE_F_NONBLOCK)
state.flags = MSG_DONTWAIT;
- return unix_stream_read_generic(&state);
+ return unix_stream_read_generic(&state, false);
}
static int unix_shutdown(struct socket *sock, int mode)
struct list_head bss_list;
struct rb_root bss_tree;
u32 bss_generation;
+ u32 bss_entries;
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
struct sk_buff *scan_msg;
struct cfg80211_sched_scan_request __rcu *sched_scan_req;
* also linked into the probe response struct.
*/
+/*
+ * Limit the number of BSS entries stored in mac80211. Each one is
+ * a bit over 4k at most, so this limits to roughly 4-5M of memory.
+ * If somebody wants to really attack this though, they'd likely
+ * use small beacons, and only one type of frame, limiting each of
+ * the entries to a much smaller size (in order to generate more
+ * entries in total, so overhead is bigger.)
+ */
+static int bss_entries_limit = 1000;
+module_param(bss_entries_limit, int, 0644);
+MODULE_PARM_DESC(bss_entries_limit,
+ "limit to number of scan BSS entries (per wiphy, default 1000)");
+
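Because bss_entries_limit is declared with module_param(..., 0644), it should also be readable and writable after load through the module's sysfs parameters directory. A small sketch (not from the patch; the path assumes the parameter belongs to the cfg80211 module) that reads the current limit back:

    #include <stdio.h>

    int main(void)
    {
            /* assumed sysfs location for the new parameter */
            const char *path =
                    "/sys/module/cfg80211/parameters/bss_entries_limit";
            FILE *f = fopen(path, "r");
            int limit;

            if (!f) {
                    perror(path);
                    return 1;
            }
            if (fscanf(f, "%d", &limit) != 1) {
                    fclose(f);
                    return 1;
            }
            printf("bss_entries_limit = %d\n", limit);
            fclose(f);
            return 0;
    }

The same value can be set at load time, e.g. via a bss_entries_limit=N module option, subject to the usual module parameter handling.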
#define IEEE80211_SCAN_RESULT_EXPIRE (30 * HZ)
static void bss_free(struct cfg80211_internal_bss *bss)
list_del_init(&bss->list);
rb_erase(&bss->rbn, &rdev->bss_tree);
+ rdev->bss_entries--;
+ WARN_ONCE((rdev->bss_entries == 0) ^ list_empty(&rdev->bss_list),
+ "rdev bss entries[%d]/list[empty:%d] corruption\n",
+ rdev->bss_entries, list_empty(&rdev->bss_list));
bss_ref_put(rdev, bss);
return true;
}
rdev->bss_generation++;
}
+static bool cfg80211_bss_expire_oldest(struct cfg80211_registered_device *rdev)
+{
+ struct cfg80211_internal_bss *bss, *oldest = NULL;
+ bool ret;
+
+ lockdep_assert_held(&rdev->bss_lock);
+
+ list_for_each_entry(bss, &rdev->bss_list, list) {
+ if (atomic_read(&bss->hold))
+ continue;
+
+ if (!list_empty(&bss->hidden_list) &&
+ !bss->pub.hidden_beacon_bss)
+ continue;
+
+ if (oldest && time_before(oldest->ts, bss->ts))
+ continue;
+ oldest = bss;
+ }
+
+ if (WARN_ON(!oldest))
+ return false;
+
+ /*
+ * The callers make sure to increase rdev->bss_generation if anything
+ * gets removed (and a new entry added), so there's no need to also do
+ * it here.
+ */
+
+ ret = __cfg80211_unlink_bss(rdev, oldest);
+ WARN_ON(!ret);
+ return ret;
+}
+
void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
bool send_message)
{
const u8 *ie;
int i, ssidlen;
u8 fold = 0;
+ u32 n_entries = 0;
ies = rcu_access_pointer(new->pub.beacon_ies);
if (WARN_ON(!ies))
/* This is the bad part ... */
list_for_each_entry(bss, &rdev->bss_list, list) {
+ /*
+ * we're iterating all the entries anyway, so take the
+ * opportunity to validate the list length accounting
+ */
+ n_entries++;
+
if (!ether_addr_equal(bss->pub.bssid, new->pub.bssid))
continue;
if (bss->pub.channel != new->pub.channel)
new->pub.beacon_ies);
}
+ WARN_ONCE(n_entries != rdev->bss_entries,
+ "rdev bss entries[%d]/list[len:%d] corruption\n",
+ rdev->bss_entries, n_entries);
+
return true;
}
}
}
+ if (rdev->bss_entries >= bss_entries_limit &&
+ !cfg80211_bss_expire_oldest(rdev)) {
+ kfree(new);
+ goto drop;
+ }
+
list_add_tail(&new->list, &rdev->bss_list);
+ rdev->bss_entries++;
rb_insert_bss(rdev, new);
found = new;
}
58500000,
65000000,
78000000,
- 0,
+ /* not in the spec, but some devices use this: */
+ 86500000,
},
{ 13500000,
27000000,
/* released below */
cred = get_current_cred();
cxt = cred_cxt(cred);
- profile = aa_cred_profile(cred);
- previous_profile = cxt->previous;
+ profile = aa_get_newest_profile(aa_cred_profile(cred));
+ previous_profile = aa_get_newest_profile(cxt->previous);
if (unconfined(profile)) {
info = "unconfined";
out:
aa_put_profile(hat);
kfree(name);
+ aa_put_profile(profile);
+ aa_put_profile(previous_profile);
put_cred(cred);
return error;