N: Kees Cook
E: kees@outflux.net
-W: http://outflux.net/
-P: 1024D/17063E6D 9FA3 C49C 23C9 D1BC 2E30 1975 1FFF 4BA9 1706 3E6D
-D: Minor updates to SCSI types, added /proc/pid/maps protection
+E: kees@ubuntu.com
+E: keescook@chromium.org
+W: http://outflux.net/blog/
+P: 4096R/DC6DC026 A5C3 F68F 229D D60F 723E 6E13 8972 F4DF DC6D C026
+D: Various security things, bug fixes, and documentation.
S: (ask for current address)
+S: Portland, Oregon
S: USA
N: Robin Cornelius
$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
-rollback_snap
-
- Rolls back data to the specified snapshot. This goes over the entire
- list of rados blocks and sends a rollback command to each.
-
- $ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
-
snap_*
A directory per each snapshot
--- /dev/null
+ <refentry>
+ <refmeta>
+ <refentrytitle>V4L2_PIX_FMT_NV24 ('NV24'), V4L2_PIX_FMT_NV42 ('NV42')</refentrytitle>
+ &manvol;
+ </refmeta>
+ <refnamediv>
+ <refname id="V4L2-PIX-FMT-NV24"><constant>V4L2_PIX_FMT_NV24</constant></refname>
+ <refname id="V4L2-PIX-FMT-NV42"><constant>V4L2_PIX_FMT_NV42</constant></refname>
+ <refpurpose>Formats with full horizontal and vertical
+chroma resolutions, also known as YUV 4:4:4. One luminance and one
+chrominance plane with alternating chroma samples as opposed to
+<constant>V4L2_PIX_FMT_YVU420</constant></refpurpose>
+ </refnamediv>
+ <refsect1>
+ <title>Description</title>
+
+ <para>These are two-plane versions of the YUV 4:4:4 format. The three
+ components are separated into two sub-images or planes. The Y plane is
+ first, with each Y sample stored in one byte per pixel. For
+ <constant>V4L2_PIX_FMT_NV24</constant>, a combined CbCr plane
+ immediately follows the Y plane in memory. The CbCr plane has the same
+ width and height, in pixels, as the Y plane (and the image). Each line
+ contains one CbCr pair per pixel, with each Cb and Cr sample stored in
+ one byte. <constant>V4L2_PIX_FMT_NV42</constant> is the same except that
+ the Cb and Cr samples are swapped, the CrCb plane starts with a Cr
+ sample.</para>
+
+ <para>If the Y plane has pad bytes after each row, then the CbCr plane
+ has twice as many pad bytes after its rows.</para>
+
+ <example>
+ <title><constant>V4L2_PIX_FMT_NV24</constant> 4 × 4
+pixel image</title>
+
+ <formalpara>
+ <title>Byte Order.</title>
+ <para>Each cell is one byte.
+ <informaltable frame="none">
+ <tgroup cols="9" align="center">
+ <colspec align="left" colwidth="2*" />
+ <tbody valign="top">
+ <row>
+ <entry>start + 0:</entry>
+ <entry>Y'<subscript>00</subscript></entry>
+ <entry>Y'<subscript>01</subscript></entry>
+ <entry>Y'<subscript>02</subscript></entry>
+ <entry>Y'<subscript>03</subscript></entry>
+ </row>
+ <row>
+ <entry>start + 4:</entry>
+ <entry>Y'<subscript>10</subscript></entry>
+ <entry>Y'<subscript>11</subscript></entry>
+ <entry>Y'<subscript>12</subscript></entry>
+ <entry>Y'<subscript>13</subscript></entry>
+ </row>
+ <row>
+ <entry>start + 8:</entry>
+ <entry>Y'<subscript>20</subscript></entry>
+ <entry>Y'<subscript>21</subscript></entry>
+ <entry>Y'<subscript>22</subscript></entry>
+ <entry>Y'<subscript>23</subscript></entry>
+ </row>
+ <row>
+ <entry>start + 12:</entry>
+ <entry>Y'<subscript>30</subscript></entry>
+ <entry>Y'<subscript>31</subscript></entry>
+ <entry>Y'<subscript>32</subscript></entry>
+ <entry>Y'<subscript>33</subscript></entry>
+ </row>
+ <row>
+ <entry>start + 16:</entry>
+ <entry>Cb<subscript>00</subscript></entry>
+ <entry>Cr<subscript>00</subscript></entry>
+ <entry>Cb<subscript>01</subscript></entry>
+ <entry>Cr<subscript>01</subscript></entry>
+ <entry>Cb<subscript>02</subscript></entry>
+ <entry>Cr<subscript>02</subscript></entry>
+ <entry>Cb<subscript>03</subscript></entry>
+ <entry>Cr<subscript>03</subscript></entry>
+ </row>
+ <row>
+ <entry>start + 24:</entry>
+ <entry>Cb<subscript>10</subscript></entry>
+ <entry>Cr<subscript>10</subscript></entry>
+ <entry>Cb<subscript>11</subscript></entry>
+ <entry>Cr<subscript>11</subscript></entry>
+ <entry>Cb<subscript>12</subscript></entry>
+ <entry>Cr<subscript>12</subscript></entry>
+ <entry>Cb<subscript>13</subscript></entry>
+ <entry>Cr<subscript>13</subscript></entry>
+ </row>
+ <row>
+ <entry>start + 32:</entry>
+ <entry>Cb<subscript>20</subscript></entry>
+ <entry>Cr<subscript>20</subscript></entry>
+ <entry>Cb<subscript>21</subscript></entry>
+ <entry>Cr<subscript>21</subscript></entry>
+ <entry>Cb<subscript>22</subscript></entry>
+ <entry>Cr<subscript>22</subscript></entry>
+ <entry>Cb<subscript>23</subscript></entry>
+ <entry>Cr<subscript>23</subscript></entry>
+ </row>
+ <row>
+ <entry>start + 40:</entry>
+ <entry>Cb<subscript>30</subscript></entry>
+ <entry>Cr<subscript>30</subscript></entry>
+ <entry>Cb<subscript>31</subscript></entry>
+ <entry>Cr<subscript>31</subscript></entry>
+ <entry>Cb<subscript>32</subscript></entry>
+ <entry>Cr<subscript>32</subscript></entry>
+ <entry>Cb<subscript>33</subscript></entry>
+ <entry>Cr<subscript>33</subscript></entry>
+ </row>
+ </tbody>
+ </tgroup>
+ </informaltable>
+ </para>
+ </formalpara>
+ </example>
+ </refsect1>
+ </refentry>
&sub-nv12m;
&sub-nv12mt;
&sub-nv16;
+ &sub-nv24;
&sub-m420;
</section>
--- /dev/null
+ The Frame Buffer Device API
+ ---------------------------
+
+Last revised: June 21, 2011
+
+
+0. Introduction
+---------------
+
+This document describes the frame buffer API used by applications to interact
+with frame buffer devices. In-kernel APIs between device drivers and the frame
+buffer core are not described.
+
+Due to a lack of documentation in the original frame buffer API, driver
+behaviours differ in subtle (and not so subtle) ways. This document describes
+the recommended API implementation, but applications should be prepared to
+deal with different behaviours.
+
+
+1. Capabilities
+---------------
+
+Device and driver capabilities are reported in the fixed screen information
+capabilities field.
+
+struct fb_fix_screeninfo {
+ ...
+ __u16 capabilities; /* see FB_CAP_* */
+ ...
+};
+
+Applications should use these capabilities to find out what features they can
+expect from the device and driver.
+
+- FB_CAP_FOURCC
+
+The driver supports the four character code (FOURCC) based format setting API.
+When supported, formats are configured using a FOURCC instead of manually
+specifying the color component layout.
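+
+For illustration, here is a minimal sketch of how an application could check
+for this capability. The device node path /dev/fb0 is only an example.
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <linux/fb.h>
+
+int main(void)
+{
+	struct fb_fix_screeninfo fix;
+	int fd = open("/dev/fb0", O_RDWR);
+
+	if (fd < 0 || ioctl(fd, FBIOGET_FSCREENINFO, &fix) < 0) {
+		perror("fb0");
+		return 1;
+	}
+
+	if (fix.capabilities & FB_CAP_FOURCC)
+		printf("FOURCC-based format API supported\n");
+	else
+		printf("Legacy format API only\n");
+
+	return 0;
+}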
+
+
+2. Types and visuals
+--------------------
+
+Pixels are stored in memory in hardware-dependent formats. Applications need
+to be aware of the pixel storage format in order to write image data to the
+frame buffer memory in the format expected by the hardware.
+
+Formats are described by frame buffer types and visuals. Some visuals require
+additional information, which is stored in the variable screen information
+bits_per_pixel, grayscale, red, green, blue and transp fields.
+
+Visuals describe how color information is encoded and assembled to create
+macropixels. Types describe how macropixels are stored in memory. The following
+types and visuals are supported.
+
+- FB_TYPE_PACKED_PIXELS
+
+Macropixels are stored contiguously in a single plane. If the number of bits
+per macropixel is not a multiple of 8, whether macropixels are padded to the
+next multiple of 8 bits or packed together into bytes depends on the visual.
+
+Padding at the end of lines may be present and is then reported through the
+fixed screen information line_length field.
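+
+As an illustration (not part of the API), the byte offset of pixel (x, y) for
+a packed-pixels format whose depth is a multiple of 8 bits can be computed
+from the line_length and bits_per_pixel fields. The helper name below is
+hypothetical.
+
+#include <stddef.h>
+#include <linux/fb.h>
+
+static inline size_t fb_pixel_offset(const struct fb_fix_screeninfo *fix,
+				     const struct fb_var_screeninfo *var,
+				     unsigned int x, unsigned int y)
+{
+	/* One line is line_length bytes; each pixel occupies a whole number of bytes. */
+	return (size_t)y * fix->line_length + (size_t)x * (var->bits_per_pixel / 8);
+}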
+
+- FB_TYPE_PLANES
+
+Macropixels are split across multiple planes. The number of planes is equal to
+the number of bits per macropixel, with the i'th plane storing the i'th bit of
+all macropixels.
+
+Planes are located contiguously in memory.
+
+- FB_TYPE_INTERLEAVED_PLANES
+
+Macropixels are split across multiple planes. The number of planes is equal to
+the number of bits per macropixel, with the i'th plane storing the i'th bit of
+all macropixels.
+
+Planes are interleaved in memory. The interleave factor, defined as the
+distance in bytes between the beginning of two consecutive interleaved blocks
+belonging to different planes, is stored in the fixed screen information
+type_aux field.
+
+- FB_TYPE_FOURCC
+
+Macropixels are stored in memory as described by the format FOURCC identifier
+stored in the variable screen information grayscale field.
+
+- FB_VISUAL_MONO01
+
+Pixels are black or white and stored in the number of bits (typically one)
+specified by the variable screen information bits_per_pixel field.
+
+Black pixels are represented by all bits set to 1 and white pixels by all bits
+set to 0. When the number of bits per pixel is smaller than 8, several pixels
+are packed together in a byte.
+
+FB_VISUAL_MONO01 is currently used with FB_TYPE_PACKED_PIXELS only.
+
+- FB_VISUAL_MONO10
+
+Pixels are black or white and stored in the number of bits (typically one)
+specified by the variable screen information bits_per_pixel field.
+
+Black pixels are represented by all bits set to 0 and white pixels by all bits
+set to 1. When the number of bits per pixel is smaller than 8, several pixels
+are packed together in a byte.
+
+FB_VISUAL_MONO10 is currently used with FB_TYPE_PACKED_PIXELS only.
+
+- FB_VISUAL_TRUECOLOR
+
+Pixels are broken into red, green and blue components, and each component
+indexes a read-only lookup table for the corresponding value. Lookup tables
+are device-dependent, and provide linear or non-linear ramps.
+
+Each component is stored in a macropixel according to the variable screen
+information red, green, blue and transp fields.
+
+- FB_VISUAL_PSEUDOCOLOR and FB_VISUAL_STATIC_PSEUDOCOLOR
+
+Pixel values are encoded as indices into a colormap that stores red, green and
+blue components. The colormap is read-only for FB_VISUAL_STATIC_PSEUDOCOLOR
+and read-write for FB_VISUAL_PSEUDOCOLOR.
+
+Each pixel value is stored in the number of bits reported by the variable
+screen information bits_per_pixel field.
+
+- FB_VISUAL_DIRECTCOLOR
+
+Pixels are broken into red, green and blue components, and each component
+indexes a programmable lookup table for the corresponding value.
+
+Each component is stored in a macropixel according to the variable screen
+information red, green, blue and transp fields.
+
+- FB_VISUAL_FOURCC
+
+Pixels are encoded and interpreted as described by the format FOURCC
+identifier stored in the variable screen information grayscale field.
+
+
+3. Screen information
+---------------------
+
+Screen information is queried by applications using the FBIOGET_FSCREENINFO
+and FBIOGET_VSCREENINFO ioctls. Those ioctls take a pointer to a
+fb_fix_screeninfo and fb_var_screeninfo structure respectively.
+
+struct fb_fix_screeninfo stores device independent unchangeable information
+about the frame buffer device and the current format. This information can't
+be directly modified by applications, but can be changed by the driver when an
+application modifies the format.
+
+struct fb_fix_screeninfo {
+ char id[16]; /* identification string eg "TT Builtin" */
+ unsigned long smem_start; /* Start of frame buffer mem */
+ /* (physical address) */
+ __u32 smem_len; /* Length of frame buffer mem */
+ __u32 type; /* see FB_TYPE_* */
+ __u32 type_aux; /* Interleave for interleaved Planes */
+ __u32 visual; /* see FB_VISUAL_* */
+ __u16 xpanstep; /* zero if no hardware panning */
+ __u16 ypanstep; /* zero if no hardware panning */
+ __u16 ywrapstep; /* zero if no hardware ywrap */
+ __u32 line_length; /* length of a line in bytes */
+ unsigned long mmio_start; /* Start of Memory Mapped I/O */
+ /* (physical address) */
+ __u32 mmio_len; /* Length of Memory Mapped I/O */
+ __u32 accel; /* Indicate to driver which */
+ /* specific chip/card we have */
+ __u16 capabilities; /* see FB_CAP_* */
+ __u16 reserved[2]; /* Reserved for future compatibility */
+};
+
+struct fb_var_screeninfo stores device independent changeable information
+about a frame buffer device, its current format and video mode, as well as
+other miscellaneous parameters.
+
+struct fb_var_screeninfo {
+ __u32 xres; /* visible resolution */
+ __u32 yres;
+ __u32 xres_virtual; /* virtual resolution */
+ __u32 yres_virtual;
+ __u32 xoffset; /* offset from virtual to visible */
+ __u32 yoffset; /* resolution */
+
+ __u32 bits_per_pixel; /* guess what */
+ __u32 grayscale; /* 0 = color, 1 = grayscale, */
+ /* >1 = FOURCC */
+ struct fb_bitfield red; /* bitfield in fb mem if true color, */
+ struct fb_bitfield green; /* else only length is significant */
+ struct fb_bitfield blue;
+ struct fb_bitfield transp; /* transparency */
+
+ __u32 nonstd; /* != 0 Non standard pixel format */
+
+ __u32 activate; /* see FB_ACTIVATE_* */
+
+ __u32 height; /* height of picture in mm */
+ __u32 width; /* width of picture in mm */
+
+ __u32 accel_flags; /* (OBSOLETE) see fb_info.flags */
+
+ /* Timing: All values in pixclocks, except pixclock (of course) */
+ __u32 pixclock; /* pixel clock in ps (pico seconds) */
+ __u32 left_margin; /* time from sync to picture */
+ __u32 right_margin; /* time from picture to sync */
+ __u32 upper_margin; /* time from sync to picture */
+ __u32 lower_margin;
+ __u32 hsync_len; /* length of horizontal sync */
+ __u32 vsync_len; /* length of vertical sync */
+ __u32 sync; /* see FB_SYNC_* */
+ __u32 vmode; /* see FB_VMODE_* */
+ __u32 rotate; /* angle we rotate counter clockwise */
+ __u32 colorspace; /* colorspace for FOURCC-based modes */
+ __u32 reserved[4]; /* Reserved for future compatibility */
+};
+
+To modify variable information, applications call the FBIOPUT_VSCREENINFO
+ioctl with a pointer to a fb_var_screeninfo structure. If the call is
+successful, the driver will update the fixed screen information accordingly.
+
+Instead of filling the complete fb_var_screeninfo structure manually,
+applications should call the FBIOGET_VSCREENINFO ioctl and modify only the
+fields they care about.
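+
+For example, the following sketch (assuming a frame buffer device node at
+/dev/fb0) follows that pattern to change only the visible resolution:
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <sys/ioctl.h>
+#include <unistd.h>
+#include <linux/fb.h>
+
+int main(void)
+{
+	struct fb_var_screeninfo var;
+	int fd = open("/dev/fb0", O_RDWR);
+
+	if (fd < 0) {
+		perror("/dev/fb0");
+		return 1;
+	}
+
+	/* Start from the current settings... */
+	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0) {
+		perror("FBIOGET_VSCREENINFO");
+		return 1;
+	}
+
+	/* ...and modify only the fields of interest. */
+	var.xres = 640;
+	var.yres = 480;
+
+	if (ioctl(fd, FBIOPUT_VSCREENINFO, &var) < 0) {
+		perror("FBIOPUT_VSCREENINFO");
+		return 1;
+	}
+
+	close(fd);
+	return 0;
+}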
+
+
+4. Format configuration
+-----------------------
+
+Frame buffer devices offer two ways to configure the frame buffer format: the
+legacy API and the FOURCC-based API.
+
+
+The legacy API has been the only frame buffer format configuration API for a
+long time and is thus widely used by applications. It is the recommended API
+for applications when using RGB and grayscale formats, as well as legacy
+non-standard formats.
+
+To select a format, applications set the fb_var_screeninfo bits_per_pixel field
+to the desired frame buffer depth. Values up to 8 will usually map to
+monochrome, grayscale or pseudocolor visuals, although this is not required.
+
+- For grayscale formats, applications set the grayscale field to one. The red,
+ blue, green and transp fields must be set to 0 by applications and ignored by
+  drivers. Drivers must set the red, blue and green offsets to 0 and their
+  lengths to the bits_per_pixel value.
+
+- For pseudocolor formats, applications set the grayscale field to zero. The
+ red, blue, green and transp fields must be set to 0 by applications and
+  ignored by drivers. Drivers must set the red, blue and green offsets to 0
+  and their lengths to the bits_per_pixel value.
+
+- For truecolor and directcolor formats, applications set the grayscale field
+ to zero, and the red, blue, green and transp fields to describe the layout of
+ color components in memory.
+
+struct fb_bitfield {
+ __u32 offset; /* beginning of bitfield */
+ __u32 length; /* length of bitfield */
+ __u32 msb_right; /* != 0 : Most significant bit is */
+ /* right */
+};
+
+  Pixel values are bits_per_pixel wide and are split into non-overlapping red,
+  green, blue and alpha (transparency) components. The location and size of
+  each component in the pixel value are described by the fb_bitfield offset
+  and length fields. Offsets are computed from the right.
+
+ Pixels are always stored in an integer number of bytes. If the number of
+ bits per pixel is not a multiple of 8, pixel values are padded to the next
+ multiple of 8 bits.
+
+Upon successful format configuration, drivers update the fb_fix_screeninfo
+type, visual and line_length fields depending on the selected format.
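+
+As an illustration of the legacy API, the sketch below requests a 16-bit RGB
+5:6:5 true color format on an already open frame buffer file descriptor. The
+helper name is hypothetical, and the driver remains free to adjust the
+requested values.
+
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/fb.h>
+
+static int fb_set_rgb565(int fd)
+{
+	struct fb_var_screeninfo var;
+
+	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
+		return -1;
+
+	var.grayscale = 0;		/* color, legacy API */
+	var.bits_per_pixel = 16;
+	var.red.offset = 11;	var.red.length = 5;	var.red.msb_right = 0;
+	var.green.offset = 5;	var.green.length = 6;	var.green.msb_right = 0;
+	var.blue.offset = 0;	var.blue.length = 5;	var.blue.msb_right = 0;
+	memset(&var.transp, 0, sizeof(var.transp));
+
+	return ioctl(fd, FBIOPUT_VSCREENINFO, &var);
+}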
+
+
+The FOURCC-based API replaces format descriptions by four character codes
+(FOURCC). FOURCCs are abstract identifiers that uniquely define a format
+without explicitly describing it. This is the only API that supports YUV
+formats. Drivers are also encouraged to implement the FOURCC-based API for RGB
+and grayscale formats.
+
+Drivers that support the FOURCC-based API report this capability by setting
+the FB_CAP_FOURCC bit in the fb_fix_screeninfo capabilities field.
+
+FOURCC definitions are located in the linux/videodev2.h header. However,
+despite starting with the V4L2_PIX_FMT_ prefix, they are not restricted to V4L2
+and don't require usage of the V4L2 subsystem. FOURCC documentation is
+available in Documentation/DocBook/v4l/pixfmt.xml.
+
+To select a format, applications set the grayscale field to the desired FOURCC.
+For YUV formats, they should also select the appropriate colorspace by setting
+the colorspace field to one of the colorspaces listed in linux/videodev2.h and
+documented in Documentation/DocBook/v4l/colorspaces.xml.
+
+The red, green, blue and transp fields are not used with the FOURCC-based API.
+For forward compatibility reasons applications must zero those fields, and
+drivers must ignore them. Values other than 0 may get a meaning in future
+extensions.
+
+Upon successful format configuration, drivers update the fb_fix_screeninfo
+type, visual and line_length fields depending on the selected format. The type
+and visual fields are set to FB_TYPE_FOURCC and FB_VISUAL_FOURCC respectively.
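+
+For illustration, the sketch below requests the NV24 YUV 4:4:4 format through
+the FOURCC-based API on an already open frame buffer file descriptor. The
+helper name and the colorspace choice are only examples.
+
+#include <string.h>
+#include <sys/ioctl.h>
+#include <linux/fb.h>
+#include <linux/videodev2.h>
+
+static int fb_set_nv24(int fd)
+{
+	struct fb_var_screeninfo var;
+
+	if (ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
+		return -1;
+
+	var.grayscale = V4L2_PIX_FMT_NV24;
+	var.colorspace = V4L2_COLORSPACE_REC709;
+	/* red, green, blue and transp must be zeroed with the FOURCC API. */
+	memset(&var.red, 0, sizeof(var.red));
+	memset(&var.green, 0, sizeof(var.green));
+	memset(&var.blue, 0, sizeof(var.blue));
+	memset(&var.transp, 0, sizeof(var.transp));
+
+	return ioctl(fd, FBIOPUT_VSCREENINFO, &var);
+}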
CPU-intensive style benchmark, and it can vary highly in
a microbenchmark depending on workload and compiler.
- 1: only for 32-bit processes
- 2: only for 64-bit processes
+ 32: only for 32-bit processes
+ 64: only for 64-bit processes
on: enable for both 32- and 64-bit processes
off: disable for both 32- and 64-bit processes
- amd_iommu= [HW,X86-84]
+ amd_iommu= [HW,X86-64]
Pass parameters to the AMD IOMMU driver in the system.
Possible values are:
fullflush - enable flushing of IO/TLB entries when
Default: 0 (off)
tcp_max_syn_backlog - INTEGER
- Maximal number of remembered connection requests, which are
- still did not receive an acknowledgment from connecting client.
- Default value is 1024 for systems with more than 128Mb of memory,
- and 128 for low memory machines. If server suffers of overload,
- try to increase this number.
+	Maximal number of remembered connection requests that have not yet
+	received an acknowledgment from the connecting client.
+	The minimal value is 128 for low memory machines, and it will
+	increase in proportion to the memory of the machine.
+	If the server suffers from overload, try increasing this number.
tcp_max_tw_buckets - INTEGER
Maximal number of timewait sockets held by system simultaneously.
The machine DAI configuration glues all the codec and CPU DAIs together. It can
also be used to set up the DAI system clock and for any machine related DAI
initialisation e.g. the machine audio map can be connected to the codec audio
-map, unconnected codec pins can be set as such. Please see corgi.c, spitz.c
-for examples.
+map, unconnected codec pins can be set as such.
struct snd_soc_dai_link is used to set up each DAI in your machine. e.g.
The machine driver can optionally extend the codec power map and to become an
audio power map of the audio subsystem. This allows for automatic power up/down
of speaker/HP amplifiers, etc. Codec pins can be connected to the machines jack
-sockets in the machine init function. See soc/pxa/spitz.c and dapm.txt for
-details.
+sockets in the machine init function.
Machine Controls
[SourceDisksFiles]
[SourceDisksNames]
[DeviceList]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
[DeviceList.NTamd64]
-%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02
+%DESCRIPTION%=DriverInstall, USB\VID_0525&PID_A4A7, USB\VID_1D6B&PID_0104&MI_02, USB\VID_1D6B&PID_0106&MI_00
;------------------------------------------------------------------------------
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/joro/linux-2.6-iommu.git
S: Supported
-F: arch/x86/kernel/amd_iommu*.c
-F: arch/x86/include/asm/amd_iommu*.h
+F: drivers/iommu/amd_iommu*.[ch]
+F: include/linux/amd-iommu.h
AMD MICROCODE UPDATE SUPPORT
M: Andreas Herrmann <andreas.herrmann3@amd.com>
M: Ben Dooks <ben-linux@fluff.org>
M: Kukjin Kim <kgene.kim@samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
W: http://www.fluff.org/ben/linux/
S: Maintained
F: arch/arm/plat-samsung/
F: arch/arm/plat-s3c24xx/
F: arch/arm/plat-s5p/
+F: arch/arm/mach-s3c24*/
+F: arch/arm/mach-s3c64xx/
F: drivers/*/*s3c2410*
F: drivers/*/*/*s3c2410*
-
-ARM/S3C2410 ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2410/
-
-ARM/S3C244x ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c2440/
-F: arch/arm/mach-s3c2443/
-
-ARM/S3C64xx ARM ARCHITECTURE
-M: Ben Dooks <ben-linux@fluff.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://www.fluff.org/ben/linux/
-S: Maintained
-F: arch/arm/mach-s3c64xx/
+F: drivers/spi/spi-s3c*
+F: sound/soc/samsung/*
ARM/S5P EXYNOS ARM ARCHITECTURES
M: Kukjin Kim <kgene.kim@samsung.com>
F: mm/
MEMORY RESOURCE CONTROLLER
+M: Johannes Weiner <hannes@cmpxchg.org>
+M: Michal Hocko <mhocko@suse.cz>
M: Balbir Singh <bsingharora@gmail.com>
-M: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
L: cgroups@vger.kernel.org
L: linux-mm@kvack.org
S: Supported
F: sound/soc/samsung
+SAMSUNG FRAMEBUFFER DRIVER
+M: Jingoo Han <jg1.han@samsung.com>
+L: linux-fbdev@vger.kernel.org
+S: Maintained
+F: drivers/video/s3c-fb.c
+
SERIAL DRIVERS
M: Alan Cox <alan@linux.intel.com>
L: linux-serial@vger.kernel.org
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc6
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*
be avoided when possible.
config PHYS_OFFSET
- hex "Physical address of main memory"
+ hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
+ default DRAM_BASE if !MMU
help
Please provide the physical address corresponding to the
location of main memory in your system.
CONFIG_MACH_NOKIA770=y
CONFIG_MACH_AMS_DELTA=y
CONFIG_MACH_OMAP_GENERIC=y
-CONFIG_OMAP_ARM_216MHZ=y
-CONFIG_OMAP_ARM_195MHZ=y
-CONFIG_OMAP_ARM_192MHZ=y
CONFIG_OMAP_ARM_182MHZ=y
-CONFIG_OMAP_ARM_168MHZ=y
-# CONFIG_OMAP_ARM_60MHZ is not set
# CONFIG_ARM_THUMB is not set
CONFIG_PCCARD=y
CONFIG_OMAP_CF=y
};
struct unwind_idx {
- unsigned long addr;
+ unsigned long addr_offset;
unsigned long insn;
};
struct unwind_table {
struct list_head list;
- struct unwind_idx *start;
- struct unwind_idx *stop;
+ const struct unwind_idx *start;
+ const struct unwind_idx *origin;
+ const struct unwind_idx *stop;
unsigned long begin_addr;
unsigned long end_addr;
};
extern void unwind_table_del(struct unwind_table *tab);
extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
-#ifdef CONFIG_ARM_UNWIND
-extern int __init unwind_init(void);
-#else
-static inline int __init unwind_init(void)
-{
- return 0;
-}
-#endif
-
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_ARM_UNWIND
fake_pmu.used_mask = fake_used_mask;
if (!validate_event(&fake_pmu, leader))
- return -ENOSPC;
+ return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_pmu, sibling))
- return -ENOSPC;
+ return -EINVAL;
}
if (!validate_event(&fake_pmu, event))
- return -ENOSPC;
+ return -EINVAL;
return 0;
}
static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
+ if (!cpu_pmu)
+ return -ENODEV;
+
cpu_pmu->plat_device = pdev;
return 0;
}
{
struct machine_desc *mdesc;
- unwind_init();
-
setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
if (!mdesc)
machine_desc = mdesc;
machine_name = mdesc->name;
+#ifdef CONFIG_ZONE_DMA
+ if (mdesc->dma_zone_size) {
+ extern unsigned long arm_dma_zone_size;
+ arm_dma_zone_size = mdesc->dma_zone_size;
+ }
+#endif
if (mdesc->soft_reboot)
reboot_setup("s");
tcm_init();
-#ifdef CONFIG_ZONE_DMA
- if (mdesc->dma_zone_size) {
- extern unsigned long arm_dma_zone_size;
- arm_dma_zone_size = mdesc->dma_zone_size;
- }
-#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
- unsigned long *insn; /* pointer to the current instructions word */
+ const unsigned long *insn; /* pointer to the current instructions word */
int entries; /* number of entries left to interpret */
int byte; /* current byte number in the instructions word */
};
PC = 15
};
-extern struct unwind_idx __start_unwind_idx[];
-extern struct unwind_idx __stop_unwind_idx[];
+extern const struct unwind_idx __start_unwind_idx[];
+static const struct unwind_idx *__origin_unwind_idx;
+extern const struct unwind_idx __stop_unwind_idx[];
static DEFINE_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
})
/*
- * Binary search in the unwind index. The entries entries are
+ * Binary search in the unwind index. The entries are
* guaranteed to be sorted in ascending order by the linker.
+ *
+ * start = first entry
+ * origin = first entry with positive offset (or stop if there is no such entry)
+ * stop - 1 = last entry
*/
-static struct unwind_idx *search_index(unsigned long addr,
- struct unwind_idx *first,
- struct unwind_idx *last)
+static const struct unwind_idx *search_index(unsigned long addr,
+ const struct unwind_idx *start,
+ const struct unwind_idx *origin,
+ const struct unwind_idx *stop)
{
- pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
+ unsigned long addr_prel31;
+
+ pr_debug("%s(%08lx, %p, %p, %p)\n",
+ __func__, addr, start, origin, stop);
+
+ /*
+ * only search in the section with the matching sign. This way the
+ * prel31 numbers can be compared as unsigned longs.
+ */
+ if (addr < (unsigned long)start)
+ /* negative offsets: [start; origin) */
+ stop = origin;
+ else
+ /* positive offsets: [origin; stop) */
+ start = origin;
+
+	/* prel31 for address relative to start */
+ addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
- if (addr < first->addr) {
+ while (start < stop - 1) {
+ const struct unwind_idx *mid = start + ((stop - start) >> 1);
+
+ /*
+ * As addr_prel31 is relative to start an offset is needed to
+ * make it relative to mid.
+ */
+ if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
+ mid->addr_offset)
+ stop = mid;
+ else {
+ /* keep addr_prel31 relative to start */
+ addr_prel31 -= ((unsigned long)mid -
+ (unsigned long)start);
+ start = mid;
+ }
+ }
+
+ if (likely(start->addr_offset <= addr_prel31))
+ return start;
+ else {
pr_warning("unwind: Unknown symbol address %08lx\n", addr);
return NULL;
- } else if (addr >= last->addr)
- return last;
+ }
+}
- while (first < last - 1) {
- struct unwind_idx *mid = first + ((last - first + 1) >> 1);
+static const struct unwind_idx *unwind_find_origin(
+ const struct unwind_idx *start, const struct unwind_idx *stop)
+{
+ pr_debug("%s(%p, %p)\n", __func__, start, stop);
+ while (start < stop) {
+ const struct unwind_idx *mid = start + ((stop - start) >> 1);
- if (addr < mid->addr)
- last = mid;
+ if (mid->addr_offset >= 0x40000000)
+ /* negative offset */
+ start = mid + 1;
else
- first = mid;
+ /* positive offset */
+ stop = mid;
}
-
- return first;
+ pr_debug("%s -> %p\n", __func__, stop);
+ return stop;
}
-static struct unwind_idx *unwind_find_idx(unsigned long addr)
+static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
- struct unwind_idx *idx = NULL;
+ const struct unwind_idx *idx = NULL;
unsigned long flags;
pr_debug("%s(%08lx)\n", __func__, addr);
- if (core_kernel_text(addr))
+ if (core_kernel_text(addr)) {
+ if (unlikely(!__origin_unwind_idx))
+ __origin_unwind_idx =
+ unwind_find_origin(__start_unwind_idx,
+ __stop_unwind_idx);
+
/* main unwind table */
idx = search_index(addr, __start_unwind_idx,
- __stop_unwind_idx - 1);
- else {
+ __origin_unwind_idx,
+ __stop_unwind_idx);
+ } else {
/* module unwind tables */
struct unwind_table *table;
if (addr >= table->begin_addr &&
addr < table->end_addr) {
idx = search_index(addr, table->start,
- table->stop - 1);
+ table->origin,
+ table->stop);
/* Move-to-front to exploit common traces */
list_move(&table->list, &unwind_tables);
break;
int unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
- struct unwind_idx *idx;
+ const struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
/* only go to a higher address on the stack */
unsigned long text_size)
{
unsigned long flags;
- struct unwind_idx *idx;
struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
if (!tab)
return tab;
- tab->start = (struct unwind_idx *)start;
- tab->stop = (struct unwind_idx *)(start + size);
+ tab->start = (const struct unwind_idx *)start;
+ tab->stop = (const struct unwind_idx *)(start + size);
+ tab->origin = unwind_find_origin(tab->start, tab->stop);
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
- /* Convert the symbol addresses to absolute values */
- for (idx = tab->start; idx < tab->stop; idx++)
- idx->addr = prel31_to_addr(&idx->addr);
-
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
kfree(tab);
}
-
-int __init unwind_init(void)
-{
- struct unwind_idx *idx;
-
- /* Convert the symbol addresses to absolute values */
- for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
- idx->addr = prel31_to_addr(&idx->addr);
-
- pr_debug("unwind: ARM stack unwinding initialised\n");
-
- return 0;
-}
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.0", &tc0_clk),
CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.0", &tc1_clk),
CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.0", &tc2_clk),
- CLKDEV_CON_DEV_ID("t3_clk", "atmel_tcb.1", &tc3_clk),
- CLKDEV_CON_DEV_ID("t4_clk", "atmel_tcb.1", &tc4_clk),
- CLKDEV_CON_DEV_ID("t5_clk", "atmel_tcb.1", &tc5_clk),
+ CLKDEV_CON_DEV_ID("t0_clk", "atmel_tcb.1", &tc3_clk),
+ CLKDEV_CON_DEV_ID("t1_clk", "atmel_tcb.1", &tc4_clk),
+ CLKDEV_CON_DEV_ID("t2_clk", "atmel_tcb.1", &tc5_clk),
CLKDEV_CON_DEV_ID("pclk", "ssc.0", &ssc_clk),
/* more usart lookup table for DT entries */
CLKDEV_CON_DEV_ID("usart", "fffff200.serial", &mck),
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
* USB Device (Gadget)
* -------------------------------------------------------------------- */
-#ifdef CONFIG_USB_GADGET_AT91
+#ifdef CONFIG_USB_AT91
static struct at91_udc_data udc_data;
static struct resource udc_resources[] = {
.xres = 240, .yres = 320,
.pixclock = KHZ2PICOS(4965),
- .left_margin = 1, .right_margin = 33,
+ .left_margin = 1, .right_margin = 34,
.upper_margin = 1, .lower_margin = 0,
.hsync_len = 5, .vsync_len = 1,
.xres = 240, .yres = 320,
.pixclock = KHZ2PICOS(5000),
- .left_margin = 1, .right_margin = 33,
+ .left_margin = 1, .right_margin = 34,
.upper_margin = 1, .lower_margin = 0,
.hsync_len = 5, .vsync_len = 1,
.xres = 320, .yres = 240,
.pixclock = KHZ2PICOS(1440),
- .left_margin = 1, .right_margin = 1,
+ .left_margin = 1, .right_margin = 2,
.upper_margin = 0, .lower_margin = 0,
.hsync_len = 1, .vsync_len = 1,
.xres = 240, .yres = 320,
.pixclock = KHZ2PICOS(4965),
- .left_margin = 1, .right_margin = 33,
+ .left_margin = 1, .right_margin = 34,
.upper_margin = 1, .lower_margin = 0,
.hsync_len = 5, .vsync_len = 1,
.xres = 240, .yres = 320,
.pixclock = KHZ2PICOS(4965),
- .left_margin = 1, .right_margin = 33,
+ .left_margin = 1, .right_margin = 34,
.upper_margin = 1, .lower_margin = 0,
.hsync_len = 5, .vsync_len = 1,
.xres = 480, .yres = 272,
.pixclock = KHZ2PICOS(9000),
- .left_margin = 1, .right_margin = 1,
+ .left_margin = 1, .right_margin = 2,
.upper_margin = 40, .lower_margin = 1,
.hsync_len = 45, .vsync_len = 1,
.xres = 240, .yres = 320,
.pixclock = KHZ2PICOS(4965),
- .left_margin = 1, .right_margin = 33,
+ .left_margin = 1, .right_margin = 34,
.upper_margin = 1, .lower_margin = 0,
.hsync_len = 5, .vsync_len = 1,
#define BOARD_HAVE_NAND_16BIT (1 << 31)
static inline int board_have_nand_16bit(void)
{
- return system_rev & BOARD_HAVE_NAND_16BIT;
+ return (system_rev & BOARD_HAVE_NAND_16BIT) ? 1 : 0;
}
#endif /* __ARCH_SYSTEM_REV_H__ */
.num_serializer = ARRAY_SIZE(da850_iis_serializer_direction),
.tdm_slots = 2,
.serial_dir = da850_iis_serializer_direction,
- .asp_chan_q = EVENTQ_1,
+ .asp_chan_q = EVENTQ_0,
.version = MCASP_VERSION_2,
.txnumevt = 1,
.rxnumevt = 1,
/* UBL (a few copies) plus U-Boot */
.name = "bootloader",
.offset = 0,
- .size = 28 * NAND_BLOCK_SIZE,
+ .size = 30 * NAND_BLOCK_SIZE,
.mask_flags = MTD_WRITEABLE, /* force read-only */
}, {
/* U-Boot environment */
int val;
u32 value;
- if (!vpif_vsclkdis_reg || !cpld_client)
+ if (!vpif_vidclkctl_reg || !cpld_client)
return -ENXIO;
val = i2c_smbus_read_byte(cpld_client);
return val;
spin_lock_irqsave(&vpif_reg_lock, flags);
- value = __raw_readl(vpif_vsclkdis_reg);
+ value = __raw_readl(vpif_vidclkctl_reg);
if (mux_mode) {
val &= VPIF_INPUT_TWO_CHANNEL;
value |= VIDCH1CLK;
val |= VPIF_INPUT_ONE_CHANNEL;
value &= ~VIDCH1CLK;
}
- __raw_writel(value, vpif_vsclkdis_reg);
+ __raw_writel(value, vpif_vidclkctl_reg);
spin_unlock_irqrestore(&vpif_reg_lock, flags);
err = i2c_smbus_write_byte(cpld_client, val);
.name = "dsp",
.parent = &pll1_sysclk1,
.lpsc = DM646X_LPSC_C64X_CPU,
- .flags = PSC_DSP,
.usecount = 1, /* REVISIT how to disable? */
};
#define PTCMD 0x120
#define PTSTAT 0x128
#define PDSTAT 0x200
-#define PDCTL1 0x304
+#define PDCTL 0x300
#define MDSTAT 0x800
#define MDCTL 0xA00
#define PSC_STATE_ENABLE 3
#define MDSTAT_STATE_MASK 0x3f
+#define PDSTAT_STATE_MASK 0x1f
#define MDCTL_FORCE BIT(31)
+#define PDCTL_NEXT BIT(1)
+#define PDCTL_EPCGOOD BIT(8)
#ifndef __ASSEMBLER__
void davinci_psc_config(unsigned int domain, unsigned int ctlr,
unsigned int id, bool enable, u32 flags)
{
- u32 epcpr, ptcmd, ptstat, pdstat, pdctl1, mdstat, mdctl;
+ u32 epcpr, ptcmd, ptstat, pdstat, pdctl, mdstat, mdctl;
void __iomem *psc_base;
struct davinci_soc_info *soc_info = &davinci_soc_info;
u32 next_state = PSC_STATE_ENABLE;
mdctl |= MDCTL_FORCE;
__raw_writel(mdctl, psc_base + MDCTL + 4 * id);
- pdstat = __raw_readl(psc_base + PDSTAT);
- if ((pdstat & 0x00000001) == 0) {
- pdctl1 = __raw_readl(psc_base + PDCTL1);
- pdctl1 |= 0x1;
- __raw_writel(pdctl1, psc_base + PDCTL1);
+ pdstat = __raw_readl(psc_base + PDSTAT + 4 * domain);
+ if ((pdstat & PDSTAT_STATE_MASK) == 0) {
+ pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+ pdctl |= PDCTL_NEXT;
+ __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
ptcmd = 1 << domain;
__raw_writel(ptcmd, psc_base + PTCMD);
epcpr = __raw_readl(psc_base + EPCPR);
} while ((((epcpr >> domain) & 1) == 0));
- pdctl1 = __raw_readl(psc_base + PDCTL1);
- pdctl1 |= 0x100;
- __raw_writel(pdctl1, psc_base + PDCTL1);
+ pdctl = __raw_readl(psc_base + PDCTL + 4 * domain);
+ pdctl |= PDCTL_EPCGOOD;
+ __raw_writel(pdctl, psc_base + PDCTL + 4 * domain);
} else {
ptcmd = 1 << domain;
__raw_writel(ptcmd, psc_base + PTCMD);
char name[10];
};
-static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
-
static void exynos4_mct_write(unsigned int value, void *addr)
{
void __iomem *stat_addr;
}
#ifdef CONFIG_LOCAL_TIMERS
+
+static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
+
/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
void local_timer_stop(struct clock_event_device *evt)
{
+ unsigned int cpu = smp_processor_id();
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
if (mct_int_type == MCT_INT_SPI)
- disable_irq(evt->irq);
+ if (cpu == 0)
+ remove_irq(evt->irq, &mct_tick0_event_irq);
+ else
+ remove_irq(evt->irq, &mct_tick1_event_irq);
else
disable_percpu_irq(IRQ_MCT_LOCALTIMER);
}
clk_rate = clk_get_rate(mct_clk);
+#ifdef CONFIG_LOCAL_TIMERS
if (mct_int_type == MCT_INT_PPI) {
int err;
WARN(err, "MCT: can't request IRQ %d (%d)\n",
IRQ_MCT_LOCALTIMER, err);
}
+#endif /* CONFIG_LOCAL_TIMERS */
}
static void __init exynos4_timer_init(void)
imx6q_clock_map_io();
}
-static void __init imx6q_gpio_add_irq_domain(struct device_node *np,
+static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 7; /* imx6q gets 7 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx6q_irq_match[] __initconst = {
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/bootmem.h>
+#include <linux/module.h>
#include <mach/irqs.h>
#include <mach/iommu.h>
{
iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
- PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
+ PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
imx51_soc_init();
gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_evk_fec_pdata = {
+static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
gpio_set_value(LOCO_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_loco_fec_data = {
+static const struct fec_platform_data mx53_loco_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
gpio_set_value(SMD_FEC_PHY_RST, 1);
}
-static struct fec_platform_data mx53_smd_fec_data = {
+static const struct fec_platform_data mx53_smd_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
{ /* sentinel */ }
};
-static void __init imx51_tzic_add_irq_domain(struct device_node *np,
+static int __init imx51_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
irq_domain_add_simple(np, 0);
+ return 0;
}
-static void __init imx51_gpio_add_irq_domain(struct device_node *np,
+static int __init imx51_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 4; /* imx51 gets 4 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx51_irq_match[] __initconst = {
{ /* sentinel */ }
};
-static void __init imx53_tzic_add_irq_domain(struct device_node *np,
+static int __init imx53_tzic_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
irq_domain_add_simple(np, 0);
+ return 0;
}
-static void __init imx53_gpio_add_irq_domain(struct device_node *np,
+static int __init imx53_gpio_add_irq_domain(struct device_node *np,
struct device_node *interrupt_parent)
{
- static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS -
- 32 * 7; /* imx53 gets 7 gpio ports */
+ static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
+ gpio_irq_base -= 32;
irq_domain_add_simple(np, gpio_irq_base);
- gpio_irq_base += 32;
+
+ return 0;
}
static const struct of_device_id imx53_irq_match[] __initconst = {
#define MX28_INT_CAN1 9
#define MX28_INT_LRADC_TOUCH 10
#define MX28_INT_HSADC 13
-#define MX28_INT_IRADC_THRESH0 14
-#define MX28_INT_IRADC_THRESH1 15
+#define MX28_INT_LRADC_THRESH0 14
+#define MX28_INT_LRADC_THRESH1 15
#define MX28_INT_LRADC_CH0 16
#define MX28_INT_LRADC_CH1 17
#define MX28_INT_LRADC_CH2 18
*/
#define cpu_is_mx23() ( \
machine_is_mx23evk() || \
+ machine_is_stmp378x() || \
0)
#define cpu_is_mx28() ( \
machine_is_mx28evk() || \
MACHINE_START(M28EVK, "DENX M28 EVK")
.map_io = mx28_map_io,
.init_irq = mx28_init_irq,
- .init_machine = m28evk_init,
.timer = &m28evk_timer,
+ .init_machine = m28evk_init,
MACHINE_END
MACHINE_START(STMP378X, "STMP378X")
.map_io = mx23_map_io,
.init_irq = mx23_init_irq,
- .init_machine = stmp378x_dvb_init,
.timer = &stmp378x_dvb_timer,
+ .init_machine = stmp378x_dvb_init,
MACHINE_END
MX28_PAD_ENET0_CRS__ENET1_RX_EN,
};
-static struct fec_platform_data tx28_fec0_data = {
+static const struct fec_platform_data tx28_fec0_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
-static struct fec_platform_data tx28_fec1_data = {
+static const struct fec_platform_data tx28_fec1_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
#include <linux/io.h>
#include <asm/mach-types.h> /* for machine_is_* */
void __init omap1_clk_late_init(void)
{
- if (ck_dpll1.rate >= OMAP1_DPLL1_SANE_VALUE)
+ unsigned long rate = ck_dpll1.rate;
+
+ if (rate >= OMAP1_DPLL1_SANE_VALUE)
return;
+ /* System booting at unusable rate, force reprogramming of DPLL1 */
+ ck_dpll1_p->rate = 0;
+
/* Find the highest supported frequency and enable it */
if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
pr_err("System frequencies not set, using default. Check your config.\n");
omap_writew(0x2290, DPLL_CTL);
- omap_writew(cpu_is_omap7xx() ? 0x3005 : 0x1005, ARM_CKCTL);
+ omap_writew(cpu_is_omap7xx() ? 0x2005 : 0x0005, ARM_CKCTL);
ck_dpll1.rate = OMAP1_DPLL1_SANE_VALUE;
}
propagate_rate(&ck_dpll1);
omap1_show_rates();
+ loops_per_jiffy = cpufreq_scale(loops_per_jiffy, rate, ck_dpll1.rate);
}
static void __init rx51_charger_init(void)
{
WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
- GPIOF_OUT_INIT_LOW, "isp1704_reset"));
+ GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
platform_device_register(&rx51_charger_device);
}
pdata->reg_size = 4;
pdata->has_ccr = true;
}
+ pdata->set_clk_src = omap2_mcbsp_set_clk_src;
+ if (id == 1)
+ pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
if (id == 2)
name, oh->name);
return PTR_ERR(pdev);
}
- pdata->set_clk_src = omap2_mcbsp_set_clk_src;
- if (id == 1)
- pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
omap_mcbsp_count++;
return 0;
}
#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/slab.h>
+#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <asm/sizes.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <linux/of.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
s3c64xx_init_irq(~0 & ~(0xf << 5), ~0);
}
-struct sysdev_class s3c6400_sysclass = {
+static struct sysdev_class s3c6400_sysclass = {
.name = "s3c6400-core",
};
#include <plat/fb.h>
#include <plat/gpio-cfg.h>
-extern void s3c64xx_fb_gpio_setup_24bpp(void)
+void s3c64xx_fb_gpio_setup_24bpp(void)
{
s3c_gpio_cfgrange_nopull(S3C64XX_GPI(0), 16, S3C_GPIO_SFN(2));
s3c_gpio_cfgrange_nopull(S3C64XX_GPJ(0), 12, S3C_GPIO_SFN(2));
static struct platform_pwm_backlight_data smdkv210_bl_data = {
.pwm_id = 3,
+ .pwm_period_ns = 1000,
};
static void __init smdkv210_map_io(void)
-ifeq ($(CONFIG_ARCH_SA1100),y)
- zreladdr-$(CONFIG_SA1111) += 0xc0208000
+ifeq ($(CONFIG_SA1111),y)
+ zreladdr-y += 0xc0208000
else
zreladdr-y += 0xc0008000
endif
.flags = LCDC_FLAGS_DWPOL,
.lcd_size_cfg.width = 44,
.lcd_size_cfg.height = 79,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.lcd_cfg = lcdc0_modes,
.num_cfg = ARRAY_SIZE(lcdc0_modes),
.board_cfg = {
},
};
+#define DSI0PHYCR 0xe615006c
+static int sh_mipi_set_dot_clock(struct platform_device *pdev,
+ void __iomem *base,
+ int enable)
+{
+ struct clk *pck;
+ int ret;
+
+ pck = clk_get(&pdev->dev, "dsip_clk");
+ if (IS_ERR(pck)) {
+ ret = PTR_ERR(pck);
+ goto sh_mipi_set_dot_clock_pck_err;
+ }
+
+ if (enable) {
+ clk_set_rate(pck, clk_round_rate(pck, 24000000));
+ __raw_writel(0x2a809010, DSI0PHYCR);
+ clk_enable(pck);
+ } else {
+ clk_disable(pck);
+ }
+
+ ret = 0;
+
+ clk_put(pck);
+
+sh_mipi_set_dot_clock_pck_err:
+ return ret;
+}
+
static struct sh_mipi_dsi_info mipidsi0_info = {
.data_format = MIPI_RGB888,
.lcd_chan = &lcdc0_info.ch[0],
+ .lane = 2,
.vsynw_offset = 20,
.clksrc = 1,
- .flags = SH_MIPI_DSI_HSABM,
+ .flags = SH_MIPI_DSI_HSABM |
+ SH_MIPI_DSI_SYNC_PULSES_MODE |
+ SH_MIPI_DSI_HSbyteCLK,
+ .set_dot_clock = sh_mipi_set_dot_clock,
};
static struct platform_device mipidsi0_device = {
shmobile_setup_console();
}
-#define DSI0PHYCR 0xe615006c
-
static void __init ag5evm_init(void)
{
sh73a0_pinmux_init();
gpio_direction_output(GPIO_PORT235, 0);
lcd_backlight_reset();
- /* MIPI-DSI clock setup */
- __raw_writel(0x2a809010, DSI0PHYCR);
-
/* enable SDHI0 on CN15 [SD I/F] */
gpio_request(GPIO_FN_SDHICD0, NULL);
gpio_request(GPIO_FN_SDHIWP0, NULL);
.meram_dev = &meram_info,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.lcd_cfg = ap4evb_lcdc_modes,
.num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes),
.meram_cfg = &lcd_meram_cfg,
};
/* MIPI-DSI */
+#define PHYCTRL 0x0070
+static int sh_mipi_set_dot_clock(struct platform_device *pdev,
+ void __iomem *base,
+ int enable)
+{
+ struct clk *pck = clk_get(&pdev->dev, "dsip_clk");
+ void __iomem *phy = base + PHYCTRL;
+
+ if (IS_ERR(pck))
+ return PTR_ERR(pck);
+
+ if (enable) {
+ clk_set_rate(pck, clk_round_rate(pck, 24000000));
+ iowrite32(ioread32(phy) | (0xb << 8), phy);
+ clk_enable(pck);
+ } else {
+ clk_disable(pck);
+ }
+
+ clk_put(pck);
+
+ return 0;
+}
+
static struct resource mipidsi0_resources[] = {
[0] = {
.start = 0xffc60000,
static struct sh_mipi_dsi_info mipidsi0_info = {
.data_format = MIPI_RGB888,
.lcd_chan = &lcdc_info.ch[0],
+ .lane = 2,
.vsynw_offset = 17,
+ .flags = SH_MIPI_DSI_SYNC_PULSES_MODE |
+ SH_MIPI_DSI_HSbyteCLK,
+ .set_dot_clock = sh_mipi_set_dot_clock,
};
static struct platform_device mipidsi0_device = {
.meram_dev = &meram_info,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.interface_type = RGB24,
.clock_divider = 1,
.flags = LCDC_FLAGS_DWPOL,
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.lcd_cfg = mackerel_lcdc_modes,
.num_cfg = ARRAY_SIZE(mackerel_lcdc_modes),
.interface_type = RGB24,
.clock_source = LCDC_CLK_EXTERNAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.interface_type = RGB24,
.clock_divider = 1,
.flags = LCDC_FLAGS_DWPOL,
CLKDEV_CON_ID("hdmi_clk", &div6_reparent_clks[DIV6_HDMI]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
- CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
- CLKDEV_ICK_ID("dsi1p_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
+ CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
+ CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
/* MSTP32 clocks */
CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* IIC2 */
CLKDEV_CON_ID("sdhi2_clk", &div6_clks[DIV6_SDHI2]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]),
CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]),
- CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
- CLKDEV_ICK_ID("dsi1p_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
+ CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]),
+ CLKDEV_ICK_ID("dsip_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSI1P]),
/* MSTP32 clocks */
CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */
* the CPU clock speed on the fly.
*/
+#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
+#define MX3_PWMCR_DOZEEN (1 << 24)
+#define MX3_PWMCR_WAITEN (1 << 23)
+#define MX3_PWMCR_DBGEN (1 << 22)
#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
#define MX3_PWMCR_EN (1 << 0)
writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
- cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
+ cr = MX3_PWMCR_PRESCALER(prescale) |
+ MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
+ MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
if (cpu_is_mx25())
cr |= MX3_PWMCR_CLKSRC_IPG;
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/pwm_backlight.h>
-#include <linux/slab.h>
#include <plat/devs.h>
#include <plat/gpio-cfg.h>
#define __NR_clock_adjtime 342
#define __NR_syncfs 343
#define __NR_setns 344
+#define __NR_process_vm_readv 345
+#define __NR_process_vm_writev 346
#ifdef __KERNEL__
-#define NR_syscalls 345
+#define NR_syscalls 347
#define __ARCH_WANT_IPC_PARSE_VERSION
#define __ARCH_WANT_OLD_READDIR
.long sys_clock_adjtime
.long sys_syncfs
.long sys_setns
+ .long sys_process_vm_readv /* 345 */
+ .long sys_process_vm_writev
if (!atomic_inc_not_zero(&active_events)) {
if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
atomic_dec(&active_events);
- return -ENOSPC;
+ return -EINVAL;
}
mutex_lock(&pmu_reserve_mutex);
memset(&fake_cpuc, 0, sizeof(fake_cpuc));
if (!validate_event(&fake_cpuc, leader))
- return -ENOSPC;
+ return -EINVAL;
list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
if (!validate_event(&fake_cpuc, sibling))
- return -ENOSPC;
+ return -EINVAL;
}
if (!validate_event(&fake_cpuc, event))
- return -ENOSPC;
+ return -EINVAL;
return 0;
}
skey = page_get_storage_key(address);
bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
/* Clear page changed & referenced bit in the storage key */
- if (bits) {
- skey ^= bits;
- page_set_storage_key(address, skey, 1);
- }
+ if (bits & _PAGE_CHANGED)
+ page_set_storage_key(address, skey ^ bits, 1);
+ else if (bits)
+ page_reset_referenced(address);
/* Transfer page changed & referenced bit to guest bits in pgste */
pgste_val(pgste) |= bits << 48; /* RCP_GR_BIT & RCP_GC_BIT */
/* Get host changed & referenced bits from pgste */
((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))))
/* Invalid psw mask. */
return -EINVAL;
- if (addr == (addr_t) &dummy->regs.psw.addr)
- /*
- * The debugger changed the instruction address,
- * reset system call restart, see signal.c:do_signal
- */
- task_thread_info(child)->system_call = 0;
-
*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
/* Transfer 31 bit amode bit to psw mask. */
regs->psw.mask = (regs->psw.mask & ~PSW_MASK_BA) |
(__u64)(tmp & PSW32_ADDR_AMODE);
- /*
- * The debugger changed the instruction address,
- * reset system call restart, see signal.c:do_signal
- */
- task_thread_info(child)->system_call = 0;
} else {
/* gpr 0-15 */
*(__u32*)((addr_t) ®s->psw + addr*2 + 4) = tmp;
return 0;
}
+static int s390_last_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
#endif
static int s390_system_call_get(struct task_struct *target,
.size = sizeof(long),
.align = sizeof(long),
.get = s390_last_break_get,
+ .set = s390_last_break_set,
},
#endif
[REGSET_SYSTEM_CALL] = {
return 0;
}
+static int s390_compat_last_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
static const struct user_regset s390_compat_regsets[] = {
[REGSET_GENERAL] = {
.core_note_type = NT_PRSTATUS,
.size = sizeof(long),
.align = sizeof(long),
.get = s390_compat_last_break_get,
+ .set = s390_compat_last_break_set,
},
[REGSET_SYSTEM_CALL] = {
.core_note_type = NT_S390_SYSTEM_CALL,
*msg = "first memory chunk must be at least crashkernel size";
return 0;
}
- if (is_kdump_kernel() && (crash_size == OLDMEM_SIZE))
+ if (OLDMEM_BASE && crash_size == OLDMEM_SIZE)
return OLDMEM_BASE;
for (i = MEMORY_CHUNKS - 1; i >= 0; i--) {
regs->svc_code >> 16);
break;
}
- /* No longer in a system call */
- clear_thread_flag(TIF_SYSCALL);
}
+ /* No longer in a system call */
+ clear_thread_flag(TIF_SYSCALL);
if ((is_compat_task() ?
handle_signal32(signr, &ka, &info, oldset, regs) :
}
/* No handlers present - check for system call restart */
+ clear_thread_flag(TIF_SYSCALL);
if (current_thread_info()->system_call) {
regs->svc_code = current_thread_info()->system_call;
switch (regs->gprs[2]) {
regs->gprs[2] = regs->orig_gpr2;
set_thread_flag(TIF_SYSCALL);
break;
- default:
- clear_thread_flag(TIF_SYSCALL);
- break;
}
}
.clock_source = LCDC_CLK_EXTERNAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.interface_type = RGB18,
.clock_divider = 1,
.lcd_cfg = ap325rxa_lcdc_modes,
.ch[0] = {
.interface_type = RGB18,
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
.height = 91,
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.interface_type = SYS18,
.clock_divider = 6,
.flags = LCDC_FLAGS_DWPOL,
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.interface_type = RGB16,
.clock_divider = 2,
.lcd_cfg = migor_lcd_modes,
.clock_source = LCDC_CLK_PERIPHERAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.interface_type = SYS16A,
.clock_divider = 10,
.lcd_cfg = migor_lcd_modes,
.clock_source = LCDC_CLK_EXTERNAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
- .bpp = 16,
+ .fourcc = V4L2_PIX_FMT_RGB565,
.clock_divider = 1,
.lcd_size_cfg = { /* 7.0 inch */
.width = 152,
dp->rcv_buf_len = 4096;
- dp->ds_states = kzalloc(sizeof(ds_states_template),
- GFP_KERNEL);
+ dp->ds_states = kmemdup(ds_states_template,
+ sizeof(ds_states_template), GFP_KERNEL);
if (!dp->ds_states)
goto out_free_rcv_buf;
- memcpy(dp->ds_states, ds_states_template,
- sizeof(ds_states_template));
dp->num_ds_states = ARRAY_SIZE(ds_states_template);
for (i = 0; i < dp->num_ds_states; i++)
void *new_val;
int err;
- new_val = kmalloc(len, GFP_KERNEL);
+ new_val = kmemdup(val, len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
- memcpy(new_val, val, len);
-
err = -ENODEV;
mutex_lock(&of_set_property_mutex);
case 'i': /* INT */
if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
- else if ((insn & 0x80002000) == 0x80002000 &&
- (insn & 0x01800000) != 0x01800000) /* %LO */
+ else if ((insn & 0x80002000) == 0x80002000) /* %LO */
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
else {
prom_printf(insn_i, p, addr, insn);
*/
void tile_irq_activate(unsigned int irq, int tile_irq_type);
-/*
- * For onboard, non-PCI (e.g. TILE_IRQ_PERCPU) devices, drivers know
- * how to use enable/disable_percpu_irq() to manage interrupts on each
- * core. We can't use the generic enable/disable_irq() because they
- * use a single reference count per irq, rather than per cpu per irq.
- */
-void enable_percpu_irq(unsigned int irq);
-void disable_percpu_irq(unsigned int irq);
-
-
void setup_irq_regs(void);
#endif /* _ASM_TILE_IRQ_H */
* Remove an irq from the disabled mask. If we're in an interrupt
* context, defer enabling the HW interrupt until we leave.
*/
-void enable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_enable(struct irq_data *d)
{
- get_cpu_var(irq_disable_mask) &= ~(1UL << irq);
+ get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
if (__get_cpu_var(irq_depth) == 0)
- unmask_irqs(1UL << irq);
+ unmask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
-EXPORT_SYMBOL(enable_percpu_irq);
/*
* Add an irq to the disabled mask. We disable the HW interrupt
* in an interrupt context, the return path is careful to avoid
* unmasking a newly disabled interrupt.
*/
-void disable_percpu_irq(unsigned int irq)
+static void tile_irq_chip_disable(struct irq_data *d)
{
- get_cpu_var(irq_disable_mask) |= (1UL << irq);
- mask_irqs(1UL << irq);
+ get_cpu_var(irq_disable_mask) |= (1UL << d->irq);
+ mask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
-EXPORT_SYMBOL(disable_percpu_irq);
/* Mask an interrupt. */
static void tile_irq_chip_mask(struct irq_data *d)
static struct irq_chip tile_irq_chip = {
.name = "tile_irq_chip",
+ .irq_enable = tile_irq_chip_enable,
+ .irq_disable = tile_irq_chip_disable,
.irq_ack = tile_irq_chip_ack,
.irq_eoi = tile_irq_chip_eoi,
.irq_mask = tile_irq_chip_mask,
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
+#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/homecache.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/uaccess.h>
+#include <linux/export.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/smp.h>
+#include <linux/stat.h>
#include <hv/hypervisor.h>
/* Return a string queried from the hypervisor, truncated to page size. */
EXPORT_SYMBOL(current_text_addr);
EXPORT_SYMBOL(dump_stack);
+/* arch/tile/kernel/head.S */
+EXPORT_SYMBOL(empty_zero_page);
+
/* arch/tile/lib/, various memcpy files */
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(__copy_to_user_inatomic);
VM_BUG_ON(!virt_addr_valid((void *)addr));
page = virt_to_page((void *)addr);
if (put_page_testzero(page)) {
- int pages = (1 << order);
homecache_change_page_home(page, order, initial_page_home());
- while (pages--)
- __free_page(page++);
+ if (order == 0) {
+ free_hot_cold_page(page, 0);
+ } else {
+ init_page_count(page);
+ __free_pages(page, order);
+ }
}
}
This option compiles in support for the CE4100 SOC for settop
boxes and media devices.
-config X86_INTEL_MID
+config X86_WANT_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
systems which do not have the PCI legacy interfaces (Moorestown,
Medfield). If you are building for a PC class system say N here.
-if X86_INTEL_MID
+if X86_WANT_INTEL_MID
+
+config X86_INTEL_MID
+ bool
config X86_MRST
bool "Moorestown MID platform"
select SPI
select INTEL_SCU_IPC
select X86_PLATFORM_DEVICES
+ select X86_INTEL_MID
---help---
Moorestown is Intel's Low Power Intel Architecture (LPIA) based Moblin
Internet Device(MID) platform. Moorestown consists of two chips:
#include <linux/notifier.h>
-#define IPCMSG_VRTC 0xFA /* Set vRTC device */
-
-/* Command id associated with message IPCMSG_VRTC */
-#define IPC_CMD_VRTC_SETTIME 1 /* Set time */
-#define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
+#define IPCMSG_WARM_RESET 0xF0
+#define IPCMSG_COLD_RESET 0xF1
+#define IPCMSG_SOFT_RESET 0xF2
+#define IPCMSG_COLD_BOOT 0xF3
+
+#define IPCMSG_VRTC 0xFA /* Set vRTC device */
+ /* Command id associated with message IPCMSG_VRTC */
+ #define IPC_CMD_VRTC_SETTIME 1 /* Set time */
+ #define IPC_CMD_VRTC_SETALARM 2 /* Set alarm */
/* Read single register */
int intel_scu_ipc_ioread8(u16 addr, u8 *data);
};
extern enum mrst_cpu_type __mrst_cpu_chip;
+
+#ifdef CONFIG_X86_INTEL_MID
+
static inline enum mrst_cpu_type mrst_identify_cpu(void)
{
return __mrst_cpu_chip;
}
+#else /* !CONFIG_X86_INTEL_MID */
+
+#define mrst_identify_cpu() (0)
+
+#endif /* !CONFIG_X86_INTEL_MID */
+
enum mrst_timer_options {
MRST_TIMER_DEFAULT,
MRST_TIMER_APBT_ONLY,
return native_write_msr_safe(msr, low, high);
}
-/* rdmsr with exception handling */
+/*
+ * rdmsr with exception handling.
+ *
+ * Please note that the exception handling works only after we've
+ * switched to the "smart" #GP handler in trap_init() which knows about
+ * exception tables - using this macro earlier than that causes machine
+ * hangs on boxes which do not implement the @msr in the first argument.
+ */
#define rdmsr_safe(msr, p1, p2) \
({ \
int __err; \
extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
void default_idle(void);
+bool set_pm_idle_to_default(void);
void stop_this_cpu(void *dummy);
* (mathieu.desnoyers@polymtl.ca)
*
* -johnstul@us.ibm.com "math is hard, lets go shopping!"
+ *
+ * In:
+ *
+ * ns = cycles * cyc2ns_scale / SC
+ *
+ * Although we may still have enough bits to store the value of ns,
+ * in some cases, we may not have enough bits to store cycles * cyc2ns_scale,
+ * leading to an incorrect result.
+ *
+ * To avoid this, we can decompose 'cycles' into quotient and remainder
+ * of division by SC. Then,
+ *
+ * ns = (quot * SC + rem) * cyc2ns_scale / SC
+ * = quot * cyc2ns_scale + (rem * cyc2ns_scale) / SC
+ *
+ * - sqazi@google.com
*/
DECLARE_PER_CPU(unsigned long, cyc2ns);
static inline unsigned long long __cycles_2_ns(unsigned long long cyc)
{
+ unsigned long long quot;
+ unsigned long long rem;
int cpu = smp_processor_id();
unsigned long long ns = per_cpu(cyc2ns_offset, cpu);
- ns += cyc * per_cpu(cyc2ns, cpu) >> CYC2NS_SCALE_FACTOR;
+ quot = (cyc >> CYC2NS_SCALE_FACTOR);
+ rem = cyc & ((1ULL << CYC2NS_SCALE_FACTOR) - 1);
+ ns += quot * per_cpu(cyc2ns, cpu) +
+ ((rem * per_cpu(cyc2ns, cpu)) >> CYC2NS_SCALE_FACTOR);
return ns;
}
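The quotient/remainder split can be sanity-checked with plain 64-bit arithmetic outside the kernel. Below is a stand-alone sketch in user-space C; the shift value of 10 and the scale value of 341 (roughly a 3 GHz TSC) are illustrative assumptions, not taken from a specific machine:

	#include <stdio.h>
	#include <stdint.h>

	#define SCALE_SHIFT 10				/* SC = 2^10 */

	/* (cyc * scale) >> SCALE_SHIFT overflows 64 bits for large cycle
	 * counts; splitting cyc into quotient and remainder of the
	 * division by SC keeps every intermediate product in range. */
	static uint64_t cycles_to_ns(uint64_t cyc, uint64_t scale)
	{
		uint64_t quot = cyc >> SCALE_SHIFT;
		uint64_t rem  = cyc & ((1ULL << SCALE_SHIFT) - 1);

		return quot * scale + ((rem * scale) >> SCALE_SHIFT);
	}

	int main(void)
	{
		uint64_t cyc = 1ULL << 62;	/* a large cycle count */

		printf("naive: %llu\n",
		       (unsigned long long)((cyc * 341) >> SCALE_SHIFT));
		printf("split: %llu\n",
		       (unsigned long long)cycles_to_ns(cyc, 341));
		return 0;
	}

The naive form wraps around and prints a bogus value, while the split form still corresponds to ns = cycles * scale / 2^10.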
#define UV1_HUB_PART_NUMBER 0x88a5
#define UV2_HUB_PART_NUMBER 0x8eb8
+#define UV2_HUB_PART_NUMBER_X 0x1111
/* Compat: if this #define is present, UV headers support UV2 */
#define UV2_HUB_IS_SUPPORTED 1
if (node_id.s.part_number == UV2_HUB_PART_NUMBER)
uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
+ if (node_id.s.part_number == UV2_HUB_PART_NUMBER_X)
+ uv_min_hub_revision_id += UV2_HUB_REVISION_BASE - 1;
uv_hub_info->hub_revision = uv_min_hub_revision_id;
pnode = (node_id.s.node_id >> 1) & ((1 << m_n_config.s.n_skt) - 1);
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
- u32 dummy;
-
early_init_amd_mc(c);
/*
set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
}
#endif
-
- rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}
static void __cpuinit init_amd(struct cpuinfo_x86 *c)
{
+ u32 dummy;
+
#ifdef CONFIG_SMP
unsigned long long value;
checking_wrmsrl(MSR_AMD64_MCx_MASK(4), mask);
}
}
+
+ rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
}
#ifdef CONFIG_X86_32
if (tmp != mask_lo) {
printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
+ add_taint(TAINT_FIRMWARE_WORKAROUND);
mask_lo = tmp;
}
}
/* Disable MTRRs, and set the default type to uncached */
mtrr_wrmsr(MSR_MTRRdefType, deftype_lo & ~0xcff, deftype_hi);
+ wbinvd();
}
static void post_set(void) __releases(set_atomicity_lock)
return -EOPNOTSUPP;
}
- /*
- * Do not allow config1 (extended registers) to propagate,
- * there's no sane user-space generalization yet:
- */
if (attr->type == PERF_TYPE_RAW)
- return 0;
+ return x86_pmu_extra_regs(event->attr.config, event);
if (attr->type == PERF_TYPE_HW_CACHE)
return set_ext_hw_attr(hwc, event);
x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
}
}
- return num ? -ENOSPC : 0;
+ return num ? -EINVAL : 0;
}
/*
if (is_x86_event(leader)) {
if (n >= max_count)
- return -ENOSPC;
+ return -EINVAL;
cpuc->event_list[n] = leader;
n++;
}
continue;
if (n >= max_count)
- return -ENOSPC;
+ return -EINVAL;
cpuc->event_list[n] = event;
n++;
c = x86_pmu.get_event_constraints(fake_cpuc, event);
if (!c || !c->weight)
- ret = -ENOSPC;
+ ret = -EINVAL;
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(fake_cpuc, event);
{
struct perf_event *leader = event->group_leader;
struct cpu_hw_events *fake_cpuc;
- int ret = -ENOSPC, n;
+ int ret = -EINVAL, n;
fake_cpuc = allocate_fake_cpuc();
if (IS_ERR(fake_cpuc))
goto out;
}
- pr_err(FW_BUG "using offset %d for IBS interrupts\n", offset);
- pr_err(FW_BUG "workaround enabled for IBS LVT offset\n");
+ pr_info("IBS: LVT offset %d assigned\n", offset);
return 0;
out:
static __init int amd_ibs_init(void)
{
u32 caps;
- int ret;
+ int ret = -EINVAL;
caps = __get_ibs_caps();
if (!caps)
return -ENODEV; /* ibs not supported by the cpu */
- if (!ibs_eilvt_valid()) {
- ret = force_ibs_eilvt_setup();
- if (ret) {
- pr_err("Failed to setup IBS, %d\n", ret);
- return ret;
- }
- }
+ /*
+ * Force LVT offset assignment for family 10h: The offsets are
+ * not assigned by the BIOS for this family, so the OS is
+ * responsible for doing it. If the OS assignment fails, fall
+ * back to the BIOS settings and try to set things up from there.
+ */
+ if (boot_cpu_data.x86 == 0x10)
+ force_ibs_eilvt_setup();
+
+ if (!ibs_eilvt_valid())
+ goto out;
get_online_cpus();
ibs_caps = caps;
smp_call_function(setup_APIC_ibs, NULL, 1);
put_online_cpus();
- return perf_event_ibs_init();
+ ret = perf_event_ibs_init();
+out:
+ if (ret)
+ pr_err("Failed to setup IBS, %d\n", ret);
+ return ret;
}
/* Since we need the pci subsystem to init ibs we can't do this earlier: */
x86_pmu.pebs_constraints = NULL;
}
+static void intel_sandybridge_quirks(void)
+{
+ printk(KERN_WARNING "PEBS disabled due to CPU errata.\n");
+ x86_pmu.pebs = 0;
+ x86_pmu.pebs_constraints = NULL;
+}
+
__init int intel_pmu_init(void)
{
union cpuid10_edx edx;
break;
case 42: /* SandyBridge */
+ x86_pmu.quirks = intel_sandybridge_quirks;
case 45: /* SandyBridge, "Romely-EP" */
memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
unsigned long from = cpuc->lbr_entries[0].from;
unsigned long old_to, to = cpuc->lbr_entries[0].to;
unsigned long ip = regs->ip;
+ int is_64bit = 0;
/*
* We don't need to fixup if the PEBS assist is fault like
} else
kaddr = (void *)to;
- kernel_insn_init(&insn, kaddr);
+#ifdef CONFIG_X86_64
+ is_64bit = kernel_ip(to) || !test_thread_flag(TIF_IA32);
+#endif
+ insn_init(&insn, kaddr, is_64bit);
insn_get_length(&insn);
to += insn.length;
} while (to < ip);
}
done:
- return num ? -ENOSPC : 0;
+ return num ? -EINVAL : 0;
}
static __initconst const struct x86_pmu p4_pmu = {
}
EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
+static void hpet_disable_rtc_channel(void)
+{
+ unsigned long cfg;
+ cfg = hpet_readl(HPET_T1_CFG);
+ cfg &= ~HPET_TN_ENABLE;
+ hpet_writel(cfg, HPET_T1_CFG);
+}
+
/*
* The functions below are called from rtc driver.
* Return 0 if HPET is not being used.
return 0;
hpet_rtc_flags &= ~bit_mask;
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
+
return 1;
}
EXPORT_SYMBOL_GPL(hpet_mask_rtc_irq_bit);
static void hpet_rtc_timer_reinit(void)
{
- unsigned int cfg, delta;
+ unsigned int delta;
int lost_ints = -1;
- if (unlikely(!hpet_rtc_flags)) {
- cfg = hpet_readl(HPET_T1_CFG);
- cfg &= ~HPET_TN_ENABLE;
- hpet_writel(cfg, HPET_T1_CFG);
- return;
- }
+ if (unlikely(!hpet_rtc_flags))
+ hpet_disable_rtc_channel();
if (!(hpet_rtc_flags & RTC_PIE) || hpet_pie_limit)
delta = hpet_default_delta;
#ifdef CONFIG_DEBUG_STACKOVERFLOW
u64 curbase = (u64)task_stack_page(current);
+ if (user_mode_vm(regs))
+ return;
+
WARN_ONCE(regs->sp >= curbase &&
regs->sp <= curbase + THREAD_SIZE &&
regs->sp < curbase + sizeof(struct thread_info) +
return 0;
}
-static void microcode_dev_exit(void)
+static void __exit microcode_dev_exit(void)
{
misc_deregister(&microcode_dev);
}
microcode_pdev = platform_device_register_simple("microcode", -1,
NULL, 0);
- if (IS_ERR(microcode_pdev)) {
- microcode_dev_exit();
+ if (IS_ERR(microcode_pdev))
return PTR_ERR(microcode_pdev);
- }
get_online_cpus();
mutex_lock(&microcode_mutex);
mutex_unlock(&microcode_mutex);
put_online_cpus();
- if (error) {
- platform_device_unregister(microcode_pdev);
- return error;
- }
+ if (error)
+ goto out_pdev;
error = microcode_dev_init();
if (error)
- return error;
+ goto out_sysdev_driver;
register_syscore_ops(&mc_syscore_ops);
register_hotcpu_notifier(&mc_cpu_notifier);
" <tigran@aivazian.fsnet.co.uk>, Peter Oruba\n");
return 0;
+
+out_sysdev_driver:
+ get_online_cpus();
+ mutex_lock(&microcode_mutex);
+
+ sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
+
+ mutex_unlock(&microcode_mutex);
+ put_online_cpus();
+
+out_pdev:
+ platform_device_unregister(microcode_pdev);
+ return error;
+
}
module_init(microcode_init);
}
#endif
+ set_bit(m->busid, mp_bus_not_pci);
if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA) - 1) == 0) {
- set_bit(m->busid, mp_bus_not_pci);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
mp_bus_id_to_type[m->busid] = MP_BUS_ISA;
#endif
EXPORT_SYMBOL(default_idle);
#endif
+bool set_pm_idle_to_default(void)
+{
+ bool ret = !!pm_idle;
+
+ pm_idle = default_idle;
+
+ return ret;
+}
void stop_this_cpu(void *dummy)
{
local_irq_disable();
quirk_amd_nb_node);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_LINK,
quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F0,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F1,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F2,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4,
+ quirk_amd_nb_node);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
+ quirk_amd_nb_node);
+
#endif
*/
/*
- * Some machines require the "reboot=b" commandline option,
+ * Some machines require the "reboot=b" or "reboot=k" command line options;
* this quirk makes that automatic.
*/
static int __init set_bios_reboot(const struct dmi_system_id *d)
return 0;
}
+static int __init set_kbd_reboot(const struct dmi_system_id *d)
+{
+ if (reboot_type != BOOT_KBD) {
+ reboot_type = BOOT_KBD;
+ printk(KERN_INFO "%s series board detected. Selecting KBD-method for reboot.\n", d->ident);
+ }
+ return 0;
+}
+
static struct dmi_system_id __initdata reboot_dmi_table[] = {
{ /* Handle problems with rebooting on Dell E520's */
.callback = set_bios_reboot,
},
},
{ /* Handle reboot issue on Acer Aspire one */
- .callback = set_bios_reboot,
+ .callback = set_kbd_reboot,
.ident = "Acer Aspire One A110",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6420"),
},
},
+ { /* Handle problems with rebooting on the OptiPlex 990. */
+ .callback = set_pci_reboot,
+ .ident = "Dell OptiPlex 990",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 990"),
+ },
+ },
{ }
};
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/time.h>
+#include <asm/mrst.h>
#ifdef CONFIG_X86_32
/*
if (of_have_populated_dt())
return 0;
+ /* Intel MID platforms don't have ioport rtc */
+ if (mrst_identify_cpu())
+ return -ENODEV;
+
platform_device_register(&rtc_device);
dev_info(&rtc_device.dev,
"registered platform RTC device (no PNP device found)\n");
do {
VM_BUG_ON(compound_head(page) != head);
pages[*nr] = page;
+ if (PageTail(page))
+ get_huge_page_tail(page);
(*nr)++;
page++;
refs++;
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
BUG_ON(!pte_none(*(kmap_pte-idx)));
set_pte(kmap_pte-idx, mk_pte(page, prot));
+ arch_flush_lazy_mmu_mode();
return (void *)vaddr;
}
*/
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
+ arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_DEBUG_HIGHMEM
else {
extern void op_nmi_exit(void);
extern void x86_backtrace(struct pt_regs * const regs, unsigned int depth);
+static int nmi_timer;
int __init oprofile_arch_init(struct oprofile_operations *ops)
{
#ifdef CONFIG_X86_LOCAL_APIC
ret = op_nmi_init(ops);
#endif
+ nmi_timer = (ret != 0);
#ifdef CONFIG_X86_IO_APIC
- if (ret < 0)
+ if (nmi_timer)
ret = op_nmi_timer_init(ops);
#endif
ops->backtrace = x86_backtrace;
void oprofile_arch_exit(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
- op_nmi_exit();
+ if (!nmi_timer)
+ op_nmi_exit();
#endif
}
*/
static unsigned long efi_rt_eflags;
-static pgd_t efi_bak_pg_dir_pointer[2];
void efi_call_phys_prelog(void)
{
- unsigned long cr4;
- unsigned long temp;
struct desc_ptr gdt_descr;
local_irq_save(efi_rt_eflags);
- /*
- * If I don't have PAE, I should just duplicate two entries in page
- * directory. If I have PAE, I just need to duplicate one entry in
- * page directory.
- */
- cr4 = read_cr4_safe();
-
- if (cr4 & X86_CR4_PAE) {
- efi_bak_pg_dir_pointer[0].pgd =
- swapper_pg_dir[pgd_index(0)].pgd;
- swapper_pg_dir[0].pgd =
- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
- } else {
- efi_bak_pg_dir_pointer[0].pgd =
- swapper_pg_dir[pgd_index(0)].pgd;
- efi_bak_pg_dir_pointer[1].pgd =
- swapper_pg_dir[pgd_index(0x400000)].pgd;
- swapper_pg_dir[pgd_index(0)].pgd =
- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
- temp = PAGE_OFFSET + 0x400000;
- swapper_pg_dir[pgd_index(0x400000)].pgd =
- swapper_pg_dir[pgd_index(temp)].pgd;
- }
-
- /*
- * After the lock is released, the original page table is restored.
- */
+ load_cr3(initial_page_table);
__flush_tlb_all();
gdt_descr.address = __pa(get_cpu_gdt_table(0));
void efi_call_phys_epilog(void)
{
- unsigned long cr4;
struct desc_ptr gdt_descr;
gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
- cr4 = read_cr4_safe();
-
- if (cr4 & X86_CR4_PAE) {
- swapper_pg_dir[pgd_index(0)].pgd =
- efi_bak_pg_dir_pointer[0].pgd;
- } else {
- swapper_pg_dir[pgd_index(0)].pgd =
- efi_bak_pg_dir_pointer[0].pgd;
- swapper_pg_dir[pgd_index(0x400000)].pgd =
- efi_bak_pg_dir_pointer[1].pgd;
- }
-
- /*
- * After the lock is released, the original page table is restored.
- */
+ load_cr3(swapper_pg_dir);
__flush_tlb_all();
local_irq_restore(efi_rt_eflags);
EXPORT_SYMBOL_GPL(sfi_mrtc_array);
int sfi_mrtc_num;
+static void mrst_power_off(void)
+{
+ if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 1);
+}
+
+static void mrst_reboot(void)
+{
+ if (__mrst_cpu_chip == MRST_CPU_CHIP_LINCROFT)
+ intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0);
+ else
+ intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
+}
+
/* parse all the mtimer info to a static mtimer array */
static int __init sfi_parse_mtmr(struct sfi_table_header *table)
{
return 0;
}
-/* Reboot and power off are handled by the SCU on a MID device */
-static void mrst_power_off(void)
-{
- intel_scu_ipc_simple_command(0xf1, 1);
-}
-
-static void mrst_reboot(void)
-{
- intel_scu_ipc_simple_command(0xf1, 0);
-}
-
/*
* Moorestown does not have external NMI source nor port 0x61 to report
* NMI status. The possible NMI sources are from pmu as a result of NMI
return max7315;
}
+static void *tca6416_platform_data(void *info)
+{
+ static struct pca953x_platform_data tca6416;
+ struct i2c_board_info *i2c_info = info;
+ int gpio_base, intr;
+ char base_pin_name[SFI_NAME_LEN + 1];
+ char intr_pin_name[SFI_NAME_LEN + 1];
+
+ strcpy(i2c_info->type, "tca6416");
+ strcpy(base_pin_name, "tca6416_base");
+ strcpy(intr_pin_name, "tca6416_int");
+
+ gpio_base = get_gpio_by_name(base_pin_name);
+ intr = get_gpio_by_name(intr_pin_name);
+
+ if (gpio_base == -1)
+ return NULL;
+ tca6416.gpio_base = gpio_base;
+ if (intr != -1) {
+ i2c_info->irq = intr + MRST_IRQ_OFFSET;
+ tca6416.irq_base = gpio_base + MRST_IRQ_OFFSET;
+ } else {
+ i2c_info->irq = -1;
+ tca6416.irq_base = -1;
+ }
+ return &tca6416;
+}
+
+static void *mpu3050_platform_data(void *info)
+{
+ struct i2c_board_info *i2c_info = info;
+ int intr = get_gpio_by_name("mpu3050_int");
+
+ if (intr == -1)
+ return NULL;
+
+ i2c_info->irq = intr + MRST_IRQ_OFFSET;
+ return NULL;
+}
+
static void __init *emc1403_platform_data(void *info)
{
static short intr2nd_pdata;
static const struct devs_id __initconst device_ids[] = {
{"bma023", SFI_DEV_TYPE_I2C, 1, &no_platform_data},
{"pmic_gpio", SFI_DEV_TYPE_SPI, 1, &pmic_gpio_platform_data},
+ {"pmic_gpio", SFI_DEV_TYPE_IPC, 1, &pmic_gpio_platform_data},
{"spi_max3111", SFI_DEV_TYPE_SPI, 0, &max3111_platform_data},
{"i2c_max7315", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
{"i2c_max7315_2", SFI_DEV_TYPE_I2C, 1, &max7315_platform_data},
+ {"tca6416", SFI_DEV_TYPE_I2C, 1, &tca6416_platform_data},
{"emc1403", SFI_DEV_TYPE_I2C, 1, &emc1403_platform_data},
{"i2c_accel", SFI_DEV_TYPE_I2C, 0, &lis331dl_platform_data},
{"pmic_audio", SFI_DEV_TYPE_IPC, 1, &no_platform_data},
+ {"mpu3050", SFI_DEV_TYPE_I2C, 1, &mpu3050_platform_data},
/* MSIC subdevices */
{"msic_battery", SFI_DEV_TYPE_IPC, 1, &msic_battery_platform_data},
domid_t domid = DOMID_SELF;
int ret;
- ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
- if (ret > 0)
- max_pages = ret;
+ /*
+ * For the initial domain we use the maximum reservation as
+ * the maximum number of pages.
+ *
+ * For guest domains the current maximum reservation reflects
+ * the current maximum rather than the static maximum. In this
+ * case the e820 map provided to us will cover the static
+ * maximum region.
+ */
+ if (xen_initial_domain()) {
+ ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
+ if (ret > 0)
+ max_pages = ret;
+ }
+
return min(max_pages, MAX_DOMAIN_PAGES);
}
#endif
disable_cpuidle();
boot_option_idle_override = IDLE_HALT;
-
+ WARN_ON(set_pm_idle_to_default());
fiddle_vdso();
}
if (drain_all)
blk_throtl_drain(q);
- __blk_run_queue(q);
+ /*
+ * This function might be called on a queue which failed
+ * driver init after queue creation. Some drivers
+ * (e.g. fd) get unhappy in such cases. Kick queue iff
+ * dispatch queue has something on it.
+ */
+ if (!list_empty(&q->queue_head))
+ __blk_run_queue(q);
if (drain_all)
nr_rqs = q->rq.count[0] + q->rq.count[1];
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
q->backing_dev_info.name = "block";
+ q->node = node_id;
err = bdi_init(&q->backing_dev_info);
if (err) {
if (!uninit_q)
return NULL;
- q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
+ q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
blk_cleanup_queue(uninit_q);
struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock)
-{
- return blk_init_allocated_queue_node(q, rfn, lock, -1);
-}
-EXPORT_SYMBOL(blk_init_allocated_queue);
-
-struct request_queue *
-blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
- spinlock_t *lock, int node_id)
{
if (!q)
return NULL;
- q->node = node_id;
if (blk_init_free_list(q))
return NULL;
return NULL;
}
-EXPORT_SYMBOL(blk_init_allocated_queue_node);
+EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{
}
}
- if (ret)
+ if (ret && ret != -EEXIST)
printk(KERN_ERR "cfq: cic link failed!\n");
return ret;
{
struct io_context *ioc = NULL;
struct cfq_io_context *cic;
+ int ret;
might_sleep_if(gfp_mask & __GFP_WAIT);
if (!ioc)
return NULL;
+retry:
cic = cfq_cic_lookup(cfqd, ioc);
if (cic)
goto out;
if (cic == NULL)
goto err;
- if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
+ ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
+ if (ret == -EEXIST) {
+ /* someone has linked cic to ioc already */
+ cfq_cic_free(cic);
+ goto retry;
+ } else if (ret)
goto err_free;
out:
if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
kfree(cfqg);
+
+ spin_lock(&cic_index_lock);
+ ida_remove(&cic_index_ida, cfqd->cic_index);
+ spin_unlock(&cic_index_lock);
+
kfree(cfqd);
return NULL;
}
*/
list_del_init(&dev->kobj.entry);
spin_unlock(&devices_kset->list_lock);
- /* Disable all device's runtime power management */
- pm_runtime_disable(dev);
+
+ /* Don't allow any more runtime suspends */
+ pm_runtime_get_noresume(dev);
+ pm_runtime_barrier(dev);
if (dev->bus && dev->bus->shutdown) {
dev_dbg(dev, "shutdown\n");
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
+ c->Request.CDB[7] = (size >> 8) & 0xFF;
+ c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
- IRQF_DISABLED, h->devname, h))
+ 0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
}
if (!request_irq(h->intr[h->intr_mode], intxhandler,
- IRQF_DISABLED, h->devname, h))
+ IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);
/*
* We use punch hole to reclaim the free space used by the
- * image a.k.a. discard. However we do support discard if
+ * image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
- q->limits.discard_alignment = inode->i_sb->s_blocksize;
+ q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size);
static ssize_t rbd_snap_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
u32 snap_count = le32_to_cpu(ondisk->snap_count);
int ret = -ENOMEM;
+ if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
+ return -ENXIO;
+ }
+
init_rwsem(&header->snap_rwsem);
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
return ret;
}
-/*
- * Request sync osd rollback
- */
-static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
- u64 snapid,
- const char *obj)
-{
- struct ceph_osd_req_op *ops;
- int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
- if (ret < 0)
- return ret;
-
- ops[0].snap.snapid = snapid;
-
- ret = rbd_req_sync_op(dev, NULL,
- CEPH_NOSNAP,
- 0,
- CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
- ops,
- 1, obj, 0, 0, NULL, NULL, NULL);
-
- rbd_destroy_ops(ops);
-
- return ret;
-}
-
/*
* Request sync osd read
*/
goto out_dh;
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
- if (rc < 0)
+ if (rc < 0) {
+ if (rc == -ENXIO) {
+ pr_warning("unrecognized header format"
+ " for image %s", rbd_dev->obj);
+ }
goto out_dh;
+ }
if (snap_count != header->total_snaps) {
snap_count = header->total_snaps;
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
-static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
&dev_attr_create_snap.attr,
- &dev_attr_rollback_snap.attr,
NULL
};
return ret;
}
-static ssize_t rbd_snap_rollback(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t count)
-{
- struct rbd_device *rbd_dev = dev_to_rbd(dev);
- int ret;
- u64 snapid;
- u64 cur_ofs;
- char *seg_name = NULL;
- char *snap_name = kmalloc(count + 1, GFP_KERNEL);
- ret = -ENOMEM;
- if (!snap_name)
- return ret;
-
- /* parse snaps add command */
- snprintf(snap_name, count, "%s", buf);
- seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
- if (!seg_name)
- goto done;
-
- mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
-
- ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
- if (ret < 0)
- goto done_unlock;
-
- dout("snapid=%lld\n", snapid);
-
- cur_ofs = 0;
- while (cur_ofs < rbd_dev->header.image_size) {
- cur_ofs += rbd_get_segment(&rbd_dev->header,
- rbd_dev->obj,
- cur_ofs, (u64)-1,
- seg_name, NULL);
- dout("seg_name=%s\n", seg_name);
-
- ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
- if (ret < 0)
- pr_warning("could not roll back obj %s err=%d\n",
- seg_name, ret);
- }
-
- ret = __rbd_update_snaps(rbd_dev);
- if (ret < 0)
- goto done_unlock;
-
- ret = count;
-
-done_unlock:
- mutex_unlock(&ctl_mutex);
-done:
- kfree(seg_name);
- kfree(snap_name);
-
- return ret;
-}
-
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
__ATTR(remove, S_IWUSR, NULL, rbd_remove),
* handle GCR disks
*/
+#undef DEBUG
+
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
-static DEFINE_MUTEX(swim3_mutex);
-static struct request_queue *swim3_queue;
-static struct gendisk *disks[2];
-static struct request *fd_req;
-
#define MAX_FLOPPIES 2
+static DEFINE_MUTEX(swim3_mutex);
+static struct gendisk *disks[MAX_FLOPPIES];
+
enum swim_state {
idle,
locating,
struct floppy_state {
enum swim_state state;
- spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
+ int index;
+ struct request *cur_req;
};
+#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+
+#ifdef DEBUG
+#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
+#else
+#define swim3_dbg(fmt, arg...) do { } while(0)
+#endif
+
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
0, 0, 0, 0, 0, 0
};
-static void swim3_select(struct floppy_state *fs, int sel);
-static void swim3_action(struct floppy_state *fs, int action);
-static int swim3_readbit(struct floppy_state *fs, int bit);
-static void do_fd_request(struct request_queue * q);
-static void start_request(struct floppy_state *fs);
-static void set_timeout(struct floppy_state *fs, int nticks,
- void (*proc)(unsigned long));
-static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
-static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
-static bool swim3_end_request(int err, unsigned int nr_bytes)
+static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
{
- if (__blk_end_request(fd_req, err, nr_bytes))
- return true;
+ struct request *req = fs->cur_req;
+ int rc;
- fd_req = NULL;
- return false;
-}
+ swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
+ err, nr_bytes, req);
-static bool swim3_end_request_cur(int err)
-{
- return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
+ if (err)
+ nr_bytes = blk_rq_cur_bytes(req);
+ rc = __blk_end_request(req, err, nr_bytes);
+ if (rc)
+ return true;
+ fs->cur_req = NULL;
+ return false;
}
static void swim3_select(struct floppy_state *fs, int sel)
return (stat & DATA) == 0;
}
-static void do_fd_request(struct request_queue * q)
-{
- int i;
-
- for(i=0; i<floppy_count; i++) {
- struct floppy_state *fs = &floppy_states[i];
- if (fs->mdev->media_bay &&
- check_media_bay(fs->mdev->media_bay) != MB_FD)
- continue;
- start_request(fs);
- }
-}
-
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
+ swim3_dbg("start request, initial state=%d\n", fs->state);
+
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
- if (!fd_req) {
- fd_req = blk_fetch_request(swim3_queue);
- if (!fd_req)
+ swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
+ if (!fs->cur_req) {
+ fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
+ swim3_dbg(" fetched request %p\n", fs->cur_req);
+ if (!fs->cur_req)
break;
}
- req = fd_req;
-#if 0
- printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
- req->rq_disk->disk_name, req->cmd,
- (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
- printk(" errors=%d current_nr_sectors=%u\n",
- req->errors, blk_rq_cur_sectors(req));
+ req = fs->cur_req;
+
+ if (fs->mdev->media_bay &&
+ check_media_bay(fs->mdev->media_bay) != MB_FD) {
+ swim3_dbg("%s", " media bay absent, dropping req\n");
+ swim3_end_request(fs, -ENODEV, 0);
+ continue;
+ }
+
+#if 0 /* This is really too verbose */
+ swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
+ req->rq_disk->disk_name, req->cmd,
+ (long)blk_rq_pos(req), blk_rq_sectors(req),
+ req->buffer);
+ swim3_dbg(" errors=%d current_nr_sectors=%u\n",
+ req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
+ (long)blk_rq_pos(req), (long)fs->total_secs);
+ swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->ejected) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " disk ejected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
- swim3_end_request_cur(-EIO);
+ swim3_dbg("%s", " try to write, disk write protected\n");
+ swim3_end_request(fs, -EIO, 0);
continue;
}
}
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
- fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
}
}
+static void do_fd_request(struct request_queue * q)
+{
+ start_request(q->queuedata);
+}
+
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
- unsigned long flags;
-
- spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
- spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
+ struct request *req = fs->cur_req;
- if (blk_rq_cur_sectors(fd_req) <= 0) {
- printk(KERN_ERR "swim3: transfer 0 sectors?\n");
+ if (blk_rq_cur_sectors(req) <= 0) {
+ swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
- if (n > blk_rq_cur_sectors(fd_req))
- n = blk_rq_cur_sectors(fd_req);
+ if (n > blk_rq_cur_sectors(req))
+ n = blk_rq_cur_sectors(req);
}
+
+ swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
+ fs->req_sector, fs->secpertrack, fs->head, n);
+
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
- if (rq_data_dir(fd_req) == WRITE) {
+ if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
- init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
+ init_dma(cp, OUTPUT_MORE, req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
- init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
+ init_dma(cp, INPUT_LAST, req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
static void act(struct floppy_state *fs)
{
for (;;) {
+ swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
+ fs->state, fs->req_cyl, fs->cur_cyl);
+
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
+ swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
break;
}
if (fs->req_cyl == fs->cur_cyl) {
- printk("whoops, seeking 0\n");
+ swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
+ fs->req_cyl, fs->cur_cyl);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
return;
}
return;
default:
- printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
+ swim3_err("Unknown state %d\n", fs->state);
return;
}
}
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* scan timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* seek timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
- printk(KERN_ERR "swim3: seek timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
+ unsigned long flags;
+
+ swim3_dbg("* settle timeout, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
- return;
+ goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
- return;
+ goto unlock;
}
- printk(KERN_ERR "swim3: seek settle timeout\n");
- swim3_end_request_cur(-EIO);
+ swim3_err("%s", "Seek settle timeout\n");
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ unlock:
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
+ unsigned long flags;
int n;
+ swim3_dbg("* xfer timeout, state=%d\n", fs->state);
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
- printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
- (rq_data_dir(fd_req)==WRITE? "writ": "read"),
- (long)blk_rq_pos(fd_req));
- swim3_end_request_cur(-EIO);
+ swim3_err("Timeout %sing sector %ld\n",
+ (rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
+ (long)blk_rq_pos(fs->cur_req));
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
+ unsigned long flags;
+ struct request *req = fs->cur_req;
+
+ swim3_dbg("* interrupt, state=%d\n", fs->state);
+ spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
- printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
+ swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
- printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
+ swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
- swim3_end_request_cur(-EIO);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
- printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
- fs->expect_cyl, fs->cur_cyl);
+ swim3_err("Expected cyl %d, got %d\n",
+ fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
- if (rq_data_dir(fd_req) == WRITE)
+ if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
- blk_update_request(fd_req, 0, n << 9);
+ blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
- printk("swim3: error %sing block %ld (err=%x)\n",
- rq_data_dir(fd_req) == WRITE? "writ": "read",
- (long)blk_rq_pos(fd_req), err);
- swim3_end_request_cur(-EIO);
+ swim3_err("Error %sing block %ld (err=%x)\n",
+ rq_data_dir(req) == WRITE? "writ": "read",
+ (long)blk_rq_pos(req), err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* musta been an error */
- printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
- printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
- fs->state, rq_data_dir(fd_req), intr, err);
- swim3_end_request_cur(-EIO);
+ swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
+ swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
+ fs->state, rq_data_dir(req), intr, err);
+ swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
break;
}
- if (swim3_end_request(0, fs->scount << 9)) {
+ fs->retries = 0;
+ if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
start_request(fs);
break;
default:
- printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
+ swim3_err("Don't know what to do in state %d\n", fs->state);
}
+ spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
}
*/
+/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
- if (fs->state != idle) {
+ swim3_dbg("%s", "-> grab drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
+ if (fs->state != idle && fs->state != available) {
++fs->wanted;
while (fs->state != available) {
+ spin_unlock_irqrestore(&swim3_lock, flags);
if (interruptible && signal_pending(current)) {
--fs->wanted;
- spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
+ spin_lock_irqsave(&swim3_lock, flags);
}
--fs->wanted;
}
fs->state = state;
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
+
return 0;
}
{
unsigned long flags;
- spin_lock_irqsave(&fs->lock, flags);
+ swim3_dbg("%s", "-> release drive\n");
+
+ spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
- spin_unlock_irqrestore(&fs->lock, flags);
+ spin_unlock_irqrestore(&swim3_lock, flags);
}
static int fd_eject(struct floppy_state *fs)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
+
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
.revalidate_disk= floppy_revalidate,
};
+static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
+{
+ struct floppy_state *fs = macio_get_drvdata(mdev);
+ struct swim3 __iomem *sw = fs->swim3;
+
+ if (!fs)
+ return;
+ if (mb_state != MB_FD)
+ return;
+
+ /* Clear state */
+ out_8(&sw->intr_enable, 0);
+ in_8(&sw->intr);
+ in_8(&sw->error);
+}
+
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
+ /* Do this first for message macros */
+ memset(fs, 0, sizeof(*fs));
+ fs->mdev = mdev;
+ fs->index = index;
+
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
- printk(KERN_WARNING "ifd%d: no address for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
- if (macio_irq_count(mdev) < 2) {
- printk(KERN_WARNING "fd%d: no intrs for device %s\n",
- index, swim->full_name);
+ if (macio_irq_count(mdev) < 1) {
+ swim3_err("%s", "No interrupt in device-tree\n");
+ return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
- printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
- printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
- memset(fs, 0, sizeof(*fs));
- spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
- printk("fd%d: couldn't map registers for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
- printk("fd%d: couldn't map DMA for %s\n",
- index, swim->full_name);
+ swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
- fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
+ if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
+ swim3_mb_event(mdev, MB_FD);
+
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
- printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
- index, fs->swim3_intr, swim->full_name);
+ swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
return -EBUSY;
}
-/*
- if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
- printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
- fs->dma_intr);
- return -EBUSY;
- }
-*/
init_timer(&fs->timeout);
- printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
+ swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
- int i, rc;
struct gendisk *disk;
+ int index, rc;
+
+ index = floppy_count++;
+ if (index >= MAX_FLOPPIES)
+ return -ENXIO;
/* Add the drive */
- rc = swim3_add_device(mdev, floppy_count);
+ rc = swim3_add_device(mdev, index);
if (rc)
return rc;
+ /* Now register that disk. Same comment about failure handling */
+ disk = disks[index] = alloc_disk(1);
+ if (disk == NULL)
+ return -ENOMEM;
+ disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
+ if (disk->queue == NULL) {
+ put_disk(disk);
+ return -ENOMEM;
+ }
+ disk->queue->queuedata = &floppy_states[index];
- /* Now create the queue if not there yet */
- if (swim3_queue == NULL) {
+ if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
- swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
- if (swim3_queue == NULL) {
- unregister_blkdev(FLOPPY_MAJOR, "fd");
- return 0;
- }
}
- /* Now register that disk. Same comment about failure handling */
- i = floppy_count++;
- disk = disks[i] = alloc_disk(1);
- if (disk == NULL)
- return 0;
-
disk->major = FLOPPY_MAJOR;
- disk->first_minor = i;
+ disk->first_minor = index;
disk->fops = &floppy_fops;
- disk->private_data = &floppy_states[i];
- disk->queue = swim3_queue;
+ disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
- sprintf(disk->disk_name, "fd%d", i);
+ sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
.of_match_table = swim3_match,
},
.probe = swim3_attach,
+#ifdef CONFIG_PMAC_MEDIABAY
+ .mediabay_event = swim3_mb_event,
+#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
- Marvell Bluetooth devices, such as 8688/8787.
+ Marvell Bluetooth devices, such as 8688/8787/8797.
Say Y here to compile Marvell Bluetooth driver
into the kernel or say M to compile it as module.
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
- devices with SDIO interface. Currently SD8688/SD8787 chipsets are
- supported.
+ devices with SDIO interface. Currently SD8688/SD8787/SD8797
+ chipsets are supported.
Say Y here to compile support for Marvell BT-over-SDIO driver
into the kernel or say M to compile it as module.
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
-static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
+static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
- .reg = &btmrvl_reg_8787,
+ .reg = &btmrvl_reg_87xx,
+ .sd_blksz_fw_dl = 256,
+};
+
+static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
+ .helper = NULL,
+ .firmware = "mrvl/sd8797_uapsta.bin",
+ .reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
+ /* Marvell SD8797 Bluetooth device */
+ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
+ .driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
{ } /* Terminating entry */
};
MODULE_FIRMWARE("sd8688_helper.bin");
MODULE_FIRMWARE("sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
+MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");
usb_mark_last_busy(data->udev);
}
- usb_free_urb(urb);
-
done:
+ usb_free_urb(urb);
return err;
}
ibft_cleanup();
}
+#ifdef CONFIG_ACPI
+static const struct {
+ char *sign;
+} ibft_signs[] = {
+ /*
+ * One spec says "IBFT", the other says "iBFT". We have to check
+ * for both.
+ */
+ { ACPI_SIG_IBFT },
+ { "iBFT" },
+};
+
+static void __init acpi_find_ibft_region(void)
+{
+ int i;
+ struct acpi_table_header *table = NULL;
+
+ if (acpi_disabled)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
+ acpi_get_table(ibft_signs[i].sign, 0, &table);
+ ibft_addr = (struct acpi_table_ibft *)table;
+ }
+}
+#else
+static void __init acpi_find_ibft_region(void)
+{
+}
+#endif
+
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
{
int rc = 0;
+ /*
+ On UEFI systems, setup_arch()/find_ibft_region() is called
+ before the ACPI tables are parsed and only does the legacy
+ scan, so look for the iBFT via ACPI here if it has not been
+ found yet.
+ */
+ if (!ibft_addr)
+ acpi_find_ibft_region();
+
if (ibft_addr) {
- printk(KERN_INFO "iBFT detected at 0x%llx.\n",
- (u64)isa_virt_to_bus(ibft_addr));
+ pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)
static const struct {
char *sign;
} ibft_signs[] = {
-#ifdef CONFIG_ACPI
- /*
- * One spec says "IBFT", the other says "iBFT". We have to check
- * for both.
- */
- { ACPI_SIG_IBFT },
-#endif
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
-#ifdef CONFIG_ACPI
-static int __init acpi_find_ibft(struct acpi_table_header *header)
-{
- ibft_addr = (struct acpi_table_ibft *)header;
- return 0;
-}
-#endif /* CONFIG_ACPI */
-
static int __init find_ibft_in_mem(void)
{
unsigned long pos;
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_addr = (struct acpi_table_ibft *)virt;
+ pr_info("iBFT found at 0x%lx.\n", pos);
goto done;
}
}
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
-#ifdef CONFIG_ACPI
- int i;
-#endif
ibft_addr = NULL;
-#ifdef CONFIG_ACPI
- for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
- acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
-#endif /* CONFIG_ACPI */
-
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
- if (!ibft_addr && !efi_enabled)
+ if (!efi_enabled)
find_ibft_in_mem();
if (ibft_addr) {
#include <linux/module.h>
#include <linux/sigma.h>
-/* Return: 0==OK, <0==error, =1 ==no more actions */
+static size_t sigma_action_size(struct sigma_action *sa)
+{
+ size_t payload = 0;
+
+ switch (sa->instr) {
+ case SIGMA_ACTION_WRITEXBYTES:
+ case SIGMA_ACTION_WRITESINGLE:
+ case SIGMA_ACTION_WRITESAFELOAD:
+ payload = sigma_action_len(sa);
+ break;
+ default:
+ break;
+ }
+
+ payload = ALIGN(payload, 2);
+
+ return payload + sizeof(struct sigma_action);
+}
+
+/*
+ * Returns a negative error value in case of an error, 0 if processing of
+ * the firmware should be stopped after this action, 1 otherwise.
+ */
static int
-process_sigma_action(struct i2c_client *client, struct sigma_firmware *ssfw)
+process_sigma_action(struct i2c_client *client, struct sigma_action *sa)
{
- struct sigma_action *sa = (void *)(ssfw->fw->data + ssfw->pos);
size_t len = sigma_action_len(sa);
- int ret = 0;
+ int ret;
pr_debug("%s: instr:%i addr:%#x len:%zu\n", __func__,
sa->instr, sa->addr, len);
case SIGMA_ACTION_WRITEXBYTES:
case SIGMA_ACTION_WRITESINGLE:
case SIGMA_ACTION_WRITESAFELOAD:
- if (ssfw->fw->size < ssfw->pos + len)
- return -EINVAL;
ret = i2c_master_send(client, (void *)&sa->addr, len);
if (ret < 0)
return -EINVAL;
break;
-
case SIGMA_ACTION_DELAY:
- ret = 0;
udelay(len);
len = 0;
break;
-
case SIGMA_ACTION_END:
- return 1;
-
+ return 0;
default:
return -EINVAL;
}
- /* when arrive here ret=0 or sent data */
- ssfw->pos += sigma_action_size(sa, len);
- return ssfw->pos == ssfw->fw->size;
+ return 1;
}
static int
process_sigma_actions(struct i2c_client *client, struct sigma_firmware *ssfw)
{
- pr_debug("%s: processing %p\n", __func__, ssfw);
+ struct sigma_action *sa;
+ size_t size;
+ int ret;
+
+ while (ssfw->pos + sizeof(*sa) <= ssfw->fw->size) {
+ sa = (struct sigma_action *)(ssfw->fw->data + ssfw->pos);
+
+ size = sigma_action_size(sa);
+ ssfw->pos += size;
+ if (ssfw->pos > ssfw->fw->size || size == 0)
+ break;
+
+ ret = process_sigma_action(client, sa);
- while (1) {
- int ret = process_sigma_action(client, ssfw);
pr_debug("%s: action returned %i\n", __func__, ret);
- if (ret == 1)
- return 0;
- else if (ret)
+
+ if (ret <= 0)
return ret;
}
+
+ if (ssfw->pos != ssfw->fw->size)
+ return -EINVAL;
+
+ return 0;
}
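The rewritten process_sigma_actions() above treats the firmware image as a stream of variable-length records and checks each record's extent against the file size before acting on it. The same pattern in a stand-alone sketch (illustrative record layout, not the driver's real struct sigma_action):

	#include <stddef.h>
	#include <stdint.h>

	struct record {
		uint8_t instr;
		uint8_t len;		/* payload length, possibly odd */
		uint8_t payload[];
	};

	/* Records are padded to an even length in the image. */
	static size_t record_size(const struct record *r)
	{
		return sizeof(*r) + ((r->len + 1u) & ~1u);
	}

	/*
	 * handle() returns <0 on error, 0 to stop, 1 to continue.
	 * Returns 0 when the image was consumed cleanly, -1 when a
	 * record header or payload runs past the end of the buffer.
	 */
	static int walk(const uint8_t *data, size_t size,
			int (*handle)(const struct record *r))
	{
		size_t pos = 0;

		while (pos + sizeof(struct record) <= size) {
			const struct record *r = (const void *)(data + pos);
			int ret;

			pos += record_size(r);
			if (pos > size)
				return -1;	/* truncated record */

			ret = handle(r);
			if (ret <= 0)
				return ret;
		}
		return pos == size ? 0 : -1;
	}

Advancing and bounds-checking pos before handing the record to handle() is what guarantees that a record whose declared length exceeds the remaining bytes is rejected rather than read past the end of the buffer.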
int process_sigma_firmware(struct i2c_client *client, const char *name)
/* then verify the header */
ret = -EINVAL;
- if (fw->size < sizeof(*ssfw_head))
+
+ /*
+ * Reject too small or unreasonably large files. The upper limit has been
+ * chosen a bit arbitrarily, but it should be enough for all practical
+ * purposes and having the limit makes it easier to avoid integer
+ * overflows later in the loading process.
+ */
+ if (fw->size < sizeof(*ssfw_head) || fw->size >= 0x4000000)
goto done;
ssfw_head = (void *)fw->data;
if (memcmp(ssfw_head->magic, SIGMA_MAGIC, ARRAY_SIZE(ssfw_head->magic)))
goto done;
- crc = crc32(0, fw->data, fw->size);
+ crc = crc32(0, fw->data + sizeof(*ssfw_head),
+ fw->size - sizeof(*ssfw_head));
pr_debug("%s: crc=%x\n", __func__, crc);
- if (crc != ssfw_head->crc)
+ if (crc != le32_to_cpu(ssfw_head->crc))
goto done;
ssfw.pos = sizeof(*ssfw_head);
obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
-obj-$(CONFIG_MACH_KS8695) += gpio-ks8695.o
+obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
obj-$(CONFIG_GPIO_LANGWELL) += gpio-langwell.o
obj-$(CONFIG_ARCH_LPC32XX) += gpio-lpc32xx.o
obj-$(CONFIG_GPIO_MAX730X) += gpio-max730x.o
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/pdata.h>
-#include <linux/mfd/da9052/gpio.h>
#define DA9052_INPUT 1
#define DA9052_OUTPUT_OPENDRAIN 2
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
#define DA9052_GPIO_NIBBLE_SHIFT 4
+#define DA9052_IRQ_GPI0 16
+#define DA9052_GPIO_ODD_SHIFT 7
+#define DA9052_GPIO_EVEN_SHIFT 3
struct da9052_gpio {
struct da9052 *da9052;
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct da9052_gpio *gpio = to_da9052_gpio(gc);
- unsigned char register_value = 0;
int ret;
if (da9052_gpio_port_odd(offset)) {
- if (value) {
- register_value = DA9052_GPIO_ODD_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_ODD_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_ODD_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio odd reg,%d",
ret);
- }
} else {
- if (value) {
- register_value = DA9052_GPIO_EVEN_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_EVEN_PORT_MODE,
- register_value);
+ value << DA9052_GPIO_EVEN_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio even reg,%d",
ret);
- }
}
}
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
- .can_sleep = 1;
- .ngpio = 16;
- .base = -1;
+ .can_sleep = 1,
+ .ngpio = 16,
+ .base = -1,
};
static int __devinit da9052_gpio_probe(struct platform_device *pdev)
&chip->reg->regs[chip->ch].imask);
}
+static void ioh_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien &= ~(1 << (d->irq - chip->irq_base));
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
+static void ioh_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct ioh_gpio *chip = gc->private;
+ unsigned long flags;
+ u32 ien;
+
+ spin_lock_irqsave(&chip->spinlock, flags);
+ ien = ioread32(&chip->reg->regs[chip->ch].ien);
+ ien |= 1 << (d->irq - chip->irq_base);
+ iowrite32(ien, &chip->reg->regs[chip->ch].ien);
+ spin_unlock_irqrestore(&chip->spinlock, flags);
+}
+
static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
int i, j;
int ret = IRQ_NONE;
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
+ ct->chip.irq_disable = ioh_irq_disable;
+ ct->chip.irq_enable = ioh_irq_enable;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);
return 0;
}
+static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ /* GPIO 28..31 are input only on MPC5121 */
+ if (gpio >= 28)
+ return -EINVAL;
+
+ return mpc8xxx_gpio_dir_out(gc, gpio, val);
+}
+
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
- gc->direction_output = mpc8xxx_gpio_dir_out;
- if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
- gc->get = mpc8572_gpio_get;
- else
- gc->get = mpc8xxx_gpio_get;
+ gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
+ mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
+ gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
+ mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;
int ret, irq, i;
static DECLARE_BITMAP(init_irq, NR_IRQS);
- pdata = dev->dev.platform_data;
- if (pdata == NULL)
- return -ENODEV;
-
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
return -ENOMEM;
EXPORT_SYMBOL(drm_crtc_helper_set_mode);
+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_connector *connector;
+ struct drm_encoder *encoder;
+
+ /* Decouple all encoders and their attached connectors from this crtc */
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ if (connector->encoder != encoder)
+ continue;
+
+ connector->encoder = NULL;
+ }
+ }
+
+ drm_helper_disable_unused_functions(dev);
+ return 0;
+}
+
/**
* drm_crtc_helper_set_config - set a new config from userspace
* @crtc: CRTC to setup
(int)set->num_connectors, set->x, set->y);
} else {
DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
- set->mode = NULL;
- set->num_connectors = 0;
+ return drm_crtc_helper_disable(set->crtc);
}
dev = set->crtc->dev;
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
+ seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);
diff1 = now - dev_priv->last_time1;
+ /* Prevent division-by-zero if we are asking too fast.
+ * Also, we don't get interesting results if we are polling
+ * faster than once in 10ms, so just return the saved value
+ * in such cases.
+ */
+ if (diff1 <= 10)
+ return dev_priv->chipset_power;
+
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
+ dev_priv->chipset_power = ret;
+
return ret;
}
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
-unsigned int i915_semaphores __read_mostly = 0;
+int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
- "Use semaphores for inter-ring sync (default: false)");
+ "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
-unsigned int i915_enable_rc6 __read_mostly = 0;
+int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
- "Enable power-saving render C-state 6 (default: true)");
+ "Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
}
}
-static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
udelay(10);
}
+void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
+ POSTING_READ(FORCEWAKE_MT);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
+ udelay(10);
+}
+
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
- __gen6_gt_force_wake_get(dev_priv);
+ dev_priv->display.force_wake_get(dev_priv);
}
-static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
+void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
+void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
+ POSTING_READ(FORCEWAKE_MT);
+}
+
/*
* see gen6_gt_force_wake_get()
*/
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
- __gen6_gt_force_wake_put(dev_priv);
+ dev_priv->display.force_wake_put(dev_priv);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
+struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
+ void (*force_wake_get)(struct drm_i915_private *dev_priv);
+ void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
u64 last_count1;
unsigned long last_time1;
+ unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
-extern unsigned int i915_semaphores __read_mostly;
+extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
-extern unsigned int i915_enable_rc6 __read_mostly;
+extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
+extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
+
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
- ((reg) < 0x40000) && \
- ((reg) != FORCEWAKE))
+ ((reg) < 0x40000) && \
+ ((reg) != FORCEWAKE) && \
+ ((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
+#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
return 0;
}
+static bool
+intel_enable_semaphores(struct drm_device *dev)
+{
+ if (INTEL_INFO(dev)->gen < 6)
+ return 0;
+
+ if (i915_semaphores >= 0)
+ return i915_semaphores;
+
+ /* Enable semaphores on SNB when IO remapping is off */
+ if (INTEL_INFO(dev)->gen == 6)
+ return !intel_iommu_enabled;
+
+ return 1;
+}
+
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
- if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
+ if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
-#define TRANSCODER_A (0)
-#define TRANSCODER_B (1 << 30)
-#define TRANSCODER(pipe) ((pipe) << 30)
-#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER(pipe) ((pipe) << 30)
+#define TRANSCODER_CPT(pipe) ((pipe) << 29)
+#define TRANSCODER_MASK (1 << 30)
+#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+/* IVB */
+#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
+#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
+#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
+#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
+#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
+#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
+#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
+
+/* legacy values */
+#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
+#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
+#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
+#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
+#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
+
+#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
+
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
+#define FORCEWAKE_MT 0xa188 /* multi-threaded */
+#define FORCEWAKE_MT_ACK 0x130040
+#define ECOBUS 0xa180
+#define FORCEWAKE_MT_ENABLE (1<<5)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
-
#include "drm_crtc_helper.h"
+#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
+ * @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
+ * DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
- unsigned int *pipe_bpp)
+ unsigned int *pipe_bpp,
+ struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
}
}
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
+ display_bpc = 6;
+ }
+
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
+ /* default to 8bpc */
+ pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
+ if (is_dp) {
+ if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
+ pipeconf |= PIPECONF_BPP_6 |
+ PIPECONF_DITHER_EN |
+ PIPECONF_DITHER_TYPE_SP;
+ }
+ }
+
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
- dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
+ dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
+ ret = drm_vblank_get(dev, intel_crtc->pipe);
+ if (ret)
+ goto free_work;
+
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
+ drm_vblank_put(dev, intel_crtc->pipe);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
crtc->fb = fb;
- ret = drm_vblank_get(dev, intel_crtc->pipe);
- if (ret)
- goto cleanup_objs;
-
work->pending_flip_obj = obj;
work->enable_stall_check = true;
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
-cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
+ drm_vblank_put(dev, intel_crtc->pipe);
+free_work:
kfree(work);
return ret;
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
+static bool intel_enable_rc6(struct drm_device *dev)
+{
+ /*
+ * Respect the kernel parameter if it is set
+ */
+ if (i915_enable_rc6 >= 0)
+ return i915_enable_rc6;
+
+ /*
+ * Disable RC6 on Ironlake
+ */
+ if (INTEL_INFO(dev)->gen == 5)
+ return 0;
+
+ /*
+ * Enable rc6 on Sandybridge if DMA remapping is disabled
+ */
+ if (INTEL_INFO(dev)->gen == 6) {
+ DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
+ intel_iommu_enabled ? "true" : "false",
+ !intel_iommu_enabled ? "en" : "dis");
+ return !intel_iommu_enabled;
+ }
+ DRM_DEBUG_DRIVER("RC6 enabled\n");
+ return 1;
+}
+
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
- if (i915_enable_rc6)
+ if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
- if (!i915_enable_rc6)
+ if (!intel_enable_rc6(dev))
return;
mutex_lock(&dev->struct_mutex);
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
+ dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
+ dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
+
+ /* IVB configs may use multi-threaded forcewake */
+ if (IS_IVYBRIDGE(dev)) {
+ u32 ecobus;
+
+ mutex_lock(&dev->struct_mutex);
+ __gen6_gt_force_wake_mt_get(dev_priv);
+ ecobus = I915_READ(ECOBUS);
+ __gen6_gt_force_wake_mt_put(dev_priv);
+ mutex_unlock(&dev->struct_mutex);
+
+ if (ecobus & FORCEWAKE_MT_ENABLE) {
+ DRM_DEBUG_KMS("Using MT version of forcewake\n");
+ dev_priv->display.force_wake_get =
+ __gen6_gt_force_wake_mt_get;
+ dev_priv->display.force_wake_put =
+ __gen6_gt_force_wake_mt_put;
+ }
+ }
+
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))
*/
static int
-intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
+intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
- if (intel_crtc)
+ if (check_bpp)
+ bpp = check_bpp;
+ else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
+ int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
return MODE_PANEL;
}
- if (intel_dp_link_required(intel_dp, mode->clock)
- > intel_dp_max_data_rate(max_link_clock, max_lanes))
- return MODE_CLOCK_HIGH;
+ mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
+ max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
+
+ if (mode_rate > max_rate) {
+ mode_rate = intel_dp_link_required(intel_dp,
+ mode->clock, 18);
+ if (mode_rate > max_rate)
+ return MODE_CLOCK_HIGH;
+ else
+ mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
+ }
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
- if (IS_GEN6(dev))
- aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
+ if (IS_GEN6(dev) || IS_GEN7(dev))
+ aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
+ int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
- if (intel_dp_link_required(intel_dp, mode->clock)
+ if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
}
/*
- * There are three kinds of DP registers:
+ * There are four kinds of DP registers:
*
* IBX PCH
- * CPU
+ * SNB CPU
+ * IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
/* Split out the IBX/CPU vs CPT settings */
- if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+ if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+ intel_dp->DP |= DP_SYNC_HS_HIGH;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+ intel_dp->DP |= DP_SYNC_VS_HIGH;
+ intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
+
+ if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+ intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+ intel_dp->DP |= intel_crtc->pipe << 29;
+
+ /* don't miss out the required setting for eDP */
+ intel_dp->DP |= DP_PLL_ENABLE;
+ if (adjusted_mode->clock < 200000)
+ intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+ else
+ intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+ } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
-#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
-#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
-intel_dp_pre_emphasis_max(uint8_t voltage_swing)
+intel_dp_voltage_max(struct intel_dp *intel_dp)
{
- switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
- case DP_TRAIN_VOLTAGE_SWING_400:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_600:
- return DP_TRAIN_PRE_EMPHASIS_6;
- case DP_TRAIN_VOLTAGE_SWING_800:
- return DP_TRAIN_PRE_EMPHASIS_3_5;
- case DP_TRAIN_VOLTAGE_SWING_1200:
- default:
- return DP_TRAIN_PRE_EMPHASIS_0;
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_800;
+ else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ return DP_TRAIN_VOLTAGE_SWING_1200;
+ else
+ return DP_TRAIN_VOLTAGE_SWING_800;
+}
+
+static uint8_t
+intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
+{
+ struct drm_device *dev = intel_dp->base.base.dev;
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
+ } else {
+ switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+ case DP_TRAIN_VOLTAGE_SWING_400:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_600:
+ return DP_TRAIN_PRE_EMPHASIS_6;
+ case DP_TRAIN_VOLTAGE_SWING_800:
+ return DP_TRAIN_PRE_EMPHASIS_3_5;
+ case DP_TRAIN_VOLTAGE_SWING_1200:
+ default:
+ return DP_TRAIN_PRE_EMPHASIS_0;
+ }
}
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
- struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
- int voltage_max;
+ uint8_t voltage_max;
+ uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
p = this_p;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
- voltage_max = I830_DP_VOLTAGE_MAX_CPT;
- else
- voltage_max = I830_DP_VOLTAGE_MAX;
+ voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
- if (p >= intel_dp_pre_emphasis_max(v))
- p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+ preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
+ if (p >= preemph_max)
+ p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
}
+/* Gen7's DP voltage swing and pre-emphasis control */
+static uint32_t
+intel_gen7_edp_signal_levels(uint8_t train_set)
+{
+ int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
+ DP_TRAIN_PRE_EMPHASIS_MASK);
+ switch (signal_levels) {
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_400MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
+ return EDP_LINK_TRAIN_400MV_6DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_600MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
+
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
+ return EDP_LINK_TRAIN_800MV_0DB_IVB;
+ case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
+ return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
+
+ default:
+ DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
+ "0x%x\n", signal_levels);
+ return EDP_LINK_TRAIN_500MV_0DB_IVB;
+ }
+}
+
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
break;
}
- if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
+ if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
+ signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
+ DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
+ } else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
++tries;
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
udelay(100);
}
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
msleep(17);
if (is_edp(intel_dp)) {
- if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+ if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
+ {
+ .callback = intel_no_lvds_dmi_callback,
+ .ident = "Asus AT5NM10T-I",
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
+ DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
+ },
+ },
{ } /* terminating entry */
};
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
- if (IS_PINEVIEW(dev)) {
+ if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
- } else {
+ else
max >>= 16;
- if (INTEL_INFO(dev)->gen < 4)
- max &= ~1;
- }
if (is_backlight_combination_mode(dev))
max *= 0xff;
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
- if (IS_PINEVIEW(dev))
+ if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
- val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
}
tmp = I915_READ(BLC_PWM_CTL);
- if (IS_PINEVIEW(dev)) {
- tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+ if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
- } else
- tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+ tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
+#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
- if (intel_crtc->pipe == 1)
- sdvox |= SDVO_PIPE_B_SELECT;
+
+ if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
+ sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
+ else
+ sdvox |= TRANSCODER(intel_crtc->pipe);
+
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
return status;
}
+static bool
+intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
+ struct edid *edid)
+{
+ bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
+ bool connector_is_digital = !!IS_DIGITAL(sdvo);
+
+ DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
+ connector_is_digital, monitor_is_digital);
+ return connector_is_digital == monitor_is_digital;
+}
+
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- if (edid->input & DRM_EDID_INPUT_DIGITAL)
- ret = connector_status_disconnected;
- else
+ if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
+ edid))
ret = connector_status_connected;
+ else
+ ret = connector_status_disconnected;
+
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
- bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
- bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
-
- if (connector_is_digital == monitor_is_digital) {
+ if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
+ edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct nouveau_bo *bo;
+ int ret;
+
+ args->pitch = roundup(args->width * (args->bpp / 8), 256);
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(bo->gem);
+ return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle)
+{
+ return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *poffset)
+{
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gem) {
+ struct nouveau_bo *bo = gem->driver_private;
+ *poffset = bo->bo.addr_space_offset;
+ drm_gem_object_unreference_unlocked(gem);
+ return 0;
+ }
+
+ return -ENOENT;
+}
.gem_open_object = nouveau_gem_object_open,
.gem_close_object = nouveau_gem_object_close,
+ .dumb_create = nouveau_display_dumb_create,
+ .dumb_map_offset = nouveau_display_dumb_map_offset,
+ .dumb_destroy = nouveau_display_dumb_destroy,
+
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
#ifdef GIT_REVISION
struct drm_pending_vblank_event *event);
int nouveau_finish_page_flip(struct nouveau_channel *,
struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+ struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+ uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+ uint32_t handle);
/* nv10_gpio.c */
int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
return ret;
}
- ret = drm_mm_init(&chan->ramin_heap, base, size);
+ ret = drm_mm_init(&chan->ramin_heap, base, size - base);
if (ret) {
NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
nouveau_gpuobj_ref(NULL, &chan->ramin);
pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
+ nvbe->unmap_pages = false;
}
+
+ nvbe->pages = NULL;
}
static void
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), mc;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
disp->irq.dcb = NULL;
struct nv50_display *disp = nv50_display(dev);
u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
struct dcb_entry *dcb;
- int i, crtc, or, type = OUTPUT_ANY;
+ int i, crtc, or = 0, type = OUTPUT_ANY;
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
dcb = disp->irq.dcb;
u8 tpnr[GPC_MAX];
int i, gpc, tpc;
+ nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
+
/*
* TP ROP UNKVAL(magic_not_rop_nr)
* 450: 4/0/0/0 2 3
continue;
if (nv_partner != nv_encoder &&
- nv_partner->dcb->or == nv_encoder->or) {
+ nv_partner->dcb->or == nv_encoder->dcb->or) {
if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
return;
break;
return -EINVAL;
}
- if (tiling_flags & RADEON_TILING_MACRO)
+ if (tiling_flags & RADEON_TILING_MACRO) {
+ if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0: /* 1KB rows */
+ default:
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
+ break;
+ case 1: /* 2KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
+ break;
+ case 2: /* 4KB rows */
+ fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
+ break;
+ }
+
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
- else if (tiling_flags & RADEON_TILING_MICRO)
+ } else if (tiling_flags & RADEON_TILING_MICRO)
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
switch (radeon_crtc->crtc_id) {
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
u32 group_size;
u32 nbanks;
u32 npipes;
+ u32 row_size;
/* value we track */
u32 nsamples;
u32 cb_color_base_last[12];
struct radeon_bo *db_s_write_bo;
};
+static u32 evergreen_cs_get_array_mode(u32 tiling_flags)
+{
+ if (tiling_flags & RADEON_TILING_MACRO)
+ return ARRAY_2D_TILED_THIN1;
+ else if (tiling_flags & RADEON_TILING_MICRO)
+ return ARRAY_1D_TILED_THIN1;
+ else
+ return ARRAY_LINEAR_GENERAL;
+}
+
+static u32 evergreen_cs_get_num_banks(u32 nbanks)
+{
+ switch (nbanks) {
+ case 2:
+ return ADDR_SURF_2_BANK;
+ case 4:
+ return ADDR_SURF_4_BANK;
+ case 8:
+ default:
+ return ADDR_SURF_8_BANK;
+ case 16:
+ return ADDR_SURF_16_BANK;
+ }
+}
+
+static u32 evergreen_cs_get_tile_split(u32 row_size)
+{
+ switch (row_size) {
+ case 1:
+ default:
+ return ADDR_SURF_TILE_SPLIT_1KB;
+ case 2:
+ return ADDR_SURF_TILE_SPLIT_2KB;
+ case 4:
+ return ADDR_SURF_TILE_SPLIT_4KB;
+ }
+}
+
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
int i;
}
ib[idx] &= ~Z_ARRAY_MODE(0xf);
track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+ ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
+ track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else {
- ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
}
}
break;
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR8_INFO:
"0x%04X\n", reg);
return -EINVAL;
}
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
- ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
- }
+ ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
+ track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
}
break;
case CB_COLOR0_PITCH:
case CB_COLOR9_ATTRIB:
case CB_COLOR10_ATTRIB:
case CB_COLOR11_ATTRIB:
+ r = evergreen_cs_packet_next_reloc(p, &reloc);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ }
break;
case CB_COLOR0_DIM:
case CB_COLOR1_DIM:
}
ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
if (!p->keep_tiling_flags) {
- if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
- else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
- ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+ ib[idx+1+(i*8)+1] |=
+ TEX_ARRAY_MODE(evergreen_cs_get_array_mode(reloc->lobj.tiling_flags));
+ if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+ ib[idx+1+(i*8)+6] |=
+ TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
+ ib[idx+1+(i*8)+7] |=
+ TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
+ }
}
texture = reloc->robj;
/* tex mip base */
{
struct radeon_cs_packet pkt;
struct evergreen_cs_track *track;
+ u32 tmp;
int r;
if (p->track == NULL) {
if (track == NULL)
return -ENOMEM;
evergreen_cs_track_init(track);
- track->npipes = p->rdev->config.evergreen.tiling_npipes;
- track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
- track->group_size = p->rdev->config.evergreen.tiling_group_size;
+ if (p->rdev->family >= CHIP_CAYMAN)
+ tmp = p->rdev->config.cayman.tile_config;
+ else
+ tmp = p->rdev->config.evergreen.tile_config;
+
+ switch (tmp & 0xf) {
+ case 0:
+ track->npipes = 1;
+ break;
+ case 1:
+ default:
+ track->npipes = 2;
+ break;
+ case 2:
+ track->npipes = 4;
+ break;
+ case 3:
+ track->npipes = 8;
+ break;
+ }
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0:
+ track->nbanks = 4;
+ break;
+ case 1:
+ default:
+ track->nbanks = 8;
+ break;
+ case 2:
+ track->nbanks = 16;
+ break;
+ }
+
+ switch ((tmp & 0xf00) >> 8) {
+ case 0:
+ track->group_size = 256;
+ break;
+ case 1:
+ default:
+ track->group_size = 512;
+ break;
+ }
+
+ switch ((tmp & 0xf000) >> 12) {
+ case 0:
+ track->row_size = 1;
+ break;
+ case 1:
+ default:
+ track->row_size = 2;
+ break;
+ case 2:
+ track->row_size = 4;
+ break;
+ }
+
p->track = track;
}
do {
# define EVERGREEN_GRPH_DEPTH_8BPP 0
# define EVERGREEN_GRPH_DEPTH_16BPP 1
# define EVERGREEN_GRPH_DEPTH_32BPP 2
+# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
+# define EVERGREEN_ADDR_SURF_2_BANK 0
+# define EVERGREEN_ADDR_SURF_4_BANK 1
+# define EVERGREEN_ADDR_SURF_8_BANK 2
+# define EVERGREEN_ADDR_SURF_16_BANK 3
+# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
+# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
+# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
/* 8 BPP */
# define EVERGREEN_GRPH_FORMAT_INDEXED 0
# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
# define EVERGREEN_GRPH_FORMAT_RGB111110 6
# define EVERGREEN_GRPH_FORMAT_BGR101111 7
+# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
+# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
+# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
+# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
+# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
+# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
#define DB_HTILE_DATA_BASE 0x28014
#define DB_Z_INFO 0x28040
# define Z_ARRAY_MODE(x) ((x) << 4)
+# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
+# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
+# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
+# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
#define DB_STENCIL_INFO 0x28044
#define DB_Z_READ_BASE 0x28048
#define DB_STENCIL_READ_BASE 0x2804c
# define CB_SF_EXPORT_FULL 0
# define CB_SF_EXPORT_NORM 1
#define CB_COLOR0_ATTRIB 0x28c74
+# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
+# define ADDR_SURF_TILE_SPLIT_64B 0
+# define ADDR_SURF_TILE_SPLIT_128B 1
+# define ADDR_SURF_TILE_SPLIT_256B 2
+# define ADDR_SURF_TILE_SPLIT_512B 3
+# define ADDR_SURF_TILE_SPLIT_1KB 4
+# define ADDR_SURF_TILE_SPLIT_2KB 5
+# define ADDR_SURF_TILE_SPLIT_4KB 6
+# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
+# define ADDR_SURF_2_BANK 0
+# define ADDR_SURF_4_BANK 1
+# define ADDR_SURF_8_BANK 2
+# define ADDR_SURF_16_BANK 3
+# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
+# define ADDR_SURF_BANK_WIDTH_1 0
+# define ADDR_SURF_BANK_WIDTH_2 1
+# define ADDR_SURF_BANK_WIDTH_4 2
+# define ADDR_SURF_BANK_WIDTH_8 3
+# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
+# define ADDR_SURF_BANK_HEIGHT_1 0
+# define ADDR_SURF_BANK_HEIGHT_2 1
+# define ADDR_SURF_BANK_HEIGHT_4 2
+# define ADDR_SURF_BANK_HEIGHT_8 3
#define CB_COLOR0_DIM 0x28c78
/* only CB0-7 blocks have these regs */
#define CB_COLOR0_CMASK 0x28c7c
# define SQ_SEL_1 5
#define SQ_TEX_RESOURCE_WORD5_0 0x30014
#define SQ_TEX_RESOURCE_WORD6_0 0x30018
+# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
+# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
+# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
+# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
#define SQ_VTX_CONSTANT_WORD0_0 0x30000
#define SQ_VTX_CONSTANT_WORD1_0 0x30004
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
+ int i;
/* Lock the graphics update lock */
/* update the scanout addresses */
WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
/* Wait for update_pending to go high. */
- while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
- printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status));
+ DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
+ acpi_format_exception(status));
kfree(buffer.pointer);
return 1;
}
acpi_handle handle;
int ret;
- /* No need to proceed if we're sure that ATIF is not supported */
- if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
- return 0;
-
/* Get the device handle */
handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
+ /* No need to proceed if we're sure that ATIF is not supported */
+ if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
+ return 0;
+
/* Call the ATIF method */
ret = radeon_atif_call(handle);
if (ret)
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_TRAVIS:
case ENCODER_OBJECT_ID_NUTMEG:
- return true;
+ return radeon_encoder->encoder_id;
default:
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
}
-
- return false;
+ return ENCODER_OBJECT_ID_NONE;
}
void radeon_panel_mode_fixup(struct drm_encoder *encoder,
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
{
struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+ int i;
/* Lock the graphics update lock */
tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
(u32)crtc_base);
/* Wait for update_pending to go high. */
- while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+ for (i = 0; i < rdev->usec_timeout; i++) {
+ if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+ break;
+ udelay(1);
+ }
DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
/* Unlock the lock, so double-buffering can take place inside vblank */
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
goto out_clips;
}
- clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+ clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
if (clips == NULL) {
DRM_ERROR("Failed to allocate clip rect list.\n");
ret = -ENOMEM;
}
rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
- rects = kzalloc(rects_size, GFP_KERNEL);
+ rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+ GFP_KERNEL);
if (unlikely(!rects)) {
ret = -ENOMEM;
goto out_unlock;
}
for (i = 0; i < arg->num_outputs; ++i) {
- if (rects->x < 0 ||
- rects->y < 0 ||
- rects->x + rects->w > mode_config->max_width ||
- rects->y + rects->h > mode_config->max_height) {
+ if (rects[i].x < 0 ||
+ rects[i].y < 0 ||
+ rects[i].x + rects[i].w > mode_config->max_width ||
+ rects[i].y + rects[i].h > mode_config->max_height) {
DRM_ERROR("Invalid GUI layout.\n");
ret = -EINVAL;
goto out_free;
{ HID_USB_DEVICE(USB_VENDOR_ID_ESSENTIAL_REALITY, USB_DEVICE_ID_ESSENTIAL_REALITY_P5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC5UH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ETT, USB_DEVICE_ID_TC4UM) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0001) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0002) },
- { HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_4_PHIDGETSERVO_30) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GLAB, USB_DEVICE_ID_1_PHIDGETSERVO_30) },
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002
#define USB_VENDOR_ID_GENERAL_TOUCH 0x0dfc
-#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0001
+#define USB_DEVICE_ID_GENERAL_TOUCH_WIN7_TWOFINGERS 0x0003
#define USB_VENDOR_ID_GLAB 0x06c2
#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
struct completion *completion = &hwmon->read_completion;
- unsigned long t;
+ long t;
unsigned long val;
int ret;
return 0;
}
-struct platform_driver jz4740_hwmon_driver = {
+static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
.remove = __devexit_p(jz4740_hwmon_remove),
.driver = {
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
+int intel_iommu_enabled = 0;
+EXPORT_SYMBOL_GPL(intel_iommu_enabled);
+
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
return 0;
}
-int dmar_parse_rmrr_atsr_dev(void)
+int __init dmar_parse_rmrr_atsr_dev(void)
{
struct dmar_rmrr_unit *rmrr, *rmrr_n;
struct dmar_atsr_unit *atsr, *atsr_n;
bus_register_notifier(&pci_bus_type, &device_nb);
+ intel_iommu_enabled = 1;
+
return 0;
}
return ir_supported;
}
-int ir_dev_scope_init(void)
+int __init ir_dev_scope_init(void)
{
if (!intr_remapping_enabled)
return 0;
case IIOCDOCFINT:
if (!divert_if.drv_to_name(dioctl.cf_ctrl.drvid))
return (-EINVAL); /* invalid driver */
+ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) ==
+ sizeof(dioctl.cf_ctrl.msn))
+ return -EINVAL;
+ if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) ==
+ sizeof(dioctl.cf_ctrl.fwd_nr))
+ return -EINVAL;
if ((i = cf_command(dioctl.cf_ctrl.drvid,
(cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2,
dioctl.cf_ctrl.cfproc,
char *c,
*e;
+ if (strnlen(cfg->drvid, sizeof(cfg->drvid)) ==
+ sizeof(cfg->drvid))
+ return -EINVAL;
drvidx = -1;
chidx = -1;
strcpy(drvid, cfg->drvid);
*/
int i;
+ spin_lock_irq(&bitmap->lock);
for (i = 0; i < bitmap->file_pages; i++)
set_page_attr(bitmap, bitmap->filemap[i],
BITMAP_PAGE_NEEDWRITE);
bitmap->allclean = 0;
+ spin_unlock_irq(&bitmap->lock);
}
static void bitmap_count_page(struct bitmap *bitmap, sector_t offset, int inc)
for (chunk = s; chunk <= e; chunk++) {
sector_t sec = (sector_t)chunk << CHUNK_BLOCK_SHIFT(bitmap);
bitmap_set_memory_bits(bitmap, sec, 1);
+ spin_lock_irq(&bitmap->lock);
bitmap_file_set_bit(bitmap, sec);
+ spin_unlock_irq(&bitmap->lock);
if (sec < bitmap->mddev->recovery_cp)
/* We are asserting that the array is dirty,
* so move the recovery_cp address back so
mddev->ctime == 0 && !mddev->hold_active) {
/* Array is not configured at all, and not held active,
* so destroy it */
- list_del(&mddev->all_mddevs);
+ list_del_init(&mddev->all_mddevs);
bs = mddev->bio_set;
mddev->bio_set = NULL;
if (mddev->gendisk) {
sep = ",";
}
if (test_bit(Blocked, &rdev->flags) ||
- rdev->badblocks.unacked_exist) {
+ (rdev->badblocks.unacked_exist
+ && !test_bit(Faulty, &rdev->flags))) {
len += sprintf(page+len, "%sblocked", sep);
sep = ",";
}
if (err)
return err;
else {
+ if (mddev->hold_active == UNTIL_IOCTL)
+ mddev->hold_active = 0;
sysfs_notify_dirent_safe(mddev->sysfs_state);
return len;
}
if (!entry->show)
return -EIO;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
+
rv = mddev_lock(mddev);
if (!rv) {
rv = entry->show(mddev, page);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
return -EIO;
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
+ spin_lock(&all_mddevs_lock);
+ if (list_empty(&mddev->all_mddevs)) {
+ spin_unlock(&all_mddevs_lock);
+ return -EBUSY;
+ }
+ mddev_get(mddev);
+ spin_unlock(&all_mddevs_lock);
rv = mddev_lock(mddev);
- if (mddev->hold_active == UNTIL_IOCTL)
- mddev->hold_active = 0;
if (!rv) {
rv = entry->store(mddev, page, length);
mddev_unlock(mddev);
}
+ mddev_put(mddev);
return rv;
}
s + rdev->data_offset, sectors, acknowledged);
if (rv) {
/* Make sure they get written out promptly */
+ sysfs_notify_dirent_safe(rdev->sysfs_state);
set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
md_wakeup_thread(rdev->mddev->thread);
}
if (dev->written)
s->written++;
rdev = rcu_dereference(conf->disks[i].rdev);
+ if (rdev && test_bit(Faulty, &rdev->flags))
+ rdev = NULL;
if (rdev) {
is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS,
&first_bad, &bad_sectors);
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
- else if (!test_bit(Faulty, &rdev->flags)) {
+ else {
/* in sync if before recovery_offset */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
}
- if (test_bit(R5_WriteError, &dev->flags)) {
+ if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
} else
clear_bit(R5_WriteError, &dev->flags);
}
- if (test_bit(R5_MadeGood, &dev->flags)) {
+ if (rdev && test_bit(R5_MadeGood, &dev->flags)) {
if (!test_bit(Faulty, &rdev->flags)) {
s->handle_bad_blocks = 1;
atomic_inc(&rdev->nr_pending);
MMC_QUIRK_BLK_NO_CMD23),
MMC_FIXUP("MMC32G", 0x11, CID_OEMID_ANY, add_quirk_mmc,
MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+ * Some Micron MMC cards need a longer data read timeout than
+ * indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, 0x13, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
END_FIXUP
};
data->timeout_clks = 0;
}
}
+
+ /*
+ * Some cards require a longer data read timeout than indicated in CSD.
+ * Address this by setting the read timeout to a "reasonably high"
+ * value. For the cards tested, 300ms has proven enough. If necessary,
+ * this value can be increased further for other problematic cards.
+ */
+ if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) {
+ data->timeout_ns = 300000000;
+ data->timeout_clks = 0;
+ }
+
/*
* Some cards need very high timeouts if driven in SPI mode.
* The worst observed timeout was 900ms after writing a
mmc_host_clk_release(host);
}
+static void mmc_poweroff_notify(struct mmc_host *host)
+{
+ struct mmc_card *card;
+ unsigned int timeout;
+ unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION;
+ int err = 0;
+
+ card = host->card;
+
+ /*
+ * Send the power notify command only if the card
+ * is MMC and its notify state is powered ON
+ */
+ if (card && mmc_card_mmc(card) &&
+ (card->poweroff_notify_state == MMC_POWERED_ON)) {
+
+ if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
+ notify_type = EXT_CSD_POWER_OFF_SHORT;
+ timeout = card->ext_csd.generic_cmd6_time;
+ card->poweroff_notify_state = MMC_POWEROFF_SHORT;
+ } else {
+ notify_type = EXT_CSD_POWER_OFF_LONG;
+ timeout = card->ext_csd.power_off_longtime;
+ card->poweroff_notify_state = MMC_POWEROFF_LONG;
+ }
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_POWER_OFF_NOTIFICATION,
+ notify_type, timeout);
+
+ if (err && err != -EBADMSG)
+ pr_err("Device failed to respond within %d poweroff "
+ "time. Forcefully powering down the device\n",
+ timeout);
+
+ /* Set the card state to no notification after the poweroff */
+ card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
+ }
+}
+
/*
* Apply power to the MMC stack. This is a two-stage process.
* First, we enable power to the card without the clock running.
void mmc_power_off(struct mmc_host *host)
{
- struct mmc_card *card;
- unsigned int notify_type;
- unsigned int timeout;
- int err;
-
mmc_host_clk_hold(host);
- card = host->card;
host->ios.clock = 0;
host->ios.vdd = 0;
- if (card && mmc_card_mmc(card) &&
- (card->poweroff_notify_state == MMC_POWERED_ON)) {
-
- if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) {
- notify_type = EXT_CSD_POWER_OFF_SHORT;
- timeout = card->ext_csd.generic_cmd6_time;
- card->poweroff_notify_state = MMC_POWEROFF_SHORT;
- } else {
- notify_type = EXT_CSD_POWER_OFF_LONG;
- timeout = card->ext_csd.power_off_longtime;
- card->poweroff_notify_state = MMC_POWEROFF_LONG;
- }
-
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_POWER_OFF_NOTIFICATION,
- notify_type, timeout);
-
- if (err && err != -EBADMSG)
- pr_err("Device failed to respond within %d poweroff "
- "time. Forcefully powering down the device\n",
- timeout);
-
- /* Set the card state to no notification after the poweroff */
- card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION;
- }
+ mmc_poweroff_notify(host);
/*
* Reset ocr mask to be the highest possible voltage supported for
mmc_bus_get(host);
- if (host->bus_ops && !host->bus_dead && host->bus_ops->awake)
+ if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep)
err = host->bus_ops->sleep(host);
mmc_bus_put(host);
* pre-claim the host.
*/
if (mmc_try_claim_host(host)) {
- if (host->bus_ops->suspend)
+ if (host->bus_ops->suspend) {
+ /*
+			 * For eMMC 4.5 devices, send the notify command
+			 * before sleep, because in the sleep state eMMC 4.5
+			 * devices respond only to the RESET and AWAKE commands.
+ */
+ mmc_poweroff_notify(host);
err = host->bus_ops->suspend(host);
+ }
+ mmc_do_release_host(host);
+
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
* We simply "remove" the card in this case.
host->pm_flags = 0;
err = 0;
}
- mmc_do_release_host(host);
} else {
err = -EBUSY;
}
* set the notification byte in the ext_csd register of device
*/
if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) &&
- (card->poweroff_notify_state == MMC_NO_POWER_NOTIFICATION)) {
+ (card->ext_csd.rev >= 6)) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_POWER_OFF_NOTIFICATION,
EXT_CSD_POWER_ON,
card->ext_csd.generic_cmd6_time);
if (err && err != -EBADMSG)
goto free_card;
- }
- if (!err)
- card->poweroff_notify_state = MMC_POWERED_ON;
+ /*
+ * The err can be -EBADMSG or 0,
+ * so check for success and update the flag
+ */
+ if (!err)
+ card->poweroff_notify_state = MMC_POWERED_ON;
+ }
/*
* Activate high speed (if supported)
"failed to config DMA channel. Falling back to PIO\n");
dma_release_channel(host->dma);
host->do_dma = 0;
+ host->dma = NULL;
}
}
host->data->sg_len,
omap_hsmmc_get_dma_dir(host, host->data));
omap_free_dma(dma_ch);
+ host->data->host_cookie = 0;
}
host->data = NULL;
}
struct mmc_data *data = mrq->data;
if (host->use_dma) {
- dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
- omap_hsmmc_get_dma_dir(host, data));
+ if (data->host_cookie)
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+ data->sg_len,
+ omap_hsmmc_get_dma_dir(host, data));
data->host_cookie = 0;
}
}
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/mmc/host.h>
+#include <linux/module.h>
#include <mach/cns3xxx.h>
#include "sdhci-pltfm.h"
static struct platform_driver sdhci_s3c_driver = {
.probe = sdhci_s3c_probe,
.remove = __devexit_p(sdhci_s3c_remove),
- .suspend = sdhci_s3c_suspend,
- .resume = sdhci_s3c_resume,
.driver = {
.owner = THIS_MODULE,
.name = "s3c-sdhci",
if (host->power) {
pm_runtime_put(&host->pd->dev);
host->power = false;
- if (p->down_pwr)
+ if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
p->down_pwr(host->pd);
}
host->state = STATE_IDLE;
/* start bus clock */
tmio_mmc_clk_start(host);
} else if (ios->power_mode != MMC_POWER_UP) {
- if (host->set_pwr)
+ if (host->set_pwr && ios->power_mode == MMC_POWER_OFF)
host->set_pwr(host->pdev, 0);
if ((pdata->flags & TMIO_MMC_HAS_COLD_CD) &&
pdata->power) {
menuconfig ARCNET
depends on NETDEVICES && (ISA || PCI || PCMCIA)
- bool "ARCnet support"
+ tristate "ARCnet support"
---help---
If you have a network card of this type, say Y and check out the
(arguably) beautiful poetry in
}
}
-static __be32 bond_glean_dev_ip(struct net_device *dev)
-{
- struct in_device *idev;
- struct in_ifaddr *ifa;
- __be32 addr = 0;
-
- if (!dev)
- return 0;
-
- rcu_read_lock();
- idev = __in_dev_get_rcu(dev);
- if (!idev)
- goto out;
-
- ifa = idev->ifa_list;
- if (!ifa)
- goto out;
-
- addr = ifa->ifa_local;
-out:
- rcu_read_unlock();
- return addr;
-}
-
static int bond_has_this_ip(struct bonding *bond, __be32 ip)
{
struct vlan_entry *vlan;
struct bonding *bond;
struct vlan_entry *vlan;
+	/* we only care about the primary address */
+	if (ifa->ifa_flags & IFA_F_SECONDARY)
+ return NOTIFY_DONE;
+
list_for_each_entry(bond, &bn->dev_list, bond_list) {
if (bond->dev == event_dev) {
switch (event) {
bond->master_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- bond->master_ip = bond_glean_dev_ip(bond->dev);
+ bond->master_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
vlan->vlan_ip = ifa->ifa_local;
return NOTIFY_OK;
case NETDEV_DOWN:
- vlan->vlan_ip =
- bond_glean_dev_ip(vlan_dev);
+ vlan->vlan_ip = 0;
return NOTIFY_OK;
default:
return NOTIFY_DONE;
*/
#include <linux/kernel.h>
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
skb->len,
DMA_TO_DEVICE);
rp->skb = NULL;
- dev_kfree_skb(skb);
+ dev_kfree_skb_irq(skb);
}
bp->tx_cons = cons;
return 0;
}
+
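+/*
+ * Drive the PHY link LED through the GPHY shadow LED selector register,
+ * translating the generic LED modes into device specific bit patterns.
+ */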
+static void bnx2x_5461x_set_link_led(struct bnx2x_phy *phy,
+ struct link_params *params, u8 mode)
+{
+ struct bnx2x *bp = params->bp;
+ u16 temp;
+
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_LED_SEL1);
+ bnx2x_cl22_read(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ &temp);
+ temp &= 0xff00;
+
+ DP(NETIF_MSG_LINK, "54618x set link led (mode=%x)\n", mode);
+ switch (mode) {
+ case LED_MODE_FRONT_PANEL_OFF:
+ case LED_MODE_OFF:
+ temp |= 0x00ee;
+ break;
+ case LED_MODE_OPER:
+ temp |= 0x0001;
+ break;
+ case LED_MODE_ON:
+ temp |= 0x00ff;
+ break;
+ default:
+ break;
+ }
+ bnx2x_cl22_write(bp, phy,
+ MDIO_REG_GPHY_SHADOW,
+ MDIO_REG_GPHY_SHADOW_WR_ENA | temp);
+ return;
+}
+
+
static void bnx2x_54618se_link_reset(struct bnx2x_phy *phy,
struct link_params *params)
{
.config_loopback = (config_loopback_t)bnx2x_54618se_config_loopback,
.format_fw_ver = (format_fw_ver_t)NULL,
.hw_reset = (hw_reset_t)NULL,
- .set_link_led = (set_link_led_t)NULL,
+ .set_link_led = (set_link_led_t)bnx2x_5461x_set_link_led,
.phy_specific_func = (phy_specific_func_t)NULL
};
/*****************************************************************/
#define MDIO_REG_INTR_MASK 0x1b
#define MDIO_REG_INTR_MASK_LINK_STATUS (0x1 << 1)
#define MDIO_REG_GPHY_SHADOW 0x1c
+#define MDIO_REG_GPHY_SHADOW_LED_SEL1 (0x0d << 10)
#define MDIO_REG_GPHY_SHADOW_LED_SEL2 (0x0e << 10)
#define MDIO_REG_GPHY_SHADOW_WR_ENA (0x1 << 15)
#define MDIO_REG_GPHY_SHADOW_AUTO_DET_MED (0x1e << 10)
if (!dm->wake_state)
irq_set_irq_wake(dm->irq_wake, 1);
- else if (dm->wake_state & !opts)
+ else if (dm->wake_state && !opts)
irq_set_irq_wake(dm->irq_wake, 0);
}
bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \
ARCH_MXC || ARCH_MXS)
+ default ARCH_MXC || ARCH_MXS if ARM
select PHYLIB
---help---
Say Y here if you want to use the built-in 10/100 Fast ethernet
struct platform_device *pdev;
int opened;
+ int dev_id;
/* Phylib and MDIO interface */
struct mii_bus *mii_bus;
/* Adjust MAC if using macaddr */
if (iap == macaddr)
- ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
+ ndev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->dev_id;
}
/* ------------------------------------------------------------------------- */
char mdio_bus_id[MII_BUS_ID_SIZE];
char phy_name[MII_BUS_ID_SIZE + 3];
int phy_id;
- int dev_id = fep->pdev->id;
+ int dev_id = fep->dev_id;
fep->phy_dev = NULL;
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id > 0) {
+ if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
fep->mii_bus = fec0_mii_bus;
return 0;
fep->mii_bus->read = fec_enet_mdio_read;
fep->mii_bus->write = fec_enet_mdio_write;
fep->mii_bus->reset = fec_enet_mdio_reset;
- snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id + 1);
+ snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%x", fep->dev_id + 1);
fep->mii_bus->priv = fep;
fep->mii_bus->parent = &pdev->dev;
int i, irq, ret = 0;
struct resource *r;
const struct of_device_id *of_id;
+ static int dev_id;
of_id = of_match_device(fec_dt_ids, &pdev->dev);
if (of_id)
fep->hwp = ioremap(r->start, resource_size(r));
fep->pdev = pdev;
+ fep->dev_id = dev_id++;
if (!fep->hwp) {
ret = -ENOMEM;
}
EXPORT_SYMBOL_GPL(fsl_pq_mdio_bus_name);
-/* Scan the bus in reverse, looking for an empty spot */
-static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
-{
- int i;
-
- for (i = PHY_MAX_ADDR; i > 0; i--) {
- u32 phy_id;
-
- if (get_phy_id(new_bus, i, &phy_id))
- return -1;
-
- if (phy_id == 0xffffffff)
- break;
- }
-
- return i;
-}
-
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
{
+#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
struct gfar __iomem *enet_regs;
/*
} else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi")) {
return of_iomap(np, 1);
- } else
- return NULL;
-}
+ }
#endif
+ return NULL;
+}
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
static int get_ucc_id_for_range(u64 start, u64 end, u32 *ucc_id)
{
+#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
struct device_node *np = NULL;
int err = 0;
return err;
else
return -EINVAL;
-}
+#else
+ return -ENODEV;
#endif
-
+}
static int fsl_pq_mdio_probe(struct platform_device *ofdev)
{
of_device_is_compatible(np, "fsl,etsec2-mdio") ||
of_device_is_compatible(np, "fsl,etsec2-tbi") ||
of_device_is_compatible(np, "gianfar")) {
-#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
tbipa = get_gfar_tbipa(regs, np);
if (!tbipa) {
err = -EINVAL;
goto err_free_irqs;
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else if (of_device_is_compatible(np, "fsl,ucc-mdio") ||
of_device_is_compatible(np, "ucc_geth_phy")) {
-#if defined(CONFIG_UCC_GETH) || defined(CONFIG_UCC_GETH_MODULE)
u32 id;
static u32 mii_mng_master;
mii_mng_master = id;
ucc_set_qe_mux_mii_mng(id - 1);
}
-#else
- err = -ENODEV;
- goto err_free_irqs;
-#endif
} else {
err = -ENODEV;
goto err_free_irqs;
}
if (tbiaddr == -1) {
- out_be32(tbipa, 0);
-
- tbiaddr = fsl_pq_mdio_find_free(new_bus);
- }
-
- /*
- * We define TBIPA at 0 to be illegal, opting to fail for boards that
- * have PHYs at 1-31, rather than change tbipa and rescan.
- */
- if (tbiaddr == 0) {
err = -EBUSY;
goto err_free_irqs;
#ifdef EHEA_SMALL_QUEUES
#define EHEA_MAX_CQE_COUNT 1023
#define EHEA_DEF_ENTRIES_SQ 1023
-#define EHEA_DEF_ENTRIES_RQ1 4095
+#define EHEA_DEF_ENTRIES_RQ1 1023
#define EHEA_DEF_ENTRIES_RQ2 1023
-#define EHEA_DEF_ENTRIES_RQ3 1023
+#define EHEA_DEF_ENTRIES_RQ3 511
#else
#define EHEA_MAX_CQE_COUNT 4080
#define EHEA_DEF_ENTRIES_SQ 4080
out_herr:
free_page((unsigned long)cb2);
resched:
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
}
mutex_unlock(&port->port_lock);
- schedule_delayed_work(&port->stats_work, msecs_to_jiffies(1000));
+ schedule_delayed_work(&port->stats_work,
+ round_jiffies_relative(msecs_to_jiffies(1000)));
return ret;
}
/* FIXME: do we need this? */
memset(local_list, 0, sizeof(local_list));
- memset(remote_list, 0, sizeof(VETH_MAX_FRAMES_PER_MSG));
+ memset(remote_list, 0, sizeof(remote_list));
/* a 0 address marks the end of the valid entries */
if (senddata->addr[startchunk] == 0)
jme_new_phy_off(jme);
}
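+/*
+ * Access the JMicron PHY specific (extended) registers via the indirect
+ * address/data register pair at MII registers 0x1E/0x1F.
+ */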
+static int
+jme_phy_specreg_read(struct jme_adapter *jme, u32 specreg)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_READ | specreg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+ return jme_mdio_read(jme->dev, jme->mii_if.phy_id,
+ JM_PHY_SPEC_DATA_REG);
+}
+
+static void
+jme_phy_specreg_write(struct jme_adapter *jme, u32 ext_reg, u32 phy_data)
+{
+ u32 phy_addr;
+
+ phy_addr = JM_PHY_SPEC_REG_WRITE | ext_reg;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_DATA_REG,
+ phy_data);
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, JM_PHY_SPEC_ADDR_REG,
+ phy_addr);
+}
+
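+/*
+ * Recalibrate the gigabit PHY: enter test mode 1, trigger and latch the
+ * calibration through extended register 2, then return to normal mode.
+ */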
+static int
+jme_phy_calibration(struct jme_adapter *jme)
+{
+ u32 ctrl1000, phy_data;
+
+ jme_phy_off(jme);
+ jme_phy_on(jme);
+	/* Enable PHY test mode 1 */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ ctrl1000 |= PHY_GAD_TEST_MODE_1;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~JM_PHY_EXT_COMM_2_CALI_MODE_0;
+ phy_data |= JM_PHY_EXT_COMM_2_CALI_LATCH |
+ JM_PHY_EXT_COMM_2_CALI_ENABLE;
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+ msleep(20);
+ phy_data = jme_phy_specreg_read(jme, JM_PHY_EXT_COMM_2_REG);
+ phy_data &= ~(JM_PHY_EXT_COMM_2_CALI_ENABLE |
+ JM_PHY_EXT_COMM_2_CALI_MODE_0 |
+ JM_PHY_EXT_COMM_2_CALI_LATCH);
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_2_REG, phy_data);
+
+ /* Disable PHY test mode */
+ ctrl1000 = jme_mdio_read(jme->dev, jme->mii_if.phy_id, MII_CTRL1000);
+ ctrl1000 &= ~PHY_GAD_TEST_MODE_MSK;
+ jme_mdio_write(jme->dev, jme->mii_if.phy_id, MII_CTRL1000, ctrl1000);
+ return 0;
+}
+
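+/*
+ * Program chip revision specific values into the PHY extended
+ * communication registers, unless the shared NIC control byte indicates
+ * the adjustment is already enabled.
+ */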
+static int
+jme_phy_setEA(struct jme_adapter *jme)
+{
+ u32 phy_comm0 = 0, phy_comm1 = 0;
+ u8 nic_ctrl;
+
+ pci_read_config_byte(jme->pdev, PCI_PRIV_SHARE_NICCTRL, &nic_ctrl);
+ if ((nic_ctrl & 0x3) == JME_FLAG_PHYEA_ENABLE)
+ return 0;
+
+ switch (jme->pdev->device) {
+ case PCI_DEVICE_ID_JMICRON_JMC250:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ break;
+ case PCI_DEVICE_ID_JMICRON_JMC260:
+ if (((jme->chip_main_rev == 5) &&
+ ((jme->chip_sub_rev == 0) || (jme->chip_sub_rev == 1) ||
+ (jme->chip_sub_rev == 3))) ||
+ (jme->chip_main_rev >= 6)) {
+ phy_comm0 = 0x008A;
+ phy_comm1 = 0x4109;
+ }
+ if ((jme->chip_main_rev == 3) &&
+ ((jme->chip_sub_rev == 1) || (jme->chip_sub_rev == 2)))
+ phy_comm0 = 0xE088;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 0))
+ phy_comm0 = 0x608A;
+ if ((jme->chip_main_rev == 2) && (jme->chip_sub_rev == 2))
+ phy_comm0 = 0x408A;
+ break;
+ default:
+ return -ENODEV;
+ }
+ if (phy_comm0)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_0_REG, phy_comm0);
+ if (phy_comm1)
+ jme_phy_specreg_write(jme, JM_PHY_EXT_COMM_1_REG, phy_comm1);
+
+ return 0;
+}
+
static int
jme_open(struct net_device *netdev)
{
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_reset_link(jme);
return 0;
jme_set_settings(netdev, &jme->old_ecmd);
else
jme_reset_phy_processor(jme);
-
+ jme_phy_calibration(jme);
+ jme_phy_setEA(jme);
jme_start_irq(jme);
netif_device_attach(netdev);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, jme_pci_tbl);
-
RXMCS_CHECKSUM,
};
+/* External PHY common register 2 */
+
+#define PHY_GAD_TEST_MODE_1 0x00002000
+#define PHY_GAD_TEST_MODE_MSK 0x0000E000
+#define JM_PHY_SPEC_REG_READ 0x00004000
+#define JM_PHY_SPEC_REG_WRITE 0x00008000
+#define PHY_CALIBRATION_DELAY 20
+#define JM_PHY_SPEC_ADDR_REG 0x1E
+#define JM_PHY_SPEC_DATA_REG 0x1F
+
+#define JM_PHY_EXT_COMM_0_REG 0x30
+#define JM_PHY_EXT_COMM_1_REG 0x31
+#define JM_PHY_EXT_COMM_2_REG 0x32
+#define JM_PHY_EXT_COMM_2_CALI_ENABLE 0x01
+#define JM_PHY_EXT_COMM_2_CALI_MODE_0 0x02
+#define JM_PHY_EXT_COMM_2_CALI_LATCH 0x10
+#define PCI_PRIV_SHARE_NICCTRL 0xF5
+#define JME_FLAG_PHYEA_ENABLE 0x2
+
/*
* Wakeup Frame setup interface registers
*/
# Makefile for the A Semi network device drivers.
#
-obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o pasemi_mac_ethtool.o
+obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
+pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
#define TX_DESC_PER_IOCB 8
-/* The maximum number of frags we handle is based
- * on PAGE_SIZE...
- */
-#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
+
+#if ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2) > 0
#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
#else /* all other page sizes */
#define TX_DESC_PER_OAL 0
struct ob_mac_iocb_req *queue_entry;
u32 index;
struct oal oal;
- struct map_list map[MAX_SKB_FRAGS + 1];
+ struct map_list map[MAX_SKB_FRAGS + 2];
int map_cnt;
struct tx_ring_desc *next;
};
return value;
}
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
{
- RTL_W16(IntrMask, 0x0000);
+ void __iomem *ioaddr = tp->mmio_addr;
- RTL_W16(IntrStatus, 0xffff);
+ RTL_W16(IntrMask, 0x0000);
+ RTL_W16(IntrStatus, tp->intr_event);
+ RTL_R8(ChipCmd);
}
static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
break;
udelay(100);
}
-
- rtl8169_init_ring_indexes(tp);
}
static int __devinit
void __iomem *ioaddr = tp->mmio_addr;
/* Disable interrupts */
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
rtl_rx_close(tp);
RTL_W16(IntrMitigate, 0x5151);
/* Work around for RxFIFO overflow. */
- if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
- tp->mac_version == RTL_GIGA_MAC_VER_22) {
+ if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
tp->intr_event |= RxFIFOOver | PCSTimeout;
tp->intr_event &= ~RxOverflow;
}
void __iomem *ioaddr = tp->mmio_addr;
struct pci_dev *pdev = tp->pci_dev;
+ if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+ tp->intr_event &= ~RxFIFOOver;
+ tp->napi_event &= ~RxFIFOOver;
+ }
+
if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
tp->mac_version == RTL_GIGA_MAC_VER_16) {
int cap = pci_pcie_cap(pdev);
/* Wait for any pending NAPI task to complete */
napi_disable(&tp->napi);
- rtl8169_irq_mask_and_ack(ioaddr);
+ rtl8169_irq_mask_and_ack(tp);
tp->intr_mask = 0xffff;
RTL_W16(IntrMask, tp->intr_event);
if (!netif_running(dev))
goto out_unlock;
+ rtl8169_hw_reset(tp);
+
rtl8169_wait_for_quiescence(dev);
for (i = 0; i < NUM_RX_DESC; i++)
rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
rtl8169_tx_clear(tp);
+ rtl8169_init_ring_indexes(tp);
- rtl8169_hw_reset(tp);
rtl_hw_start(dev);
netif_wake_queue(dev);
rtl8169_check_link_status(dev, tp, tp->mmio_addr);
static void rtl8169_tx_timeout(struct net_device *dev)
{
- struct rtl8169_private *tp = netdev_priv(dev);
-
- rtl8169_hw_reset(tp);
-
- /* Let's wait a bit while any (async) irq lands on */
rtl8169_schedule_work(dev, rtl8169_reset_task);
}
*/
status = RTL_R16(IntrStatus);
while (status && status != 0xffff) {
+ status &= tp->intr_event;
+ if (!status)
+ break;
+
handled = 1;
/* Handle all of the error cases first. These will reset
switch (tp->mac_version) {
/* Work around for rx fifo overflow */
case RTL_GIGA_MAC_VER_11:
- case RTL_GIGA_MAC_VER_22:
- case RTL_GIGA_MAC_VER_26:
netif_stop_queue(dev);
rtl8169_tx_timeout(dev);
goto done;
- /* Testers needed. */
- case RTL_GIGA_MAC_VER_17:
- case RTL_GIGA_MAC_VER_19:
- case RTL_GIGA_MAC_VER_20:
- case RTL_GIGA_MAC_VER_21:
- case RTL_GIGA_MAC_VER_23:
- case RTL_GIGA_MAC_VER_24:
- case RTL_GIGA_MAC_VER_27:
- case RTL_GIGA_MAC_VER_28:
- case RTL_GIGA_MAC_VER_31:
- /* Experimental science. Pktgen proof. */
- case RTL_GIGA_MAC_VER_12:
- case RTL_GIGA_MAC_VER_25:
- if (status == RxFIFOOver)
- goto done;
- break;
default:
break;
}
unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
- /* Do not manage MMC IRQ (FIXME) */
+	/* Mask MMC irq; counters are managed in SW and registers
+	 * are cleared on each READ when required. */
dwmac_mmc_intr_all_mask(priv->ioaddr);
- dwmac_mmc_ctrl(priv->ioaddr, mode);
- memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+
+ if (priv->dma_cap.rmon) {
+ dwmac_mmc_ctrl(priv->ioaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+		pr_info(" No MAC Management Counters available\n");
}
static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
priv->xstats.threshold = tc;
- if (priv->dma_cap.rmon)
- stmmac_mmc_setup(priv);
+ stmmac_mmc_setup(priv);
/* Start the ball rolling... */
DBG(probe, DEBUG, "%s: DMA RX/TX processes started...\n", dev->name);
goto done;
/* Re-enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
/* HACK: Avoid the "rotting packet" problem (see above). */
if (qup->__packet_receive_read !=
info->napi_enabled = true;
/* Enable the ingress interrupt. */
- enable_percpu_irq(priv->intr_id);
+ enable_percpu_irq(priv->intr_id, 0);
}
for (i = 0; i < sh->nr_frags; i++) {
skb_frag_t *f = &sh->frags[i];
- unsigned long pfn = page_to_pfn(f->page);
+ unsigned long pfn = page_to_pfn(skb_frag_page(f));
/* FIXME: Compute "hash_for_home" properly. */
/* ISSUE: The hypervisor checks CHIP_HAS_REV1_DMA_PACKETS(). */
/* FIXME: Hmmm. */
if (!hash_default) {
void *va = pfn_to_kaddr(pfn) + f->page_offset;
- BUG_ON(PageHighMem(f->page));
+ BUG_ON(PageHighMem(skb_frag_page(f)));
finv_buffer_remote(va, f->size, 0);
}
#
menuconfig PHYLIB
- bool "PHY Device support and infrastructure"
+ tristate "PHY Device support and infrastructure"
depends on !S390
depends on NETDEVICES
help
lock_sock(sk);
opt->src_addr = sp->sa_addr.pptp;
- if (add_chan(po)) {
- release_sock(sk);
+ if (add_chan(po))
error = -EBUSY;
- }
release_sock(sk);
return error;
ath_start_ani(common);
}
- if (ath9k_hw_ops(ah)->antdiv_comb_conf_get && sc->ant_rx != 3) {
+ if ((ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) && sc->ant_rx != 3) {
struct ath_hw_antcomb_conf div_ant_conf;
u8 lna_conf;
.chain_noise_scale = 1000,
.wd_timeout = IWL_DEF_WD_TIMEOUT,
.max_event_log_size = 128,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl1000_ht_params = {
.ht_greenfield_support = true,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.no_idle_support = true,
+ .wd_disable = true,
};
static struct iwl_ht_params iwl5000_ht_params = {
.ht_greenfield_support = true,
return 0;
}
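+/*
+ * Derive the HT40 extension channel offset and the 40 MHz flag for the
+ * context from the current mac80211 channel configuration.
+ */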
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx)
+{
+ if (conf_is_ht40_minus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_BELOW;
+ ctx->ht.is_40mhz = true;
+ } else if (conf_is_ht40_plus(conf)) {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
+ ctx->ht.is_40mhz = true;
+ } else {
+ ctx->ht.extension_chan_offset =
+ IEEE80211_HT_PARAM_CHA_SEC_NONE;
+ ctx->ht.is_40mhz = false;
+ }
+}
+
int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
{
struct iwl_priv *priv = hw->priv;
ctx->ht.enabled = conf_is_ht(conf);
if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
+ /* if HT40 is used, it should not change
+ * after associated except channel switch */
+ if (iwl_is_associated_ctx(ctx) &&
+ !ctx->ht.is_40mhz)
+ iwlagn_config_ht40(conf, ctx);
} else
ctx->ht.is_40mhz = false;
switch (keyconf->cipher) {
case WLAN_CIPHER_SUITE_TKIP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
-
if (sta)
addr = sta->addr;
else /* station mode case only */
seq.tkip.iv32, p1k, CMD_SYNC);
break;
case WLAN_CIPHER_SUITE_CCMP:
- keyconf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
- /* fall through */
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
ret = iwlagn_send_sta_key(priv, keyconf, sta_id,
return -EOPNOTSUPP;
}
+ switch (key->cipher) {
+ case WLAN_CIPHER_SUITE_TKIP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
+ /* fall through */
+ case WLAN_CIPHER_SUITE_CCMP:
+ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
+ break;
+ default:
+ break;
+ }
+
/*
* We could program these keys into the hardware as well, but we
* don't expect much multicast traffic in IBSS and having keys
/* Configure HT40 channels */
ctx->ht.enabled = conf_is_ht(conf);
- if (ctx->ht.enabled) {
- if (conf_is_ht40_minus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_BELOW;
- ctx->ht.is_40mhz = true;
- } else if (conf_is_ht40_plus(conf)) {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
- ctx->ht.is_40mhz = true;
- } else {
- ctx->ht.extension_chan_offset =
- IEEE80211_HT_PARAM_CHA_SEC_NONE;
- ctx->ht.is_40mhz = false;
- }
- } else
+ if (ctx->ht.enabled)
+ iwlagn_config_ht40(conf, ctx);
+ else
ctx->ht.is_40mhz = false;
if ((le16_to_cpu(ctx->staging.channel) != ch))
module_param_named(ack_check, iwlagn_mod_params.ack_check, bool, S_IRUGO);
MODULE_PARM_DESC(ack_check, "Check ack health (default: 0 [disabled])");
-module_param_named(wd_disable, iwlagn_mod_params.wd_disable, bool, S_IRUGO);
+module_param_named(wd_disable, iwlagn_mod_params.wd_disable, int, S_IRUGO);
MODULE_PARM_DESC(wd_disable,
- "Disable stuck queue watchdog timer (default: 0 [enabled])");
+		 "Disable stuck queue watchdog timer: 0=system default, "
+ "1=disable, 2=enable (default: 0)");
/*
* set bt_coex_active to true, uCode will do kill/defer
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changes);
+void iwlagn_config_ht40(struct ieee80211_conf *conf,
+ struct iwl_rxon_context *ctx);
/* uCode */
int iwlagn_rx_calib_result(struct iwl_priv *priv,
{
unsigned int timeout = priv->cfg->base_params->wd_timeout;
- if (timeout && !iwlagn_mod_params.wd_disable)
- mod_timer(&priv->watchdog,
- jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
- else
- del_timer(&priv->watchdog);
+ if (!iwlagn_mod_params.wd_disable) {
+ /* use system default */
+ if (timeout && !priv->cfg->base_params->wd_disable)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ } else {
+		/* module parameter overrides the default configuration */
+ if (timeout && iwlagn_mod_params.wd_disable == 2)
+ mod_timer(&priv->watchdog,
+ jiffies +
+ msecs_to_jiffies(IWL_WD_TICK(timeout)));
+ else
+ del_timer(&priv->watchdog);
+ }
}
/**
* @shadow_reg_enable: HW shadhow register bit
* @no_idle_support: do not support idle mode
* @hd_v2: v2 of enhanced sensitivity value, used for 2000 series and up
+ * @wd_disable: disable watchdog timer
*/
struct iwl_base_params {
int eeprom_size;
const bool shadow_reg_enable;
const bool no_idle_support;
const bool hd_v2;
+ const bool wd_disable;
};
/*
* @advanced_bt_coexist: support advanced bt coexist
* @restart_fw: restart firmware, default = 1
* @plcp_check: enable plcp health check, default = true
* @ack_check: disable ack health check, default = false
- * @wd_disable: enable stuck queue check, default = false
+ * @wd_disable: stuck queue watchdog control: 0=system default, 1=disable, 2=enable (default = 0)
* @bt_coex_active: enable bt coex, default = true
* @led_mode: system default, default = 0
* @no_sleep_autoadjust: disable autoadjust, default = true
int restart_fw;
bool plcp_check;
bool ack_check;
- bool wd_disable;
+ int wd_disable;
bool bt_coex_active;
int led_mode;
bool no_sleep_autoadjust;
WARN_ON(priv->fw_state != FW_STATE_READY);
- cancel_work_sync(&priv->work);
-
p54spi_power_off(priv);
spin_lock_irqsave(&priv->tx_lock, flags);
INIT_LIST_HEAD(&priv->tx_pending);
priv->fw_state = FW_STATE_OFF;
mutex_unlock(&priv->mutex);
+
+ cancel_work_sync(&priv->work);
}
static int __devinit p54spi_probe(struct spi_device *spi)
init_completion(&priv->fw_comp);
INIT_LIST_HEAD(&priv->tx_pending);
mutex_init(&priv->mutex);
+ spin_lock_init(&priv->tx_lock);
SET_IEEE80211_DEV(hw, &spi->dev);
priv->common.open = p54spi_op_start;
priv->common.stop = p54spi_op_stop;
dwrq->flags = 0;
dwrq->length = 0;
}
- essid->octets[essid->length] = '\0';
+ essid->octets[dwrq->length] = '\0';
memcpy(extra, essid->octets, dwrq->length);
kfree(essid);
/* Apparently the data is read from end to start */
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA3, ®);
/* The returned value is in CPU order, but eeprom is le */
- rt2x00dev->eeprom[i] = cpu_to_le32(reg);
+ *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA2, ®);
*(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg);
rt2800_register_read_lock(rt2x00dev, EFUSE_DATA1, ®);
if (mac->link_state != MAC80211_LINKED)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
/* Idle for a while if we connect to AP a while ago. */
if (mac->cnt_after_linked >= 2) {
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
}
/*Leave the leisure power save mode.*/
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
+ unsigned long flags;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irqsave(&rtlpriv->locks.lps_lock, flags);
if (ppsc->fwctrl_lps) {
if (ppsc->dot11_psmode != EACTIVE) {
rtl_lps_set_psmode(hw, EACTIVE);
}
}
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flags);
}
/* For sw LPS*/
RT_CLEAR_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM);
}
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
rtl_ps_set_rf_state(hw, ERFON, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
}
void rtl_swlps_rfon_wq_callback(void *data)
if (rtlpriv->link_info.busytraffic)
return;
- spin_lock(&rtlpriv->locks.lps_lock);
+ spin_lock_irq(&rtlpriv->locks.lps_lock);
rtl_ps_set_rf_state(hw, ERFSLEEP, RF_CHANGE_BY_PS);
- spin_unlock(&rtlpriv->locks.lps_lock);
+ spin_unlock_irq(&rtlpriv->locks.lps_lock);
if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM &&
!RT_IN_PS_LEVEL(ppsc, RT_PS_LEVEL_ASPM)) {
}
case ERFSLEEP:{
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
ring = &pcipriv->dev.tx_ring[queue_id];
break;
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
}
case ERFSLEEP:
if (ppsc->rfpwr_state == ERFOFF)
- break;
+ return false;
for (queue_id = 0, i = 0;
queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
pending_idx = *((u16 *)skb->data);
xen_netbk_idx_release(netbk, pending_idx);
for (j = start; j < i; j++) {
- pending_idx = frag_get_pending_idx(&shinfo->frags[i]);
+ pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
xen_netbk_idx_release(netbk, pending_idx);
}
"netback/%u", group);
if (IS_ERR(netbk->task)) {
- printk(KERN_ALERT "kthread_run() fails at netback\n");
+ printk(KERN_ALERT "kthread_create() fails at netback\n");
del_timer(&netbk->net_timer);
rc = PTR_ERR(netbk->task);
goto failed_init;
#include <linux/string.h>
#include <linux/slab.h>
-/* For archs that don't support NO_IRQ (such as x86), provide a dummy value */
-#ifndef NO_IRQ
-#define NO_IRQ 0
-#endif
-
/**
* irq_of_parse_and_map - Parse and map an interrupt into linux virq space
* @device: Device node of the device whose interrupt is to be mapped
struct of_irq oirq;
if (of_irq_map_one(dev, index, &oirq))
- return NO_IRQ;
+ return 0;
return irq_create_of_mapping(oirq.controller, oirq.specifier,
oirq.size);
/* Only dereference the resource if both the
* resource and the irq are valid. */
- if (r && irq != NO_IRQ) {
+ if (r && irq) {
r->start = r->end = irq;
r->flags = IORESOURCE_IRQ;
r->name = dev->full_name;
{
int nr = 0;
- while (of_irq_to_resource(dev, nr, NULL) != NO_IRQ)
+ while (of_irq_to_resource(dev, nr, NULL))
nr++;
return nr;
int i;
for (i = 0; i < nr_irqs; i++, res++)
- if (of_irq_to_resource(dev, i, res) == NO_IRQ)
+ if (!of_irq_to_resource(dev, i, res))
break;
return i;
return err;
}
+static int timer_mode;
+
static int __init oprofile_init(void)
{
int err;
+ /* always init architecture to setup backtrace support */
err = oprofile_arch_init(&oprofile_ops);
- if (err < 0 || timer) {
- printk(KERN_INFO "oprofile: using timer interrupt.\n");
+
+ timer_mode = err || timer; /* fall back to timer mode on errors */
+ if (timer_mode) {
+ if (!err)
+ oprofile_arch_exit();
err = oprofile_timer_init(&oprofile_ops);
if (err)
return err;
}
- return oprofilefs_register();
+
+ err = oprofilefs_register();
+ if (!err)
+ return 0;
+
+ /* failed */
+ if (timer_mode)
+ oprofile_timer_exit();
+ else
+ oprofile_arch_exit();
+
+ return err;
}
static void __exit oprofile_exit(void)
{
- oprofile_timer_exit();
oprofilefs_unregister();
- oprofile_arch_exit();
+ if (timer_mode)
+ oprofile_timer_exit();
+ else
+ oprofile_arch_exit();
}
ops->start = oprofile_hrtimer_start;
ops->stop = oprofile_hrtimer_stop;
ops->cpu_type = "timer";
+ printk(KERN_INFO "oprofile: using timer interrupt.\n");
return 0;
}
int illumination_supported:1;
int video_supported:1;
int fan_supported:1;
+ int system_event_supported:1;
struct mutex mutex;
};
u32 hci_result;
u32 value;
- if (!dev->key_event_valid) {
+ if (!dev->key_event_valid && dev->system_event_supported) {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
if (hci_result == HCI_SUCCESS) {
dev->key_event_valid = 1;
/* enable event fifo */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
+ if (hci_result == HCI_SUCCESS)
+ dev->system_event_supported = 1;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
{
struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
u32 hci_result, value;
+ int retries = 3;
- if (event != 0x80)
+ if (!dev->system_event_supported || event != 0x80)
return;
+
do {
hci_read1(dev, HCI_SYSTEM_EVENT, &value, &hci_result);
- if (hci_result == HCI_SUCCESS) {
+ switch (hci_result) {
+ case HCI_SUCCESS:
if (value == 0x100)
continue;
/* act on key press; ignore key release */
pr_info("Unknown key %x\n",
value);
}
- } else if (hci_result == HCI_NOT_SUPPORTED) {
+ break;
+ case HCI_NOT_SUPPORTED:
/* This is a workaround for an unresolved issue on
* some machines where system events sporadically
* become disabled. */
hci_write1(dev, HCI_SYSTEM_EVENT, 1, &hci_result);
pr_notice("Re-enabled hotkeys\n");
+ /* fall through */
+ default:
+ retries--;
+ break;
}
- } while (hci_result != HCI_EMPTY);
+ } while (retries && hci_result != HCI_EMPTY);
}
#define PMIC_BATT_CHR_SBATDET_MASK (1 << 5)
#define PMIC_BATT_CHR_SDCLMT_MASK (1 << 6)
#define PMIC_BATT_CHR_SUSBOVP_MASK (1 << 7)
-#define PMIC_BATT_CHR_EXCPT_MASK 0xC6
+#define PMIC_BATT_CHR_EXCPT_MASK 0x86
+
#define PMIC_BATT_ADC_ACCCHRG_MASK (1 << 31)
#define PMIC_BATT_ADC_ACCCHRGVAL_MASK 0x7FFFFFFF
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
pmic_battery_log_event(BATT_EVENT_BATOVP_EXCPT);
batt_exception = 1;
- } else if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
- pbi->batt_health = POWER_SUPPLY_HEALTH_OVERVOLTAGE;
- pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
- batt_exception = 1;
} else if (r8 & PMIC_BATT_CHR_STEMP_MASK) {
pbi->batt_health = POWER_SUPPLY_HEALTH_OVERHEAT;
pbi->batt_status = POWER_SUPPLY_STATUS_NOT_CHARGING;
batt_exception = 1;
} else {
pbi->batt_health = POWER_SUPPLY_HEALTH_GOOD;
+ if (r8 & PMIC_BATT_CHR_SDCLMT_MASK) {
+ /* PMIC will change charging current automatically */
+ pmic_battery_log_event(BATT_EVENT_DCLMT_EXCPT);
+ }
}
}
static int ptp_clock_getres(struct posix_clock *pc, struct timespec *tp)
{
- return 1; /* always round timer functions to one nanosecond */
+ tp->tv_sec = 0;
+ tp->tv_nsec = 1;
+ return 0;
}
static int ptp_clock_settime(struct posix_clock *pc, const struct timespec *tp)
INIT_WORK(&priv->idb_work, tsi721_db_dpc);
/* Allocate buffer for inbound doorbells queue */
- priv->idb_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
&priv->idb_dma, GFP_KERNEL);
if (!priv->idb_base)
return -ENOMEM;
- memset(priv->idb_base, 0, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE);
-
dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
priv->idb_base, (unsigned long long)priv->idb_dma);
*/
/* Allocate space for DMA descriptors */
- bd_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
bd_num * sizeof(struct tsi721_dma_desc),
&bd_phys, GFP_KERNEL);
if (!bd_ptr)
priv->bdma[chnum].bd_phys = bd_phys;
priv->bdma[chnum].bd_base = bd_ptr;
- memset(bd_ptr, 0, bd_num * sizeof(struct tsi721_dma_desc));
-
dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
bd_ptr, (unsigned long long)bd_phys);
sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
bd_num : TSI721_DMA_MINSTSSZ;
sts_size = roundup_pow_of_two(sts_size);
- sts_ptr = dma_alloc_coherent(&priv->pdev->dev,
+ sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
sts_size * sizeof(struct tsi721_dma_sts),
&sts_phys, GFP_KERNEL);
if (!sts_ptr) {
priv->bdma[chnum].sts_base = sts_ptr;
priv->bdma[chnum].sts_size = sts_size;
- memset(sts_ptr, 0, sts_size);
-
dev_dbg(&priv->pdev->dev,
"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
sts_ptr, (unsigned long long)sts_phys, sts_size);
/* Outbound message descriptor status FIFO allocation */
priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
- priv->omsg_ring[mbox].sts_base = dma_alloc_coherent(&priv->pdev->dev,
+ priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
priv->omsg_ring[mbox].sts_size *
sizeof(struct tsi721_dma_sts),
&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
goto out_desc;
}
- memset(priv->omsg_ring[mbox].sts_base, 0,
- entries * sizeof(struct tsi721_dma_sts));
-
/*
* Configure Outbound Messaging Engine
*/
INIT_LIST_HEAD(&mport->dbells);
rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
- rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 0);
- rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 0);
+ rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
+ rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
strcpy(mport->name, "Tsi721 mport");
/* Hook up interrupt handler */
const struct pci_device_id *id)
{
struct tsi721_device *priv;
- int i;
+ int i, cap;
int err;
u32 regval;
dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
}
- /* Clear "no snoop" and "relaxed ordering" bits. */
- pci_read_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, ®val);
- regval &= ~(PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN);
- pci_write_config_dword(pdev, 0x40 + PCI_EXP_DEVCTL, regval);
+ cap = pci_pcie_cap(pdev);
+ BUG_ON(cap == 0);
+
+ /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, ®val);
+ regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
+ PCI_EXP_DEVCTL_NOSNOOP_EN);
+ regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT;
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
+
+ /* Adjust PCIe completion timeout. */
+ pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, ®val);
+ regval &= ~(0x0f);
+ pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
/*
* FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
#define TSI721_MSIXPBA_OFFSET 0x2a000
#define TSI721_PCIECFG_EPCTL 0x400
+#define MAX_READ_REQUEST_SZ_SHIFT 12
+
/*
* Event Management Registers
*/
*/
delta = timespec_sub(old_system, old_rtc);
delta_delta = timespec_sub(delta, old_delta);
- if (abs(delta_delta.tv_sec) >= 2) {
+ if (delta_delta.tv_sec < -2 || delta_delta.tv_sec >= 2) {
/*
* if delta_delta is too large, assume time correction
		 * has occurred and set old_delta to the current delta.
rtc_tm_to_time(&tm, &new_rtc.tv_sec);
new_rtc.tv_nsec = 0;
- if (new_rtc.tv_sec <= old_rtc.tv_sec) {
- if (new_rtc.tv_sec < old_rtc.tv_sec)
- pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
+ if (new_rtc.tv_sec < old_rtc.tv_sec) {
+ pr_debug("%s: time travel!\n", dev_name(&rtc->dev));
return 0;
}
sleep_time = timespec_sub(sleep_time,
timespec_sub(new_system, old_system));
- timekeeping_inject_sleeptime(&sleep_time);
+ if (sleep_time.tv_sec >= 0)
+ timekeeping_inject_sleeptime(&sleep_time);
return 0;
}
}
EXPORT_SYMBOL_GPL(rtc_read_alarm);
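+/*
+ * Lowest-level alarm helper: hand the alarm to the driver's set_alarm
+ * callback after checking that the ops and the callback exist.
+ */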
+static int ___rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
+{
+ int err;
+
+ if (!rtc->ops)
+ err = -ENODEV;
+ else if (!rtc->ops->set_alarm)
+ err = -EINVAL;
+ else
+ err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
+
+ return err;
+}
+
static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
{
struct rtc_time tm;
* over right here, before we set the alarm.
*/
- if (!rtc->ops)
- err = -ENODEV;
- else if (!rtc->ops->set_alarm)
- err = -EINVAL;
- else
- err = rtc->ops->set_alarm(rtc->dev.parent, alarm);
-
- return err;
+ return ___rtc_set_alarm(rtc, alarm);
}
int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return 0;
}
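+/*
+ * Disable the hardware alarm by programming a disabled alarm five
+ * minutes ahead of the current RTC time.
+ */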
+static void rtc_alarm_disable(struct rtc_device *rtc)
+{
+ struct rtc_wkalrm alarm;
+ struct rtc_time tm;
+
+ __rtc_read_time(rtc, &tm);
+
+ alarm.time = rtc_ktime_to_tm(ktime_add(rtc_tm_to_ktime(tm),
+ ktime_set(300, 0)));
+ alarm.enabled = 0;
+
+ ___rtc_set_alarm(rtc, &alarm);
+}
+
/**
* rtc_timer_remove - Removes a rtc_timer from the rtc_device timerqueue
* @rtc rtc device
struct rtc_wkalrm alarm;
int err;
next = timerqueue_getnext(&rtc->timerqueue);
- if (!next)
+ if (!next) {
+ rtc_alarm_disable(rtc);
return;
+ }
alarm.time = rtc_ktime_to_tm(next->expires);
alarm.enabled = 1;
err = __rtc_set_alarm(rtc, &alarm);
err = __rtc_set_alarm(rtc, &alarm);
if (err == -ETIME)
goto again;
- }
+ } else
+ rtc_alarm_disable(rtc);
mutex_unlock(&rtc->ops_lock);
}
void __iomem *base = s3c_rtc_base;
int year = tm->tm_year - 100;
- clk_enable(rtc_clk);
pr_debug("set time %04d.%02d.%02d %02d:%02d:%02d\n",
1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec);
return -EINVAL;
}
+ clk_enable(rtc_clk);
writeb(bin2bcd(tm->tm_sec), base + S3C2410_RTCSEC);
writeb(bin2bcd(tm->tm_min), base + S3C2410_RTCMIN);
writeb(bin2bcd(tm->tm_hour), base + S3C2410_RTCHOUR);
int chsc_chp_vary(struct chp_id chpid, int on)
{
struct channel_path *chp = chpid_to_chp(chpid);
- struct chp_link link;
- memset(&link, 0, sizeof(struct chp_link));
- link.chpid = chpid;
/* Wait until previous actions have settled. */
css_wait_for_slow_path();
/*
	/* Try to update the channel path descriptor. */
chsc_determine_base_channel_path_desc(chpid, &chp->desc);
for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
- __s390_vary_chpid_on, &link);
+ __s390_vary_chpid_on, &chpid);
} else
for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
- NULL, &link);
+ NULL, &chpid);
return 0;
}
__u8 mda[4]; /* model dependent area */
} __attribute__ ((packed,aligned(4)));
+/*
+ * When rescheduled, todo's with higher values will overwrite those
+ * with lower values.
+ */
enum sch_todo {
SCH_TODO_NOTHING,
+ SCH_TODO_EVAL,
SCH_TODO_UNREG,
};
}
EXPORT_SYMBOL_GPL(css_sch_device_unregister);
-static void css_sch_todo(struct work_struct *work)
-{
- struct subchannel *sch;
- enum sch_todo todo;
-
- sch = container_of(work, struct subchannel, todo_work);
- /* Find out todo. */
- spin_lock_irq(sch->lock);
- todo = sch->todo;
- CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
- sch->schid.sch_no, todo);
- sch->todo = SCH_TODO_NOTHING;
- spin_unlock_irq(sch->lock);
- /* Perform todo. */
- if (todo == SCH_TODO_UNREG)
- css_sch_device_unregister(sch);
- /* Release workqueue ref. */
- put_device(&sch->dev);
-}
-
-/**
- * css_sched_sch_todo - schedule a subchannel operation
- * @sch: subchannel
- * @todo: todo
- *
- * Schedule the operation identified by @todo to be performed on the slow path
- * workqueue. Do nothing if another operation with higher priority is already
- * scheduled. Needs to be called with subchannel lock held.
- */
-void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
-{
- CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
- sch->schid.ssid, sch->schid.sch_no, todo);
- if (sch->todo >= todo)
- return;
- /* Get workqueue ref. */
- if (!get_device(&sch->dev))
- return;
- sch->todo = todo;
- if (!queue_work(cio_work_q, &sch->todo_work)) {
- /* Already queued, release workqueue ref. */
- put_device(&sch->dev);
- }
-}
-
static void ssd_from_pmcw(struct chsc_ssd_info *ssd, struct pmcw *pmcw)
{
int i;
css_schedule_eval(schid);
}
+/**
+ * css_sched_sch_todo - schedule a subchannel operation
+ * @sch: subchannel
+ * @todo: todo
+ *
+ * Schedule the operation identified by @todo to be performed on the slow path
+ * workqueue. Do nothing if another operation with higher priority is already
+ * scheduled. Needs to be called with subchannel lock held.
+ */
+void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
+{
+ CIO_MSG_EVENT(4, "sch_todo: sched sch=0.%x.%04x todo=%d\n",
+ sch->schid.ssid, sch->schid.sch_no, todo);
+ if (sch->todo >= todo)
+ return;
+ /* Get workqueue ref. */
+ if (!get_device(&sch->dev))
+ return;
+ sch->todo = todo;
+ if (!queue_work(cio_work_q, &sch->todo_work)) {
+ /* Already queued, release workqueue ref. */
+ put_device(&sch->dev);
+ }
+}
+
+static void css_sch_todo(struct work_struct *work)
+{
+ struct subchannel *sch;
+ enum sch_todo todo;
+ int ret;
+
+ sch = container_of(work, struct subchannel, todo_work);
+ /* Find out todo. */
+ spin_lock_irq(sch->lock);
+ todo = sch->todo;
+ CIO_MSG_EVENT(4, "sch_todo: sch=0.%x.%04x, todo=%d\n", sch->schid.ssid,
+ sch->schid.sch_no, todo);
+ sch->todo = SCH_TODO_NOTHING;
+ spin_unlock_irq(sch->lock);
+ /* Perform todo. */
+ switch (todo) {
+ case SCH_TODO_NOTHING:
+ break;
+ case SCH_TODO_EVAL:
+ ret = css_evaluate_known_subchannel(sch, 1);
+ if (ret == -EAGAIN) {
+ spin_lock_irq(sch->lock);
+ css_sched_sch_todo(sch, todo);
+ spin_unlock_irq(sch->lock);
+ }
+ break;
+ case SCH_TODO_UNREG:
+ css_sch_device_unregister(sch);
+ break;
+ }
+ /* Release workqueue ref. */
+ put_device(&sch->dev);
+}
+
static struct idset *slow_subchannel_set;
static spinlock_t slow_subchannel_lock;
static wait_queue_head_t css_eval_wq;
*/
cdev->private->flags.resuming = 1;
cdev->private->path_new_mask = LPM_ANYPATH;
- css_schedule_eval(sch->schid);
+ css_sched_sch_todo(sch, SCH_TODO_EVAL);
spin_unlock_irq(sch->lock);
- css_complete_work();
+ css_wait_for_slow_path();
/* cdev may have been moved to a different subchannel. */
sch = to_subchannel(cdev->dev.parent);
cdev->private->pgid_reset_mask = 0;
}
-void
-ccw_device_verify_done(struct ccw_device *cdev, int err)
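+/*
+ * Build a fake IRB (command or transport mode variant) that reports the
+ * start function as pending with status pending.
+ */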
+static void create_fake_irb(struct irb *irb, int type)
+{
+ memset(irb, 0, sizeof(*irb));
+ if (type == FAKE_CMD_IRB) {
+ struct cmd_scsw *scsw = &irb->scsw.cmd;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ } else if (type == FAKE_TM_IRB) {
+ struct tm_scsw *scsw = &irb->scsw.tm;
+ scsw->x = 1;
+ scsw->cc = 1;
+ scsw->fctl = SCSW_FCTL_START_FUNC;
+ scsw->actl = SCSW_ACTL_START_PEND;
+ scsw->stctl = SCSW_STCTL_STATUS_PEND;
+ }
+}
+
+void ccw_device_verify_done(struct ccw_device *cdev, int err)
{
struct subchannel *sch;
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
- memset(&cdev->private->irb, 0, sizeof(struct irb));
- cdev->private->irb.scsw.cmd.cc = 1;
- cdev->private->irb.scsw.cmd.fctl = SCSW_FCTL_START_FUNC;
- cdev->private->irb.scsw.cmd.actl = SCSW_ACTL_START_PEND;
- cdev->private->irb.scsw.cmd.stctl =
- SCSW_STCTL_STATUS_PEND;
+ create_fake_irb(&cdev->private->irb,
+ cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
if (cdev->handler)
cdev->handler(cdev, cdev->private->intparm,
if (cdev->private->state == DEV_STATE_VERIFY) {
/* Remember to fake irb when finished. */
if (!cdev->private->flags.fake_irb) {
- cdev->private->flags.fake_irb = 1;
+ cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
return 0;
} else
ret = cio_set_options (sch, flags);
if (ret)
return ret;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
sch = to_subchannel(cdev->dev.parent);
if (!sch->schib.pmcw.ena)
return -EINVAL;
+ if (cdev->private->state == DEV_STATE_VERIFY) {
+ /* Remember to fake irb when finished. */
+ if (!cdev->private->flags.fake_irb) {
+ cdev->private->flags.fake_irb = FAKE_TM_IRB;
+ cdev->private->intparm = intparm;
+ return 0;
+ } else
+ /* There's already a fake I/O around. */
+ return -EBUSY;
+ }
if (cdev->private->state != DEV_STATE_ONLINE)
return -EIO;
- /* Adjust requested path mask to excluded varied off paths. */
+ /* Adjust requested path mask to exclude unusable paths. */
if (lpm) {
- lpm &= sch->opm;
+ lpm &= sch->lpm;
if (lpm == 0)
return -EACCES;
}
CDEV_TODO_UNREG_EVAL,
};
+#define FAKE_CMD_IRB 1
+#define FAKE_TM_IRB 2
+
struct ccw_device_private {
struct ccw_device *cdev;
struct subchannel *sch;
unsigned int doverify:1; /* delayed path verification */
unsigned int donotify:1; /* call notify function */
unsigned int recog_done:1; /* dev. recog. complete */
- unsigned int fake_irb:1; /* deliver faked irb */
+ unsigned int fake_irb:2; /* deliver faked irb */
unsigned int resuming:1; /* recognition while resume */
unsigned int pgroup:1; /* pathgroup is set up */
unsigned int mpath:1; /* multipathing is set up */
rc = ap_init_queue(ap_dev->qid);
if (rc == -ENODEV)
ap_dev->unregistered = 1;
+ else
+ __ap_schedule_poll_timer();
}
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags)
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_writeb(client, *buf, off);
-
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_writeb(client, *buf, off);
+ if (ret < 0)
break;
- }
-
len--;
buf++;
off++;
int ret = 0;
while (len > 0) {
- int err = bbc_i2c_readb(client, buf, off);
- if (err < 0) {
- ret = err;
+ ret = bbc_i2c_readb(client, buf, off);
+ if (ret < 0)
break;
- }
len--;
buf++;
off++;
.remove = __devexit_p(bbc_i2c_remove),
};
-static int __init bbc_i2c_init(void)
-{
- return platform_driver_register(&bbc_i2c_driver);
-}
-
-static void __exit bbc_i2c_exit(void)
-{
- platform_driver_unregister(&bbc_i2c_driver);
-}
-
-module_init(bbc_i2c_init);
-module_exit(bbc_i2c_exit);
+module_platform_driver(bbc_i2c_driver);
MODULE_LICENSE("GPL");
.remove = __devexit_p(d7s_remove),
};
-static int __init d7s_init(void)
-{
- return platform_driver_register(&d7s_driver);
-}
-
-static void __exit d7s_exit(void)
-{
- platform_driver_unregister(&d7s_driver);
-}
-
-module_init(d7s_init);
-module_exit(d7s_exit);
+module_platform_driver(d7s_driver);
.remove = __devexit_p(envctrl_remove),
};
-static int __init envctrl_init(void)
-{
- return platform_driver_register(&envctrl_driver);
-}
-
-static void __exit envctrl_exit(void)
-{
- platform_driver_unregister(&envctrl_driver);
-}
+module_platform_driver(envctrl_driver);
-module_init(envctrl_init);
-module_exit(envctrl_exit);
MODULE_LICENSE("GPL");
.remove = __devexit_p(flash_remove),
};
-static int __init flash_init(void)
-{
- return platform_driver_register(&flash_driver);
-}
-
-static void __exit flash_cleanup(void)
-{
- platform_driver_unregister(&flash_driver);
-}
+module_platform_driver(flash_driver);
-module_init(flash_init);
-module_exit(flash_cleanup);
MODULE_LICENSE("GPL");
};
-static int __init uctrl_init(void)
-{
- return platform_driver_register(&uctrl_driver);
-}
-
-static void __exit uctrl_exit(void)
-{
- platform_driver_unregister(&uctrl_driver);
-}
+module_platform_driver(uctrl_driver);
-module_init(uctrl_init);
-module_exit(uctrl_exit);
MODULE_LICENSE("GPL");
depends on FSL_SOC
config SPI_FSL_SPI
- tristate "Freescale SPI controller"
+ bool "Freescale SPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
config SPI_FSL_ESPI
- tristate "Freescale eSPI controller"
+ bool "Freescale eSPI controller"
depends on FSL_SOC
select SPI_FSL_LIB
help
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
spi_bitbang_cleanup(spi);
}
-static int __init spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+static int __devinit spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
{
int value;
return value;
}
-static int __init
+static int __devinit
spi_gpio_request(struct spi_gpio_platform_data *pdata, const char *label,
u16 *res_flags)
{
*
*/
+#include <linux/module.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
static void __devinit ssb_pcicore_init_clientmode(struct ssb_pcicore *pc)
{
- ssb_pcicore_fix_sprom_core_index(pc);
+ struct ssb_device *pdev = pc->dev;
+ struct ssb_bus *bus = pdev->bus;
+
+ if (bus->bustype == SSB_BUSTYPE_PCI)
+ ssb_pcicore_fix_sprom_core_index(pc);
/* Disable PCI interrupts. */
- ssb_write32(pc->dev, SSB_INTVEC, 0);
+ ssb_write32(pdev, SSB_INTVEC, 0);
/* Additional PCIe always once-executed workarounds */
if (pc->dev->id.coreid == SSB_DEV_PCIE) {
}
insns =
- kmalloc(sizeof(struct comedi_insn) * insnlist.n_insns, GFP_KERNEL);
+ kcalloc(insnlist.n_insns, sizeof(struct comedi_insn), GFP_KERNEL);
if (!insns) {
DPRINTK("kmalloc failed\n");
ret = -ENOMEM;
return ret;
}
-static void comedi_unmap(struct vm_area_struct *area)
+
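+/*
+ * Called when a new reference to a mapped comedi buffer is created;
+ * account for it by incrementing mmap_count under the device mutex.
+ */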
+static void comedi_vm_open(struct vm_area_struct *area)
+{
+ struct comedi_async *async;
+ struct comedi_device *dev;
+
+ async = area->vm_private_data;
+ dev = async->subdevice->device;
+
+ mutex_lock(&dev->mutex);
+ async->mmap_count++;
+ mutex_unlock(&dev->mutex);
+}
+
+static void comedi_vm_close(struct vm_area_struct *area)
{
struct comedi_async *async;
struct comedi_device *dev;
}
static struct vm_operations_struct comedi_vm_ops = {
- .close = comedi_unmap,
+ .open = comedi_vm_open,
+ .close = comedi_vm_close,
};
static int comedi_mmap(struct file *file, struct vm_area_struct *vma)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_async *async = NULL;
unsigned long start = vma->vm_start;
unsigned long size;
int i;
int retval;
struct comedi_subdevice *s;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+
+ dev_file_info = comedi_get_device_file_info(minor);
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
{
unsigned int mask = 0;
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *read_subdev;
struct comedi_subdevice *write_subdev;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
if (!dev->attached) {
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy)
break;
if (s->busy != file) {
int n, m, count = 0, retval = 0;
DECLARE_WAITQUEUE(wait, current);
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
if (!dev->attached) {
DPRINTK("no driver configured on comedi%i\n", dev->minor);
retval = -EAGAIN;
break;
}
+ schedule();
if (signal_pending(current)) {
retval = -ERESTARTSYS;
break;
}
- schedule();
if (!s->busy) {
retval = 0;
break;
static int comedi_close(struct inode *inode, struct file *file)
{
const unsigned minor = iminor(inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
struct comedi_subdevice *s = NULL;
int i;
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
+
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
mutex_lock(&dev->mutex);
static int comedi_fasync(int fd, struct file *file, int on)
{
const unsigned minor = iminor(file->f_dentry->d_inode);
- struct comedi_device_file_info *dev_file_info =
- comedi_get_device_file_info(minor);
+ struct comedi_device_file_info *dev_file_info;
+ struct comedi_device *dev;
+ dev_file_info = comedi_get_device_file_info(minor);
- struct comedi_device *dev = dev_file_info->device;
+ if (dev_file_info == NULL)
+ return -ENODEV;
+ dev = dev_file_info->device;
+ if (dev == NULL)
+ return -ENODEV;
return fasync_helper(fd, file, on, &dev->async_queue);
}
-#define DRIVER_VERSION "v0.5"
+#define DRIVER_VERSION "v0.6"
#define DRIVER_AUTHOR "Bernd Porr, BerndPorr@f2s.com"
#define DRIVER_DESC "Stirling/ITL USB-DUX SIGMA -- Bernd.Porr@f2s.com"
/*
Description: University of Stirling USB DAQ & INCITE Technology Limited
Devices: [ITL] USB-DUX (usbduxsigma.o)
Author: Bernd Porr <BerndPorr@f2s.com>
-Updated: 21 Jul 2011
+Updated: 8 Nov 2011
Status: testing
*/
/*
* 0.3: proper vendor ID and driver name
* 0.4: fixed D/A voltage range
* 0.5: various bug fixes, health check at startup
+ * 0.6: corrected wrong input range
*/
/* generates loads of debug info */
/* comedi constants */
static const struct comedi_lrange range_usbdux_ai_range = { 1, {
BIP_RANGE
- (2.65)
+ (2.65/2.0)
}
};
{USB_DEVICE(0x0DF6, 0x0045)},
{USB_DEVICE(0x0DF6, 0x0059)}, /* 11n mode disable */
{USB_DEVICE(0x0DF6, 0x004B)},
+ {USB_DEVICE(0x0DF6, 0x005D)},
{USB_DEVICE(0x0DF6, 0x0063)},
/* Sweex */
{USB_DEVICE(0x177F, 0x0154)},
th = kthread_create(rtsx_scan_thread, dev, "rtsx-scan");
if (IS_ERR(th)) {
printk(KERN_ERR "Unable to start the device-scanning thread\n");
+ complete(&dev->scanning_done);
quiesce_and_remove_host(dev);
err = PTR_ERR(th);
goto errout;
/* Bridge GPT id (1 - 4), DM Timer id (5 - 8) */
#define DMT_ID(id) ((id) + 4)
+#define DM_TIMER_CLOCKS 4
/* Bridge MCBSP id (6 - 10), OMAP Mcbsp id (0 - 4) */
#define MCBSP_ID(id) ((id) - 6)
*/
void dsp_clk_exit(void)
{
+ int i;
+
dsp_clock_disable_all(dsp_clocks);
+ for (i = 0; i < DM_TIMER_CLOCKS; i++)
+ omap_dm_timer_free(timer[i]);
+
clk_put(iva2_clk);
clk_put(ssi.sst_fck);
clk_put(ssi.ssr_fck);
void dsp_clk_init(void)
{
static struct platform_device dspbridge_device;
+ int i, id;
dspbridge_device.dev.bus = &platform_bus_type;
+ for (i = 0, id = 5; i < DM_TIMER_CLOCKS; i++, id++)
+ timer[i] = omap_dm_timer_request_specific(id);
+
iva2_clk = clk_get(&dspbridge_device.dev, "iva2_ck");
if (IS_ERR(iva2_clk))
dev_err(bridge, "failed to get iva2 clock %p\n", iva2_clk);
clk_enable(iva2_clk);
break;
case GPT_CLK:
- timer[clk_id - 1] =
- omap_dm_timer_request_specific(DMT_ID(clk_id));
+ status = omap_dm_timer_start(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
clk_disable(iva2_clk);
break;
case GPT_CLK:
- omap_dm_timer_free(timer[clk_id - 1]);
+ status = omap_dm_timer_stop(timer[clk_id - 1]);
break;
#ifdef CONFIG_OMAP_MCBSP
case MCBSP_CLK:
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
-
-#ifdef MODULE
#include <linux/module.h>
-#endif
-
#include <linux/device.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
{
struct usbip_device *ud = &vdev->ud;
struct urb *urb;
+ unsigned long flags;
spin_lock(&vdev->priv_lock);
urb = pickup_urb_and_free_priv(vdev, pdu->base.seqnum);
usbip_dbg_vhci_rx("now giveback urb %p\n", urb);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb, urb->status);
{
struct vhci_unlink *unlink;
struct urb *urb;
+ unsigned long flags;
usbip_dump_header(pdu);
urb->status = pdu->u.ret_unlink.status;
pr_info("urb->status %d\n", urb->status);
- spin_lock(&the_controller->lock);
+ spin_lock_irqsave(&the_controller->lock, flags);
usb_hcd_unlink_urb_from_ep(vhci_to_hcd(the_controller), urb);
- spin_unlock(&the_controller->lock);
+ spin_unlock_irqrestore(&the_controller->lock, flags);
usb_hcd_giveback_urb(vhci_to_hcd(the_controller), urb,
urb->status);
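
These two hunks switch the_controller->lock to the irqsave variants so the lock can be taken safely regardless of whether interrupts are enabled at the call site. The general shape of the idiom, with a hypothetical lock and function:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* hypothetical lock for illustration */

static void touch_shared_state(void)
{
	unsigned long flags;

	/* Save and disable local interrupts while holding the lock, then
	 * restore exactly the previous interrupt state on unlock. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... modify state shared with interrupt context ... */
	spin_unlock_irqrestore(&example_lock, flags);
}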
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
spin_lock_bh(&conn->cmd_lock);
list_add_tail(&cmd->i_list, &conn->conn_cmd_list);
hdr = (struct iscsi_reject *) cmd->pdu;
hdr->reason = reason;
- cmd->buf_ptr = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL);
if (!cmd->buf_ptr) {
pr_err("Unable to allocate memory for cmd->buf_ptr\n");
iscsit_release_cmd(cmd);
return -1;
}
- memcpy(cmd->buf_ptr, buf, ISCSI_HDR_LEN);
if (add_to_conn) {
spin_lock_bh(&conn->cmd_lock);
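
Both reject-path hunks collapse a kzalloc() plus memcpy() pair into kmemdup(), which allocates and copies in one step and fails in the same way. A minimal sketch, with hypothetical names ('save_pdu_header', 'rx_buf'):

#include <linux/slab.h>
#include <linux/string.h>

/* Sketch only: duplicate a received header so it can outlive the rx buffer. */
static void *save_pdu_header(const void *rx_buf, size_t len)
{
	/* kmemdup() returns NULL on allocation failure, like kzalloc(). */
	return kmemdup(rx_buf, len, GFP_KERNEL);
}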
" non-existent or non-exported iSCSI LUN:"
" 0x%016Lx\n", get_unaligned_le64(&hdr->lun));
}
- if (ret == PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES)
- return iscsit_add_reject_from_cmd(
- ISCSI_REASON_BOOKMARK_NO_RESOURCES,
- 1, 1, buf, cmd);
-
send_check_condition = 1;
goto attach_cmd;
}
*/
send_check_condition = 1;
} else {
+ cmd->data_length = cmd->se_cmd.data_length;
+
if (iscsit_decide_list_to_build(cmd, payload_length) < 0)
return iscsit_add_reject_from_cmd(
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
* the backend memory allocation.
*/
ret = transport_generic_new_cmd(&cmd->se_cmd);
- if ((ret < 0) || (cmd->se_cmd.se_cmd_flags & SCF_SE_CMD_FAILED)) {
+ if (ret < 0) {
immed_ret = IMMEDIATE_DATA_NORMAL_OPERATION;
dump_immediate_data = 1;
goto after_immediate_data;
spin_lock_irqsave(&se_cmd->t_state_lock, flags);
if (!(se_cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) ||
- (se_cmd->se_cmd_flags & SCF_SE_CMD_FAILED))
+ (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
dump_unsolicited_data = 1;
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
if (hdr->flags & ISCSI_FLAG_DATA_STATUS) {
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_DATA_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
}
hton24(hdr->dlength, datain.length);
hdr->flags |= ISCSI_FLAG_CMD_FINAL;
if (cmd->se_cmd.se_cmd_flags & SCF_OVERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_OVERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
} else if (cmd->se_cmd.se_cmd_flags & SCF_UNDERFLOW_BIT) {
hdr->flags |= ISCSI_FLAG_CMD_UNDERFLOW;
- hdr->residual_count = cpu_to_be32(cmd->residual_count);
+ hdr->residual_count = cpu_to_be32(cmd->se_cmd.residual_count);
}
hdr->response = cmd->iscsi_response;
hdr->cmd_status = cmd->se_cmd.scsi_status;
hdr = (struct iscsi_tm_rsp *) cmd->pdu;
memset(hdr, 0, ISCSI_HDR_LEN);
hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
+ hdr->flags = ISCSI_FLAG_CMD_FINAL;
hdr->response = iscsit_convert_tcm_tmr_rsp(se_tmr);
hdr->itt = cpu_to_be32(cmd->init_task_tag);
cmd->stat_sn = conn->stat_sn++;
static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
{
- int j = DIV_ROUND_UP(len, 2);
+ int j = DIV_ROUND_UP(len, 2), rc;
- hex2bin(dst, src, j);
+ rc = hex2bin(dst, src, j);
+ if (rc < 0)
+ pr_debug("CHAP string contains non hex digit symbols\n");
dst[j] = '\0';
return j;
u32 pdu_send_order;
/* Current struct iscsi_pdu in struct iscsi_cmd->pdu_list */
u32 pdu_start;
- u32 residual_count;
/* Next struct iscsi_seq to send in struct iscsi_cmd->seq_list */
u32 seq_send_order;
/* Number of struct iscsi_seq in struct iscsi_cmd->seq_list */
atomic_t connection_exit;
atomic_t connection_recovery;
atomic_t connection_reinstatement;
- atomic_t connection_wait;
atomic_t connection_wait_rcfr;
atomic_t sleep_on_conn_wait_comp;
atomic_t transport_failed;
atomic_t session_reinstatement;
atomic_t session_stop_active;
atomic_t sleep_on_sess_wait_comp;
- atomic_t transport_wait_cmds;
/* connection list */
struct list_head sess_conn_list;
struct list_head cr_active_list;
* handle the SCF_SCSI_RESERVATION_CONFLICT case here as well.
*/
if (se_cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION) {
- if (se_cmd->se_cmd_flags &
- SCF_SCSI_RESERVATION_CONFLICT) {
+ if (se_cmd->scsi_sense_reason == TCM_RESERVATION_CONFLICT) {
cmd->i_state = ISTATE_SEND_STATUS;
spin_unlock_bh(&cmd->istate_lock);
iscsit_add_cmd_to_response_queue(cmd, cmd->conn,
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Could not allocate memory for session\n");
- return -1;
+ return -ENOMEM;
}
iscsi_login_set_conn_values(sess, conn, pdu->cid);
pr_err("idr_pre_get() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
spin_lock(&sess_idr_lock);
idr_get_new(&sess_idr, NULL, &sess->session_index);
ISCSI_LOGIN_STATUS_NO_RESOURCES);
pr_err("Unable to allocate memory for"
" struct iscsi_sess_ops.\n");
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
sess->se_sess = transport_init_session();
- if (!sess->se_sess) {
+ if (IS_ERR(sess->se_sess)) {
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
- return -1;
+ kfree(sess);
+ return -ENOMEM;
}
return 0;
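
The IS_ERR() check above implies that transport_init_session() reports failure through an ERR_PTR()-encoded pointer rather than NULL, so a plain NULL test would miss the error. A minimal sketch of the idiom, with a hypothetical caller:

#include <linux/err.h>

/* Sketch only; 'check_ptr' is an illustrative helper name. */
static long check_ptr(void *p)
{
	if (IS_ERR(p))
		return PTR_ERR(p);	/* decode the negative errno */
	/* p is a usable pointer here */
	return 0;
}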
return NULL;
}
- login->req = kzalloc(ISCSI_HDR_LEN, GFP_KERNEL);
+ login->req = kmemdup(login_pdu, ISCSI_HDR_LEN, GFP_KERNEL);
if (!login->req) {
pr_err("Unable to allocate memory for Login Request.\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
ISCSI_LOGIN_STATUS_NO_RESOURCES);
goto out;
}
- memcpy(login->req, login_pdu, ISCSI_HDR_LEN);
login->req_buf = kzalloc(MAX_KEY_VALUE_PAIRS, GFP_KERNEL);
if (!login->req_buf) {
scsi_bufflen(sc), sc->sc_data_direction, sam_task_attr,
&tl_cmd->tl_sense_buf[0]);
- /*
- * Signal BIDI usage with T_TASK(cmd)->t_tasks_bidi
- */
if (scsi_bidi_cmnd(sc))
- se_cmd->t_tasks_bidi = 1;
+ se_cmd->se_cmd_flags |= SCF_BIDI;
+
/*
* Locate the struct se_lun pointer and attach it to struct se_cmd
*/
* Allocate the necessary tasks to complete the received CDB+data
*/
ret = transport_generic_allocate_tasks(se_cmd, sc->cmnd);
- if (ret == -ENOMEM) {
- /* Out of Resources */
- return PYX_TRANSPORT_LU_COMM_FAILURE;
- } else if (ret == -EINVAL) {
- /*
- * Handle case for SAM_STAT_RESERVATION_CONFLICT
- */
- if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
- /*
- * Otherwise, return SAM_STAT_CHECK_CONDITION and return
- * sense data.
- */
- return PYX_TRANSPORT_USE_SENSE_REASON;
- }
-
+ if (ret != 0)
+ return ret;
/*
* For BIDI commands, pass in the extra READ buffer
* to transport_generic_map_mem_to_cmd() below..
*/
- if (se_cmd->t_tasks_bidi) {
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
struct scsi_data_buffer *sdb = scsi_in(sc);
sgl_bidi = sdb->table.sgl;
}
/* Tell the core about our preallocated memory */
- ret = transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
+ return transport_generic_map_mem_to_cmd(se_cmd, scsi_sglist(sc),
scsi_sg_count(sc), sgl_bidi, sgl_bidi_count);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
- return 0;
}
/*
{
struct tcm_loop_hba *tl_hba = container_of(wwn,
struct tcm_loop_hba, tl_hba_wwn);
- int host_no = tl_hba->sh->host_no;
+
+ pr_debug("TCM_Loop_ConfigFS: Deallocating emulated Target"
+ " SAS Address: %s at Linux/SCSI Host ID: %d\n",
+ tl_hba->tl_wwn_address, tl_hba->sh->host_no);
/*
* Call device_unregister() on the original tl_hba->dev.
* tcm_loop_fabric_scsi.c:tcm_loop_release_adapter() will
* release *tl_hba;
*/
device_unregister(&tl_hba->dev);
-
- pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
- " SAS Address: %s at Linux/SCSI Host ID: %d\n",
- config_item_name(&wwn->wwn_group.cg_item), host_no);
}
/* Start items for tcm_loop_cit */
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
- if (!l_port)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
-
+ if (!l_port) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
buf = transport_kmap_first_data_page(cmd);
/*
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!l_tg_pt_gp_mem) {
pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
if (!l_tg_pt_gp) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
if (!rc) {
pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
- rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ rc = -EINVAL;
goto out;
}
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
rc = -1;
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
} else {
* INVALID_PARAMETER_LIST
*/
if (rc != 0) {
- rc = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ rc = -EINVAL;
goto out;
}
}
* struct t10_alua_lu_gp.
*/
spin_lock(&lu_gps_lock);
- atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_node);
alua_lu_gps_count--;
spin_unlock(&lu_gps_lock);
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
- atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
if (cmd->data_length < 60)
return 0;
- buf[2] = 0x3c;
+ buf[3] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
if (cmd->data_length < 4) {
pr_err("SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
}
pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
ret = -EINVAL;
out_unmap:
default:
pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
cdb[2] & 0x3f, cdb[3]);
- return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
+ cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
+ return -EINVAL;
}
offset += length;
if (cdb[1] & 0x01) {
pr_err("REQUEST_SENSE description emulation not"
" supported\n");
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -ENOSYS;
}
buf = transport_kmap_first_data_page(cmd);
if (!dev->transport->do_discard) {
pr_err("UNMAP emulation not supported for: %s\n",
dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
/* First UNMAP block descriptor starts at 8 byte offset */
if (!dev->transport->do_discard) {
pr_err("WRITE_SAME emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
if (cmd->t_task_cdb[0] == WRITE_SAME)
int target_emulate_synchronize_cache(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
+ struct se_cmd *cmd = task->task_se_cmd;
if (!dev->transport->do_sync_cache) {
pr_err("SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
- return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ return -ENOSYS;
}
dev->transport->do_sync_cache(task);
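
From here on, the target-core hunks retire the driver-private PYX_TRANSPORT_* return codes: the SCSI-level failure is recorded in cmd->scsi_sense_reason and the function returns an ordinary negative errno (or 0 on success). A minimal sketch of the resulting convention, assuming a hypothetical emulation handler:

#include <linux/errno.h>
#include <target/target_core_base.h>

/* Sketch only; 'emulate_unsupported_op' is an illustrative name. */
static int emulate_unsupported_op(struct se_cmd *cmd)
{
	/* Record which CHECK CONDITION sense the core should build for the
	 * initiator ... */
	cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
	/* ... and report the failure itself as a plain errno. */
	return -ENOSYS;
}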
static struct config_group alua_group;
static struct config_group alua_lu_gps_group;
-static DEFINE_SPINLOCK(se_device_lock);
-static LIST_HEAD(se_dev_list);
-
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
" struct se_subsystem_dev\n");
goto unlock;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
" from allocate_virtdevice()\n");
goto out;
}
- spin_lock(&se_device_lock);
- list_add_tail(&se_dev->se_dev_node, &se_dev_list);
- spin_unlock(&se_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
mutex_lock(&hba->hba_access_mutex);
t = hba->transport;
- spin_lock(&se_device_lock);
- list_del(&se_dev->se_dev_node);
- spin_unlock(&se_device_lock);
-
dev_stat_grp = &se_dev->dev_stat_grps.stat_group;
for (i = 0; dev_stat_grp->default_groups[i]; i++) {
df_item = &dev_stat_grp->default_groups[i]->cg_item;
se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
- se_cmd->se_orig_obj_ptr = se_cmd->se_lun->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
/*
se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
- se_cmd->se_orig_obj_ptr = se_cmd->se_dev;
}
spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
se_task->task_scsi_status = GOOD;
transport_complete_task(se_task, 1);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* se_release_device_for_hba():
return -EINVAL;
}
- pr_err("dpo_emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("dpo_emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
return -EINVAL;
}
- if (dev->transport->fua_write_emulated == 0) {
+ if (flag && dev->transport->fua_write_emulated == 0) {
pr_err("fua_write_emulated not supported\n");
return -EINVAL;
}
return -EINVAL;
}
- pr_err("ua read emulated not supported\n");
- return -EINVAL;
+ if (flag) {
+ pr_err("ua read emulated not supported\n");
+ return -EINVAL;
+ }
+
+ return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
pr_err("Illegal value %d\n", flag);
return -EINVAL;
}
- if (dev->transport->write_cache_emulated == 0) {
+ if (flag && dev->transport->write_cache_emulated == 0) {
pr_err("write_cache_emulated not supported\n");
return -EINVAL;
}
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
* We expect this value to be non-zero when generic Block Layer
* Discard supported is detected iblock_create_virtdevice().
*/
- if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+ if (flag && !dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
pr_err("Generic Block Discard not supported\n");
return -ENOSYS;
}
ret = -ENOMEM;
goto out;
}
- INIT_LIST_HEAD(&se_dev->se_dev_node);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_pr.registration_list);
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
return -ENOMEM;
}
- for (i = 0; i < task->task_sg_nents; i++) {
- iov[i].iov_len = sg[i].length;
- iov[i].iov_base = sg_virt(&sg[i]);
+ for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+ iov[i].iov_len = sg->length;
+ iov[i].iov_base = sg_virt(sg);
}
old_fs = get_fs();
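
Both loops above move from indexing task_sg[i] to for_each_sg(), which walks the list via sg_next() and therefore also works when scatterlists are chained rather than laid out as one flat array. A minimal sketch of the iteration, with a hypothetical helper name:

#include <linux/scatterlist.h>
#include <linux/uio.h>

/* Sketch only; 'fill_iovec' is an illustrative name. */
static void fill_iovec(struct iovec *iov, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		iov[i].iov_len = sg->length;
		iov[i].iov_base = sg_virt(sg);
	}
}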
if (ret > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- cmd->t_tasks_fua) {
+ (cmd->se_cmd_flags & SCF_FUA)) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
}
- if (ret < 0)
+ if (ret < 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return ret;
+ }
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
*/
if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
(dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
- task->task_se_cmd->t_tasks_fua))
+ (cmd->se_cmd_flags & SCF_FUA)))
rw = WRITE_FUA;
else
rw = WRITE;
else {
pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOSYS;
}
bio = iblock_get_bio(task, block_lba, sg_num);
- if (!bio)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ if (!bio) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
+ }
bio_list_init(&list);
bio_list_add(&list, bio);
submit_bio(rw, bio);
blk_finish_plug(&plug);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while ((bio = bio_list_pop(&list)))
bio_put(bio);
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static u32 iblock_get_device_rev(struct se_device *dev)
pr_err("Received legacy SPC-2 RESERVE/RELEASE"
" while active SPC-3 registrations exist,"
" returning RESERVATION_CONFLICT\n");
- *ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
return true;
}
(cmd->t_task_cdb[1] & 0x02)) {
pr_err("LongIO and Obselete Bits set, returning"
" ILLEGAL_REQUEST\n");
- ret = PYX_TRANSPORT_ILLEGAL_REQUEST;
+ cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+ ret = -EINVAL;
goto out;
}
/*
" from %s \n", cmd->se_lun->unpacked_lun,
cmd->se_deve->mapped_lun,
sess->se_node_acl->initiatorname);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out_unlock;
}
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
" does not equal CDB data_length: %u\n", tpdl,
cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
" for tmp_tpg\n");
atomic_dec(&tmp_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
/*
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_tpg_undepend_item(tmp_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
if (!dest_tpg) {
pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
" dest_tpg\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
" %u for Transport ID: %s\n", tid_len, ptr);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
smp_mb__after_atomic_dec();
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
core_scsi3_lunacl_undepend_item(dest_se_deve);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
tidh_new->dest_pr_reg = dest_pr_reg;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
if (res_key) {
pr_warn("SPC-3 PR: Reservation Key non-zero"
" for SA REGISTER, returning CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* Do nothing but return GOOD status.
*/
if (!sa_res_key)
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
if (!spec_i_pt) {
/*
if (ret != 0) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
} else {
/*
" 0x%016Lx\n", res_key,
pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
}
if (spec_i_pt) {
pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
" set while sa_res_key=0\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* An existing ALL_TG_PT=1 registration being released
" registration exists, but ALL_TG_PT=1 bit not"
" present in received PROUT\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
/*
* Allocate APTPL metadata buffer used for UNREGISTER ops
pr_err("Unable to allocate"
" pr_aptpl_buf\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
}
/*
if (pr_holder < 0) {
kfree(pr_aptpl_buf);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
spin_lock(&pr_tmpl->registration_lock);
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
se_tpg = se_sess->se_tpg;
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RESERVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
" does not match existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* See if we have an existing PR reservation holder pointer at
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.9: Reserving:
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* Otherwise, our *pr_reg becomes the PR reservation holder for said
default:
pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
" 0x%02x\n", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* Locate the existing *pr_reg via struct se_node_acl pointers
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for RELEASE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
(pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG))
*/
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing:
" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* From spc4r17 Section 5.7.11.2 Releasing and above:
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* In response to a persistent reservation release request from the
if (!pr_reg_n) {
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for CLEAR\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* From spc4r17 section 5.7.11.6, Clearing:
" existing SA REGISTER res_key:"
" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* a) Release the persistent reservation, if any;
int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
int prh_type = 0, prh_scope = 0, ret;
- if (!se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
pr_err("SPC-3 PR: Unable to locate"
" PR_REGISTERED *pr_reg for PREEMPT%s\n",
(abort) ? "_AND_ABORT" : "");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (pr_reg_n->pr_res_key != res_key) {
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
if (scope != PR_SCOPE_LU_SCOPE) {
pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
INIT_LIST_HEAD(&preempt_and_abort_list);
if (!all_reg && !sa_res_key) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
* From spc4r17, section 5.7.11.4.4 Removing Registrations:
if (!released_regs) {
spin_unlock(&dev->dev_reservation_lock);
core_scsi3_put_pr_reg(pr_reg_n);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* For an existing all registrants type reservation
default:
pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
return ret;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
memset(dest_iport, 0, 64);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
if (!pr_reg) {
pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
" *pr_reg for REGISTER_AND_MOVE\n");
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
/*
* The provided reservation key must match the existing reservation key
" res_key: 0x%016Lx does not match existing SA REGISTER"
" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
/*
* The service active reservation key needs to be non zero
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
" sa_res_key\n");
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
/*
" does not equal CDB data_length: %u\n", tid_len,
cmd->data_length);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
smp_mb__after_atomic_dec();
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
}
spin_lock(&dev->se_port_lock);
" fabric ops from Relative Target Port Identifier:"
" %hu\n", rtpi);
core_scsi3_put_pr_reg(pr_reg);
- return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
" from fabric: %s\n", proto_ident,
dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
dest_tf_ops->get_fabric_name());
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
" containg a valid tpg_parse_pr_out_transport_id"
" function pointer\n");
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
if (!initiator_str) {
pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
" initiator_str from Transport ID\n");
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
" matches: %s on received I_T Nexus\n", initiator_str,
pr_reg_nacl->initiatorname);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
" matches: %s %s on received I_T Nexus\n",
initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
pr_reg->pr_reg_isid);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
after_iport_check:
pr_err("Unable to locate %s dest_node_acl for"
" TransportID%s\n", dest_tf_ops->get_fabric_name(),
initiator_str);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
atomic_dec(&dest_node_acl->acl_pr_ref_count);
smp_mb__after_atomic_dec();
dest_node_acl = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
#if 0
if (!dest_se_deve) {
pr_err("Unable to locate %s dest_se_deve from RTPI:"
" %hu\n", dest_tf_ops->get_fabric_name(), rtpi);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
atomic_dec(&dest_se_deve->pr_ref_count);
smp_mb__after_atomic_dec();
dest_se_deve = NULL;
- ret = PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = -EINVAL;
goto out;
}
#if 0
pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
" currently held\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
goto out;
}
/*
pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
" Nexus is not reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
/*
" reservation for type: %s\n",
core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
pr_res_nacl = pr_res_holder->pr_reg_nacl;
sa_res_key, 0, aptpl, 2, 1);
if (ret != 0) {
spin_unlock(&dev->dev_reservation_lock);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ ret = -EINVAL;
goto out;
}
* FIXME: A NULL struct se_session pointer means this is not coming from
* a $FABRIC_MOD's nexus, but from internal passthrough ops.
*/
- if (!cmd->se_sess)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (!cmd->se_sess) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -EINVAL;
+ }
if (cmd->data_length < 24) {
pr_warn("SPC-PR: Received PR OUT parameter list"
" length too small: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
* SPEC_I_PT=1 is only valid for Service action: REGISTER
*/
if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) {
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
(cmd->data_length != 24)) {
pr_warn("SPC-PR: Received PR OUT illegal parameter"
" list length: %u\n", cmd->data_length);
- ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
+ cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
+ ret = -EINVAL;
goto out;
}
/*
default:
pr_err("Unknown PERSISTENT_RESERVE_OUT service"
" action: 0x%02x\n", cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
if (cmd->data_length < 6) {
pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
" %u too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
if (cmd->data_length < 8) {
pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
" too small\n", cmd->data_length);
- return PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
}
buf = transport_kmap_first_data_page(cmd);
pr_err("Received PERSISTENT_RESERVE CDB while legacy"
" SPC-2 reservation is held, returning"
" RESERVATION_CONFLICT\n");
- return PYX_TRANSPORT_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EINVAL;
}
switch (cmd->t_task_cdb[1] & 0x1f) {
default:
pr_err("Unknown PERSISTENT_RESERVE_IN service"
" action: 0x%02x\n", cmd->t_task_cdb[1] & 0x1f);
- ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ ret = -EINVAL;
break;
}
static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg,
struct bio **hbio)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
u32 task_sg_num = task->task_sg_nents;
struct bio *bio = NULL, *tbio = NULL;
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
- int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ int nr_vecs = 0, rc;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
*hbio = NULL;
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return ret;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
static int pscsi_do_task(struct se_task *task)
{
+ struct se_cmd *cmd = task->task_se_cmd;
struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct request *req;
if (!req || IS_ERR(req)) {
pr_err("PSCSI: blk_get_request() failed: %ld\n",
req ? IS_ERR(req) : -ENOMEM);
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENODEV;
}
} else {
BUG_ON(!task->task_size);
* Setup the main struct request for the task->task_sg[] payload
*/
ret = pscsi_map_sg(task, task->task_sg, &hbio);
- if (ret < 0)
- return PYX_TRANSPORT_LU_COMM_FAILURE;
+ if (ret < 0) {
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return ret;
+ }
req = blk_make_request(pdv->pdv_sd->request_queue, hbio,
GFP_KERNEL);
(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG),
pscsi_req_done);
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
fail:
while (hbio) {
bio->bi_next = NULL;
bio_endio(bio, 0); /* XXX: should be error */
}
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return -ENOMEM;
}
/* pscsi_get_sense_buffer():
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_UNSUPPORTED_SCSI_OPCODE;
transport_complete_task(task, 0);
break;
}
return NULL;
}
-/* rd_MEMCPY_read():
- *
- *
- */
-static int rd_MEMCPY_read(struct rd_request *req)
+static int rd_MEMCPY(struct rd_request *req, u32 read_rd)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
+ struct scatterlist *rd_sg;
+ struct sg_mapping_iter m;
u32 rd_offset = req->rd_offset;
+ u32 src_len;
table = rd_get_sg_table(dev, req->rd_page);
if (!table)
return -EINVAL;
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = task->task_sg;
- sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
+ rd_sg = &table->sg_table[req->rd_page - table->page_start_offset];
- pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
- " %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- src_offset = rd_offset;
+ pr_debug("RD[%u]: %s LBA: %llu, Size: %u Page: %u, Offset: %u\n",
+ dev->rd_dev_id, read_rd ? "Read" : "Write",
+ task->task_lba, req->rd_size, req->rd_page,
+ rd_offset);
+ src_len = PAGE_SIZE - rd_offset;
+ sg_miter_start(&m, task->task_sg, task->task_sg_nents,
+ read_rd ? SG_MITER_TO_SG : SG_MITER_FROM_SG);
while (req->rd_size) {
- if ((sg_d[i].length - dst_offset) <
- (sg_s[j].length - src_offset)) {
- length = (sg_d[i].length - dst_offset);
-
- pr_debug("Step 1 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset, j,
- sg_s[j].length);
- pr_debug("Step 1 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i++]) + dst_offset;
- BUG_ON(!dst);
-
- src = sg_virt(&sg_s[j]) + src_offset;
- BUG_ON(!src);
-
- dst_offset = 0;
- src_offset = length;
- page_end = 0;
- } else {
- length = (sg_s[j].length - src_offset);
-
- pr_debug("Step 2 - sg_d[%d]: %p length: %d"
- " offset: %u sg_s[%d].length: %u\n", i,
- &sg_d[i], sg_d[i].length, sg_d[i].offset,
- j, sg_s[j].length);
- pr_debug("Step 2 - length: %u dst_offset: %u"
- " src_offset: %u\n", length, dst_offset,
- src_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- dst = sg_virt(&sg_d[i]) + dst_offset;
- BUG_ON(!dst);
-
- if (sg_d[i].length == length) {
- i++;
- dst_offset = 0;
- } else
- dst_offset = length;
-
- src = sg_virt(&sg_s[j++]) + src_offset;
- BUG_ON(!src);
-
- src_offset = 0;
- page_end = 1;
- }
+ u32 len;
+ void *rd_addr;
- memcpy(dst, src, length);
+ sg_miter_next(&m);
+ len = min((u32)m.length, src_len);
+ m.consumed = len;
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
+ rd_addr = sg_virt(rd_sg) + rd_offset;
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
+ if (read_rd)
+ memcpy(m.addr, rd_addr, len);
+ else
+ memcpy(rd_addr, m.addr, len);
- if (!page_end)
+ req->rd_size -= len;
+ if (!req->rd_size)
continue;
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ src_len -= len;
+ if (src_len) {
+ rd_offset += len;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- sg_s = &table->sg_table[j = 0];
- }
-
- return 0;
-}
-
-/* rd_MEMCPY_write():
- *
- *
- */
-static int rd_MEMCPY_write(struct rd_request *req)
-{
- struct se_task *task = &req->rd_task;
- struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;
- struct rd_dev_sg_table *table;
- struct scatterlist *sg_d, *sg_s;
- void *dst, *src;
- u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
- u32 length, page_end = 0, table_sg_end;
- u32 rd_offset = req->rd_offset;
-
- table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
- return -EINVAL;
-
- table_sg_end = (table->page_end_offset - req->rd_page);
- sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
- sg_s = task->task_sg;
-
- pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
- " Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
- req->rd_page, req->rd_offset);
-
- dst_offset = rd_offset;
-
- while (req->rd_size) {
- if ((sg_s[i].length - src_offset) <
- (sg_d[j].length - dst_offset)) {
- length = (sg_s[i].length - src_offset);
-
- pr_debug("Step 1 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 1 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i++]) + src_offset;
- BUG_ON(!src);
-
- dst = sg_virt(&sg_d[j]) + dst_offset;
- BUG_ON(!dst);
-
- src_offset = 0;
- dst_offset = length;
- page_end = 0;
- } else {
- length = (sg_d[j].length - dst_offset);
-
- pr_debug("Step 2 - sg_s[%d]: %p length: %d"
- " offset: %d sg_d[%d].length: %u\n", i,
- &sg_s[i], sg_s[i].length, sg_s[i].offset,
- j, sg_d[j].length);
- pr_debug("Step 2 - length: %u src_offset: %u"
- " dst_offset: %u\n", length, src_offset,
- dst_offset);
-
- if (length > req->rd_size)
- length = req->rd_size;
-
- src = sg_virt(&sg_s[i]) + src_offset;
- BUG_ON(!src);
-
- if (sg_s[i].length == length) {
- i++;
- src_offset = 0;
- } else
- src_offset = length;
-
- dst = sg_virt(&sg_d[j++]) + dst_offset;
- BUG_ON(!dst);
-
- dst_offset = 0;
- page_end = 1;
- }
-
- memcpy(dst, src, length);
-
- pr_debug("page: %u, remaining size: %u, length: %u,"
- " i: %u, j: %u\n", req->rd_page,
- (req->rd_size - length), length, i, j);
-
- req->rd_size -= length;
- if (!req->rd_size)
- return 0;
-
- if (!page_end)
- continue;
-
- if (++req->rd_page <= table->page_end_offset) {
- pr_debug("page: %u in same page table\n",
- req->rd_page);
+ /* rd page completed, next one please */
+ req->rd_page++;
+ rd_offset = 0;
+ src_len = PAGE_SIZE;
+ if (req->rd_page <= table->page_end_offset) {
+ rd_sg++;
continue;
}
- pr_debug("getting new page table for page: %u\n",
- req->rd_page);
-
table = rd_get_sg_table(dev, req->rd_page);
- if (!table)
+ if (!table) {
+ sg_miter_stop(&m);
return -EINVAL;
+ }
- sg_d = &table->sg_table[j = 0];
+ /* since we increment, the first sg entry is correct */
+ rd_sg = table->sg_table;
}
-
+ sg_miter_stop(&m);
return 0;
}
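
The rewritten rd_MEMCPY() above leans on the sg_mapping_iter API: sg_miter_start() binds the iterator to a scatterlist, each sg_miter_next() exposes the next mapped chunk through m.addr and m.length, m.consumed tells the iterator how much was actually used, and sg_miter_stop() releases the mapping. A minimal standalone sketch of that flow, with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

/* Sketch only; 'copy_out_of_sgl' is an illustrative name. */
static size_t copy_out_of_sgl(void *dst, struct scatterlist *sgl,
			      unsigned int nents, size_t count)
{
	struct sg_mapping_iter m;
	size_t copied = 0;

	/* SG_MITER_FROM_SG: data is read out of the scatterlist. */
	sg_miter_start(&m, sgl, nents, SG_MITER_FROM_SG);
	while (copied < count && sg_miter_next(&m)) {
		size_t len = min(m.length, count - copied);

		memcpy(dst + copied, m.addr, len);
		m.consumed = len;	/* tell the iterator what we used */
		copied += len;
	}
	sg_miter_stop(&m);
	return copied;
}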
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct rd_request *req = RD_REQ(task);
- unsigned long long lba;
+ u64 tmp;
int ret;
- req->rd_page = (task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size) / PAGE_SIZE;
- lba = task->task_lba;
- req->rd_offset = (do_div(lba,
- (PAGE_SIZE / dev->se_sub_dev->se_dev_attrib.block_size))) *
- dev->se_sub_dev->se_dev_attrib.block_size;
+ tmp = task->task_lba * dev->se_sub_dev->se_dev_attrib.block_size;
+ req->rd_offset = do_div(tmp, PAGE_SIZE);
+ req->rd_page = tmp;
req->rd_size = task->task_size;
- if (task->task_data_direction == DMA_FROM_DEVICE)
- ret = rd_MEMCPY_read(req);
- else
- ret = rd_MEMCPY_write(req);
-
+ ret = rd_MEMCPY(req, task->task_data_direction == DMA_FROM_DEVICE);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
-
- return PYX_TRANSPORT_SENT_TO_TRANSPORT;
+ return 0;
}
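
The simplified rd_page/rd_offset computation above relies on do_div(), whose calling convention is easy to misread: it divides the 64-bit variable in place, leaving the quotient in it, and returns the remainder. A worked sketch under assumed values (512-byte blocks, 4096-byte pages, LBA 9, so tmp = 4608, page index 1, offset 512):

#include <linux/types.h>
#include <linux/mm.h>
#include <asm/div64.h>

/* Sketch only; 'split_lba' is an illustrative name. */
static void split_lba(u64 lba, u32 block_size, u32 *page, u32 *offset)
{
	u64 tmp = lba * block_size;

	*offset = do_div(tmp, PAGE_SIZE);	/* remainder: byte offset in page */
	*page = tmp;				/* quotient: page index */
}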
/* rd_free_task(): (Part of se_subsystem_api_t template)
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, cmd->t_state,
atomic_read(&cmd->t_fe_count));
- /*
- * Signal that the command has failed via cmd->se_cmd_flags,
- */
- transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&cmd->t_fe_count));
static int sub_api_initialized;
static struct workqueue_struct *target_completion_wq;
-static struct kmem_cache *se_cmd_cache;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_tmr_req_cache;
struct kmem_cache *se_ua_cache;
static void transport_put_cmd(struct se_cmd *cmd);
static void transport_remove_cmd_from_queue(struct se_cmd *cmd);
static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq);
-static void transport_generic_request_failure(struct se_cmd *, int, int);
+static void transport_generic_request_failure(struct se_cmd *);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
{
- se_cmd_cache = kmem_cache_create("se_cmd_cache",
- sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
- if (!se_cmd_cache) {
- pr_err("kmem_cache_create for struct se_cmd failed\n");
- goto out;
- }
se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
0, NULL);
if (!se_tmr_req_cache) {
pr_err("kmem_cache_create() for struct se_tmr_req"
" failed\n");
- goto out_free_cmd_cache;
+ goto out;
}
se_sess_cache = kmem_cache_create("se_sess_cache",
sizeof(struct se_session), __alignof__(struct se_session),
kmem_cache_destroy(se_sess_cache);
out_free_tmr_req_cache:
kmem_cache_destroy(se_tmr_req_cache);
-out_free_cmd_cache:
- kmem_cache_destroy(se_cmd_cache);
out:
return -ENOMEM;
}
void release_se_kmem_caches(void)
{
destroy_workqueue(target_completion_wq);
- kmem_cache_destroy(se_cmd_cache);
kmem_cache_destroy(se_tmr_req_cache);
kmem_cache_destroy(se_sess_cache);
kmem_cache_destroy(se_ua_cache);
task->task_scsi_status = GOOD;
} else {
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
- task->task_error_status = PYX_TRANSPORT_ILLEGAL_REQUEST;
- task->task_se_cmd->transport_error_status =
- PYX_TRANSPORT_ILLEGAL_REQUEST;
+ task->task_se_cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+
}
transport_complete_task(task, good);
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
- transport_generic_request_failure(cmd, 1, 1);
+ transport_generic_request_failure(cmd);
}
/* transport_complete_task():
if (cmd->t_tasks_failed) {
if (!task->task_error_status) {
task->task_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
- cmd->transport_error_status =
- PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
+
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
atomic_set(&cmd->t_transport_complete, 1);
dev->se_hba = hba;
dev->se_sub_dev = se_dev;
dev->transport = transport;
- atomic_set(&dev->active_cmds, 0);
INIT_LIST_HEAD(&dev->dev_list);
INIT_LIST_HEAD(&dev->dev_sep_list);
INIT_LIST_HEAD(&dev->dev_tmr_list);
INIT_LIST_HEAD(&dev->execute_task_list);
INIT_LIST_HEAD(&dev->delayed_cmd_list);
- INIT_LIST_HEAD(&dev->ordered_cmd_list);
INIT_LIST_HEAD(&dev->state_task_list);
INIT_LIST_HEAD(&dev->qf_cmd_list);
spin_lock_init(&dev->execute_task_lock);
spin_lock_init(&dev->delayed_cmd_lock);
- spin_lock_init(&dev->ordered_cmd_lock);
- spin_lock_init(&dev->state_task_lock);
- spin_lock_init(&dev->dev_alua_lock);
spin_lock_init(&dev->dev_reservation_lock);
spin_lock_init(&dev->dev_status_lock);
- spin_lock_init(&dev->dev_status_thr_lock);
spin_lock_init(&dev->se_port_lock);
spin_lock_init(&dev->se_tmr_lock);
spin_lock_init(&dev->qf_cmd_lock);
{
INIT_LIST_HEAD(&cmd->se_lun_node);
INIT_LIST_HEAD(&cmd->se_delayed_node);
- INIT_LIST_HEAD(&cmd->se_ordered_node);
INIT_LIST_HEAD(&cmd->se_qf_node);
INIT_LIST_HEAD(&cmd->se_queue_node);
INIT_LIST_HEAD(&cmd->se_cmd_list);
pr_err("Received SCSI CDB with command_size: %d that"
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
return -EINVAL;
}
/*
" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
scsi_command_size(cdb),
(unsigned long)sizeof(cmd->__t_task_cdb));
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason =
+ TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
return -ENOMEM;
}
} else
 * and call transport_generic_request_failure() if necessary.
*/
ret = transport_generic_new_cmd(cmd);
- if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd, 0,
- (cmd->data_direction != DMA_TO_DEVICE));
- }
+ if (ret < 0)
+ transport_generic_request_failure(cmd);
+
return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);
/*
* Handle SAM-esque emulation for generic transport request failures.
*/
-static void transport_generic_request_failure(
- struct se_cmd *cmd,
- int complete,
- int sc)
+static void transport_generic_request_failure(struct se_cmd *cmd)
{
int ret = 0;
pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
cmd->t_task_cdb[0]);
- pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",
+ pr_debug("-----[ i_state: %d t_state: %d scsi_sense_reason: %d\n",
cmd->se_tfo->get_cmd_state(cmd),
- cmd->t_state,
- cmd->transport_error_status);
+ cmd->t_state, cmd->scsi_sense_reason);
pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
" t_transport_active: %d t_transport_stop: %d"
if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED)
transport_complete_task_attr(cmd);
- if (complete) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- }
-
- switch (cmd->transport_error_status) {
- case PYX_TRANSPORT_UNKNOWN_SAM_OPCODE:
- cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
- break;
- case PYX_TRANSPORT_REQ_TOO_MANY_SECTORS:
- cmd->scsi_sense_reason = TCM_SECTOR_COUNT_TOO_MANY;
- break;
- case PYX_TRANSPORT_INVALID_CDB_FIELD:
- cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
- break;
- case PYX_TRANSPORT_INVALID_PARAMETER_LIST:
- cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
- break;
- case PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES:
- if (!sc)
- transport_new_cmd_failure(cmd);
- /*
- * Currently for PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES,
- * we force this session to fall back to session
- * recovery.
- */
- cmd->se_tfo->fall_back_to_erl0(cmd->se_sess);
- cmd->se_tfo->stop_session(cmd->se_sess, 0, 0);
-
- goto check_stop;
- case PYX_TRANSPORT_LU_COMM_FAILURE:
- case PYX_TRANSPORT_ILLEGAL_REQUEST:
- cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
- case PYX_TRANSPORT_UNKNOWN_MODE_PAGE:
- cmd->scsi_sense_reason = TCM_UNKNOWN_MODE_PAGE;
- break;
- case PYX_TRANSPORT_WRITE_PROTECTED:
- cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
+ switch (cmd->scsi_sense_reason) {
+ case TCM_NON_EXISTENT_LUN:
+ case TCM_UNSUPPORTED_SCSI_OPCODE:
+ case TCM_INVALID_CDB_FIELD:
+ case TCM_INVALID_PARAMETER_LIST:
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
break;
- case PYX_TRANSPORT_RESERVATION_CONFLICT:
+ case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
* and queue the response to $FABRIC_MOD.
if (ret == -EAGAIN || ret == -ENOMEM)
goto queue_full;
goto check_stop;
- case PYX_TRANSPORT_USE_SENSE_REASON:
- /*
- * struct se_cmd->scsi_sense_reason already set
- */
- break;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
- cmd->t_task_cdb[0],
- cmd->transport_error_status);
+ cmd->t_task_cdb[0], cmd->scsi_sense_reason);
cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
break;
}
* transport_send_check_condition_and_sense() after handling
 * possible unsolicited write data payloads.
*/
- if (!sc && !cmd->se_tfo->new_cmd_map)
- transport_new_cmd_failure(cmd);
- else {
- ret = transport_send_check_condition_and_sense(cmd,
- cmd->scsi_sense_reason, 0);
- if (ret == -EAGAIN || ret == -ENOMEM)
- goto queue_full;
- }
+ ret = transport_send_check_condition_and_sense(cmd,
+ cmd->scsi_sense_reason, 0);
+ if (ret == -EAGAIN || ret == -ENOMEM)
+ goto queue_full;
check_stop:
transport_lun_remove_cmd(cmd);
* to allow the passed struct se_cmd list of tasks to the front of the list.
*/
if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_inc(&cmd->se_dev->dev_hoq_count);
- smp_mb__after_atomic_inc();
pr_debug("Added HEAD_OF_QUEUE for CDB:"
" 0x%02x, se_ordered_id: %u\n",
cmd->t_task_cdb[0],
cmd->se_ordered_id);
return 1;
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&cmd->se_dev->ordered_cmd_lock);
- list_add_tail(&cmd->se_ordered_node,
- &cmd->se_dev->ordered_cmd_list);
- spin_unlock(&cmd->se_dev->ordered_cmd_lock);
-
atomic_inc(&cmd->se_dev->dev_ordered_sync);
smp_mb__after_atomic_inc();
{
int add_tasks;
- if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {
- cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;
- transport_generic_request_failure(cmd, 0, 1);
+ if (se_dev_check_online(cmd->se_dev) != 0) {
+ cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ transport_generic_request_failure(cmd);
return 0;
}
else
error = dev->transport->do_task(task);
if (error != 0) {
- cmd->transport_error_status = error;
spin_lock_irqsave(&cmd->t_state_lock, flags);
task->task_flags &= ~TF_ACTIVE;
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
atomic_set(&cmd->t_transport_sent, 0);
transport_stop_tasks_for_cmd(cmd);
atomic_inc(&dev->depth_left);
- transport_generic_request_failure(cmd, 0, 1);
+ transport_generic_request_failure(cmd);
}
goto check_depth;
return 0;
}
-void transport_new_cmd_failure(struct se_cmd *se_cmd)
-{
- unsigned long flags;
- /*
- * Any unsolicited data will get dumped for failed command inside of
- * the fabric plugin
- */
- spin_lock_irqsave(&se_cmd->t_state_lock, flags);
- se_cmd->se_cmd_flags |= SCF_SE_CMD_FAILED;
- se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
-}
-
static inline u32 transport_get_sectors_6(
unsigned char *cdb,
struct se_cmd *cmd,
/*
* Everything else assume TYPE_DISK Sector CDB location.
- * Use 8-bit sector value.
+ * Use 8-bit sector value. SBC-3 says:
+ *
+ * A TRANSFER LENGTH field set to zero specifies that 256
+ * logical blocks shall be written. Any other value
+ * specifies the number of logical blocks that shall be
+ * written.
*/
type_disk:
- return (u32)cdb[4];
+ return cdb[4] ? : 256;
}
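A minimal standalone sketch of the SBC-3 rule quoted above, using a hypothetical helper name (not code from this patch); the TRANSFER LENGTH byte of a 6-byte CDB is decoded so that zero maps to 256 blocks:

	/* Hypothetical helper, assuming kernel u32: decode the TRANSFER
	 * LENGTH byte (cdb[4]) of a READ(6)/WRITE(6) CDB. Per SBC-3,
	 * a value of zero means 256 logical blocks. */
	static inline u32 cdb6_transfer_length(const unsigned char *cdb)
	{
		return cdb[4] ? cdb[4] : 256;
	}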
static inline u32 transport_get_sectors_10(
return -1;
}
-static int
-transport_handle_reservation_conflict(struct se_cmd *cmd)
-{
- cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
- cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
- cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
- /*
- * For UA Interlock Code 11b, a RESERVATION CONFLICT will
- * establish a UNIT ATTENTION with PREVIOUS RESERVATION
- * CONFLICT STATUS.
- *
- * See spc4r17, section 7.4.6 Control Mode Page, Table 349
- */
- if (cmd->se_sess &&
- cmd->se_dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl == 2)
- core_scsi3_ua_allocate(cmd->se_sess->se_node_acl,
- cmd->orig_fe_lun, 0x2C,
- ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
- return -EINVAL;
-}
-
static inline long long transport_dev_end_lba(struct se_device *dev)
{
return dev->transport->get_blocks(dev) + 1;
*/
if (su_dev->t10_pr.pr_ops.t10_reservation_check(cmd, &pr_reg_type) != 0) {
if (su_dev->t10_pr.pr_ops.t10_seq_non_holder(
- cmd, cdb, pr_reg_type) != 0)
- return transport_handle_reservation_conflict(cmd);
+ cmd, cdb, pr_reg_type) != 0) {
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;
+ cmd->scsi_sense_reason = TCM_RESERVATION_CONFLICT;
+ return -EBUSY;
+ }
/*
* This means the CDB is allowed for the SCSI Initiator port
* when said port is *NOT* holding the legacy SPC-2 or
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_12:
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_32(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case WRITE_16:
goto out_unsupported_cdb;
size = transport_get_size(sectors, cdb, cmd);
cmd->t_task_lba = transport_lba_64(cdb);
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;
break;
case XDWRITEREAD_10:
if ((cmd->data_direction != DMA_TO_DEVICE) ||
- !(cmd->t_tasks_bidi))
+ !(cmd->se_cmd_flags & SCF_BIDI))
goto out_invalid_cdb_field;
		sectors = transport_get_sectors_10(cdb, cmd, &sector_ret);
if (sector_ret)
* Setup BIDI XOR callback to be run after I/O completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[1] & 0x8);
+ if (cdb[1] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case VARIABLE_LENGTH_CMD:
service_action = get_unaligned_be16(&cdb[8]);
* completion.
*/
cmd->transport_complete_callback = &transport_xor_callback;
- cmd->t_tasks_fua = (cdb[10] & 0x8);
+		if (cdb[10] & 0x8)
+ cmd->se_cmd_flags |= SCF_FUA;
break;
case WRITE_SAME_32:
		sectors = transport_get_sectors_32(cdb, cmd, &sector_ret);
" SIMPLE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
- atomic_dec(&dev->dev_hoq_count);
- smp_mb__after_atomic_dec();
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for"
" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
cmd->se_ordered_id);
} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
- spin_lock(&dev->ordered_cmd_lock);
- list_del(&cmd->se_ordered_node);
atomic_dec(&dev->dev_ordered_sync);
smp_mb__after_atomic_dec();
- spin_unlock(&dev->ordered_cmd_lock);
dev->dev_cur_ordered_id++;
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
if ((cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) ||
(cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB)) {
+ /*
+ * Reject SCSI data overflow with map_mem_to_cmd() as incoming
+ * scatterlists already have been set to follow what the fabric
+ * passes for the original expected data transfer length.
+ */
+ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+ pr_warn("Rejecting SCSI DATA overflow for fabric using"
+ " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
+ cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+ cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+ return -EINVAL;
+ }
cmd->t_data_sg = sgl;
cmd->t_data_nents = sgl_count;
cmd->data_length) {
ret = transport_generic_get_mem(cmd);
if (ret < 0)
- return ret;
+ goto out_fail;
}
/*
task_cdbs = transport_allocate_control_task(cmd);
}
- if (task_cdbs <= 0)
+ if (task_cdbs < 0)
goto out_fail;
+ else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
+ cmd->t_state = TRANSPORT_COMPLETE;
+ atomic_set(&cmd->t_transport_active, 1);
+ INIT_WORK(&cmd->work, target_complete_ok_work);
+ queue_work(target_completion_wq, &cmd->work);
+ return 0;
+ }
if (set_counts) {
atomic_inc(&cmd->t_fe_count);
else if (ret < 0)
return ret;
- return PYX_TRANSPORT_WRITE_PENDING;
+ return 1;
queue_full:
pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
atomic_inc(&cmd->t_transport_aborted);
smp_mb__after_atomic_inc();
- cmd->scsi_status = SAM_STAT_TASK_ABORTED;
- transport_new_cmd_failure(cmd);
- return;
}
}
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
struct se_cmd *cmd;
struct se_device *dev = (struct se_device *) param;
- set_user_nice(current, -20);
-
while (!kthread_should_stop()) {
ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
atomic_read(&dev->dev_queue_obj.queue_cnt) ||
}
ret = cmd->se_tfo->new_cmd_map(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
break;
}
ret = transport_generic_new_cmd(cmd);
if (ret < 0) {
- cmd->transport_error_status = ret;
- transport_generic_request_failure(cmd,
- 0, (cmd->data_direction !=
- DMA_TO_DEVICE));
+ transport_generic_request_failure(cmd);
+ break;
}
break;
case TRANSPORT_PROCESS_WRITE:
lport = ep->lp;
fp = fc_frame_alloc(lport, sizeof(*txrdy));
if (!fp)
- return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
+ return -ENOMEM; /* Signal QUEUE_FULL */
txrdy = fc_frame_payload_get(fp, sizeof(*txrdy));
memset(txrdy, 0, sizeof(*txrdy));
struct ft_lport_acl *lacl = container_of(wwn,
struct ft_lport_acl, fc_lport_wwn);
- pr_debug("del lport %s\n",
- config_item_name(&wwn->wwn_group.cg_item));
+ pr_debug("del lport %s\n", lacl->name);
mutex_lock(&ft_lport_lock);
list_del(&lacl->list);
mutex_unlock(&ft_lport_lock);
},
{ USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
},
+ /* Motorola H24 HSPA module: */
+ { USB_DEVICE(0x22b8, 0x2d91) }, /* modem */
+ { USB_DEVICE(0x22b8, 0x2d92) }, /* modem + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d93) }, /* modem + AT port */
+ { USB_DEVICE(0x22b8, 0x2d95) }, /* modem + AT port + diagnostics */
+ { USB_DEVICE(0x22b8, 0x2d96) }, /* modem + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d97) }, /* modem + diagnostics + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d99) }, /* modem + AT port + NMEA */
+ { USB_DEVICE(0x22b8, 0x2d9a) }, /* modem + AT port + diagnostics + NMEA */
+
{ USB_DEVICE(0x0572, 0x1329), /* Hummingbird huc56s (Conexant) */
.driver_info = NO_UNION_NORMAL, /* union descriptor misplaced on
data interface instead of
u32 tmp;
if (!driver || !bind || !driver->setup
- || driver->speed != USB_SPEED_HIGH)
+ || driver->speed < USB_SPEED_HIGH)
return -EINVAL;
if (!dev)
return -ENODEV;
fsg_common_put(common);
usb_free_descriptors(fsg->function.descriptors);
usb_free_descriptors(fsg->function.hs_descriptors);
+ usb_free_descriptors(fsg->function.ss_descriptors);
kfree(fsg);
}
}
if (!gser->port.in->desc || !gser->port.out->desc) {
DBG(cdev, "activate generic ttyGS%d\n", gser->port_num);
- if (!config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
- !config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
+ if (config_ep_by_speed(cdev->gadget, f, gser->port.in) ||
+ config_ep_by_speed(cdev->gadget, f, gser->port.out)) {
gser->port.in->desc = NULL;
gser->port.out->desc = NULL;
return -EINVAL;
#include <linux/err.h>
#include <linux/fsl_devices.h>
#include <linux/platform_device.h>
+#include <linux/io.h>
#include <mach/hardware.h>
void fsl_udc_clk_finalize(struct platform_device *pdev)
{
struct fsl_usb2_platform_data *pdata = pdev->dev.platform_data;
-#if defined(CONFIG_SOC_IMX35)
if (cpu_is_mx35()) {
unsigned int v;
USBPHYCTRL_OTGBASE_OFFSET));
}
}
-#endif
/* ULPI transceivers don't need usbpll */
if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
kfree(req);
}
-/*-------------------------------------------------------------------------*/
+/* Actually add a dTD chain to an empty dQH and let go */
+static void fsl_prime_ep(struct fsl_ep *ep, struct ep_td_struct *td)
+{
+ struct ep_queue_head *qh = get_qh_by_ep(ep);
+
+ /* Write dQH next pointer and terminate bit to 0 */
+ qh->next_dtd_ptr = cpu_to_hc32(td->td_dma
+ & EP_QUEUE_HEAD_NEXT_POINTER_MASK);
+
+ /* Clear active and halt bit */
+ qh->size_ioc_int_sts &= cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
+ | EP_QUEUE_HEAD_STATUS_HALT));
+
+ /* Ensure that updates to the QH will occur before priming. */
+ wmb();
+
+ /* Prime endpoint by writing correct bit to ENDPTPRIME */
+ fsl_writel(ep_is_in(ep) ? (1 << (ep_index(ep) + 16))
+ : (1 << (ep_index(ep))), &dr_regs->endpointprime);
+}
+
+/* Add dTD chain to the dQH of an EP */
static void fsl_queue_td(struct fsl_ep *ep, struct fsl_req *req)
{
- int i = ep_index(ep) * 2 + ep_is_in(ep);
u32 temp, bitmask, tmp_stat;
- struct ep_queue_head *dQH = &ep->udc->ep_qh[i];
/* VDBG("QH addr Register 0x%8x", dr_regs->endpointlistaddr);
VDBG("ep_qh[%d] addr is 0x%8x", i, (u32)&(ep->udc->ep_qh[i])); */
cpu_to_hc32(req->head->td_dma & DTD_ADDR_MASK);
/* Read prime bit, if 1 goto done */
if (fsl_readl(&dr_regs->endpointprime) & bitmask)
- goto out;
+ return;
do {
/* Set ATDTW bit in USBCMD */
fsl_writel(temp & ~USB_CMD_ATDTW, &dr_regs->usbcmd);
if (tmp_stat)
- goto out;
+ return;
}
- /* Write dQH next pointer and terminate bit to 0 */
- temp = req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK;
- dQH->next_dtd_ptr = cpu_to_hc32(temp);
-
- /* Clear active and halt bit */
- temp = cpu_to_hc32(~(EP_QUEUE_HEAD_STATUS_ACTIVE
- | EP_QUEUE_HEAD_STATUS_HALT));
- dQH->size_ioc_int_sts &= temp;
-
- /* Ensure that updates to the QH will occur before priming. */
- wmb();
-
- /* Prime endpoint by writing 1 to ENDPTPRIME */
- temp = ep_is_in(ep)
- ? (1 << (ep_index(ep) + 16))
- : (1 << (ep_index(ep)));
- fsl_writel(temp, &dr_regs->endpointprime);
-out:
- return;
+ fsl_prime_ep(ep, req->head);
}
/* Fill in the dTD structure
VDBG("%s, bad ep", __func__);
return -EINVAL;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
if (req->req.length > ep->ep.maxpacket)
return -EMSGSIZE;
}
/* The request isn't the last request in this ep queue */
if (req->queue.next != &ep->queue) {
- struct ep_queue_head *qh;
struct fsl_req *next_req;
- qh = ep->qh;
next_req = list_entry(req->queue.next, struct fsl_req,
queue);
- /* Point the QH to the first TD of next request */
- fsl_writel((u32) next_req->head, &qh->curr_dtd_ptr);
+ /* prime with dTD of next request */
+ fsl_prime_ep(ep, next_req->head);
}
-
- /* The request hasn't been processed, patch up the TD chain */
+ /* The request hasn't been processed, patch up the TD chain */
} else {
struct fsl_req *prev_req;
prev_req = list_entry(req->queue.prev, struct fsl_req, queue);
- fsl_writel(fsl_readl(&req->tail->next_td_ptr),
- &prev_req->tail->next_td_ptr);
-
+ prev_req->tail->next_td_ptr = req->tail->next_td_ptr;
}
done(ep, req, -ECONNRESET);
goto out;
}
- if (ep->desc->bmAttributes == USB_ENDPOINT_XFER_ISOC) {
+ if (usb_endpoint_xfer_isoc(ep->desc)) {
status = -EOPNOTSUPP;
goto out;
}
struct fsl_udc *udc;
int size = 0;
u32 bitmask;
- struct ep_queue_head *d_qh;
+ struct ep_queue_head *qh;
ep = container_of(_ep, struct fsl_ep, ep);
if (!_ep || (!ep->desc && ep_index(ep) != 0))
if (!udc->driver || udc->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
- d_qh = &ep->udc->ep_qh[ep_index(ep) * 2 + ep_is_in(ep)];
+ qh = get_qh_by_ep(ep);
bitmask = (ep_is_in(ep)) ? (1 << (ep_index(ep) + 16)) :
(1 << (ep_index(ep)));
if (fsl_readl(&dr_regs->endptstatus) & bitmask)
- size = (d_qh->size_ioc_int_sts & DTD_PACKET_SIZE)
+ size = (qh->size_ioc_int_sts & DTD_PACKET_SIZE)
>> DTD_LENGTH_BIT_POS;
pr_debug("%s %u\n", __func__, size);
if (!udc_controller)
return -ENODEV;
- if (!driver || (driver->speed != USB_SPEED_FULL
- && driver->speed != USB_SPEED_HIGH)
+ if (!driver || driver->speed < USB_SPEED_FULL
|| !bind || !driver->disconnect || !driver->setup)
return -EINVAL;
* 2 + ((windex & USB_DIR_IN) ? 1 : 0))
#define get_pipe_by_ep(EP) (ep_index(EP) * 2 + ep_is_in(EP))
+static inline struct ep_queue_head *get_qh_by_ep(struct fsl_ep *ep)
+{
+ /* we only have one ep0 structure but two queue heads */
+ if (ep_index(ep) != 0)
+ return ep->qh;
+ else
+ return &ep->udc->ep_qh[(ep->udc->ep0_dir ==
+ USB_DIR_IN) ? 1 : 0];
+}
+
struct platform_device;
#ifdef CONFIG_ARCH_MXC
int fsl_udc_clk_init(struct platform_device *pdev);
int retval;
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->speed < USB_SPEED_HIGH
|| !bind
|| !driver->setup)
return -EINVAL;
* (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
* "must not be used in normal operation"
*/
- if (!driver || driver->speed != USB_SPEED_HIGH
+ if (!driver || driver->speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
struct r8a66597 *r8a66597 = gadget_to_r8a66597(gadget);
if (!driver
- || driver->speed != USB_SPEED_HIGH
+ || driver->speed < USB_SPEED_HIGH
|| !driver->setup)
return -EINVAL;
if (!r8a66597)
return -EINVAL;
}
- if (driver->speed != USB_SPEED_HIGH &&
- driver->speed != USB_SPEED_FULL) {
+ if (driver->speed < USB_SPEED_FULL)
dev_err(hsotg->dev, "%s: bad speed\n", __func__);
- }
if (!bind || !driver->setup) {
dev_err(hsotg->dev, "%s: missing entry points\n", __func__);
int ret;
if (!driver
- || (driver->speed != USB_SPEED_FULL &&
- driver->speed != USB_SPEED_HIGH)
+ || driver->speed < USB_SPEED_FULL
|| !bind
|| !driver->unbind || !driver->disconnect || !driver->setup)
return -EINVAL;
* jump until after the queue is primed.
*/
else {
+ int done = 0;
start = SCHEDULE_SLOP + (now & ~0x07);
/* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
if (stream->highspeed) {
if (itd_slot_ok(ehci, mod, start,
stream->usecs, period))
- break;
+ done = 1;
} else {
if ((start % 8) >= 6)
continue;
if (sitd_slot_ok(ehci, mod, stream,
start, sched, period))
- break;
+ done = 1;
}
- } while (start > next);
+ } while (start > next && !done);
/* no room in the schedule */
- if (start == next) {
+ if (!done) {
ehci_dbg(ehci, "iso resched full %p (now %d max %d)\n",
urb, now, now + mod);
status = -ENOSPC;
{
qset->td_start = qset->td_end = qset->ntds = 0;
- qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
+ qset->qh.link = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
qset->qh.err_count = 0;
qset->qh.scratch[0] = 0;
ring = xhci->cmd_ring;
seg = ring->deq_seg;
do {
- memset(seg->trbs, 0, SEGMENT_SIZE);
+ memset(seg->trbs, 0,
+ sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+ cpu_to_le32(~TRB_CYCLE);
seg = seg->next;
} while (seg != ring->deq_seg);
*/
}
- musb_save_context(musb);
-
spin_unlock_irqrestore(&musb->lock, flags);
return 0;
}
static int musb_resume_noirq(struct device *dev)
{
- struct musb *musb = dev_to_musb(dev);
-
- musb_restore_context(musb);
-
/* for static cmos like DaVinci, register values were preserved
* unless for some reason the whole soc powered down or the USB
* module got reset through the PSC (vs just being disabled).
unsigned long flags;
int retval = -EINVAL;
- if (driver->speed != USB_SPEED_HIGH)
+ if (driver->speed < USB_SPEED_HIGH)
goto err0;
pm_runtime_get_sync(musb->controller);
if (mod->irq_attch)
intenb1 |= ATTCHE;
- if (mod->irq_attch)
+ if (mod->irq_dtch)
intenb1 |= DTCHE;
if (mod->irq_sign)
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
- int ret;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->setup ||
- driver->speed != USB_SPEED_HIGH)
+ driver->speed < USB_SPEED_FULL)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
/* first hook up the driver ... */
gpriv->driver = driver;
gpriv->gadget.dev.driver = &driver->driver;
- ret = device_add(&gpriv->gadget.dev);
- if (ret) {
- dev_err(dev, "device_add error %d\n", ret);
- goto add_fail;
- }
-
return usbhsg_try_start(priv, USBHSG_STATUS_REGISTERD);
-
-add_fail:
- gpriv->driver = NULL;
- gpriv->gadget.dev.driver = NULL;
-
- return ret;
}
static int usbhsg_gadget_stop(struct usb_gadget *gadget,
struct usb_gadget_driver *driver)
{
struct usbhsg_gpriv *gpriv = usbhsg_gadget_to_gpriv(gadget);
- struct usbhs_priv *priv;
- struct device *dev;
+ struct usbhs_priv *priv = usbhsg_gpriv_to_priv(gpriv);
if (!driver ||
!driver->unbind)
return -EINVAL;
- dev = usbhsg_gpriv_to_dev(gpriv);
- priv = usbhsg_gpriv_to_priv(gpriv);
-
usbhsg_try_stop(priv, USBHSG_STATUS_REGISTERD);
- device_del(&gpriv->gadget.dev);
+ gpriv->gadget.dev.driver = NULL;
gpriv->driver = NULL;
return 0;
static int usbhsg_stop(struct usbhs_priv *priv)
{
+ struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv);
+
+ /* cable disconnect */
+ if (gpriv->driver &&
+ gpriv->driver->disconnect)
+ gpriv->driver->disconnect(&gpriv->gadget);
+
return usbhsg_try_stop(priv, USBHSG_STATUS_STARTED);
}
/*
* init gadget
*/
- device_initialize(&gpriv->gadget.dev);
dev_set_name(&gpriv->gadget.dev, "gadget");
gpriv->gadget.dev.parent = dev;
gpriv->gadget.name = "renesas_usbhs_udc";
gpriv->gadget.ops = &usbhsg_gadget_ops;
gpriv->gadget.is_dualspeed = 1;
+ ret = device_register(&gpriv->gadget.dev);
+ if (ret < 0)
+ goto err_add_udc;
INIT_LIST_HEAD(&gpriv->gadget.ep_list);
ret = usb_add_gadget_udc(dev, &gpriv->gadget);
if (ret)
- goto err_add_udc;
+ goto err_register;
dev_info(dev, "gadget probed\n");
return 0;
+
+err_register:
+ device_unregister(&gpriv->gadget.dev);
err_add_udc:
kfree(gpriv->uep);
usb_del_gadget_udc(&gpriv->gadget);
+ device_unregister(&gpriv->gadget.dev);
+
usbhsg_controller_unregister(gpriv);
kfree(gpriv->uep);
dev_err(dev, "Failed to create hcd\n");
return -ENOMEM;
}
+ hcd->has_tt = 1; /* for low/full speed */
pipe_info = kzalloc(sizeof(*pipe_info) * pipe_size, GFP_KERNEL);
if (!pipe_info) {
{ USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_ELSTER_UNICOM_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_PROPOX_JTAGCABLEII_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_PROPOX_ISPCABLEIII_PID) },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
{ USB_DEVICE(OLIMEX_VID, OLIMEX_ARM_USB_OCD_H_PID),
/* Propox devices */
#define FTDI_PROPOX_JTAGCABLEII_PID 0xD738
+#define FTDI_PROPOX_ISPCABLEIII_PID 0xD739
/* Lenz LI-USB Computer Interface. */
#define FTDI_LENZ_LIUSB_PID 0xD780
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
+ { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
+ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE ),
+/* Reported by Qinglin Ye <yestyle@gmail.com> */
+UNUSUAL_DEV( 0x13fe, 0x3600, 0x0100, 0x0100,
+ "Kingston",
+ "DT 101 G2",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BULK_IGNORE_TAG ),
+
/* Reported by Francesco Foresti <frafore@tiscali.it> */
UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
"Super Top",
source "drivers/video/omap2/Kconfig"
source "drivers/video/backlight/Kconfig"
-source "drivers/video/display/Kconfig"
if VT
source "drivers/video/console/Kconfig"
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_LOGO) += logo/
-obj-y += backlight/ display/
+obj-y += backlight/
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
- hsstrt: Start of horizontal synchronization pulse
- hsstop: End of horizontal synchronization pulse
- - htotal: Last value on the line (i.e. line length = htotal+1)
+ - htotal: Last value on the line (i.e. line length = htotal + 1)
- vsstrt: Start of vertical synchronization pulse
- vsstop: End of vertical synchronization pulse
- - vtotal: Last line value (i.e. number of lines = vtotal+1)
+ - vtotal: Last line value (i.e. number of lines = vtotal + 1)
- hcenter: Start of vertical retrace for interlace
You can specify the blanking timings independently. Currently I just set
clock):
- diwstrt_h: Horizontal start of the visible window
- - diwstop_h: Horizontal stop+1(*) of the visible window
+ - diwstop_h: Horizontal stop + 1(*) of the visible window
- diwstrt_v: Vertical start of the visible window
- diwstop_v: Vertical stop of the visible window
- ddfstrt: Horizontal start of display DMA
Sprite positioning:
- - sprstrt_h: Horizontal start-4 of sprite
+ - sprstrt_h: Horizontal start - 4 of sprite
- sprstrt_v: Vertical start of sprite
(*) Even Commodore did it wrong in the AGA monitor drivers by not adding 1.
display parameters. Here's what I found out:
- ddfstrt and ddfstop are best aligned to 64 pixels.
- - the chipset needs 64+4 horizontal pixels after the DMA start before the
- first pixel is output, so diwstrt_h = ddfstrt+64+4 if you want to
- display the first pixel on the line too. Increase diwstrt_h for virtual
- screen panning.
+ - the chipset needs 64 + 4 horizontal pixels after the DMA start before
+ the first pixel is output, so diwstrt_h = ddfstrt + 64 + 4 if you want
+ to display the first pixel on the line too. Increase diwstrt_h for
+ virtual screen panning.
- the display DMA always fetches 64 pixels at a time (fmode = 3).
- - ddfstop is ddfstrt+#pixels-64.
- - diwstop_h = diwstrt_h+xres+1. Because of the additional 1 this can be 1
- more than htotal.
+ - ddfstop is ddfstrt+#pixels - 64.
+ - diwstop_h = diwstrt_h + xres + 1. Because of the additional 1 this can
+ be 1 more than htotal.
- hscroll simply adds a delay to the display output. Smooth horizontal
- panning needs an extra 64 pixels on the left to prefetch the pixels that
- `fall off' on the left.
+ panning needs an extra 64 pixels on the left to prefetch the pixels that
+ `fall off' on the left.
- if ddfstrt < 192, the sprite DMA cycles are all stolen by the bitplane
- DMA, so it's best to make the DMA start as late as possible.
+ DMA, so it's best to make the DMA start as late as possible.
- you really don't want to make ddfstrt < 128, since this will steal DMA
- cycles from the other DMA channels (audio, floppy and Chip RAM refresh).
+ cycles from the other DMA channels (audio, floppy and Chip RAM refresh).
- I make diwstop_h and diwstop_v as large as possible.
General dependencies
- all values are SHRES pixel (35ns)
- table 1:fetchstart table 2:prefetch table 3:fetchsize
- ------------------ ---------------- -----------------
+ table 1:fetchstart table 2:prefetch table 3:fetchsize
+ ------------------ ---------------- -----------------
Pixclock # SHRES|HIRES|LORES # SHRES|HIRES|LORES # SHRES|HIRES|LORES
-------------#------+-----+------#------+-----+------#------+-----+------
Bus width 1x # 16 | 32 | 64 # 16 | 32 | 64 # 64 | 64 | 64
- chipset needs 4 pixels before the first pixel is output
- ddfstrt must be aligned to fetchstart (table 1)
- chipset needs also prefetch (table 2) to get first pixel data, so
- ddfstrt = ((diwstrt_h-4) & -fetchstart) - prefetch
+ ddfstrt = ((diwstrt_h - 4) & -fetchstart) - prefetch
- for horizontal panning decrease diwstrt_h
- the length of a fetchline must be aligned to fetchsize (table 3)
- if fetchstart is smaller than fetchsize, then ddfstrt can a little bit
- moved to optimize use of dma (useful for OCS/ECS overscan displays)
- - ddfstop is ddfstrt+ddfsize-fetchsize
+ moved to optimize use of dma (useful for OCS/ECS overscan displays)
+ - ddfstop is ddfstrt + ddfsize - fetchsize
- If C= didn't change anything for AGA, then at following positions the
- dma bus is already used:
- ddfstrt < 48 -> memory refresh
- < 96 -> disk dma
- < 160 -> audio dma
- < 192 -> sprite 0 dma
- < 416 -> sprite dma (32 per sprite)
+ dma bus is already used:
+ ddfstrt < 48 -> memory refresh
+ < 96 -> disk dma
+ < 160 -> audio dma
+ < 192 -> sprite 0 dma
+ < 416 -> sprite dma (32 per sprite)
- in accordance with the hardware reference manual a hardware stop is at
- 192, but AGA (ECS?) can go below this.
+ 192, but AGA (ECS?) can go below this.
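 	A hypothetical worked example of the ddfstrt formula above, for a HIRES
 	display with bus width 1x (table 1: fetchstart = 32, table 2: prefetch = 32)
 	and an illustrative diwstrt_h of 196:

 	  ddfstrt = ((196 - 4) & -32) - 32 = (192 & -32) - 32 = 160

 	160 is aligned to fetchstart and stays clear of the memory refresh, disk
 	and audio DMA slots listed above, though being below 192 it still overlaps
 	the sprite 0 DMA slot.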
DMA priorities
--------------
the hardware cursor:
- if you want to start display DMA too early, you lose the ability to
- do smooth horizontal panning (xpanstep 1 -> 64).
+ do smooth horizontal panning (xpanstep 1 -> 64).
- if you want to go even further, you lose the hardware cursor too.
IMHO a hardware cursor is more important for X than horizontal scrolling,
Standard VGA timings
--------------------
- xres yres left right upper lower hsync vsync
- ---- ---- ---- ----- ----- ----- ----- -----
+ xres yres left right upper lower hsync vsync
+ ---- ---- ---- ----- ----- ----- ----- -----
80x25 720 400 27 45 35 12 108 2
80x30 720 480 27 45 30 9 108 2
As a comparison, graphics/monitor.h suggests the following:
- xres yres left right upper lower hsync vsync
- ---- ---- ---- ----- ----- ----- ----- -----
+ xres yres left right upper lower hsync vsync
+ ---- ---- ---- ----- ----- ----- ----- -----
VGA 640 480 52 112 24 19 112 - 2 +
VGA70 640 400 52 112 27 21 112 - 2 -
VSYNC HSYNC Vertical size Vertical total
----- ----- ------------- --------------
- + + Reserved Reserved
- + - 400 414
- - + 350 362
- - - 480 496
+ + + Reserved Reserved
+ + - 400 414
+ - + 350 362
+ - - 480 496
Source: CL-GD542X Technical Reference Manual, Cirrus Logic, Oct 1992
-----------
- a scanline is 64 µs long, of which 52.48 µs are visible. This is about
- 736 visible 70 ns pixels per line.
+ 736 visible 70 ns pixels per line.
- we have 625 scanlines, of which 575 are visible (interlaced); after
- rounding this becomes 576.
+ rounding this becomes 576.
RETMA -> NTSC
-------------
- a scanline is 63.5 µs long, of which 53.5 µs are visible. This is about
- 736 visible 70 ns pixels per line.
+ 736 visible 70 ns pixels per line.
- we have 525 scanlines, of which 485 are visible (interlaced); after
- rounding this becomes 484.
+ rounding this becomes 484.
Thus if you want a PAL compatible display, you have to do the following:
- set the FB_SYNC_BROADCAST flag to indicate that standard broadcast
- timings are to be used.
- - make sure upper_margin+yres+lower_margin+vsync_len = 625 for an
- interlaced, 312 for a non-interlaced and 156 for a doublescanned
- display.
- - make sure left_margin+xres+right_margin+hsync_len = 1816 for a SHRES,
- 908 for a HIRES and 454 for a LORES display.
+ timings are to be used.
+ - make sure upper_margin + yres + lower_margin + vsync_len = 625 for an
+ interlaced, 312 for a non-interlaced and 156 for a doublescanned
+ display.
+ - make sure left_margin + xres + right_margin + hsync_len = 1816 for a
+ SHRES, 908 for a HIRES and 454 for a LORES display.
- the left visible part begins at 360 (SHRES; HIRES:180, LORES:90),
- left_margin+2*hsync_len must be greater or equal.
+ left_margin + 2 * hsync_len must be greater or equal.
- the upper visible part begins at 48 (interlaced; non-interlaced:24,
- doublescanned:12), upper_margin+2*vsync_len must be greater or equal.
+ doublescanned:12), upper_margin + 2 * vsync_len must be greater or
+ equal.
- ami_encode_var() calculates margins with a hsync of 5320 ns and a vsync
- of 4 scanlines
+ of 4 scanlines
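 	As a quick check of the PAL constraints above, the "pal-lace" entry in the
 	mode database further down in this patch (640x512 interlaced, HIRES,
 	margins 106/86/80/29, sync lengths 76/4) works out to:

 	  upper_margin + yres + lower_margin + vsync_len = 80 + 512 + 29 + 4   = 625
 	  left_margin + xres + right_margin + hsync_len  = 106 + 640 + 86 + 76 = 908

 	i.e. exactly the 625 lines required for an interlaced display and the 908
 	pixels required for HIRES.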
The settings for a NTSC compatible display are straightforward.
anything about horizontal/vertical synchronization nor refresh rates.
- -- Geert --
+ -- Geert --
*******************************************************************************/
* Various macros
*/
-#define up2(v) (((v)+1) & -2)
+#define up2(v) (((v) + 1) & -2)
#define down2(v) ((v) & -2)
#define div2(v) ((v)>>1)
#define mod2(v) ((v) & 1)
-#define up4(v) (((v)+3) & -4)
+#define up4(v) (((v) + 3) & -4)
#define down4(v) ((v) & -4)
-#define mul4(v) ((v)<<2)
+#define mul4(v) ((v) << 2)
#define div4(v) ((v)>>2)
#define mod4(v) ((v) & 3)
-#define up8(v) (((v)+7) & -8)
+#define up8(v) (((v) + 7) & -8)
#define down8(v) ((v) & -8)
#define div8(v) ((v)>>3)
#define mod8(v) ((v) & 7)
-#define up16(v) (((v)+15) & -16)
+#define up16(v) (((v) + 15) & -16)
#define down16(v) ((v) & -16)
#define div16(v) ((v)>>4)
#define mod16(v) ((v) & 15)
-#define up32(v) (((v)+31) & -32)
+#define up32(v) (((v) + 31) & -32)
#define down32(v) ((v) & -32)
#define div32(v) ((v)>>5)
#define mod32(v) ((v) & 31)
-#define up64(v) (((v)+63) & -64)
+#define up64(v) (((v) + 63) & -64)
#define down64(v) ((v) & -64)
#define div64(v) ((v)>>6)
#define mod64(v) ((v) & 63)
-#define upx(x,v) (((v)+(x)-1) & -(x))
-#define downx(x,v) ((v) & -(x))
-#define modx(x,v) ((v) & ((x)-1))
+#define upx(x, v) (((v) + (x) - 1) & -(x))
+#define downx(x, v) ((v) & -(x))
+#define modx(x, v) ((v) & ((x) - 1))
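A few worked values for the rounding helpers above, for illustration only (not
part of the header):

	up8(13)      = (13 + 7) & -8    = 16
	down8(13)    = 13 & -8          = 8
	mod8(13)     = 13 & 7           = 5
	upx(32, 160) = (160 + 31) & -32 = 160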
/* if x1 is not a constant, this macro won't make real sense :-) */
#ifdef __mc68000__
#define DIVUL(x1, x2) ({int res; asm("divul %1,%2,%3": "=d" (res): \
- "d" (x2), "d" ((long)((x1)/0x100000000ULL)), "0" ((long)(x1))); res;})
+ "d" (x2), "d" ((long)((x1) / 0x100000000ULL)), "0" ((long)(x1))); res;})
#else
/* We know a bit about the numbers, so we can do it this way */
#define DIVUL(x1, x2) ((((long)((unsigned long long)x1 >> 8) / x2) << 8) + \
#define VIDEOMEMSIZE_ECS_1M (393216) /* ECS (1MB) : max 1024*768*16 */
#define VIDEOMEMSIZE_OCS (262144) /* OCS : max ca. 800*600*16 */
-#define SPRITEMEMSIZE (64*64/4) /* max 64*64*4 */
+#define SPRITEMEMSIZE (64 * 64 / 4) /* max 64*64*4 */
#define DUMMYSPRITEMEMSIZE (8)
static u_long spritememory;
* Copper Instructions
*/
-#define CMOVE(val, reg) (CUSTOM_OFS(reg)<<16 | (val))
-#define CMOVE2(val, reg) ((CUSTOM_OFS(reg)+2)<<16 | (val))
-#define CWAIT(x, y) (((y) & 0x1fe)<<23 | ((x) & 0x7f0)<<13 | 0x0001fffe)
+#define CMOVE(val, reg) (CUSTOM_OFS(reg) << 16 | (val))
+#define CMOVE2(val, reg) ((CUSTOM_OFS(reg) + 2) << 16 | (val))
+#define CWAIT(x, y) (((y) & 0x1fe) << 23 | ((x) & 0x7f0) << 13 | 0x0001fffe)
#define CEND (0xfffffffe)
* Current Video Mode
*/
-static struct amifb_par {
+struct amifb_par {
/* General Values */
/* Additional AGA Hardware Registers */
u_short fmode; /* vmode */
-} currentpar;
-
-
-static struct fb_info fb_info = {
- .fix = {
- .id = "Amiga ",
- .visual = FB_VISUAL_PSEUDOCOLOR,
- .accel = FB_ACCEL_AMIGABLITT
- }
};
static struct fb_videomode ami_modedb[] __initdata = {
- /*
- * AmigaOS Video Modes
- *
- * If you change these, make sure to update DEFMODE_* as well!
- */
-
- {
- /* 640x200, 15 kHz, 60 Hz (NTSC) */
- "ntsc", 60, 640, 200, TAG_HIRES, 106, 86, 44, 16, 76, 2,
- FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 15 kHz, 60 Hz interlaced (NTSC) */
- "ntsc-lace", 60, 640, 400, TAG_HIRES, 106, 86, 88, 33, 76, 4,
- FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x256, 15 kHz, 50 Hz (PAL) */
- "pal", 50, 640, 256, TAG_HIRES, 106, 86, 40, 14, 76, 2,
- FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x512, 15 kHz, 50 Hz interlaced (PAL) */
- "pal-lace", 50, 640, 512, TAG_HIRES, 106, 86, 80, 29, 76, 4,
- FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x480, 29 kHz, 57 Hz */
- "multiscan", 57, 640, 480, TAG_SHRES, 96, 112, 29, 8, 72, 8,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x960, 29 kHz, 57 Hz interlaced */
- "multiscan-lace", 57, 640, 960, TAG_SHRES, 96, 112, 58, 16, 72, 16,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x200, 15 kHz, 72 Hz */
- "euro36", 72, 640, 200, TAG_HIRES, 92, 124, 6, 6, 52, 5,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 15 kHz, 72 Hz interlaced */
- "euro36-lace", 72, 640, 400, TAG_HIRES, 92, 124, 12, 12, 52, 10,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 29 kHz, 68 Hz */
- "euro72", 68, 640, 400, TAG_SHRES, 164, 92, 9, 9, 80, 8,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x800, 29 kHz, 68 Hz interlaced */
- "euro72-lace", 68, 640, 800, TAG_SHRES, 164, 92, 18, 18, 80, 16,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 800x300, 23 kHz, 70 Hz */
- "super72", 70, 800, 300, TAG_SHRES, 212, 140, 10, 11, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 800x600, 23 kHz, 70 Hz interlaced */
- "super72-lace", 70, 800, 600, TAG_SHRES, 212, 140, 20, 22, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x200, 27 kHz, 57 Hz doublescan */
- "dblntsc", 57, 640, 200, TAG_SHRES, 196, 124, 18, 17, 80, 4,
- 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
- }, {
- /* 640x400, 27 kHz, 57 Hz */
- "dblntsc-ff", 57, 640, 400, TAG_SHRES, 196, 124, 36, 35, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x800, 27 kHz, 57 Hz interlaced */
- "dblntsc-lace", 57, 640, 800, TAG_SHRES, 196, 124, 72, 70, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x256, 27 kHz, 47 Hz doublescan */
- "dblpal", 47, 640, 256, TAG_SHRES, 196, 124, 14, 13, 80, 4,
- 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
- }, {
- /* 640x512, 27 kHz, 47 Hz */
- "dblpal-ff", 47, 640, 512, TAG_SHRES, 196, 124, 28, 27, 80, 7,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x1024, 27 kHz, 47 Hz interlaced */
- "dblpal-lace", 47, 640, 1024, TAG_SHRES, 196, 124, 56, 54, 80, 14,
- 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
- },
-
- /*
- * VGA Video Modes
- */
-
- {
- /* 640x480, 31 kHz, 60 Hz (VGA) */
- "vga", 60, 640, 480, TAG_SHRES, 64, 96, 30, 9, 112, 2,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 640x400, 31 kHz, 70 Hz (VGA) */
- "vga70", 70, 640, 400, TAG_SHRES, 64, 96, 35, 12, 112, 2,
- FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- },
+ /*
+ * AmigaOS Video Modes
+ *
+ * If you change these, make sure to update DEFMODE_* as well!
+ */
+
+ {
+ /* 640x200, 15 kHz, 60 Hz (NTSC) */
+ "ntsc", 60, 640, 200, TAG_HIRES, 106, 86, 44, 16, 76, 2,
+ FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 15 kHz, 60 Hz interlaced (NTSC) */
+ "ntsc-lace", 60, 640, 400, TAG_HIRES, 106, 86, 88, 33, 76, 4,
+ FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x256, 15 kHz, 50 Hz (PAL) */
+ "pal", 50, 640, 256, TAG_HIRES, 106, 86, 40, 14, 76, 2,
+ FB_SYNC_BROADCAST, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x512, 15 kHz, 50 Hz interlaced (PAL) */
+ "pal-lace", 50, 640, 512, TAG_HIRES, 106, 86, 80, 29, 76, 4,
+ FB_SYNC_BROADCAST, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x480, 29 kHz, 57 Hz */
+ "multiscan", 57, 640, 480, TAG_SHRES, 96, 112, 29, 8, 72, 8,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x960, 29 kHz, 57 Hz interlaced */
+ "multiscan-lace", 57, 640, 960, TAG_SHRES, 96, 112, 58, 16, 72,
+ 16,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x200, 15 kHz, 72 Hz */
+ "euro36", 72, 640, 200, TAG_HIRES, 92, 124, 6, 6, 52, 5,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 15 kHz, 72 Hz interlaced */
+ "euro36-lace", 72, 640, 400, TAG_HIRES, 92, 124, 12, 12, 52,
+ 10,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 29 kHz, 68 Hz */
+ "euro72", 68, 640, 400, TAG_SHRES, 164, 92, 9, 9, 80, 8,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x800, 29 kHz, 68 Hz interlaced */
+ "euro72-lace", 68, 640, 800, TAG_SHRES, 164, 92, 18, 18, 80,
+ 16,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 800x300, 23 kHz, 70 Hz */
+ "super72", 70, 800, 300, TAG_SHRES, 212, 140, 10, 11, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 800x600, 23 kHz, 70 Hz interlaced */
+ "super72-lace", 70, 800, 600, TAG_SHRES, 212, 140, 20, 22, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x200, 27 kHz, 57 Hz doublescan */
+ "dblntsc", 57, 640, 200, TAG_SHRES, 196, 124, 18, 17, 80, 4,
+ 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 27 kHz, 57 Hz */
+ "dblntsc-ff", 57, 640, 400, TAG_SHRES, 196, 124, 36, 35, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x800, 27 kHz, 57 Hz interlaced */
+ "dblntsc-lace", 57, 640, 800, TAG_SHRES, 196, 124, 72, 70, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x256, 27 kHz, 47 Hz doublescan */
+ "dblpal", 47, 640, 256, TAG_SHRES, 196, 124, 14, 13, 80, 4,
+ 0, FB_VMODE_DOUBLE | FB_VMODE_YWRAP
+ }, {
+ /* 640x512, 27 kHz, 47 Hz */
+ "dblpal-ff", 47, 640, 512, TAG_SHRES, 196, 124, 28, 27, 80, 7,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x1024, 27 kHz, 47 Hz interlaced */
+ "dblpal-lace", 47, 640, 1024, TAG_SHRES, 196, 124, 56, 54, 80,
+ 14,
+ 0, FB_VMODE_INTERLACED | FB_VMODE_YWRAP
+ },
+
+ /*
+ * VGA Video Modes
+ */
+
+ {
+ /* 640x480, 31 kHz, 60 Hz (VGA) */
+ "vga", 60, 640, 480, TAG_SHRES, 64, 96, 30, 9, 112, 2,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 640x400, 31 kHz, 70 Hz (VGA) */
+ "vga70", 70, 640, 400, TAG_SHRES, 64, 96, 35, 12, 112, 2,
+ FB_SYNC_VERT_HIGH_ACT | FB_SYNC_COMP_HIGH_ACT,
+ FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ },
#if 0
- /*
- * A2024 video modes
- * These modes don't work yet because there's no A2024 driver.
- */
-
- {
- /* 1024x800, 10 Hz */
- "a2024-10", 10, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }, {
- /* 1024x800, 15 Hz */
- "a2024-15", 15, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
- 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
- }
+ /*
+ * A2024 video modes
+ * These modes don't work yet because there's no A2024 driver.
+ */
+
+ {
+ /* 1024x800, 10 Hz */
+ "a2024-10", 10, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }, {
+ /* 1024x800, 15 Hz */
+ "a2024-15", 15, 1024, 800, TAG_HIRES, 0, 0, 0, 0, 0, 0,
+ 0, FB_VMODE_NONINTERLACED | FB_VMODE_YWRAP
+ }
#endif
};
static int amifb_ilbm = 0; /* interleaved or normal bitplanes */
static int amifb_inverse = 0;
+static u32 amifb_hfmin __initdata; /* monitor hfreq lower limit (Hz) */
+static u32 amifb_hfmax __initdata; /* monitor hfreq upper limit (Hz) */
+static u16 amifb_vfmin __initdata; /* monitor vfreq lower limit (Hz) */
+static u16 amifb_vfmax __initdata; /* monitor vfreq upper limit (Hz) */
+
/*
* Macros for the conversion from real world values to hardware register
/* bplcon1 (smooth scrolling) */
#define hscroll2hw(hscroll) \
- (((hscroll)<<12 & 0x3000) | ((hscroll)<<8 & 0xc300) | \
- ((hscroll)<<4 & 0x0c00) | ((hscroll)<<2 & 0x00f0) | ((hscroll)>>2 & 0x000f))
+ (((hscroll) << 12 & 0x3000) | ((hscroll) << 8 & 0xc300) | \
+ ((hscroll) << 4 & 0x0c00) | ((hscroll) << 2 & 0x00f0) | \
+ ((hscroll)>>2 & 0x000f))
/* diwstrt/diwstop/diwhigh (visible display window) */
#define diwstrt2hw(diwstrt_h, diwstrt_v) \
- (((diwstrt_v)<<7 & 0xff00) | ((diwstrt_h)>>2 & 0x00ff))
+ (((diwstrt_v) << 7 & 0xff00) | ((diwstrt_h)>>2 & 0x00ff))
#define diwstop2hw(diwstop_h, diwstop_v) \
- (((diwstop_v)<<7 & 0xff00) | ((diwstop_h)>>2 & 0x00ff))
+ (((diwstop_v) << 7 & 0xff00) | ((diwstop_h)>>2 & 0x00ff))
#define diwhigh2hw(diwstrt_h, diwstrt_v, diwstop_h, diwstop_v) \
- (((diwstop_h)<<3 & 0x2000) | ((diwstop_h)<<11 & 0x1800) | \
+ (((diwstop_h) << 3 & 0x2000) | ((diwstop_h) << 11 & 0x1800) | \
((diwstop_v)>>1 & 0x0700) | ((diwstrt_h)>>5 & 0x0020) | \
- ((diwstrt_h)<<3 & 0x0018) | ((diwstrt_v)>>9 & 0x0007))
+ ((diwstrt_h) << 3 & 0x0018) | ((diwstrt_v)>>9 & 0x0007))
/* ddfstrt/ddfstop (display DMA) */
#define hsstrt2hw(hsstrt) (div8(hsstrt))
#define hsstop2hw(hsstop) (div8(hsstop))
-#define htotal2hw(htotal) (div8(htotal)-1)
+#define htotal2hw(htotal) (div8(htotal) - 1)
#define vsstrt2hw(vsstrt) (div2(vsstrt))
#define vsstop2hw(vsstop) (div2(vsstop))
-#define vtotal2hw(vtotal) (div2(vtotal)-1)
+#define vtotal2hw(vtotal) (div2(vtotal) - 1)
#define hcenter2hw(htotal) (div8(htotal))
/* hbstrt/hbstop/vbstrt/vbstop (blanking timings) */
-#define hbstrt2hw(hbstrt) (((hbstrt)<<8 & 0x0700) | ((hbstrt)>>3 & 0x00ff))
-#define hbstop2hw(hbstop) (((hbstop)<<8 & 0x0700) | ((hbstop)>>3 & 0x00ff))
+#define hbstrt2hw(hbstrt) (((hbstrt) << 8 & 0x0700) | ((hbstrt)>>3 & 0x00ff))
+#define hbstop2hw(hbstop) (((hbstop) << 8 & 0x0700) | ((hbstop)>>3 & 0x00ff))
#define vbstrt2hw(vbstrt) (div2(vbstrt))
#define vbstop2hw(vbstop) (div2(vbstop))
/* colour */
#define rgb2hw8_high(red, green, blue) \
- (((red & 0xf0)<<4) | (green & 0xf0) | ((blue & 0xf0)>>4))
+ (((red & 0xf0) << 4) | (green & 0xf0) | ((blue & 0xf0)>>4))
#define rgb2hw8_low(red, green, blue) \
- (((red & 0x0f)<<8) | ((green & 0x0f)<<4) | (blue & 0x0f))
+ (((red & 0x0f) << 8) | ((green & 0x0f) << 4) | (blue & 0x0f))
#define rgb2hw4(red, green, blue) \
- (((red & 0xf0)<<4) | (green & 0xf0) | ((blue & 0xf0)>>4))
+ (((red & 0xf0) << 4) | (green & 0xf0) | ((blue & 0xf0)>>4))
#define rgb2hw2(red, green, blue) \
- (((red & 0xc0)<<4) | (green & 0xc0) | ((blue & 0xc0)>>4))
+ (((red & 0xc0) << 4) | (green & 0xc0) | ((blue & 0xc0)>>4))
/* sprpos/sprctl (sprite positioning) */
#define spr2hw_pos(start_v, start_h) \
- (((start_v)<<7&0xff00) | ((start_h)>>3&0x00ff))
+ (((start_v) << 7 & 0xff00) | ((start_h)>>3 & 0x00ff))
#define spr2hw_ctl(start_v, start_h, stop_v) \
- (((stop_v)<<7&0xff00) | ((start_v)>>4&0x0040) | ((stop_v)>>5&0x0020) | \
- ((start_h)<<3&0x0018) | ((start_v)>>7&0x0004) | ((stop_v)>>8&0x0002) | \
- ((start_h)>>2&0x0001))
+ (((stop_v) << 7 & 0xff00) | ((start_v)>>4 & 0x0040) | \
+ ((stop_v)>>5 & 0x0020) | ((start_h) << 3 & 0x0018) | \
+ ((start_v)>>7 & 0x0004) | ((stop_v)>>8 & 0x0002) | \
+ ((start_h)>>2 & 0x0001))
/* get current vertical position of beam */
#define get_vbpos() ((u_short)((*(u_long volatile *)&custom.vposr >> 7) & 0xffe))
* Copper Initialisation List
*/
-#define COPINITSIZE (sizeof(copins)*40)
+#define COPINITSIZE (sizeof(copins) * 40)
enum {
cip_bplcon0
* Don't change the order, build_copper()/rebuild_copper() rely on this
*/
-#define COPLISTSIZE (sizeof(copins)*64)
+#define COPLISTSIZE (sizeof(copins) * 64)
enum {
cop_wait, cop_bplcon0,
};
- /*
- * Interface used by the world
- */
-
-int amifb_setup(char*);
-
-static int amifb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static int amifb_set_par(struct fb_info *info);
-static int amifb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp,
- struct fb_info *info);
-static int amifb_blank(int blank, struct fb_info *info);
-static int amifb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info);
-static void amifb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect);
-static void amifb_copyarea(struct fb_info *info,
- const struct fb_copyarea *region);
-static void amifb_imageblit(struct fb_info *info,
- const struct fb_image *image);
-static int amifb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg);
-
+/* --------------------------- Hardware routines --------------------------- */
/*
- * Interface to the low level console driver
+ * Get the video params out of `var'. If a value doesn't fit, round
+ * it up; if it's too big, return -EINVAL.
*/
-static void amifb_deinit(struct platform_device *pdev);
+static int ami_decode_var(struct fb_var_screeninfo *var, struct amifb_par *par,
+ const struct fb_info *info)
+{
+ u_short clk_shift, line_shift;
+ u_long maxfetchstop, fstrt, fsize, fconst, xres_n, yres_n;
+ u_int htotal, vtotal;
/*
- * Internal routines
+ * Find a matching Pixel Clock
*/
-static int flash_cursor(void);
-static irqreturn_t amifb_interrupt(int irq, void *dev_id);
-static u_long chipalloc(u_long size);
-static void chipfree(void);
+ for (clk_shift = TAG_SHRES; clk_shift <= TAG_LORES; clk_shift++)
+ if (var->pixclock <= pixclock[clk_shift])
+ break;
+ if (clk_shift > TAG_LORES) {
+ DPRINTK("pixclock too high\n");
+ return -EINVAL;
+ }
+ par->clk_shift = clk_shift;
/*
- * Hardware routines
+ * Check the Geometry Values
*/
-static int ami_decode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par);
-static int ami_encode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par);
-static void ami_pan_var(struct fb_var_screeninfo *var);
-static int ami_update_par(void);
-static void ami_update_display(void);
-static void ami_init_display(void);
-static void ami_do_blank(void);
-static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix);
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data);
-static int ami_get_cursorstate(struct fb_cursorstate *state);
-static int ami_set_cursorstate(struct fb_cursorstate *state);
-static void ami_set_sprite(void);
-static void ami_init_copper(void);
-static void ami_reinit_copper(void);
-static void ami_build_copper(void);
-static void ami_rebuild_copper(void);
-
-
-static struct fb_ops amifb_ops = {
- .owner = THIS_MODULE,
- .fb_check_var = amifb_check_var,
- .fb_set_par = amifb_set_par,
- .fb_setcolreg = amifb_setcolreg,
- .fb_blank = amifb_blank,
- .fb_pan_display = amifb_pan_display,
- .fb_fillrect = amifb_fillrect,
- .fb_copyarea = amifb_copyarea,
- .fb_imageblit = amifb_imageblit,
- .fb_ioctl = amifb_ioctl,
-};
+ if ((par->xres = var->xres) < 64)
+ par->xres = 64;
+ if ((par->yres = var->yres) < 64)
+ par->yres = 64;
+ if ((par->vxres = var->xres_virtual) < par->xres)
+ par->vxres = par->xres;
+ if ((par->vyres = var->yres_virtual) < par->yres)
+ par->vyres = par->yres;
-static void __init amifb_setup_mcap(char *spec)
-{
- char *p;
- int vmin, vmax, hmin, hmax;
+ par->bpp = var->bits_per_pixel;
+ if (!var->nonstd) {
+ if (par->bpp < 1)
+ par->bpp = 1;
+ if (par->bpp > maxdepth[clk_shift]) {
+ if (round_down_bpp && maxdepth[clk_shift])
+ par->bpp = maxdepth[clk_shift];
+ else {
+ DPRINTK("invalid bpp\n");
+ return -EINVAL;
+ }
+ }
+ } else if (var->nonstd == FB_NONSTD_HAM) {
+ if (par->bpp < 6)
+ par->bpp = 6;
+ if (par->bpp != 6) {
+ if (par->bpp < 8)
+ par->bpp = 8;
+ if (par->bpp != 8 || !IS_AGA) {
+ DPRINTK("invalid bpp for ham mode\n");
+ return -EINVAL;
+ }
+ }
+ } else {
+ DPRINTK("unknown nonstd mode\n");
+ return -EINVAL;
+ }
- /* Format for monitor capabilities is: <Vmin>;<Vmax>;<Hmin>;<Hmax>
- * <V*> vertical freq. in Hz
- * <H*> horizontal freq. in kHz
+ /*
+	 * FB_VMODE_SMOOTH_XPAN will be cleared if one of the following
+	 * checks fails and smooth scrolling is not possible
*/
- if (!(p = strsep(&spec, ";")) || !*p)
- return;
- vmin = simple_strtoul(p, NULL, 10);
- if (vmin <= 0)
- return;
- if (!(p = strsep(&spec, ";")) || !*p)
- return;
- vmax = simple_strtoul(p, NULL, 10);
- if (vmax <= 0 || vmax <= vmin)
- return;
- if (!(p = strsep(&spec, ";")) || !*p)
- return;
- hmin = 1000 * simple_strtoul(p, NULL, 10);
- if (hmin <= 0)
- return;
- if (!(p = strsep(&spec, "")) || !*p)
- return;
- hmax = 1000 * simple_strtoul(p, NULL, 10);
- if (hmax <= 0 || hmax <= hmin)
- return;
-
- fb_info.monspecs.vfmin = vmin;
- fb_info.monspecs.vfmax = vmax;
- fb_info.monspecs.hfmin = hmin;
- fb_info.monspecs.hfmax = hmax;
-}
-
-int __init amifb_setup(char *options)
-{
- char *this_opt;
-
- if (!options || !*options)
- return 0;
-
- while ((this_opt = strsep(&options, ",")) != NULL) {
- if (!*this_opt)
- continue;
- if (!strcmp(this_opt, "inverse")) {
- amifb_inverse = 1;
- fb_invert_cmaps();
- } else if (!strcmp(this_opt, "ilbm"))
- amifb_ilbm = 1;
- else if (!strncmp(this_opt, "monitorcap:", 11))
- amifb_setup_mcap(this_opt+11);
- else if (!strncmp(this_opt, "fstart:", 7))
- min_fstrt = simple_strtoul(this_opt+7, NULL, 0);
- else
- mode_option = this_opt;
+ par->vmode = var->vmode | FB_VMODE_SMOOTH_XPAN;
+ switch (par->vmode & FB_VMODE_MASK) {
+ case FB_VMODE_INTERLACED:
+ line_shift = 0;
+ break;
+ case FB_VMODE_NONINTERLACED:
+ line_shift = 1;
+ break;
+ case FB_VMODE_DOUBLE:
+ if (!IS_AGA) {
+ DPRINTK("double mode only possible with aga\n");
+ return -EINVAL;
+ }
+ line_shift = 2;
+ break;
+ default:
+ DPRINTK("unknown video mode\n");
+ return -EINVAL;
+ break;
}
+ par->line_shift = line_shift;
- if (min_fstrt < 48)
- min_fstrt = 48;
-
- return 0;
-}
-
-
-static int amifb_check_var(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- int err;
- struct amifb_par par;
-
- /* Validate wanted screen parameters */
- if ((err = ami_decode_var(var, &par)))
- return err;
-
- /* Encode (possibly rounded) screen parameters */
- ami_encode_var(var, &par);
- return 0;
-}
-
-
-static int amifb_set_par(struct fb_info *info)
-{
- struct amifb_par *par = (struct amifb_par *)info->par;
-
- do_vmode_pan = 0;
- do_vmode_full = 0;
-
- /* Decode wanted screen parameters */
- ami_decode_var(&info->var, par);
-
- /* Set new videomode */
- ami_build_copper();
-
- /* Set VBlank trigger */
- do_vmode_full = 1;
+ /*
+ * Vertical and Horizontal Timings
+ */
- /* Update fix for new screen parameters */
- if (par->bpp == 1) {
- info->fix.type = FB_TYPE_PACKED_PIXELS;
- info->fix.type_aux = 0;
- } else if (amifb_ilbm) {
- info->fix.type = FB_TYPE_INTERLEAVED_PLANES;
- info->fix.type_aux = par->next_line;
- } else {
- info->fix.type = FB_TYPE_PLANES;
- info->fix.type_aux = 0;
- }
- info->fix.line_length = div8(upx(16<<maxfmode, par->vxres));
+ xres_n = par->xres << clk_shift;
+ yres_n = par->yres << line_shift;
+ par->htotal = down8((var->left_margin + par->xres + var->right_margin +
+ var->hsync_len) << clk_shift);
+ par->vtotal =
+ down2(((var->upper_margin + par->yres + var->lower_margin +
+ var->vsync_len) << line_shift) + 1);
- if (par->vmode & FB_VMODE_YWRAP) {
- info->fix.ywrapstep = 1;
- info->fix.xpanstep = 0;
- info->fix.ypanstep = 0;
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YWRAP |
- FBINFO_READS_FAST; /* override SCROLL_REDRAW */
- } else {
- info->fix.ywrapstep = 0;
- if (par->vmode & FB_VMODE_SMOOTH_XPAN)
- info->fix.xpanstep = 1;
+ if (IS_AGA)
+ par->bplcon3 = sprpixmode[clk_shift];
+ else
+ par->bplcon3 = 0;
+ if (var->sync & FB_SYNC_BROADCAST) {
+ par->diwstop_h = par->htotal -
+ ((var->right_margin - var->hsync_len) << clk_shift);
+ if (IS_AGA)
+ par->diwstop_h += mod4(var->hsync_len);
else
- info->fix.xpanstep = 16<<maxfmode;
- info->fix.ypanstep = 1;
- info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
- }
- return 0;
-}
-
-
- /*
- * Pan or Wrap the Display
- *
- * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
- */
+ par->diwstop_h = down4(par->diwstop_h);
-static int amifb_pan_display(struct fb_var_screeninfo *var,
- struct fb_info *info)
-{
- if (var->vmode & FB_VMODE_YWRAP) {
- if (var->yoffset < 0 ||
- var->yoffset >= info->var.yres_virtual || var->xoffset)
+ par->diwstrt_h = par->diwstop_h - xres_n;
+ par->diwstop_v = par->vtotal -
+ ((var->lower_margin - var->vsync_len) << line_shift);
+ par->diwstrt_v = par->diwstop_v - yres_n;
+ if (par->diwstop_h >= par->htotal + 8) {
+ DPRINTK("invalid diwstop_h\n");
return -EINVAL;
- } else {
- /*
- * TODO: There will be problems when xpan!=1, so some columns
- * on the right side will never be seen
- */
- if (var->xoffset+info->var.xres > upx(16<<maxfmode, info->var.xres_virtual) ||
- var->yoffset+info->var.yres > info->var.yres_virtual)
+ }
+ if (par->diwstop_v > par->vtotal) {
+ DPRINTK("invalid diwstop_v\n");
return -EINVAL;
- }
- ami_pan_var(var);
- info->var.xoffset = var->xoffset;
- info->var.yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- info->var.vmode |= FB_VMODE_YWRAP;
- else
- info->var.vmode &= ~FB_VMODE_YWRAP;
- return 0;
-}
-
+ }
-#if BITS_PER_LONG == 32
-#define BYTES_PER_LONG 4
-#define SHIFT_PER_LONG 5
-#elif BITS_PER_LONG == 64
-#define BYTES_PER_LONG 8
-#define SHIFT_PER_LONG 6
-#else
-#define Please update me
-#endif
+ if (!IS_OCS) {
+ /* Initialize sync with some reasonable values for pwrsave */
+ par->hsstrt = 160;
+ par->hsstop = 320;
+ par->vsstrt = 30;
+ par->vsstop = 34;
+ } else {
+ par->hsstrt = 0;
+ par->hsstop = 0;
+ par->vsstrt = 0;
+ par->vsstop = 0;
+ }
+ if (par->vtotal > (PAL_VTOTAL + NTSC_VTOTAL) / 2) {
+ /* PAL video mode */
+ if (par->htotal != PAL_HTOTAL) {
+ DPRINTK("htotal invalid for pal\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_h < PAL_DIWSTRT_H) {
+ DPRINTK("diwstrt_h too low for pal\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_v < PAL_DIWSTRT_V) {
+ DPRINTK("diwstrt_v too low for pal\n");
+ return -EINVAL;
+ }
+ htotal = PAL_HTOTAL>>clk_shift;
+ vtotal = PAL_VTOTAL>>1;
+ if (!IS_OCS) {
+ par->beamcon0 = BMC0_PAL;
+ par->bplcon3 |= BPC3_BRDRBLNK;
+ } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
+ AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
+ par->beamcon0 = BMC0_PAL;
+ par->hsstop = 1;
+ } else if (amiga_vblank != 50) {
+ DPRINTK("pal not supported by this chipset\n");
+ return -EINVAL;
+ }
+ } else {
+ /* NTSC video mode
+		 * The AGA chipset seems to have a hardware bug with BPC3_BRDRBLNK
+		 * when NTSC is activated, so it is better to keep diwstop_h <= 1812
+ */
+ if (par->htotal != NTSC_HTOTAL) {
+ DPRINTK("htotal invalid for ntsc\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_h < NTSC_DIWSTRT_H) {
+ DPRINTK("diwstrt_h too low for ntsc\n");
+ return -EINVAL;
+ }
+ if (par->diwstrt_v < NTSC_DIWSTRT_V) {
+ DPRINTK("diwstrt_v too low for ntsc\n");
+ return -EINVAL;
+ }
+ htotal = NTSC_HTOTAL>>clk_shift;
+ vtotal = NTSC_VTOTAL>>1;
+ if (!IS_OCS) {
+ par->beamcon0 = 0;
+ par->bplcon3 |= BPC3_BRDRBLNK;
+ } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
+ AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
+ par->beamcon0 = 0;
+ par->hsstop = 1;
+ } else if (amiga_vblank != 60) {
+ DPRINTK("ntsc not supported by this chipset\n");
+ return -EINVAL;
+ }
+ }
+ if (IS_OCS) {
+ if (par->diwstrt_h >= 1024 || par->diwstop_h < 1024 ||
+ par->diwstrt_v >= 512 || par->diwstop_v < 256) {
+ DPRINTK("invalid position for display on ocs\n");
+ return -EINVAL;
+ }
+ }
+ } else if (!IS_OCS) {
+ /* Programmable video mode */
+ par->hsstrt = var->right_margin << clk_shift;
+ par->hsstop = (var->right_margin + var->hsync_len) << clk_shift;
+ par->diwstop_h = par->htotal - mod8(par->hsstrt) + 8 - (1 << clk_shift);
+ if (!IS_AGA)
+ par->diwstop_h = down4(par->diwstop_h) - 16;
+ par->diwstrt_h = par->diwstop_h - xres_n;
+ par->hbstop = par->diwstrt_h + 4;
+ par->hbstrt = par->diwstop_h + 4;
+ if (par->hbstrt >= par->htotal + 8)
+ par->hbstrt -= par->htotal;
+ par->hcenter = par->hsstrt + (par->htotal >> 1);
+ par->vsstrt = var->lower_margin << line_shift;
+ par->vsstop = (var->lower_margin + var->vsync_len) << line_shift;
+ par->diwstop_v = par->vtotal;
+ if ((par->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED)
+ par->diwstop_v -= 2;
+ par->diwstrt_v = par->diwstop_v - yres_n;
+ par->vbstop = par->diwstrt_v - 2;
+ par->vbstrt = par->diwstop_v - 2;
+ if (par->vtotal > 2048) {
+ DPRINTK("vtotal too high\n");
+ return -EINVAL;
+ }
+ if (par->htotal > 2048) {
+ DPRINTK("htotal too high\n");
+ return -EINVAL;
+ }
+ par->bplcon3 |= BPC3_EXTBLKEN;
+ par->beamcon0 = BMC0_HARDDIS | BMC0_VARVBEN | BMC0_LOLDIS |
+ BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARBEAMEN |
+ BMC0_PAL | BMC0_VARCSYEN;
+ if (var->sync & FB_SYNC_HOR_HIGH_ACT)
+ par->beamcon0 |= BMC0_HSYTRUE;
+ if (var->sync & FB_SYNC_VERT_HIGH_ACT)
+ par->beamcon0 |= BMC0_VSYTRUE;
+ if (var->sync & FB_SYNC_COMP_HIGH_ACT)
+ par->beamcon0 |= BMC0_CSYTRUE;
+ htotal = par->htotal>>clk_shift;
+ vtotal = par->vtotal>>1;
+ } else {
+ DPRINTK("only broadcast modes possible for ocs\n");
+ return -EINVAL;
+ }
+ /*
+ * Checking the DMA timing
+ */
- /*
- * Compose two values, using a bitmask as decision value
- * This is equivalent to (a & mask) | (b & ~mask)
- */
+ fconst = 16 << maxfmode << clk_shift;
-static inline unsigned long comp(unsigned long a, unsigned long b,
- unsigned long mask)
-{
- return ((a ^ b) & mask) ^ b;
-}
+ /*
+	 * smallest window start value that does not turn off DMA cycles
+	 * other than sprite1-7, unless you change min_fstrt
+ */
-static inline unsigned long xor(unsigned long a, unsigned long b,
- unsigned long mask)
-{
- return (a & mask) ^ b;
-}
+ fsize = ((maxfmode + clk_shift <= 1) ? fconst : 64);
+ fstrt = downx(fconst, par->diwstrt_h - 4) - fsize;
+ if (fstrt < min_fstrt) {
+ DPRINTK("fetch start too low\n");
+ return -EINVAL;
+ }
+ /*
+ * smallest window start value where smooth scrolling is possible
+ */
- /*
- * Unaligned forward bit copy using 32-bit or 64-bit memory accesses
- */
+ fstrt = downx(fconst, par->diwstrt_h - fconst + (1 << clk_shift) - 4) -
+ fsize;
+ if (fstrt < min_fstrt)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
-static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
- int src_idx, u32 n)
-{
- unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
- unsigned long d0, d1;
- int m;
+ maxfetchstop = down16(par->htotal - 80);
- if (!n)
- return;
+ fstrt = downx(fconst, par->diwstrt_h - 4) - 64 - fconst;
+ fsize = upx(fconst, xres_n +
+ modx(fconst, downx(1 << clk_shift, par->diwstrt_h - 4)));
+ if (fstrt + fsize > maxfetchstop)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
- shift = dst_idx-src_idx;
- first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ fsize = upx(fconst, xres_n);
+ if (fstrt + fsize > maxfetchstop) {
+ DPRINTK("fetch stop too high\n");
+ return -EINVAL;
+ }
- if (!shift) {
- // Same alignment for source and dest
+ if (maxfmode + clk_shift <= 1) {
+ fsize = up64(xres_n + fconst - 1);
+ if (min_fstrt + fsize - 64 > maxfetchstop)
+ par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
- if (dst_idx+n <= BITS_PER_LONG) {
- // Single word
- if (last)
- first &= last;
- *dst = comp(*src, *dst, first);
- } else {
- // Multiple destination words
- // Leading bits
- if (first) {
- *dst = comp(*src, *dst, first);
- dst++;
- src++;
- n -= BITS_PER_LONG-dst_idx;
- }
+ fsize = up64(xres_n);
+ if (min_fstrt + fsize - 64 > maxfetchstop) {
+ DPRINTK("fetch size too high\n");
+ return -EINVAL;
+ }
- // Main chunk
- n /= BITS_PER_LONG;
- while (n >= 8) {
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- *dst++ = *src++;
- n -= 8;
- }
- while (n--)
- *dst++ = *src++;
+ fsize -= 64;
+ } else
+ fsize -= fconst;
- // Trailing bits
- if (last)
- *dst = comp(*src, *dst, last);
+ /*
+ * Check if there is enough time to update the bitplane pointers for ywrap
+ */
+
+ if (par->htotal - fsize - 64 < par->bpp * 64)
+ par->vmode &= ~FB_VMODE_YWRAP;
+
+ /*
+	 * Bitplane calculations and check of the memory requirements
+ */
+
+ if (amifb_ilbm) {
+ par->next_plane = div8(upx(16 << maxfmode, par->vxres));
+ par->next_line = par->bpp * par->next_plane;
+ if (par->next_line * par->vyres > info->fix.smem_len) {
+ DPRINTK("too few video mem\n");
+ return -EINVAL;
}
} else {
- // Different alignment for source and dest
+ par->next_line = div8(upx(16 << maxfmode, par->vxres));
+ par->next_plane = par->vyres * par->next_line;
+ if (par->next_plane * par->bpp > info->fix.smem_len) {
+ DPRINTK("too few video mem\n");
+ return -EINVAL;
+ }
+ }
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ /*
+ * Hardware Register Values
+ */
- if (dst_idx+n <= BITS_PER_LONG) {
- // Single destination word
- if (last)
- first &= last;
- if (shift > 0) {
- // Single source word
- *dst = comp(*src >> right, *dst, first);
- } else if (src_idx+n <= BITS_PER_LONG) {
- // Single source word
- *dst = comp(*src << left, *dst, first);
- } else {
- // 2 source words
- d0 = *src++;
- d1 = *src;
- *dst = comp(d0 << left | d1 >> right, *dst,
- first);
- }
- } else {
- // Multiple destination words
- d0 = *src++;
- // Leading bits
- if (shift > 0) {
- // Single source word
- *dst = comp(d0 >> right, *dst, first);
- dst++;
- n -= BITS_PER_LONG-dst_idx;
- } else {
- // 2 source words
- d1 = *src++;
- *dst = comp(d0 << left | d1 >> right, *dst,
- first);
- d0 = d1;
- dst++;
- n -= BITS_PER_LONG-dst_idx;
- }
+ par->bplcon0 = BPC0_COLOR | bplpixmode[clk_shift];
+ if (!IS_OCS)
+ par->bplcon0 |= BPC0_ECSENA;
+ if (par->bpp == 8)
+ par->bplcon0 |= BPC0_BPU3;
+ else
+ par->bplcon0 |= par->bpp << 12;
+ if (var->nonstd == FB_NONSTD_HAM)
+ par->bplcon0 |= BPC0_HAM;
+ if (var->sync & FB_SYNC_EXT)
+ par->bplcon0 |= BPC0_ERSY;
- // Main chunk
- m = n % BITS_PER_LONG;
- n /= BITS_PER_LONG;
- while (n >= 4) {
- d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- n -= 4;
- }
- while (n--) {
- d1 = *src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- }
+ if (IS_AGA)
+ par->fmode = bplfetchmode[maxfmode];
- // Trailing bits
- if (last) {
- if (m <= right) {
- // Single source word
- *dst = comp(d0 << left, *dst, last);
- } else {
- // 2 source words
- d1 = *src;
- *dst = comp(d0 << left | d1 >> right,
- *dst, last);
- }
- }
- }
+ switch (par->vmode & FB_VMODE_MASK) {
+ case FB_VMODE_INTERLACED:
+ par->bplcon0 |= BPC0_LACE;
+ break;
+ case FB_VMODE_DOUBLE:
+ if (IS_AGA)
+ par->fmode |= FMODE_SSCAN2 | FMODE_BSCAN2;
+ break;
}
-}
+ if (!((par->vmode ^ var->vmode) & FB_VMODE_YWRAP)) {
+ par->xoffset = var->xoffset;
+ par->yoffset = var->yoffset;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if (par->xoffset || par->yoffset < 0 ||
+ par->yoffset >= par->vyres)
+ par->xoffset = par->yoffset = 0;
+ } else {
+ if (par->xoffset < 0 ||
+ par->xoffset > upx(16 << maxfmode, par->vxres - par->xres) ||
+ par->yoffset < 0 || par->yoffset > par->vyres - par->yres)
+ par->xoffset = par->yoffset = 0;
+ }
+ } else
+ par->xoffset = par->yoffset = 0;
- /*
- * Unaligned reverse bit copy using 32-bit or 64-bit memory accesses
- */
+ par->crsr.crsr_x = par->crsr.crsr_y = 0;
+ par->crsr.spot_x = par->crsr.spot_y = 0;
+ par->crsr.height = par->crsr.width = 0;
-static void bitcpy_rev(unsigned long *dst, int dst_idx,
- const unsigned long *src, int src_idx, u32 n)
-{
- unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
- unsigned long d0, d1;
- int m;
+ return 0;
+}
- if (!n)
- return;
+ /*
+ * Fill the `var' structure based on the values in `par' and maybe
+ * other values read out of the hardware.
+ */
- dst += (n-1)/BITS_PER_LONG;
- src += (n-1)/BITS_PER_LONG;
- if ((n-1) % BITS_PER_LONG) {
- dst_idx += (n-1) % BITS_PER_LONG;
- dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= BITS_PER_LONG-1;
- src_idx += (n-1) % BITS_PER_LONG;
- src += src_idx >> SHIFT_PER_LONG;
- src_idx &= BITS_PER_LONG-1;
- }
+static void ami_encode_var(struct fb_var_screeninfo *var,
+ struct amifb_par *par)
+{
+ u_short clk_shift, line_shift;
- shift = dst_idx-src_idx;
- first = ~0UL << (BITS_PER_LONG-1-dst_idx);
- last = ~(~0UL << (BITS_PER_LONG-1-((dst_idx-n) % BITS_PER_LONG)));
+ memset(var, 0, sizeof(struct fb_var_screeninfo));
- if (!shift) {
- // Same alignment for source and dest
+ clk_shift = par->clk_shift;
+ line_shift = par->line_shift;
- if ((unsigned long)dst_idx+1 >= n) {
- // Single word
- if (last)
- first &= last;
- *dst = comp(*src, *dst, first);
- } else {
- // Multiple destination words
- // Leading bits
- if (first) {
- *dst = comp(*src, *dst, first);
- dst--;
- src--;
- n -= dst_idx+1;
- }
+ var->xres = par->xres;
+ var->yres = par->yres;
+ var->xres_virtual = par->vxres;
+ var->yres_virtual = par->vyres;
+ var->xoffset = par->xoffset;
+ var->yoffset = par->yoffset;
- // Main chunk
- n /= BITS_PER_LONG;
- while (n >= 8) {
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- *dst-- = *src--;
- n -= 8;
- }
- while (n--)
- *dst-- = *src--;
+ var->bits_per_pixel = par->bpp;
+ var->grayscale = 0;
- // Trailing bits
- if (last)
- *dst = comp(*src, *dst, last);
- }
- } else {
- // Different alignment for source and dest
+ var->red.offset = 0;
+ var->red.msb_right = 0;
+ var->red.length = par->bpp;
+ if (par->bplcon0 & BPC0_HAM)
+ var->red.length -= 2;
+ var->blue = var->green = var->red;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ var->transp.msb_right = 0;
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ if (par->bplcon0 & BPC0_HAM)
+ var->nonstd = FB_NONSTD_HAM;
+ else
+ var->nonstd = 0;
+ var->activate = 0;
- if ((unsigned long)dst_idx+1 >= n) {
- // Single destination word
- if (last)
- first &= last;
- if (shift < 0) {
- // Single source word
- *dst = comp(*src << left, *dst, first);
- } else if (1+(unsigned long)src_idx >= n) {
- // Single source word
- *dst = comp(*src >> right, *dst, first);
- } else {
- // 2 source words
- d0 = *src--;
- d1 = *src;
- *dst = comp(d0 >> right | d1 << left, *dst,
- first);
- }
- } else {
- // Multiple destination words
- d0 = *src--;
- // Leading bits
- if (shift < 0) {
- // Single source word
- *dst = comp(d0 << left, *dst, first);
- dst--;
- n -= dst_idx+1;
- } else {
- // 2 source words
- d1 = *src--;
- *dst = comp(d0 >> right | d1 << left, *dst,
- first);
- d0 = d1;
- dst--;
- n -= dst_idx+1;
- }
+ var->height = -1;
+ var->width = -1;
- // Main chunk
- m = n % BITS_PER_LONG;
- n /= BITS_PER_LONG;
- while (n >= 4) {
- d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
- d0 = d1;
- d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
- d0 = d1;
- d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
- d0 = d1;
- d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
- d0 = d1;
- n -= 4;
- }
- while (n--) {
- d1 = *src--;
- *dst-- = d0 >> right | d1 << left;
- d0 = d1;
- }
+ var->pixclock = pixclock[clk_shift];
- // Trailing bits
- if (last) {
- if (m <= left) {
- // Single source word
- *dst = comp(d0 >> right, *dst, last);
- } else {
- // 2 source words
- d1 = *src;
- *dst = comp(d0 >> right | d1 << left,
- *dst, last);
- }
- }
- }
+ if (IS_AGA && par->fmode & FMODE_BSCAN2)
+ var->vmode = FB_VMODE_DOUBLE;
+ else if (par->bplcon0 & BPC0_LACE)
+ var->vmode = FB_VMODE_INTERLACED;
+ else
+ var->vmode = FB_VMODE_NONINTERLACED;
+
+ if (!IS_OCS && par->beamcon0 & BMC0_VARBEAMEN) {
+ var->hsync_len = (par->hsstop - par->hsstrt)>>clk_shift;
+ var->right_margin = par->hsstrt>>clk_shift;
+ var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
+ var->vsync_len = (par->vsstop - par->vsstrt)>>line_shift;
+ var->lower_margin = par->vsstrt>>line_shift;
+ var->upper_margin = (par->vtotal>>line_shift) - var->yres - var->lower_margin - var->vsync_len;
+ var->sync = 0;
+ if (par->beamcon0 & BMC0_HSYTRUE)
+ var->sync |= FB_SYNC_HOR_HIGH_ACT;
+ if (par->beamcon0 & BMC0_VSYTRUE)
+ var->sync |= FB_SYNC_VERT_HIGH_ACT;
+ if (par->beamcon0 & BMC0_CSYTRUE)
+ var->sync |= FB_SYNC_COMP_HIGH_ACT;
+ } else {
+ var->sync = FB_SYNC_BROADCAST;
+ var->hsync_len = (152>>clk_shift) + mod4(par->diwstop_h);
+ var->right_margin = ((par->htotal - down4(par->diwstop_h))>>clk_shift) + var->hsync_len;
+ var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
+ var->vsync_len = 4>>line_shift;
+ var->lower_margin = ((par->vtotal - par->diwstop_v)>>line_shift) + var->vsync_len;
+ var->upper_margin = (((par->vtotal - 2)>>line_shift) + 1) - var->yres -
+ var->lower_margin - var->vsync_len;
}
+
+ if (par->bplcon0 & BPC0_ERSY)
+ var->sync |= FB_SYNC_EXT;
+ if (par->vmode & FB_VMODE_YWRAP)
+ var->vmode |= FB_VMODE_YWRAP;
}
- /*
- * Unaligned forward inverting bit copy using 32-bit or 64-bit memory
- * accesses
- */
+ /*
+ * Update hardware
+ */
-static void bitcpy_not(unsigned long *dst, int dst_idx,
- const unsigned long *src, int src_idx, u32 n)
+static void ami_update_par(struct fb_info *info)
{
- unsigned long first, last;
- int shift = dst_idx-src_idx, left, right;
- unsigned long d0, d1;
- int m;
+ struct amifb_par *par = info->par;
+ short clk_shift, vshift, fstrt, fsize, fstop, fconst, shift, move, mod;
- if (!n)
- return;
+ clk_shift = par->clk_shift;
- shift = dst_idx-src_idx;
- first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+ if (!(par->vmode & FB_VMODE_SMOOTH_XPAN))
+ par->xoffset = upx(16 << maxfmode, par->xoffset);
- if (!shift) {
- // Same alignment for source and dest
+ fconst = 16 << maxfmode << clk_shift;
+ vshift = modx(16 << maxfmode, par->xoffset);
+ fstrt = par->diwstrt_h - (vshift << clk_shift) - 4;
+ fsize = (par->xres + vshift) << clk_shift;
+ shift = modx(fconst, fstrt);
+ move = downx(2 << maxfmode, div8(par->xoffset));
+ if (maxfmode + clk_shift > 1) {
+ fstrt = downx(fconst, fstrt) - 64;
+ fsize = upx(fconst, fsize);
+ fstop = fstrt + fsize - fconst;
+ } else {
+ mod = fstrt = downx(fconst, fstrt) - fconst;
+ fstop = fstrt + upx(fconst, fsize) - 64;
+ fsize = up64(fsize);
+ fstrt = fstop - fsize + 64;
+ if (fstrt < min_fstrt) {
+ fstop += min_fstrt - fstrt;
+ fstrt = min_fstrt;
+ }
+ move = move - div8((mod - fstrt)>>clk_shift);
+ }
+ mod = par->next_line - div8(fsize>>clk_shift);
+ par->ddfstrt = fstrt;
+ par->ddfstop = fstop;
+ par->bplcon1 = hscroll2hw(shift);
+ par->bpl2mod = mod;
+ if (par->bplcon0 & BPC0_LACE)
+ par->bpl2mod += par->next_line;
+ if (IS_AGA && (par->fmode & FMODE_BSCAN2))
+ par->bpl1mod = -div8(fsize>>clk_shift);
+ else
+ par->bpl1mod = par->bpl2mod;
- if (dst_idx+n <= BITS_PER_LONG) {
- // Single word
- if (last)
- first &= last;
- *dst = comp(~*src, *dst, first);
- } else {
- // Multiple destination words
- // Leading bits
- if (first) {
- *dst = comp(~*src, *dst, first);
- dst++;
- src++;
- n -= BITS_PER_LONG-dst_idx;
+ if (par->yoffset) {
+ par->bplpt0 = info->fix.smem_start +
+ par->next_line * par->yoffset + move;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if (par->yoffset > par->vyres - par->yres) {
+ par->bplpt0wrap = info->fix.smem_start + move;
+ if (par->bplcon0 & BPC0_LACE &&
+ mod2(par->diwstrt_v + par->vyres -
+ par->yoffset))
+ par->bplpt0wrap += par->next_line;
}
+ }
+ } else
+ par->bplpt0 = info->fix.smem_start + move;
- // Main chunk
- n /= BITS_PER_LONG;
- while (n >= 8) {
- *dst++ = ~*src++;
- *dst++ = ~*src++;
- *dst++ = ~*src++;
- *dst++ = ~*src++;
- *dst++ = ~*src++;
- *dst++ = ~*src++;
- *dst++ = ~*src++;
- *dst++ = ~*src++;
- n -= 8;
- }
- while (n--)
- *dst++ = ~*src++;
+ if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v))
+ par->bplpt0 += par->next_line;
+}
- // Trailing bits
- if (last)
- *dst = comp(~*src, *dst, last);
- }
- } else {
- // Different alignment for source and dest
- right = shift & (BITS_PER_LONG-1);
- left = -shift & (BITS_PER_LONG-1);
+ /*
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
+ * in `var'.
+ */
- if (dst_idx+n <= BITS_PER_LONG) {
- // Single destination word
- if (last)
- first &= last;
- if (shift > 0) {
- // Single source word
- *dst = comp(~*src >> right, *dst, first);
- } else if (src_idx+n <= BITS_PER_LONG) {
- // Single source word
- *dst = comp(~*src << left, *dst, first);
- } else {
- // 2 source words
- d0 = ~*src++;
- d1 = ~*src;
- *dst = comp(d0 << left | d1 >> right, *dst,
- first);
- }
- } else {
- // Multiple destination words
- d0 = ~*src++;
- // Leading bits
- if (shift > 0) {
- // Single source word
- *dst = comp(d0 >> right, *dst, first);
- dst++;
- n -= BITS_PER_LONG-dst_idx;
- } else {
- // 2 source words
- d1 = ~*src++;
- *dst = comp(d0 << left | d1 >> right, *dst,
- first);
- d0 = d1;
- dst++;
- n -= BITS_PER_LONG-dst_idx;
- }
+static void ami_pan_var(struct fb_var_screeninfo *var, struct fb_info *info)
+{
+ struct amifb_par *par = info->par;
- // Main chunk
- m = n % BITS_PER_LONG;
- n /= BITS_PER_LONG;
- while (n >= 4) {
- d1 = ~*src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- d1 = ~*src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- d1 = ~*src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- d1 = ~*src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- n -= 4;
- }
- while (n--) {
- d1 = ~*src++;
- *dst++ = d0 << left | d1 >> right;
- d0 = d1;
- }
+ par->xoffset = var->xoffset;
+ par->yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ par->vmode |= FB_VMODE_YWRAP;
+ else
+ par->vmode &= ~FB_VMODE_YWRAP;
- // Trailing bits
- if (last) {
- if (m <= right) {
- // Single source word
- *dst = comp(d0 << left, *dst, last);
- } else {
- // 2 source words
- d1 = ~*src;
- *dst = comp(d0 << left | d1 >> right,
- *dst, last);
- }
- }
- }
- }
+ do_vmode_pan = 0;
+ ami_update_par(info);
+ do_vmode_pan = 1;
}
- /*
- * Unaligned 32-bit pattern fill using 32/64-bit memory accesses
- */
-
-static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
+static void ami_update_display(const struct amifb_par *par)
{
- unsigned long val = pat;
- unsigned long first, last;
-
- if (!n)
- return;
+ custom.bplcon1 = par->bplcon1;
+ custom.bpl1mod = par->bpl1mod;
+ custom.bpl2mod = par->bpl2mod;
+ custom.ddfstrt = ddfstrt2hw(par->ddfstrt);
+ custom.ddfstop = ddfstop2hw(par->ddfstop);
+}
-#if BITS_PER_LONG == 64
- val |= val << 32;
-#endif
+ /*
+ * Change the video mode (called by VBlank interrupt)
+ */
- first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
+static void ami_init_display(const struct amifb_par *par)
+{
+ int i;
- if (dst_idx+n <= BITS_PER_LONG) {
- // Single word
- if (last)
- first &= last;
- *dst = comp(val, *dst, first);
- } else {
- // Multiple destination words
- // Leading bits
- if (first) {
- *dst = comp(val, *dst, first);
- dst++;
- n -= BITS_PER_LONG-dst_idx;
+ custom.bplcon0 = par->bplcon0 & ~BPC0_LACE;
+ custom.bplcon2 = (IS_OCS ? 0 : BPC2_KILLEHB) | BPC2_PF2P2 | BPC2_PF1P2;
+ if (!IS_OCS) {
+ custom.bplcon3 = par->bplcon3;
+ if (IS_AGA)
+ custom.bplcon4 = BPC4_ESPRM4 | BPC4_OSPRM4;
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ custom.htotal = htotal2hw(par->htotal);
+ custom.hbstrt = hbstrt2hw(par->hbstrt);
+ custom.hbstop = hbstop2hw(par->hbstop);
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.hcenter = hcenter2hw(par->hcenter);
+ custom.vtotal = vtotal2hw(par->vtotal);
+ custom.vbstrt = vbstrt2hw(par->vbstrt);
+ custom.vbstop = vbstop2hw(par->vbstop);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstop2hw(par->vsstop);
}
+ }
+ if (!IS_OCS || par->hsstop)
+ custom.beamcon0 = par->beamcon0;
+ if (IS_AGA)
+ custom.fmode = par->fmode;
- // Main chunk
- n /= BITS_PER_LONG;
- while (n >= 8) {
- *dst++ = val;
- *dst++ = val;
- *dst++ = val;
- *dst++ = val;
- *dst++ = val;
- *dst++ = val;
- *dst++ = val;
- *dst++ = val;
- n -= 8;
- }
- while (n--)
- *dst++ = val;
+ /*
+ * The minimum period for audio depends on htotal
+ */
- // Trailing bits
- if (last)
- *dst = comp(val, *dst, last);
+ amiga_audio_min_period = div16(par->htotal);
+
+ is_lace = par->bplcon0 & BPC0_LACE ? 1 : 0;
+#if 1
+ if (is_lace) {
+ i = custom.vposr >> 15;
+ } else {
+ custom.vposw = custom.vposr | 0x8000;
+ i = 1;
}
+#else
+ i = 1;
+ custom.vposw = custom.vposr | 0x8000;
+#endif
+ custom.cop2lc = (u_short *)ZTWO_PADDR(copdisplay.list[currentcop][i]);
}
+ /*
+ * (Un)Blank the screen (called by VBlank interrupt)
+ */
- /*
- * Unaligned 32-bit pattern xor using 32/64-bit memory accesses
- */
-
-static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
+static void ami_do_blank(const struct amifb_par *par)
{
- unsigned long val = pat;
- unsigned long first, last;
-
- if (!n)
- return;
-
-#if BITS_PER_LONG == 64
- val |= val << 32;
+#if defined(CONFIG_FB_AMIGA_AGA)
+ u_short bplcon3 = par->bplcon3;
#endif
+ u_char red, green, blue;
- first = ~0UL >> dst_idx;
- last = ~(~0UL >> ((dst_idx+n) % BITS_PER_LONG));
-
- if (dst_idx+n <= BITS_PER_LONG) {
- // Single word
- if (last)
- first &= last;
- *dst = xor(val, *dst, first);
- } else {
- // Multiple destination words
- // Leading bits
- if (first) {
- *dst = xor(val, *dst, first);
- dst++;
- n -= BITS_PER_LONG-dst_idx;
+ if (do_blank > 0) {
+ custom.dmacon = DMAF_RASTER | DMAF_SPRITE;
+ red = green = blue = 0;
+ if (!IS_OCS && do_blank > 1) {
+ switch (do_blank) {
+ case FB_BLANK_VSYNC_SUSPEND:
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.vsstrt = vsstrt2hw(par->vtotal + 4);
+ custom.vsstop = vsstop2hw(par->vtotal + 4);
+ break;
+ case FB_BLANK_HSYNC_SUSPEND:
+ custom.hsstrt = hsstrt2hw(par->htotal + 16);
+ custom.hsstop = hsstop2hw(par->htotal + 16);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstrt2hw(par->vsstop);
+ break;
+ case FB_BLANK_POWERDOWN:
+ custom.hsstrt = hsstrt2hw(par->htotal + 16);
+ custom.hsstop = hsstop2hw(par->htotal + 16);
+ custom.vsstrt = vsstrt2hw(par->vtotal + 4);
+ custom.vsstop = vsstop2hw(par->vtotal + 4);
+ break;
+ }
+ if (!(par->beamcon0 & BMC0_VARBEAMEN)) {
+ custom.htotal = htotal2hw(par->htotal);
+ custom.vtotal = vtotal2hw(par->vtotal);
+ custom.beamcon0 = BMC0_HARDDIS | BMC0_VARBEAMEN |
+ BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARCSYEN;
+ }
}
-
- // Main chunk
- n /= BITS_PER_LONG;
- while (n >= 4) {
- *dst++ ^= val;
- *dst++ ^= val;
- *dst++ ^= val;
- *dst++ ^= val;
- n -= 4;
+ } else {
+ custom.dmacon = DMAF_SETCLR | DMAF_RASTER | DMAF_SPRITE;
+ red = red0;
+ green = green0;
+ blue = blue0;
+ if (!IS_OCS) {
+ custom.hsstrt = hsstrt2hw(par->hsstrt);
+ custom.hsstop = hsstop2hw(par->hsstop);
+ custom.vsstrt = vsstrt2hw(par->vsstrt);
+ custom.vsstop = vsstop2hw(par->vsstop);
+ custom.beamcon0 = par->beamcon0;
}
- while (n--)
- *dst++ ^= val;
-
- // Trailing bits
- if (last)
- *dst = xor(val, *dst, last);
}
-}
+#if defined(CONFIG_FB_AMIGA_AGA)
+ if (IS_AGA) {
+ custom.bplcon3 = bplcon3;
+ custom.color[0] = rgb2hw8_high(red, green, blue);
+ custom.bplcon3 = bplcon3 | BPC3_LOCT;
+ custom.color[0] = rgb2hw8_low(red, green, blue);
+ custom.bplcon3 = bplcon3;
+ } else
+#endif
+#if defined(CONFIG_FB_AMIGA_ECS)
+ if (par->bplcon0 & BPC0_SHRES) {
+ u_short color, mask;
+ int i;
-static inline void fill_one_line(int bpp, unsigned long next_plane,
- unsigned long *dst, int dst_idx, u32 n,
- u32 color)
-{
- while (1) {
- dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
- bitfill32(dst, dst_idx, color & 1 ? ~0 : 0, n);
- if (!--bpp)
- break;
- color >>= 1;
- dst_idx += next_plane*8;
- }
+ mask = 0x3333;
+ color = rgb2hw2(red, green, blue);
+ for (i = 12; i >= 0; i -= 4)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ mask <<= 2; color >>= 2;
+ for (i = 3; i >= 0; i--)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ } else
+#endif
+ custom.color[0] = rgb2hw4(red, green, blue);
+ is_blanked = do_blank > 0 ? do_blank : 0;
}
-static inline void xor_one_line(int bpp, unsigned long next_plane,
- unsigned long *dst, int dst_idx, u32 n,
- u32 color)
+static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix,
+ const struct amifb_par *par)
{
- while (color) {
- dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
- bitxor32(dst, dst_idx, color & 1 ? ~0 : 0, n);
- if (!--bpp)
- break;
- color >>= 1;
- dst_idx += next_plane*8;
- }
+ fix->crsr_width = fix->crsr_xsize = par->crsr.width;
+ fix->crsr_height = fix->crsr_ysize = par->crsr.height;
+ fix->crsr_color1 = 17;
+ fix->crsr_color2 = 18;
+ return 0;
}
-
-static void amifb_fillrect(struct fb_info *info,
- const struct fb_fillrect *rect)
+static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var,
+ u_char __user *data,
+ const struct amifb_par *par)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
- int dst_idx, x2, y2;
- unsigned long *dst;
- u32 width, height;
-
- if (!rect->width || !rect->height)
- return;
-
- /*
- * We could use hardware clipping but on many cards you get around
- * hardware clipping by writing to framebuffer directly.
- * */
- x2 = rect->dx + rect->width;
- y2 = rect->dy + rect->height;
- x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual;
- y2 = y2 < info->var.yres_virtual ? y2 : info->var.yres_virtual;
- width = x2 - rect->dx;
- height = y2 - rect->dy;
-
- dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
- dst_idx += rect->dy*par->next_line*8+rect->dx;
- while (height--) {
- switch (rect->rop) {
- case ROP_COPY:
- fill_one_line(info->var.bits_per_pixel,
- par->next_plane, dst, dst_idx, width,
- rect->color);
- break;
+ register u_short *lspr, *sspr;
+#ifdef __mc68000__
+ register u_long datawords asm ("d2");
+#else
+ register u_long datawords;
+#endif
+ register short delta;
+ register u_char color;
+ short height, width, bits, words;
+ int size, alloc;
- case ROP_XOR:
- xor_one_line(info->var.bits_per_pixel, par->next_plane,
- dst, dst_idx, width, rect->color);
- break;
+ size = par->crsr.height * par->crsr.width;
+ alloc = var->height * var->width;
+ var->height = par->crsr.height;
+ var->width = par->crsr.width;
+ var->xspot = par->crsr.spot_x;
+ var->yspot = par->crsr.spot_y;
+	if (size > alloc)	/* compare against the caller's buffer size, not the just-updated var */
+ return -ENAMETOOLONG;
+ if (!access_ok(VERIFY_WRITE, data, size))
+ return -EFAULT;
+ delta = 1 << par->crsr.fmode;
+ lspr = lofsprite + (delta << 1);
+ if (par->bplcon0 & BPC0_LACE)
+ sspr = shfsprite + (delta << 1);
+ else
+ sspr = NULL;
+ for (height = (short)var->height - 1; height >= 0; height--) {
+ bits = 0; words = delta; datawords = 0;
+ for (width = (short)var->width - 1; width >= 0; width--) {
+ if (bits == 0) {
+ bits = 16; --words;
+#ifdef __mc68000__
+ asm volatile ("movew %1@(%3:w:2),%0 ; swap %0 ; movew %1@+,%0"
+ : "=d" (datawords), "=a" (lspr) : "1" (lspr), "d" (delta));
+#else
+ datawords = (*(lspr + delta) << 16) | (*lspr++);
+#endif
+ }
+ --bits;
+#ifdef __mc68000__
+ asm volatile (
+ "clrb %0 ; swap %1 ; lslw #1,%1 ; roxlb #1,%0 ; "
+ "swap %1 ; lslw #1,%1 ; roxlb #1,%0"
+ : "=d" (color), "=d" (datawords) : "1" (datawords));
+#else
+ color = (((datawords >> 30) & 2)
+ | ((datawords >> 15) & 1));
+ datawords <<= 1;
+#endif
+ put_user(color, data++);
+ }
+ if (bits > 0) {
+ --words; ++lspr;
+ }
+ while (--words >= 0)
+ ++lspr;
+#ifdef __mc68000__
+ asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
+ : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
+#else
+ lspr += delta;
+ if (sspr) {
+ u_short *tmp = lspr;
+ lspr = sspr;
+ sspr = tmp;
}
- dst_idx += par->next_line*8;
+#endif
}
+ return 0;
}
-static inline void copy_one_line(int bpp, unsigned long next_plane,
- unsigned long *dst, int dst_idx,
- unsigned long *src, int src_idx, u32 n)
+static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var,
+ u_char __user *data, struct amifb_par *par)
{
- while (1) {
- dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
- src += src_idx >> SHIFT_PER_LONG;
- src_idx &= (BITS_PER_LONG-1);
- bitcpy(dst, dst_idx, src, src_idx, n);
- if (!--bpp)
- break;
- dst_idx += next_plane*8;
- src_idx += next_plane*8;
- }
-}
+ register u_short *lspr, *sspr;
+#ifdef __mc68000__
+ register u_long datawords asm ("d2");
+#else
+ register u_long datawords;
+#endif
+ register short delta;
+ u_short fmode;
+ short height, width, bits, words;
-static inline void copy_one_line_rev(int bpp, unsigned long next_plane,
- unsigned long *dst, int dst_idx,
- unsigned long *src, int src_idx, u32 n)
-{
- while (1) {
- dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
- src += src_idx >> SHIFT_PER_LONG;
- src_idx &= (BITS_PER_LONG-1);
- bitcpy_rev(dst, dst_idx, src, src_idx, n);
- if (!--bpp)
- break;
- dst_idx += next_plane*8;
- src_idx += next_plane*8;
+ if (!var->width)
+ return -EINVAL;
+ else if (var->width <= 16)
+ fmode = TAG_FMODE_1;
+ else if (var->width <= 32)
+ fmode = TAG_FMODE_2;
+ else if (var->width <= 64)
+ fmode = TAG_FMODE_4;
+ else
+ return -EINVAL;
+ if (fmode > maxfmode)
+ return -EINVAL;
+ if (!var->height)
+ return -EINVAL;
+ if (!access_ok(VERIFY_READ, data, var->width * var->height))
+ return -EFAULT;
+ delta = 1 << fmode;
+ lofsprite = shfsprite = (u_short *)spritememory;
+ lspr = lofsprite + (delta << 1);
+ if (par->bplcon0 & BPC0_LACE) {
+ if (((var->height + 4) << fmode << 2) > SPRITEMEMSIZE)
+ return -EINVAL;
+ memset(lspr, 0, (var->height + 4) << fmode << 2);
+ shfsprite += ((var->height + 5)&-2) << fmode;
+ sspr = shfsprite + (delta << 1);
+ } else {
+ if (((var->height + 2) << fmode << 2) > SPRITEMEMSIZE)
+ return -EINVAL;
+ memset(lspr, 0, (var->height + 2) << fmode << 2);
+ sspr = NULL;
}
-}
-
-
-static void amifb_copyarea(struct fb_info *info,
- const struct fb_copyarea *area)
-{
- struct amifb_par *par = (struct amifb_par *)info->par;
- int x2, y2;
- u32 dx, dy, sx, sy, width, height;
- unsigned long *dst, *src;
- int dst_idx, src_idx;
- int rev_copy = 0;
-
- /* clip the destination */
- x2 = area->dx + area->width;
- y2 = area->dy + area->height;
- dx = area->dx > 0 ? area->dx : 0;
- dy = area->dy > 0 ? area->dy : 0;
- x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual;
- y2 = y2 < info->var.yres_virtual ? y2 : info->var.yres_virtual;
- width = x2 - dx;
- height = y2 - dy;
-
- if (area->sx + dx < area->dx || area->sy + dy < area->dy)
- return;
-
- /* update sx,sy */
- sx = area->sx + (dx - area->dx);
- sy = area->sy + (dy - area->dy);
-
- /* the source must be completely inside the virtual screen */
- if (sx + width > info->var.xres_virtual ||
- sy + height > info->var.yres_virtual)
- return;
-
- if (dy > sy || (dy == sy && dx > sx)) {
- dy += height;
- sy += height;
- rev_copy = 1;
- }
- dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
- src = dst;
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
- src_idx = dst_idx;
- dst_idx += dy*par->next_line*8+dx;
- src_idx += sy*par->next_line*8+sx;
- if (rev_copy) {
- while (height--) {
- dst_idx -= par->next_line*8;
- src_idx -= par->next_line*8;
- copy_one_line_rev(info->var.bits_per_pixel,
- par->next_plane, dst, dst_idx, src,
- src_idx, width);
+ for (height = (short)var->height - 1; height >= 0; height--) {
+ bits = 16; words = delta; datawords = 0;
+ for (width = (short)var->width - 1; width >= 0; width--) {
+ unsigned long tdata = 0;
+ get_user(tdata, data);
+ data++;
+#ifdef __mc68000__
+ asm volatile (
+ "lsrb #1,%2 ; roxlw #1,%0 ; swap %0 ; "
+ "lsrb #1,%2 ; roxlw #1,%0 ; swap %0"
+ : "=d" (datawords)
+ : "0" (datawords), "d" (tdata));
+#else
+ datawords = ((datawords << 1) & 0xfffefffe);
+ datawords |= tdata & 1;
+ datawords |= (tdata & 2) << (16 - 1);
+#endif
+ if (--bits == 0) {
+ bits = 16; --words;
+#ifdef __mc68000__
+ asm volatile ("swap %2 ; movew %2,%0@(%3:w:2) ; swap %2 ; movew %2,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta));
+#else
+ *(lspr + delta) = (u_short) (datawords >> 16);
+ *lspr++ = (u_short) (datawords & 0xffff);
+#endif
+ }
}
- } else {
- while (height--) {
- copy_one_line(info->var.bits_per_pixel,
- par->next_plane, dst, dst_idx, src,
- src_idx, width);
- dst_idx += par->next_line*8;
- src_idx += par->next_line*8;
+ if (bits < 16) {
+ --words;
+#ifdef __mc68000__
+ asm volatile (
+ "swap %2 ; lslw %4,%2 ; movew %2,%0@(%3:w:2) ; "
+ "swap %2 ; lslw %4,%2 ; movew %2,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta), "d" (bits));
+#else
+ *(lspr + delta) = (u_short) (datawords >> (16 + bits));
+ *lspr++ = (u_short) ((datawords & 0x0000ffff) >> bits);
+#endif
+ }
+ while (--words >= 0) {
+#ifdef __mc68000__
+ asm volatile ("moveql #0,%%d0 ; movew %%d0,%0@(%2:w:2) ; movew %%d0,%0@+"
+ : "=a" (lspr) : "0" (lspr), "d" (delta) : "d0");
+#else
+ *(lspr + delta) = 0;
+ *lspr++ = 0;
+#endif
+ }
+#ifdef __mc68000__
+ asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
+ : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
+#else
+ lspr += delta;
+ if (sspr) {
+ u_short *tmp = lspr;
+ lspr = sspr;
+ sspr = tmp;
}
+#endif
+ }
+ par->crsr.height = var->height;
+ par->crsr.width = var->width;
+ par->crsr.spot_x = var->xspot;
+ par->crsr.spot_y = var->yspot;
+ par->crsr.fmode = fmode;
+ if (IS_AGA) {
+ par->fmode &= ~(FMODE_SPAGEM | FMODE_SPR32);
+ par->fmode |= sprfetchmode[fmode];
+ custom.fmode = par->fmode;
}
+ return 0;
}
-
-static inline void expand_one_line(int bpp, unsigned long next_plane,
- unsigned long *dst, int dst_idx, u32 n,
- const u8 *data, u32 bgcolor, u32 fgcolor)
+static int ami_get_cursorstate(struct fb_cursorstate *state,
+ const struct amifb_par *par)
{
- const unsigned long *src;
- int src_idx;
-
- while (1) {
- dst += dst_idx >> SHIFT_PER_LONG;
- dst_idx &= (BITS_PER_LONG-1);
- if ((bgcolor ^ fgcolor) & 1) {
- src = (unsigned long *)((unsigned long)data & ~(BYTES_PER_LONG-1));
- src_idx = ((unsigned long)data & (BYTES_PER_LONG-1))*8;
- if (fgcolor & 1)
- bitcpy(dst, dst_idx, src, src_idx, n);
- else
- bitcpy_not(dst, dst_idx, src, src_idx, n);
- /* set or clear */
- } else
- bitfill32(dst, dst_idx, fgcolor & 1 ? ~0 : 0, n);
- if (!--bpp)
- break;
- bgcolor >>= 1;
- fgcolor >>= 1;
- dst_idx += next_plane*8;
- }
+ state->xoffset = par->crsr.crsr_x;
+ state->yoffset = par->crsr.crsr_y;
+ state->mode = cursormode;
+ return 0;
}
-
-static void amifb_imageblit(struct fb_info *info, const struct fb_image *image)
+static int ami_set_cursorstate(struct fb_cursorstate *state,
+ struct amifb_par *par)
{
- struct amifb_par *par = (struct amifb_par *)info->par;
- int x2, y2;
- unsigned long *dst;
- int dst_idx;
- const char *src;
- u32 dx, dy, width, height, pitch;
+ par->crsr.crsr_x = state->xoffset;
+ par->crsr.crsr_y = state->yoffset;
+ if ((cursormode = state->mode) == FB_CURSOR_OFF)
+ cursorstate = -1;
+ do_cursor = 1;
+ return 0;
+}
- /*
- * We could use hardware clipping but on many cards you get around
- * hardware clipping by writing to framebuffer directly like we are
- * doing here.
- */
- x2 = image->dx + image->width;
- y2 = image->dy + image->height;
- dx = image->dx;
- dy = image->dy;
- x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual;
- y2 = y2 < info->var.yres_virtual ? y2 : info->var.yres_virtual;
- width = x2 - dx;
- height = y2 - dy;
+static void ami_set_sprite(const struct amifb_par *par)
+{
+ copins *copl, *cops;
+ u_short hs, vs, ve;
+ u_long pl, ps, pt;
+ short mx, my;
- if (image->depth == 1) {
- dst = (unsigned long *)
- ((unsigned long)info->screen_base & ~(BYTES_PER_LONG-1));
- dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG-1))*8;
- dst_idx += dy*par->next_line*8+dx;
- src = image->data;
- pitch = (image->width+7)/8;
- while (height--) {
- expand_one_line(info->var.bits_per_pixel,
- par->next_plane, dst, dst_idx, width,
- src, image->bg_color,
- image->fg_color);
- dst_idx += par->next_line*8;
- src += pitch;
+ cops = copdisplay.list[currentcop][0];
+ copl = copdisplay.list[currentcop][1];
+ ps = pl = ZTWO_PADDR(dummysprite);
+ mx = par->crsr.crsr_x - par->crsr.spot_x;
+ my = par->crsr.crsr_y - par->crsr.spot_y;
+ if (!(par->vmode & FB_VMODE_YWRAP)) {
+ mx -= par->xoffset;
+ my -= par->yoffset;
+ }
+ if (!is_blanked && cursorstate > 0 && par->crsr.height > 0 &&
+ mx > -(short)par->crsr.width && mx < par->xres &&
+ my > -(short)par->crsr.height && my < par->yres) {
+ pl = ZTWO_PADDR(lofsprite);
+ hs = par->diwstrt_h + (mx << par->clk_shift) - 4;
+ vs = par->diwstrt_v + (my << par->line_shift);
+ ve = vs + (par->crsr.height << par->line_shift);
+ if (par->bplcon0 & BPC0_LACE) {
+ ps = ZTWO_PADDR(shfsprite);
+ lofsprite[0] = spr2hw_pos(vs, hs);
+ shfsprite[0] = spr2hw_pos(vs + 1, hs);
+ if (mod2(vs)) {
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
+ shfsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs + 1, hs, ve + 1);
+ pt = pl; pl = ps; ps = pt;
+ } else {
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve + 1);
+ shfsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs + 1, hs, ve);
+ }
+ } else {
+ lofsprite[0] = spr2hw_pos(vs, hs) | (IS_AGA && (par->fmode & FMODE_BSCAN2) ? 0x80 : 0);
+ lofsprite[1 << par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
}
- } else {
- c2p_planar(info->screen_base, image->data, dx, dy, width,
- height, par->next_line, par->next_plane,
- image->width, info->var.bits_per_pixel);
+ }
+ copl[cop_spr0ptrh].w[1] = highw(pl);
+ copl[cop_spr0ptrl].w[1] = loww(pl);
+ if (par->bplcon0 & BPC0_LACE) {
+ cops[cop_spr0ptrh].w[1] = highw(ps);
+ cops[cop_spr0ptrl].w[1] = loww(ps);
}
}
/*
- * Amiga Frame Buffer Specific ioctls
+ * Initialise the Copper Initialisation List
*/
-static int amifb_ioctl(struct fb_info *info,
- unsigned int cmd, unsigned long arg)
+static void __init ami_init_copper(void)
{
- union {
- struct fb_fix_cursorinfo fix;
- struct fb_var_cursorinfo var;
- struct fb_cursorstate state;
- } crsr;
- void __user *argp = (void __user *)arg;
+ copins *cop = copdisplay.init;
+ u_long p;
int i;
- switch (cmd) {
- case FBIOGET_FCURSORINFO:
- i = ami_get_fix_cursorinfo(&crsr.fix);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.fix,
- sizeof(crsr.fix)) ? -EFAULT : 0;
-
- case FBIOGET_VCURSORINFO:
- i = ami_get_var_cursorinfo(&crsr.var,
- ((struct fb_var_cursorinfo __user *)arg)->data);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.var,
- sizeof(crsr.var)) ? -EFAULT : 0;
-
- case FBIOPUT_VCURSORINFO:
- if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
- return -EFAULT;
- return ami_set_var_cursorinfo(&crsr.var,
- ((struct fb_var_cursorinfo __user *)arg)->data);
-
- case FBIOGET_CURSORSTATE:
- i = ami_get_cursorstate(&crsr.state);
- if (i)
- return i;
- return copy_to_user(argp, &crsr.state,
- sizeof(crsr.state)) ? -EFAULT : 0;
-
- case FBIOPUT_CURSORSTATE:
- if (copy_from_user(&crsr.state, argp,
- sizeof(crsr.state)))
- return -EFAULT;
- return ami_set_cursorstate(&crsr.state);
+ if (!IS_OCS) {
+ (cop++)->l = CMOVE(BPC0_COLOR | BPC0_SHRES | BPC0_ECSENA, bplcon0);
+ (cop++)->l = CMOVE(0x0181, diwstrt);
+ (cop++)->l = CMOVE(0x0281, diwstop);
+ (cop++)->l = CMOVE(0x0000, diwhigh);
+ } else
+ (cop++)->l = CMOVE(BPC0_COLOR, bplcon0);
+ p = ZTWO_PADDR(dummysprite);
+ for (i = 0; i < 8; i++) {
+ (cop++)->l = CMOVE(0, spr[i].pos);
+ (cop++)->l = CMOVE(highw(p), sprpt[i]);
+ (cop++)->l = CMOVE2(loww(p), sprpt[i]);
}
- return -EINVAL;
-}
-
- /*
- * Allocate, Clear and Align a Block of Chip Memory
- */
-
-static void *aligned_chipptr;
+ (cop++)->l = CMOVE(IF_SETCLR | IF_COPER, intreq);
+ copdisplay.wait = cop;
+ (cop++)->l = CEND;
+ (cop++)->l = CMOVE(0, copjmp2);
+ cop->l = CEND;
-static inline u_long __init chipalloc(u_long size)
-{
- aligned_chipptr = amiga_chip_alloc(size, "amifb [RAM]");
- if (!aligned_chipptr) {
- pr_err("amifb: No Chip RAM for frame buffer");
- return 0;
- }
- memset(aligned_chipptr, 0, size);
- return (u_long)aligned_chipptr;
+ custom.cop1lc = (u_short *)ZTWO_PADDR(copdisplay.init);
+ custom.copjmp1 = 0;
}
-static inline void chipfree(void)
+static void ami_reinit_copper(const struct amifb_par *par)
{
- if (aligned_chipptr)
- amiga_chip_free(aligned_chipptr);
+ copdisplay.init[cip_bplcon0].w[1] = ~(BPC0_BPU3 | BPC0_BPU2 | BPC0_BPU1 | BPC0_BPU0) & par->bplcon0;
+ copdisplay.wait->l = CWAIT(32, par->diwstrt_v - 4);
}
/*
- * Initialisation
+ * Rebuild the Copper List
+ *
+ * We only change the things that are not static
*/
-static int __init amifb_probe(struct platform_device *pdev)
+static void ami_rebuild_copper(const struct amifb_par *par)
{
- int tag, i, err = 0;
- u_long chipptr;
- u_int defmode;
-
-#ifndef MODULE
- char *option = NULL;
+ copins *copl, *cops;
+ u_short line, h_end1, h_end2;
+ short i;
+ u_long p;
- if (fb_get_options("amifb", &option)) {
- amifb_video_off();
- return -ENODEV;
- }
- amifb_setup(option);
-#endif
- custom.dmacon = DMAF_ALL | DMAF_MASTER;
-
- switch (amiga_chipset) {
-#ifdef CONFIG_FB_AMIGA_OCS
- case CS_OCS:
- strcat(fb_info.fix.id, "OCS");
-default_chipset:
- chipset = TAG_OCS;
- maxdepth[TAG_SHRES] = 0; /* OCS means no SHRES */
- maxdepth[TAG_HIRES] = 4;
- maxdepth[TAG_LORES] = 6;
- maxfmode = TAG_FMODE_1;
- defmode = amiga_vblank == 50 ? DEFMODE_PAL
- : DEFMODE_NTSC;
- fb_info.fix.smem_len = VIDEOMEMSIZE_OCS;
- break;
-#endif /* CONFIG_FB_AMIGA_OCS */
-
-#ifdef CONFIG_FB_AMIGA_ECS
- case CS_ECS:
- strcat(fb_info.fix.id, "ECS");
- chipset = TAG_ECS;
- maxdepth[TAG_SHRES] = 2;
- maxdepth[TAG_HIRES] = 4;
- maxdepth[TAG_LORES] = 6;
- maxfmode = TAG_FMODE_1;
- if (AMIGAHW_PRESENT(AMBER_FF))
- defmode = amiga_vblank == 50 ? DEFMODE_AMBER_PAL
- : DEFMODE_AMBER_NTSC;
- else
- defmode = amiga_vblank == 50 ? DEFMODE_PAL
- : DEFMODE_NTSC;
- if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
- VIDEOMEMSIZE_ECS_2M)
- fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_2M;
- else
- fb_info.fix.smem_len = VIDEOMEMSIZE_ECS_1M;
- break;
-#endif /* CONFIG_FB_AMIGA_ECS */
+ if (IS_AGA && maxfmode + par->clk_shift == 0)
+ h_end1 = par->diwstrt_h - 64;
+ else
+ h_end1 = par->htotal - 32;
+ h_end2 = par->ddfstop + 64;
-#ifdef CONFIG_FB_AMIGA_AGA
- case CS_AGA:
- strcat(fb_info.fix.id, "AGA");
- chipset = TAG_AGA;
- maxdepth[TAG_SHRES] = 8;
- maxdepth[TAG_HIRES] = 8;
- maxdepth[TAG_LORES] = 8;
- maxfmode = TAG_FMODE_4;
- defmode = DEFMODE_AGA;
- if (amiga_chip_avail()-CHIPRAM_SAFETY_LIMIT >
- VIDEOMEMSIZE_AGA_2M)
- fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_2M;
- else
- fb_info.fix.smem_len = VIDEOMEMSIZE_AGA_1M;
- break;
-#endif /* CONFIG_FB_AMIGA_AGA */
+ ami_set_sprite(par);
- default:
-#ifdef CONFIG_FB_AMIGA_OCS
- printk("Unknown graphics chipset, defaulting to OCS\n");
- strcat(fb_info.fix.id, "Unknown");
- goto default_chipset;
-#else /* CONFIG_FB_AMIGA_OCS */
- err = -ENODEV;
- goto amifb_error;
-#endif /* CONFIG_FB_AMIGA_OCS */
- break;
+ copl = copdisplay.rebuild[1];
+ p = par->bplpt0;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if ((par->vyres - par->yoffset) != 1 || !mod2(par->diwstrt_v)) {
+ if (par->yoffset > par->vyres - par->yres) {
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (copl++)->l = CMOVE(highw(p), bplpt[i]);
+ (copl++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ line = par->diwstrt_v + ((par->vyres - par->yoffset) << par->line_shift) - 1;
+ while (line >= 512) {
+ (copl++)->l = CWAIT(h_end1, 510);
+ line -= 512;
+ }
+ if (line >= 510 && IS_AGA && maxfmode + par->clk_shift == 0)
+ (copl++)->l = CWAIT(h_end1, line);
+ else
+ (copl++)->l = CWAIT(h_end2, line);
+ p = par->bplpt0wrap;
+ }
+ } else
+ p = par->bplpt0wrap;
}
-
- /*
- * Calculate the Pixel Clock Values for this Machine
- */
-
- {
- u_long tmp = DIVUL(200000000000ULL, amiga_eclock);
-
- pixclock[TAG_SHRES] = (tmp + 4) / 8; /* SHRES: 35 ns / 28 MHz */
- pixclock[TAG_HIRES] = (tmp + 2) / 4; /* HIRES: 70 ns / 14 MHz */
- pixclock[TAG_LORES] = (tmp + 1) / 2; /* LORES: 140 ns / 7 MHz */
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (copl++)->l = CMOVE(highw(p), bplpt[i]);
+ (copl++)->l = CMOVE2(loww(p), bplpt[i]);
}
+ copl->l = CEND;
- /*
- * Replace the Tag Values with the Real Pixel Clock Values
- */
-
- for (i = 0; i < NUM_TOTAL_MODES; i++) {
- struct fb_videomode *mode = &ami_modedb[i];
- tag = mode->pixclock;
- if (tag == TAG_SHRES || tag == TAG_HIRES || tag == TAG_LORES) {
- mode->pixclock = pixclock[tag];
+ if (par->bplcon0 & BPC0_LACE) {
+ cops = copdisplay.rebuild[0];
+ p = par->bplpt0;
+ if (mod2(par->diwstrt_v))
+ p -= par->next_line;
+ else
+ p += par->next_line;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ if ((par->vyres - par->yoffset) != 1 || mod2(par->diwstrt_v)) {
+ if (par->yoffset > par->vyres - par->yres + 1) {
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (cops++)->l = CMOVE(highw(p), bplpt[i]);
+ (cops++)->l = CMOVE2(loww(p), bplpt[i]);
+ }
+ line = par->diwstrt_v + ((par->vyres - par->yoffset) << par->line_shift) - 2;
+ while (line >= 512) {
+ (cops++)->l = CWAIT(h_end1, 510);
+ line -= 512;
+ }
+ if (line > 510 && IS_AGA && maxfmode + par->clk_shift == 0)
+ (cops++)->l = CWAIT(h_end1, line);
+ else
+ (cops++)->l = CWAIT(h_end2, line);
+ p = par->bplpt0wrap;
+ if (mod2(par->diwstrt_v + par->vyres -
+ par->yoffset))
+ p -= par->next_line;
+ else
+ p += par->next_line;
+ }
+ } else
+ p = par->bplpt0wrap - par->next_line;
+ }
+ for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
+ (cops++)->l = CMOVE(highw(p), bplpt[i]);
+ (cops++)->l = CMOVE2(loww(p), bplpt[i]);
}
+ cops->l = CEND;
}
+}
+
/*
- * These monitor specs are for a typical Amiga monitor (e.g. A1960)
+ * Build the Copper List
*/
- if (fb_info.monspecs.hfmin == 0) {
- fb_info.monspecs.hfmin = 15000;
- fb_info.monspecs.hfmax = 38000;
- fb_info.monspecs.vfmin = 49;
- fb_info.monspecs.vfmax = 90;
- }
- fb_info.fbops = &amifb_ops;
-	fb_info.par = &currentpar;
- fb_info.flags = FBINFO_DEFAULT;
- fb_info.device = &pdev->dev;
+static void ami_build_copper(struct fb_info *info)
+{
+ struct amifb_par *par = info->par;
+ copins *copl, *cops;
+ u_long p;
- if (!fb_find_mode(&fb_info.var, &fb_info, mode_option, ami_modedb,
- NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) {
- err = -EINVAL;
- goto amifb_error;
- }
+ currentcop = 1 - currentcop;
- fb_videomode_to_modelist(ami_modedb, NUM_TOTAL_MODES,
- &fb_info.modelist);
+ copl = copdisplay.list[currentcop][1];
- round_down_bpp = 0;
- chipptr = chipalloc(fb_info.fix.smem_len+
- SPRITEMEMSIZE+
- DUMMYSPRITEMEMSIZE+
- COPINITSIZE+
- 4*COPLISTSIZE);
- if (!chipptr) {
- err = -ENOMEM;
- goto amifb_error;
- }
+ (copl++)->l = CWAIT(0, 10);
+ (copl++)->l = CMOVE(par->bplcon0, bplcon0);
+ (copl++)->l = CMOVE(0, sprpt[0]);
+ (copl++)->l = CMOVE2(0, sprpt[0]);
- assignchunk(videomemory, u_long, chipptr, fb_info.fix.smem_len);
- assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE);
- assignchunk(dummysprite, u_short *, chipptr, DUMMYSPRITEMEMSIZE);
- assignchunk(copdisplay.init, copins *, chipptr, COPINITSIZE);
- assignchunk(copdisplay.list[0][0], copins *, chipptr, COPLISTSIZE);
- assignchunk(copdisplay.list[0][1], copins *, chipptr, COPLISTSIZE);
- assignchunk(copdisplay.list[1][0], copins *, chipptr, COPLISTSIZE);
- assignchunk(copdisplay.list[1][1], copins *, chipptr, COPLISTSIZE);
+ if (par->bplcon0 & BPC0_LACE) {
+ cops = copdisplay.list[currentcop][0];
- /*
- * access the videomem with writethrough cache
- */
- fb_info.fix.smem_start = (u_long)ZTWO_PADDR(videomemory);
- videomemory = (u_long)ioremap_writethrough(fb_info.fix.smem_start,
- fb_info.fix.smem_len);
- if (!videomemory) {
- printk("amifb: WARNING! unable to map videomem cached writethrough\n");
- fb_info.screen_base = (char *)ZTWO_VADDR(fb_info.fix.smem_start);
- } else
- fb_info.screen_base = (char *)videomemory;
+ (cops++)->l = CWAIT(0, 10);
+ (cops++)->l = CMOVE(par->bplcon0, bplcon0);
+ (cops++)->l = CMOVE(0, sprpt[0]);
+ (cops++)->l = CMOVE2(0, sprpt[0]);
- memset(dummysprite, 0, DUMMYSPRITEMEMSIZE);
+ (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v + 1), diwstrt);
+ (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v + 1), diwstop);
+ (cops++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
+ (cops++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
+ if (!IS_OCS) {
+ (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v + 1,
+ par->diwstop_h, par->diwstop_v + 1), diwhigh);
+ (cops++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
+ par->diwstop_h, par->diwstop_v), diwhigh);
+#if 0
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt + 1), vbstrt);
+ (copl++)->l = CMOVE(vbstop2hw(par->vbstop + 1), vbstop);
+ (cops++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (cops++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
+ (cops++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
+ }
+#endif
+ }
+ p = ZTWO_PADDR(copdisplay.list[currentcop][0]);
+ (copl++)->l = CMOVE(highw(p), cop2lc);
+ (copl++)->l = CMOVE2(loww(p), cop2lc);
+ p = ZTWO_PADDR(copdisplay.list[currentcop][1]);
+ (cops++)->l = CMOVE(highw(p), cop2lc);
+ (cops++)->l = CMOVE2(loww(p), cop2lc);
+ copdisplay.rebuild[0] = cops;
+ } else {
+ (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
+ (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
+ if (!IS_OCS) {
+ (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
+ par->diwstop_h, par->diwstop_v), diwhigh);
+#if 0
+ if (par->beamcon0 & BMC0_VARBEAMEN) {
+ (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
+ (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
+ (copl++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
+ }
+#endif
+ }
+ }
+ copdisplay.rebuild[1] = copl;
- /*
- * Enable Display DMA
- */
+ ami_update_par(info);
+ ami_rebuild_copper(info->par);
+}
- custom.dmacon = DMAF_SETCLR | DMAF_MASTER | DMAF_RASTER | DMAF_COPPER |
- DMAF_BLITTER | DMAF_SPRITE;
- /*
- * Make sure the Copper has something to do
+static void __init amifb_setup_mcap(char *spec)
+{
+ char *p;
+ int vmin, vmax, hmin, hmax;
+
+ /* Format for monitor capabilities is: <Vmin>;<Vmax>;<Hmin>;<Hmax>
+ * <V*> vertical freq. in Hz
+ * <H*> horizontal freq. in kHz
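+	 * e.g. "video=amifb:monitorcap:50;90;15;38" (illustrative values only;
+	 * the ranges must match your monitor's actual specifications)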
*/
- ami_init_copper();
+ if (!(p = strsep(&spec, ";")) || !*p)
+ return;
+ vmin = simple_strtoul(p, NULL, 10);
+ if (vmin <= 0)
+ return;
+ if (!(p = strsep(&spec, ";")) || !*p)
+ return;
+ vmax = simple_strtoul(p, NULL, 10);
+ if (vmax <= 0 || vmax <= vmin)
+ return;
+ if (!(p = strsep(&spec, ";")) || !*p)
+ return;
+ hmin = 1000 * simple_strtoul(p, NULL, 10);
+ if (hmin <= 0)
+ return;
+ if (!(p = strsep(&spec, "")) || !*p)
+ return;
+ hmax = 1000 * simple_strtoul(p, NULL, 10);
+ if (hmax <= 0 || hmax <= hmin)
+ return;
- if (request_irq(IRQ_AMIGA_COPPER, amifb_interrupt, 0,
-			"fb vertb handler", &currentpar)) {
- err = -EBUSY;
- goto amifb_error;
- }
+ amifb_hfmin = hmin;
+ amifb_hfmax = hmax;
+ amifb_vfmin = vmin;
+ amifb_vfmax = vmax;
+}
- err = fb_alloc_cmap(&fb_info.cmap, 1<<fb_info.var.bits_per_pixel, 0);
- if (err)
- goto amifb_error;
+static int __init amifb_setup(char *options)
+{
+ char *this_opt;
- if (register_framebuffer(&fb_info) < 0) {
- err = -EINVAL;
- goto amifb_error;
+ if (!options || !*options)
+ return 0;
+
+ while ((this_opt = strsep(&options, ",")) != NULL) {
+ if (!*this_opt)
+ continue;
+ if (!strcmp(this_opt, "inverse")) {
+ amifb_inverse = 1;
+ fb_invert_cmaps();
+ } else if (!strcmp(this_opt, "ilbm"))
+ amifb_ilbm = 1;
+ else if (!strncmp(this_opt, "monitorcap:", 11))
+ amifb_setup_mcap(this_opt + 11);
+ else if (!strncmp(this_opt, "fstart:", 7))
+ min_fstrt = simple_strtoul(this_opt + 7, NULL, 0);
+ else
+ mode_option = this_opt;
}
- printk("fb%d: %s frame buffer device, using %dK of video memory\n",
- fb_info.node, fb_info.fix.id, fb_info.fix.smem_len>>10);
+ if (min_fstrt < 48)
+ min_fstrt = 48;
return 0;
-
-amifb_error:
- amifb_deinit(pdev);
- return err;
}
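
Purely for illustration (not part of the patch): the parser above consumes a comma-separated option string handed over by fb_get_options(), so a boot line of the following shape would invert the colour maps, declare a 50-90 Hz / 31-64 kHz monitor range and leave the last token as the video mode name. The concrete numbers and the mode name are placeholders.

	video=amifb:inverse,monitorcap:50;90;31;64,pal-lace
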
-static void amifb_deinit(struct platform_device *pdev)
+
+static int amifb_check_var(struct fb_var_screeninfo *var,
+ struct fb_info *info)
{
- if (fb_info.cmap.len)
- fb_dealloc_cmap(&fb_info.cmap);
- fb_dealloc_cmap(&fb_info.cmap);
- chipfree();
- if (videomemory)
- iounmap((void*)videomemory);
- custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ int err;
+ struct amifb_par par;
+
+ /* Validate wanted screen parameters */
+ err = ami_decode_var(var, &par, info);
+ if (err)
+ return err;
+
+ /* Encode (possibly rounded) screen parameters */
+ ami_encode_var(var, &par);
+ return 0;
}
- /*
- * Blank the display.
- */
-
-static int amifb_blank(int blank, struct fb_info *info)
-{
- do_blank = blank ? blank : -1;
-
- return 0;
-}
-
- /*
- * Flash the cursor (called by VBlank interrupt)
- */
-
-static int flash_cursor(void)
+static int amifb_set_par(struct fb_info *info)
{
- static int cursorcount = 1;
+ struct amifb_par *par = info->par;
+ int error;
- if (cursormode == FB_CURSOR_FLASH) {
- if (!--cursorcount) {
- cursorstate = -cursorstate;
- cursorcount = cursorrate;
- if (!is_blanked)
- return 1;
- }
- }
- return 0;
-}
+ do_vmode_pan = 0;
+ do_vmode_full = 0;
- /*
- * VBlank Display Interrupt
- */
+ /* Decode wanted screen parameters */
+ error = ami_decode_var(&info->var, par, info);
+ if (error)
+ return error;
-static irqreturn_t amifb_interrupt(int irq, void *dev_id)
-{
- if (do_vmode_pan || do_vmode_full)
- ami_update_display();
+ /* Set new videomode */
+ ami_build_copper(info);
- if (do_vmode_full)
- ami_init_display();
+ /* Set VBlank trigger */
+ do_vmode_full = 1;
- if (do_vmode_pan) {
- flash_cursor();
- ami_rebuild_copper();
- do_cursor = do_vmode_pan = 0;
- } else if (do_cursor) {
- flash_cursor();
- ami_set_sprite();
- do_cursor = 0;
+ /* Update fix for new screen parameters */
+ if (par->bpp == 1) {
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.type_aux = 0;
+ } else if (amifb_ilbm) {
+ info->fix.type = FB_TYPE_INTERLEAVED_PLANES;
+ info->fix.type_aux = par->next_line;
} else {
- if (flash_cursor())
- ami_set_sprite();
- }
-
- if (do_blank) {
- ami_do_blank();
- do_blank = 0;
+ info->fix.type = FB_TYPE_PLANES;
+ info->fix.type_aux = 0;
}
+ info->fix.line_length = div8(upx(16 << maxfmode, par->vxres));
- if (do_vmode_full) {
- ami_reinit_copper();
- do_vmode_full = 0;
+ if (par->vmode & FB_VMODE_YWRAP) {
+ info->fix.ywrapstep = 1;
+ info->fix.xpanstep = 0;
+ info->fix.ypanstep = 0;
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YWRAP |
+ FBINFO_READS_FAST; /* override SCROLL_REDRAW */
+ } else {
+ info->fix.ywrapstep = 0;
+ if (par->vmode & FB_VMODE_SMOOTH_XPAN)
+ info->fix.xpanstep = 1;
+ else
+ info->fix.xpanstep = 16 << maxfmode;
+ info->fix.ypanstep = 1;
+ info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;
}
- return IRQ_HANDLED;
+ return 0;
}
-/* --------------------------- Hardware routines --------------------------- */
/*
- * Get the video params out of `var'. If a value doesn't fit, round
- * it up, if it's too big, return -EINVAL.
+ * Set a single color register. The values supplied are already
+ * rounded down to the hardware's capabilities (according to the
+ * entries in the var structure). Return != 0 for invalid regno.
*/
-static int ami_decode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par)
+static int amifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
+ u_int transp, struct fb_info *info)
{
- u_short clk_shift, line_shift;
- u_long maxfetchstop, fstrt, fsize, fconst, xres_n, yres_n;
- u_int htotal, vtotal;
-
- /*
- * Find a matching Pixel Clock
- */
-
- for (clk_shift = TAG_SHRES; clk_shift <= TAG_LORES; clk_shift++)
- if (var->pixclock <= pixclock[clk_shift])
- break;
- if (clk_shift > TAG_LORES) {
- DPRINTK("pixclock too high\n");
- return -EINVAL;
- }
- par->clk_shift = clk_shift;
+ const struct amifb_par *par = info->par;
- /*
- * Check the Geometry Values
- */
-
- if ((par->xres = var->xres) < 64)
- par->xres = 64;
- if ((par->yres = var->yres) < 64)
- par->yres = 64;
- if ((par->vxres = var->xres_virtual) < par->xres)
- par->vxres = par->xres;
- if ((par->vyres = var->yres_virtual) < par->yres)
- par->vyres = par->yres;
-
- par->bpp = var->bits_per_pixel;
- if (!var->nonstd) {
- if (par->bpp < 1)
- par->bpp = 1;
- if (par->bpp > maxdepth[clk_shift]) {
- if (round_down_bpp && maxdepth[clk_shift])
- par->bpp = maxdepth[clk_shift];
- else {
- DPRINTK("invalid bpp\n");
- return -EINVAL;
- }
- }
- } else if (var->nonstd == FB_NONSTD_HAM) {
- if (par->bpp < 6)
- par->bpp = 6;
- if (par->bpp != 6) {
- if (par->bpp < 8)
- par->bpp = 8;
- if (par->bpp != 8 || !IS_AGA) {
- DPRINTK("invalid bpp for ham mode\n");
- return -EINVAL;
- }
- }
+ if (IS_AGA) {
+ if (regno > 255)
+ return 1;
+ } else if (par->bplcon0 & BPC0_SHRES) {
+ if (regno > 3)
+ return 1;
} else {
- DPRINTK("unknown nonstd mode\n");
- return -EINVAL;
+ if (regno > 31)
+ return 1;
}
-
- /*
- * FB_VMODE_SMOOTH_XPAN will be cleared, if one of the folloing
- * checks failed and smooth scrolling is not possible
- */
-
- par->vmode = var->vmode | FB_VMODE_SMOOTH_XPAN;
- switch (par->vmode & FB_VMODE_MASK) {
- case FB_VMODE_INTERLACED:
- line_shift = 0;
- break;
- case FB_VMODE_NONINTERLACED:
- line_shift = 1;
- break;
- case FB_VMODE_DOUBLE:
- if (!IS_AGA) {
- DPRINTK("double mode only possible with aga\n");
- return -EINVAL;
- }
- line_shift = 2;
- break;
- default:
- DPRINTK("unknown video mode\n");
- return -EINVAL;
- break;
+ red >>= 8;
+ green >>= 8;
+ blue >>= 8;
+ if (!regno) {
+ red0 = red;
+ green0 = green;
+ blue0 = blue;
}
- par->line_shift = line_shift;
/*
- * Vertical and Horizontal Timings
+ * Update the corresponding Hardware Color Register, unless it's Color
+ * Register 0 and the screen is blanked.
+ *
+ * VBlank is switched off to protect bplcon3 or ecs_palette[] from
+ * being changed by ami_do_blank() during the VBlank.
*/
- xres_n = par->xres<<clk_shift;
- yres_n = par->yres<<line_shift;
- par->htotal = down8((var->left_margin+par->xres+var->right_margin+var->hsync_len)<<clk_shift);
- par->vtotal = down2(((var->upper_margin+par->yres+var->lower_margin+var->vsync_len)<<line_shift)+1);
-
- if (IS_AGA)
- par->bplcon3 = sprpixmode[clk_shift];
- else
- par->bplcon3 = 0;
- if (var->sync & FB_SYNC_BROADCAST) {
- par->diwstop_h = par->htotal-((var->right_margin-var->hsync_len)<<clk_shift);
- if (IS_AGA)
- par->diwstop_h += mod4(var->hsync_len);
- else
- par->diwstop_h = down4(par->diwstop_h);
-
- par->diwstrt_h = par->diwstop_h - xres_n;
- par->diwstop_v = par->vtotal-((var->lower_margin-var->vsync_len)<<line_shift);
- par->diwstrt_v = par->diwstop_v - yres_n;
- if (par->diwstop_h >= par->htotal+8) {
- DPRINTK("invalid diwstop_h\n");
- return -EINVAL;
- }
- if (par->diwstop_v > par->vtotal) {
- DPRINTK("invalid diwstop_v\n");
- return -EINVAL;
- }
+ if (regno || !is_blanked) {
+#if defined(CONFIG_FB_AMIGA_AGA)
+ if (IS_AGA) {
+ u_short bplcon3 = par->bplcon3;
+ VBlankOff();
+ custom.bplcon3 = bplcon3 | (regno << 8 & 0xe000);
+ custom.color[regno & 31] = rgb2hw8_high(red, green,
+ blue);
+ custom.bplcon3 = bplcon3 | (regno << 8 & 0xe000) |
+ BPC3_LOCT;
+ custom.color[regno & 31] = rgb2hw8_low(red, green,
+ blue);
+ custom.bplcon3 = bplcon3;
+ VBlankOn();
+ } else
+#endif
+#if defined(CONFIG_FB_AMIGA_ECS)
+ if (par->bplcon0 & BPC0_SHRES) {
+ u_short color, mask;
+ int i;
- if (!IS_OCS) {
- /* Initialize sync with some reasonable values for pwrsave */
- par->hsstrt = 160;
- par->hsstop = 320;
- par->vsstrt = 30;
- par->vsstop = 34;
- } else {
- par->hsstrt = 0;
- par->hsstop = 0;
- par->vsstrt = 0;
- par->vsstop = 0;
- }
- if (par->vtotal > (PAL_VTOTAL+NTSC_VTOTAL)/2) {
- /* PAL video mode */
- if (par->htotal != PAL_HTOTAL) {
- DPRINTK("htotal invalid for pal\n");
- return -EINVAL;
- }
- if (par->diwstrt_h < PAL_DIWSTRT_H) {
- DPRINTK("diwstrt_h too low for pal\n");
- return -EINVAL;
- }
- if (par->diwstrt_v < PAL_DIWSTRT_V) {
- DPRINTK("diwstrt_v too low for pal\n");
- return -EINVAL;
- }
- htotal = PAL_HTOTAL>>clk_shift;
- vtotal = PAL_VTOTAL>>1;
- if (!IS_OCS) {
- par->beamcon0 = BMC0_PAL;
- par->bplcon3 |= BPC3_BRDRBLNK;
- } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
- AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
- par->beamcon0 = BMC0_PAL;
- par->hsstop = 1;
- } else if (amiga_vblank != 50) {
- DPRINTK("pal not supported by this chipset\n");
- return -EINVAL;
- }
- } else {
- /* NTSC video mode
- * In the AGA chipset seems to be hardware bug with BPC3_BRDRBLNK
- * and NTSC activated, so than better let diwstop_h <= 1812
- */
- if (par->htotal != NTSC_HTOTAL) {
- DPRINTK("htotal invalid for ntsc\n");
- return -EINVAL;
- }
- if (par->diwstrt_h < NTSC_DIWSTRT_H) {
- DPRINTK("diwstrt_h too low for ntsc\n");
- return -EINVAL;
- }
- if (par->diwstrt_v < NTSC_DIWSTRT_V) {
- DPRINTK("diwstrt_v too low for ntsc\n");
- return -EINVAL;
- }
- htotal = NTSC_HTOTAL>>clk_shift;
- vtotal = NTSC_VTOTAL>>1;
- if (!IS_OCS) {
- par->beamcon0 = 0;
- par->bplcon3 |= BPC3_BRDRBLNK;
- } else if (AMIGAHW_PRESENT(AGNUS_HR_PAL) ||
- AMIGAHW_PRESENT(AGNUS_HR_NTSC)) {
- par->beamcon0 = 0;
- par->hsstop = 1;
- } else if (amiga_vblank != 60) {
- DPRINTK("ntsc not supported by this chipset\n");
- return -EINVAL;
- }
- }
- if (IS_OCS) {
- if (par->diwstrt_h >= 1024 || par->diwstop_h < 1024 ||
- par->diwstrt_v >= 512 || par->diwstop_v < 256) {
- DPRINTK("invalid position for display on ocs\n");
- return -EINVAL;
- }
- }
- } else if (!IS_OCS) {
- /* Programmable video mode */
- par->hsstrt = var->right_margin<<clk_shift;
- par->hsstop = (var->right_margin+var->hsync_len)<<clk_shift;
- par->diwstop_h = par->htotal - mod8(par->hsstrt) + 8 - (1 << clk_shift);
- if (!IS_AGA)
- par->diwstop_h = down4(par->diwstop_h) - 16;
- par->diwstrt_h = par->diwstop_h - xres_n;
- par->hbstop = par->diwstrt_h + 4;
- par->hbstrt = par->diwstop_h + 4;
- if (par->hbstrt >= par->htotal + 8)
- par->hbstrt -= par->htotal;
- par->hcenter = par->hsstrt + (par->htotal >> 1);
- par->vsstrt = var->lower_margin<<line_shift;
- par->vsstop = (var->lower_margin+var->vsync_len)<<line_shift;
- par->diwstop_v = par->vtotal;
- if ((par->vmode & FB_VMODE_MASK) == FB_VMODE_INTERLACED)
- par->diwstop_v -= 2;
- par->diwstrt_v = par->diwstop_v - yres_n;
- par->vbstop = par->diwstrt_v - 2;
- par->vbstrt = par->diwstop_v - 2;
- if (par->vtotal > 2048) {
- DPRINTK("vtotal too high\n");
- return -EINVAL;
- }
- if (par->htotal > 2048) {
- DPRINTK("htotal too high\n");
- return -EINVAL;
- }
- par->bplcon3 |= BPC3_EXTBLKEN;
- par->beamcon0 = BMC0_HARDDIS | BMC0_VARVBEN | BMC0_LOLDIS |
- BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARBEAMEN |
- BMC0_PAL | BMC0_VARCSYEN;
- if (var->sync & FB_SYNC_HOR_HIGH_ACT)
- par->beamcon0 |= BMC0_HSYTRUE;
- if (var->sync & FB_SYNC_VERT_HIGH_ACT)
- par->beamcon0 |= BMC0_VSYTRUE;
- if (var->sync & FB_SYNC_COMP_HIGH_ACT)
- par->beamcon0 |= BMC0_CSYTRUE;
- htotal = par->htotal>>clk_shift;
- vtotal = par->vtotal>>1;
- } else {
- DPRINTK("only broadcast modes possible for ocs\n");
- return -EINVAL;
+ mask = 0x3333;
+ color = rgb2hw2(red, green, blue);
+ VBlankOff();
+ for (i = regno + 12; i >= (int)regno; i -= 4)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ mask <<= 2; color >>= 2;
+ regno = down16(regno) + mul4(mod4(regno));
+ for (i = regno + 3; i >= (int)regno; i--)
+ custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
+ VBlankOn();
+ } else
+#endif
+ custom.color[regno] = rgb2hw4(red, green, blue);
}
+ return 0;
+}
+
/*
- * Checking the DMA timing
+ * Blank the display.
*/
- fconst = 16<<maxfmode<<clk_shift;
+static int amifb_blank(int blank, struct fb_info *info)
+{
+ do_blank = blank ? blank : -1;
+
+ return 0;
+}
+
/*
- * smallest window start value without turn off other dma cycles
- * than sprite1-7, unless you change min_fstrt
+ * Pan or Wrap the Display
+ *
+ * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
*/
-
- fsize = ((maxfmode+clk_shift <= 1) ? fconst : 64);
- fstrt = downx(fconst, par->diwstrt_h-4) - fsize;
- if (fstrt < min_fstrt) {
- DPRINTK("fetch start too low\n");
- return -EINVAL;
+static int amifb_pan_display(struct fb_var_screeninfo *var,
+ struct fb_info *info)
+{
+ if (var->vmode & FB_VMODE_YWRAP) {
+ if (var->yoffset < 0 ||
+ var->yoffset >= info->var.yres_virtual || var->xoffset)
+ return -EINVAL;
+ } else {
+ /*
+ * TODO: There will be problems when xpan!=1, so some columns
+ * on the right side will never be seen
+ */
+ if (var->xoffset + info->var.xres >
+ upx(16 << maxfmode, info->var.xres_virtual) ||
+ var->yoffset + info->var.yres > info->var.yres_virtual)
+ return -EINVAL;
}
+ ami_pan_var(var, info);
+ info->var.xoffset = var->xoffset;
+ info->var.yoffset = var->yoffset;
+ if (var->vmode & FB_VMODE_YWRAP)
+ info->var.vmode |= FB_VMODE_YWRAP;
+ else
+ info->var.vmode &= ~FB_VMODE_YWRAP;
+ return 0;
+}
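
For context only, a minimal user-space sketch of how this handler is normally reached, via the generic FBIOPAN_DISPLAY ioctl from <linux/fb.h>; the device node and offsets are arbitrary examples and error handling is trimmed.

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <linux/fb.h>

	int main(void)
	{
		struct fb_var_screeninfo var;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0 || ioctl(fd, FBIOGET_VSCREENINFO, &var) < 0)
			return 1;
		var.xoffset = 0;
		var.yoffset = 100;	/* pan 100 lines down into the virtual screen */
		return ioctl(fd, FBIOPAN_DISPLAY, &var) < 0;
	}
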
+
+
+#if BITS_PER_LONG == 32
+#define BYTES_PER_LONG 4
+#define SHIFT_PER_LONG 5
+#elif BITS_PER_LONG == 64
+#define BYTES_PER_LONG 8
+#define SHIFT_PER_LONG 6
+#else
+#define Please update me
+#endif
+
/*
- * smallest window start value where smooth scrolling is possible
+ * Compose two values, using a bitmask as decision value
+ * This is equivalent to (a & mask) | (b & ~mask)
*/
- fstrt = downx(fconst, par->diwstrt_h-fconst+(1<<clk_shift)-4) - fsize;
- if (fstrt < min_fstrt)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+static inline unsigned long comp(unsigned long a, unsigned long b,
+ unsigned long mask)
+{
+ return ((a ^ b) & mask) ^ b;
+}
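
A quick sanity check of the identity claimed in the comment above, with made-up 4-bit values:

	/*
	 * Example: a = 0b1100, b = 0b1010, mask = 0b1001
	 *   ((a ^ b) & mask) ^ b     = (0b0110 & 0b1001) ^ 0b1010 = 0b1010
	 *   (a & mask) | (b & ~mask) =  0b1000 | 0b0010            = 0b1010
	 * Bits selected by the mask are taken from a, the rest from b.
	 */
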
- maxfetchstop = down16(par->htotal - 80);
- fstrt = downx(fconst, par->diwstrt_h-4) - 64 - fconst;
- fsize = upx(fconst, xres_n + modx(fconst, downx(1<<clk_shift, par->diwstrt_h-4)));
- if (fstrt + fsize > maxfetchstop)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+static inline unsigned long xor(unsigned long a, unsigned long b,
+ unsigned long mask)
+{
+ return (a & mask) ^ b;
+}
- fsize = upx(fconst, xres_n);
- if (fstrt + fsize > maxfetchstop) {
- DPRINTK("fetch stop too high\n");
- return -EINVAL;
- }
- if (maxfmode + clk_shift <= 1) {
- fsize = up64(xres_n + fconst - 1);
- if (min_fstrt + fsize - 64 > maxfetchstop)
- par->vmode &= ~FB_VMODE_SMOOTH_XPAN;
+ /*
+ * Unaligned forward bit copy using 32-bit or 64-bit memory accesses
+ */
- fsize = up64(xres_n);
- if (min_fstrt + fsize - 64 > maxfetchstop) {
- DPRINTK("fetch size too high\n");
- return -EINVAL;
+static void bitcpy(unsigned long *dst, int dst_idx, const unsigned long *src,
+ int src_idx, u32 n)
+{
+ unsigned long first, last;
+ int shift = dst_idx - src_idx, left, right;
+ unsigned long d0, d1;
+ int m;
+
+ if (!n)
+ return;
+
+ shift = dst_idx - src_idx;
+ first = ~0UL >> dst_idx;
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
+
+ if (!shift) {
+ // Same alignment for source and dest
+
+ if (dst_idx + n <= BITS_PER_LONG) {
+ // Single word
+ if (last)
+ first &= last;
+ *dst = comp(*src, *dst, first);
+ } else {
+ // Multiple destination words
+ // Leading bits
+ if (first) {
+ *dst = comp(*src, *dst, first);
+ dst++;
+ src++;
+ n -= BITS_PER_LONG - dst_idx;
+ }
+
+ // Main chunk
+ n /= BITS_PER_LONG;
+ while (n >= 8) {
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ *dst++ = *src++;
+ n -= 8;
+ }
+ while (n--)
+ *dst++ = *src++;
+
+ // Trailing bits
+ if (last)
+ *dst = comp(*src, *dst, last);
}
+ } else {
+ // Different alignment for source and dest
- fsize -= 64;
- } else
- fsize -= fconst;
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- /*
- * Check if there is enough time to update the bitplane pointers for ywrap
- */
+ if (dst_idx + n <= BITS_PER_LONG) {
+ // Single destination word
+ if (last)
+ first &= last;
+ if (shift > 0) {
+ // Single source word
+ *dst = comp(*src >> right, *dst, first);
+ } else if (src_idx + n <= BITS_PER_LONG) {
+ // Single source word
+ *dst = comp(*src << left, *dst, first);
+ } else {
+ // 2 source words
+ d0 = *src++;
+ d1 = *src;
+ *dst = comp(d0 << left | d1 >> right, *dst,
+ first);
+ }
+ } else {
+ // Multiple destination words
+ d0 = *src++;
+ // Leading bits
+ if (shift > 0) {
+ // Single source word
+ *dst = comp(d0 >> right, *dst, first);
+ dst++;
+ n -= BITS_PER_LONG - dst_idx;
+ } else {
+ // 2 source words
+ d1 = *src++;
+ *dst = comp(d0 << left | d1 >> right, *dst,
+ first);
+ d0 = d1;
+ dst++;
+ n -= BITS_PER_LONG - dst_idx;
+ }
+
+ // Main chunk
+ m = n % BITS_PER_LONG;
+ n /= BITS_PER_LONG;
+ while (n >= 4) {
+ d1 = *src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ d1 = *src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ d1 = *src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ d1 = *src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = *src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ }
+
+ // Trailing bits
+ if (last) {
+ if (m <= right) {
+ // Single source word
+ *dst = comp(d0 << left, *dst, last);
+ } else {
+ // 2 source words
+ d1 = *src;
+ *dst = comp(d0 << left | d1 >> right,
+ *dst, last);
+ }
+ }
+ }
+ }
+}
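
An illustrative trace of the single-destination-word path above, assuming BITS_PER_LONG == 32 and the MSB-first bit numbering these helpers use:

	/*
	 * bitcpy(dst, 0, src, 4, 16):
	 *   shift = -4, left = 4, first = last = 0xffff0000
	 *   *dst = comp(*src << 4, *dst, 0xffff0000);
	 * i.e. the 16 bits starting at source bit 4 end up in destination
	 * bits 0..15, while the remaining bits of *dst are preserved.
	 */
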
- if (par->htotal-fsize-64 < par->bpp*64)
- par->vmode &= ~FB_VMODE_YWRAP;
/*
- * Bitplane calculations and check the Memory Requirements
+ * Unaligned reverse bit copy using 32-bit or 64-bit memory accesses
*/
- if (amifb_ilbm) {
- par->next_plane = div8(upx(16<<maxfmode, par->vxres));
- par->next_line = par->bpp*par->next_plane;
- if (par->next_line * par->vyres > fb_info.fix.smem_len) {
- DPRINTK("too few video mem\n");
- return -EINVAL;
+static void bitcpy_rev(unsigned long *dst, int dst_idx,
+ const unsigned long *src, int src_idx, u32 n)
+{
+ unsigned long first, last;
+ int shift = dst_idx - src_idx, left, right;
+ unsigned long d0, d1;
+ int m;
+
+ if (!n)
+ return;
+
+ dst += (n - 1) / BITS_PER_LONG;
+ src += (n - 1) / BITS_PER_LONG;
+ if ((n - 1) % BITS_PER_LONG) {
+ dst_idx += (n - 1) % BITS_PER_LONG;
+ dst += dst_idx >> SHIFT_PER_LONG;
+ dst_idx &= BITS_PER_LONG - 1;
+ src_idx += (n - 1) % BITS_PER_LONG;
+ src += src_idx >> SHIFT_PER_LONG;
+ src_idx &= BITS_PER_LONG - 1;
+ }
+
+ shift = dst_idx - src_idx;
+ first = ~0UL << (BITS_PER_LONG - 1 - dst_idx);
+ last = ~(~0UL << (BITS_PER_LONG - 1 - ((dst_idx - n) % BITS_PER_LONG)));
+
+ if (!shift) {
+ // Same alignment for source and dest
+
+ if ((unsigned long)dst_idx + 1 >= n) {
+ // Single word
+ if (last)
+ first &= last;
+ *dst = comp(*src, *dst, first);
+ } else {
+ // Multiple destination words
+ // Leading bits
+ if (first) {
+ *dst = comp(*src, *dst, first);
+ dst--;
+ src--;
+ n -= dst_idx + 1;
+ }
+
+ // Main chunk
+ n /= BITS_PER_LONG;
+ while (n >= 8) {
+ *dst-- = *src--;
+ *dst-- = *src--;
+ *dst-- = *src--;
+ *dst-- = *src--;
+ *dst-- = *src--;
+ *dst-- = *src--;
+ *dst-- = *src--;
+ *dst-- = *src--;
+ n -= 8;
+ }
+ while (n--)
+ *dst-- = *src--;
+
+ // Trailing bits
+ if (last)
+ *dst = comp(*src, *dst, last);
}
} else {
- par->next_line = div8(upx(16<<maxfmode, par->vxres));
- par->next_plane = par->vyres*par->next_line;
- if (par->next_plane * par->bpp > fb_info.fix.smem_len) {
- DPRINTK("too few video mem\n");
- return -EINVAL;
- }
- }
-
- /*
- * Hardware Register Values
- */
-
- par->bplcon0 = BPC0_COLOR | bplpixmode[clk_shift];
- if (!IS_OCS)
- par->bplcon0 |= BPC0_ECSENA;
- if (par->bpp == 8)
- par->bplcon0 |= BPC0_BPU3;
- else
- par->bplcon0 |= par->bpp<<12;
- if (var->nonstd == FB_NONSTD_HAM)
- par->bplcon0 |= BPC0_HAM;
- if (var->sync & FB_SYNC_EXT)
- par->bplcon0 |= BPC0_ERSY;
-
- if (IS_AGA)
- par->fmode = bplfetchmode[maxfmode];
+ // Different alignment for source and dest
- switch (par->vmode & FB_VMODE_MASK) {
- case FB_VMODE_INTERLACED:
- par->bplcon0 |= BPC0_LACE;
- break;
- case FB_VMODE_DOUBLE:
- if (IS_AGA)
- par->fmode |= FMODE_SSCAN2 | FMODE_BSCAN2;
- break;
- }
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- if (!((par->vmode ^ var->vmode) & FB_VMODE_YWRAP)) {
- par->xoffset = var->xoffset;
- par->yoffset = var->yoffset;
- if (par->vmode & FB_VMODE_YWRAP) {
- if (par->xoffset || par->yoffset < 0 || par->yoffset >= par->vyres)
- par->xoffset = par->yoffset = 0;
+ if ((unsigned long)dst_idx + 1 >= n) {
+ // Single destination word
+ if (last)
+ first &= last;
+ if (shift < 0) {
+ // Single source word
+ *dst = comp(*src << left, *dst, first);
+ } else if (1 + (unsigned long)src_idx >= n) {
+ // Single source word
+ *dst = comp(*src >> right, *dst, first);
+ } else {
+ // 2 source words
+ d0 = *src--;
+ d1 = *src;
+ *dst = comp(d0 >> right | d1 << left, *dst,
+ first);
+ }
} else {
- if (par->xoffset < 0 || par->xoffset > upx(16<<maxfmode, par->vxres-par->xres) ||
- par->yoffset < 0 || par->yoffset > par->vyres-par->yres)
- par->xoffset = par->yoffset = 0;
- }
- } else
- par->xoffset = par->yoffset = 0;
+ // Multiple destination words
+ d0 = *src--;
+ // Leading bits
+ if (shift < 0) {
+ // Single source word
+ *dst = comp(d0 << left, *dst, first);
+ dst--;
+ n -= dst_idx + 1;
+ } else {
+ // 2 source words
+ d1 = *src--;
+ *dst = comp(d0 >> right | d1 << left, *dst,
+ first);
+ d0 = d1;
+ dst--;
+ n -= dst_idx + 1;
+ }
- par->crsr.crsr_x = par->crsr.crsr_y = 0;
- par->crsr.spot_x = par->crsr.spot_y = 0;
- par->crsr.height = par->crsr.width = 0;
+ // Main chunk
+ m = n % BITS_PER_LONG;
+ n /= BITS_PER_LONG;
+ while (n >= 4) {
+ d1 = *src--;
+ *dst-- = d0 >> right | d1 << left;
+ d0 = d1;
+ d1 = *src--;
+ *dst-- = d0 >> right | d1 << left;
+ d0 = d1;
+ d1 = *src--;
+ *dst-- = d0 >> right | d1 << left;
+ d0 = d1;
+ d1 = *src--;
+ *dst-- = d0 >> right | d1 << left;
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = *src--;
+ *dst-- = d0 >> right | d1 << left;
+ d0 = d1;
+ }
- return 0;
+ // Trailing bits
+ if (last) {
+ if (m <= left) {
+ // Single source word
+ *dst = comp(d0 >> right, *dst, last);
+ } else {
+ // 2 source words
+ d1 = *src;
+ *dst = comp(d0 >> right | d1 << left,
+ *dst, last);
+ }
+ }
+ }
+ }
}
+
/*
- * Fill the `var' structure based on the values in `par' and maybe
- * other values read out of the hardware.
+ * Unaligned forward inverting bit copy using 32-bit or 64-bit memory
+ * accesses
*/
-static int ami_encode_var(struct fb_var_screeninfo *var,
- struct amifb_par *par)
+static void bitcpy_not(unsigned long *dst, int dst_idx,
+ const unsigned long *src, int src_idx, u32 n)
{
- u_short clk_shift, line_shift;
-
- memset(var, 0, sizeof(struct fb_var_screeninfo));
-
- clk_shift = par->clk_shift;
- line_shift = par->line_shift;
+ unsigned long first, last;
+ int shift = dst_idx - src_idx, left, right;
+ unsigned long d0, d1;
+ int m;
- var->xres = par->xres;
- var->yres = par->yres;
- var->xres_virtual = par->vxres;
- var->yres_virtual = par->vyres;
- var->xoffset = par->xoffset;
- var->yoffset = par->yoffset;
+ if (!n)
+ return;
- var->bits_per_pixel = par->bpp;
- var->grayscale = 0;
+ shift = dst_idx - src_idx;
+ first = ~0UL >> dst_idx;
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
- var->red.offset = 0;
- var->red.msb_right = 0;
- var->red.length = par->bpp;
- if (par->bplcon0 & BPC0_HAM)
- var->red.length -= 2;
- var->blue = var->green = var->red;
- var->transp.offset = 0;
- var->transp.length = 0;
- var->transp.msb_right = 0;
+ if (!shift) {
+ // Same alignment for source and dest
- if (par->bplcon0 & BPC0_HAM)
- var->nonstd = FB_NONSTD_HAM;
- else
- var->nonstd = 0;
- var->activate = 0;
+ if (dst_idx + n <= BITS_PER_LONG) {
+ // Single word
+ if (last)
+ first &= last;
+ *dst = comp(~*src, *dst, first);
+ } else {
+ // Multiple destination words
+ // Leading bits
+ if (first) {
+ *dst = comp(~*src, *dst, first);
+ dst++;
+ src++;
+ n -= BITS_PER_LONG - dst_idx;
+ }
- var->height = -1;
- var->width = -1;
+ // Main chunk
+ n /= BITS_PER_LONG;
+ while (n >= 8) {
+ *dst++ = ~*src++;
+ *dst++ = ~*src++;
+ *dst++ = ~*src++;
+ *dst++ = ~*src++;
+ *dst++ = ~*src++;
+ *dst++ = ~*src++;
+ *dst++ = ~*src++;
+ *dst++ = ~*src++;
+ n -= 8;
+ }
+ while (n--)
+ *dst++ = ~*src++;
- var->pixclock = pixclock[clk_shift];
+ // Trailing bits
+ if (last)
+ *dst = comp(~*src, *dst, last);
+ }
+ } else {
+ // Different alignment for source and dest
- if (IS_AGA && par->fmode & FMODE_BSCAN2)
- var->vmode = FB_VMODE_DOUBLE;
- else if (par->bplcon0 & BPC0_LACE)
- var->vmode = FB_VMODE_INTERLACED;
- else
- var->vmode = FB_VMODE_NONINTERLACED;
+ right = shift & (BITS_PER_LONG - 1);
+ left = -shift & (BITS_PER_LONG - 1);
- if (!IS_OCS && par->beamcon0 & BMC0_VARBEAMEN) {
- var->hsync_len = (par->hsstop-par->hsstrt)>>clk_shift;
- var->right_margin = par->hsstrt>>clk_shift;
- var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
- var->vsync_len = (par->vsstop-par->vsstrt)>>line_shift;
- var->lower_margin = par->vsstrt>>line_shift;
- var->upper_margin = (par->vtotal>>line_shift) - var->yres - var->lower_margin - var->vsync_len;
- var->sync = 0;
- if (par->beamcon0 & BMC0_HSYTRUE)
- var->sync |= FB_SYNC_HOR_HIGH_ACT;
- if (par->beamcon0 & BMC0_VSYTRUE)
- var->sync |= FB_SYNC_VERT_HIGH_ACT;
- if (par->beamcon0 & BMC0_CSYTRUE)
- var->sync |= FB_SYNC_COMP_HIGH_ACT;
- } else {
- var->sync = FB_SYNC_BROADCAST;
- var->hsync_len = (152>>clk_shift) + mod4(par->diwstop_h);
- var->right_margin = ((par->htotal - down4(par->diwstop_h))>>clk_shift) + var->hsync_len;
- var->left_margin = (par->htotal>>clk_shift) - var->xres - var->right_margin - var->hsync_len;
- var->vsync_len = 4>>line_shift;
- var->lower_margin = ((par->vtotal - par->diwstop_v)>>line_shift) + var->vsync_len;
- var->upper_margin = (((par->vtotal - 2)>>line_shift) + 1) - var->yres -
- var->lower_margin - var->vsync_len;
- }
+ if (dst_idx + n <= BITS_PER_LONG) {
+ // Single destination word
+ if (last)
+ first &= last;
+ if (shift > 0) {
+ // Single source word
+ *dst = comp(~*src >> right, *dst, first);
+ } else if (src_idx + n <= BITS_PER_LONG) {
+ // Single source word
+ *dst = comp(~*src << left, *dst, first);
+ } else {
+ // 2 source words
+ d0 = ~*src++;
+ d1 = ~*src;
+ *dst = comp(d0 << left | d1 >> right, *dst,
+ first);
+ }
+ } else {
+ // Multiple destination words
+ d0 = ~*src++;
+ // Leading bits
+ if (shift > 0) {
+ // Single source word
+ *dst = comp(d0 >> right, *dst, first);
+ dst++;
+ n -= BITS_PER_LONG - dst_idx;
+ } else {
+ // 2 source words
+ d1 = ~*src++;
+ *dst = comp(d0 << left | d1 >> right, *dst,
+ first);
+ d0 = d1;
+ dst++;
+ n -= BITS_PER_LONG - dst_idx;
+ }
- if (par->bplcon0 & BPC0_ERSY)
- var->sync |= FB_SYNC_EXT;
- if (par->vmode & FB_VMODE_YWRAP)
- var->vmode |= FB_VMODE_YWRAP;
+ // Main chunk
+ m = n % BITS_PER_LONG;
+ n /= BITS_PER_LONG;
+ while (n >= 4) {
+ d1 = ~*src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ d1 = ~*src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ d1 = ~*src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ d1 = ~*src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ n -= 4;
+ }
+ while (n--) {
+ d1 = ~*src++;
+ *dst++ = d0 << left | d1 >> right;
+ d0 = d1;
+ }
- return 0;
+ // Trailing bits
+ if (last) {
+ if (m <= right) {
+ // Single source word
+ *dst = comp(d0 << left, *dst, last);
+ } else {
+ // 2 source words
+ d1 = ~*src;
+ *dst = comp(d0 << left | d1 >> right,
+ *dst, last);
+ }
+ }
+ }
+ }
}
/*
- * Pan or Wrap the Display
- *
- * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag
- * in `var'.
+ * Unaligned 32-bit pattern fill using 32/64-bit memory accesses
*/
-static void ami_pan_var(struct fb_var_screeninfo *var)
+static void bitfill32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
{
-	struct amifb_par *par = &currentpar;
+ unsigned long val = pat;
+ unsigned long first, last;
- par->xoffset = var->xoffset;
- par->yoffset = var->yoffset;
- if (var->vmode & FB_VMODE_YWRAP)
- par->vmode |= FB_VMODE_YWRAP;
- else
- par->vmode &= ~FB_VMODE_YWRAP;
+ if (!n)
+ return;
- do_vmode_pan = 0;
- ami_update_par();
- do_vmode_pan = 1;
+#if BITS_PER_LONG == 64
+ val |= val << 32;
+#endif
+
+ first = ~0UL >> dst_idx;
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
+
+ if (dst_idx + n <= BITS_PER_LONG) {
+ // Single word
+ if (last)
+ first &= last;
+ *dst = comp(val, *dst, first);
+ } else {
+ // Multiple destination words
+ // Leading bits
+ if (first) {
+ *dst = comp(val, *dst, first);
+ dst++;
+ n -= BITS_PER_LONG - dst_idx;
+ }
+
+ // Main chunk
+ n /= BITS_PER_LONG;
+ while (n >= 8) {
+ *dst++ = val;
+ *dst++ = val;
+ *dst++ = val;
+ *dst++ = val;
+ *dst++ = val;
+ *dst++ = val;
+ *dst++ = val;
+ *dst++ = val;
+ n -= 8;
+ }
+ while (n--)
+ *dst++ = val;
+
+ // Trailing bits
+ if (last)
+ *dst = comp(val, *dst, last);
+ }
}
+
/*
- * Update hardware
+ * Unaligned 32-bit pattern xor using 32/64-bit memory accesses
*/
-static int ami_update_par(void)
+static void bitxor32(unsigned long *dst, int dst_idx, u32 pat, u32 n)
{
-	struct amifb_par *par = &currentpar;
- short clk_shift, vshift, fstrt, fsize, fstop, fconst, shift, move, mod;
+ unsigned long val = pat;
+ unsigned long first, last;
- clk_shift = par->clk_shift;
+ if (!n)
+ return;
- if (!(par->vmode & FB_VMODE_SMOOTH_XPAN))
- par->xoffset = upx(16<<maxfmode, par->xoffset);
+#if BITS_PER_LONG == 64
+ val |= val << 32;
+#endif
- fconst = 16<<maxfmode<<clk_shift;
- vshift = modx(16<<maxfmode, par->xoffset);
- fstrt = par->diwstrt_h - (vshift<<clk_shift) - 4;
- fsize = (par->xres+vshift)<<clk_shift;
- shift = modx(fconst, fstrt);
- move = downx(2<<maxfmode, div8(par->xoffset));
- if (maxfmode + clk_shift > 1) {
- fstrt = downx(fconst, fstrt) - 64;
- fsize = upx(fconst, fsize);
- fstop = fstrt + fsize - fconst;
+ first = ~0UL >> dst_idx;
+ last = ~(~0UL >> ((dst_idx + n) % BITS_PER_LONG));
+
+ if (dst_idx + n <= BITS_PER_LONG) {
+ // Single word
+ if (last)
+ first &= last;
+ *dst = xor(val, *dst, first);
} else {
- mod = fstrt = downx(fconst, fstrt) - fconst;
- fstop = fstrt + upx(fconst, fsize) - 64;
- fsize = up64(fsize);
- fstrt = fstop - fsize + 64;
- if (fstrt < min_fstrt) {
- fstop += min_fstrt - fstrt;
- fstrt = min_fstrt;
+ // Multiple destination words
+ // Leading bits
+ if (first) {
+ *dst = xor(val, *dst, first);
+ dst++;
+ n -= BITS_PER_LONG - dst_idx;
}
- move = move - div8((mod-fstrt)>>clk_shift);
- }
- mod = par->next_line - div8(fsize>>clk_shift);
- par->ddfstrt = fstrt;
- par->ddfstop = fstop;
- par->bplcon1 = hscroll2hw(shift);
- par->bpl2mod = mod;
- if (par->bplcon0 & BPC0_LACE)
- par->bpl2mod += par->next_line;
- if (IS_AGA && (par->fmode & FMODE_BSCAN2))
- par->bpl1mod = -div8(fsize>>clk_shift);
- else
- par->bpl1mod = par->bpl2mod;
- if (par->yoffset) {
- par->bplpt0 = fb_info.fix.smem_start + par->next_line*par->yoffset + move;
- if (par->vmode & FB_VMODE_YWRAP) {
- if (par->yoffset > par->vyres-par->yres) {
- par->bplpt0wrap = fb_info.fix.smem_start + move;
- if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v+par->vyres-par->yoffset))
- par->bplpt0wrap += par->next_line;
- }
+ // Main chunk
+ n /= BITS_PER_LONG;
+ while (n >= 4) {
+ *dst++ ^= val;
+ *dst++ ^= val;
+ *dst++ ^= val;
+ *dst++ ^= val;
+ n -= 4;
}
- } else
- par->bplpt0 = fb_info.fix.smem_start + move;
+ while (n--)
+ *dst++ ^= val;
- if (par->bplcon0 & BPC0_LACE && mod2(par->diwstrt_v))
- par->bplpt0 += par->next_line;
+ // Trailing bits
+ if (last)
+ *dst = xor(val, *dst, last);
+ }
+}
- return 0;
+static inline void fill_one_line(int bpp, unsigned long next_plane,
+ unsigned long *dst, int dst_idx, u32 n,
+ u32 color)
+{
+ while (1) {
+ dst += dst_idx >> SHIFT_PER_LONG;
+ dst_idx &= (BITS_PER_LONG - 1);
+ bitfill32(dst, dst_idx, color & 1 ? ~0 : 0, n);
+ if (!--bpp)
+ break;
+ color >>= 1;
+ dst_idx += next_plane * 8;
+ }
}
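
To make the planar mapping above concrete: each bit of the colour value drives one bitplane, starting with the least significant bit. For example (made-up values):

	/*
	 * fill_one_line(3, next_plane, dst, dst_idx, n, 5):
	 *   colour 5 = 0b101
	 *   plane 0 (bit 0 = 1): n pixels filled with ones
	 *   plane 1 (bit 1 = 0): n pixels filled with zeros
	 *   plane 2 (bit 2 = 1): n pixels filled with ones
	 * each plane starting next_plane bytes (next_plane * 8 bit positions)
	 * after the previous one.
	 */
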
+static inline void xor_one_line(int bpp, unsigned long next_plane,
+ unsigned long *dst, int dst_idx, u32 n,
+ u32 color)
+{
+ while (color) {
+ dst += dst_idx >> SHIFT_PER_LONG;
+ dst_idx &= (BITS_PER_LONG - 1);
+ bitxor32(dst, dst_idx, color & 1 ? ~0 : 0, n);
+ if (!--bpp)
+ break;
+ color >>= 1;
+ dst_idx += next_plane * 8;
+ }
+}
- /*
- * Set a single color register. The values supplied are already
- * rounded down to the hardware's capabilities (according to the
- * entries in the var structure). Return != 0 for invalid regno.
- */
-static int amifb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
- u_int transp, struct fb_info *info)
+static void amifb_fillrect(struct fb_info *info,
+ const struct fb_fillrect *rect)
{
- if (IS_AGA) {
- if (regno > 255)
- return 1;
- } else if (currentpar.bplcon0 & BPC0_SHRES) {
- if (regno > 3)
- return 1;
- } else {
- if (regno > 31)
- return 1;
- }
- red >>= 8;
- green >>= 8;
- blue >>= 8;
- if (!regno) {
- red0 = red;
- green0 = green;
- blue0 = blue;
- }
+ struct amifb_par *par = info->par;
+ int dst_idx, x2, y2;
+ unsigned long *dst;
+ u32 width, height;
+
+ if (!rect->width || !rect->height)
+ return;
/*
- * Update the corresponding Hardware Color Register, unless it's Color
- * Register 0 and the screen is blanked.
- *
- * VBlank is switched off to protect bplcon3 or ecs_palette[] from
- * being changed by ami_do_blank() during the VBlank.
- */
+ * We could use hardware clipping but on many cards you get around
+ * hardware clipping by writing to framebuffer directly.
+	 */
+ x2 = rect->dx + rect->width;
+ y2 = rect->dy + rect->height;
+ x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual;
+ y2 = y2 < info->var.yres_virtual ? y2 : info->var.yres_virtual;
+ width = x2 - rect->dx;
+ height = y2 - rect->dy;
- if (regno || !is_blanked) {
-#if defined(CONFIG_FB_AMIGA_AGA)
- if (IS_AGA) {
- u_short bplcon3 = currentpar.bplcon3;
- VBlankOff();
- custom.bplcon3 = bplcon3 | (regno<<8 & 0xe000);
- custom.color[regno&31] = rgb2hw8_high(red, green, blue);
- custom.bplcon3 = bplcon3 | (regno<<8 & 0xe000) | BPC3_LOCT;
- custom.color[regno&31] = rgb2hw8_low(red, green, blue);
- custom.bplcon3 = bplcon3;
- VBlankOn();
- } else
-#endif
-#if defined(CONFIG_FB_AMIGA_ECS)
- if (currentpar.bplcon0 & BPC0_SHRES) {
- u_short color, mask;
- int i;
+ dst = (unsigned long *)
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
+ dst_idx += rect->dy * par->next_line * 8 + rect->dx;
+ while (height--) {
+ switch (rect->rop) {
+ case ROP_COPY:
+ fill_one_line(info->var.bits_per_pixel,
+ par->next_plane, dst, dst_idx, width,
+ rect->color);
+ break;
- mask = 0x3333;
- color = rgb2hw2(red, green, blue);
- VBlankOff();
- for (i = regno+12; i >= (int)regno; i -= 4)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- mask <<=2; color >>= 2;
- regno = down16(regno)+mul4(mod4(regno));
- for (i = regno+3; i >= (int)regno; i--)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- VBlankOn();
- } else
-#endif
- custom.color[regno] = rgb2hw4(red, green, blue);
+ case ROP_XOR:
+ xor_one_line(info->var.bits_per_pixel, par->next_plane,
+ dst, dst_idx, width, rect->color);
+ break;
+ }
+ dst_idx += par->next_line * 8;
}
- return 0;
}
-static void ami_update_display(void)
+static inline void copy_one_line(int bpp, unsigned long next_plane,
+ unsigned long *dst, int dst_idx,
+ unsigned long *src, int src_idx, u32 n)
{
-	struct amifb_par *par = &currentpar;
-
- custom.bplcon1 = par->bplcon1;
- custom.bpl1mod = par->bpl1mod;
- custom.bpl2mod = par->bpl2mod;
- custom.ddfstrt = ddfstrt2hw(par->ddfstrt);
- custom.ddfstop = ddfstop2hw(par->ddfstop);
+ while (1) {
+ dst += dst_idx >> SHIFT_PER_LONG;
+ dst_idx &= (BITS_PER_LONG - 1);
+ src += src_idx >> SHIFT_PER_LONG;
+ src_idx &= (BITS_PER_LONG - 1);
+ bitcpy(dst, dst_idx, src, src_idx, n);
+ if (!--bpp)
+ break;
+ dst_idx += next_plane * 8;
+ src_idx += next_plane * 8;
+ }
}
- /*
- * Change the video mode (called by VBlank interrupt)
- */
-
-static void ami_init_display(void)
+static inline void copy_one_line_rev(int bpp, unsigned long next_plane,
+ unsigned long *dst, int dst_idx,
+ unsigned long *src, int src_idx, u32 n)
{
-	struct amifb_par *par = &currentpar;
- int i;
-
- custom.bplcon0 = par->bplcon0 & ~BPC0_LACE;
- custom.bplcon2 = (IS_OCS ? 0 : BPC2_KILLEHB) | BPC2_PF2P2 | BPC2_PF1P2;
- if (!IS_OCS) {
- custom.bplcon3 = par->bplcon3;
- if (IS_AGA)
- custom.bplcon4 = BPC4_ESPRM4 | BPC4_OSPRM4;
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- custom.htotal = htotal2hw(par->htotal);
- custom.hbstrt = hbstrt2hw(par->hbstrt);
- custom.hbstop = hbstop2hw(par->hbstop);
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.hcenter = hcenter2hw(par->hcenter);
- custom.vtotal = vtotal2hw(par->vtotal);
- custom.vbstrt = vbstrt2hw(par->vbstrt);
- custom.vbstop = vbstop2hw(par->vbstop);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstop2hw(par->vsstop);
- }
+ while (1) {
+ dst += dst_idx >> SHIFT_PER_LONG;
+ dst_idx &= (BITS_PER_LONG - 1);
+ src += src_idx >> SHIFT_PER_LONG;
+ src_idx &= (BITS_PER_LONG - 1);
+ bitcpy_rev(dst, dst_idx, src, src_idx, n);
+ if (!--bpp)
+ break;
+ dst_idx += next_plane * 8;
+ src_idx += next_plane * 8;
}
- if (!IS_OCS || par->hsstop)
- custom.beamcon0 = par->beamcon0;
- if (IS_AGA)
- custom.fmode = par->fmode;
+}
- /*
- * The minimum period for audio depends on htotal
- */
- amiga_audio_min_period = div16(par->htotal);
+static void amifb_copyarea(struct fb_info *info,
+ const struct fb_copyarea *area)
+{
+ struct amifb_par *par = info->par;
+ int x2, y2;
+ u32 dx, dy, sx, sy, width, height;
+ unsigned long *dst, *src;
+ int dst_idx, src_idx;
+ int rev_copy = 0;
- is_lace = par->bplcon0 & BPC0_LACE ? 1 : 0;
-#if 1
- if (is_lace) {
- i = custom.vposr >> 15;
- } else {
- custom.vposw = custom.vposr | 0x8000;
- i = 1;
- }
-#else
- i = 1;
- custom.vposw = custom.vposr | 0x8000;
-#endif
- custom.cop2lc = (u_short *)ZTWO_PADDR(copdisplay.list[currentcop][i]);
-}
+ /* clip the destination */
+ x2 = area->dx + area->width;
+ y2 = area->dy + area->height;
+ dx = area->dx > 0 ? area->dx : 0;
+ dy = area->dy > 0 ? area->dy : 0;
+ x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual;
+ y2 = y2 < info->var.yres_virtual ? y2 : info->var.yres_virtual;
+ width = x2 - dx;
+ height = y2 - dy;
- /*
- * (Un)Blank the screen (called by VBlank interrupt)
- */
+ if (area->sx + dx < area->dx || area->sy + dy < area->dy)
+ return;
-static void ami_do_blank(void)
-{
-	struct amifb_par *par = &currentpar;
-#if defined(CONFIG_FB_AMIGA_AGA)
- u_short bplcon3 = par->bplcon3;
-#endif
- u_char red, green, blue;
+ /* update sx,sy */
+ sx = area->sx + (dx - area->dx);
+ sy = area->sy + (dy - area->dy);
- if (do_blank > 0) {
- custom.dmacon = DMAF_RASTER | DMAF_SPRITE;
- red = green = blue = 0;
- if (!IS_OCS && do_blank > 1) {
- switch (do_blank) {
- case FB_BLANK_VSYNC_SUSPEND:
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.vsstrt = vsstrt2hw(par->vtotal+4);
- custom.vsstop = vsstop2hw(par->vtotal+4);
- break;
- case FB_BLANK_HSYNC_SUSPEND:
- custom.hsstrt = hsstrt2hw(par->htotal+16);
- custom.hsstop = hsstop2hw(par->htotal+16);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstrt2hw(par->vsstop);
- break;
- case FB_BLANK_POWERDOWN:
- custom.hsstrt = hsstrt2hw(par->htotal+16);
- custom.hsstop = hsstop2hw(par->htotal+16);
- custom.vsstrt = vsstrt2hw(par->vtotal+4);
- custom.vsstop = vsstop2hw(par->vtotal+4);
- break;
- }
- if (!(par->beamcon0 & BMC0_VARBEAMEN)) {
- custom.htotal = htotal2hw(par->htotal);
- custom.vtotal = vtotal2hw(par->vtotal);
- custom.beamcon0 = BMC0_HARDDIS | BMC0_VARBEAMEN |
- BMC0_VARVSYEN | BMC0_VARHSYEN | BMC0_VARCSYEN;
- }
+ /* the source must be completely inside the virtual screen */
+ if (sx + width > info->var.xres_virtual ||
+ sy + height > info->var.yres_virtual)
+ return;
+
+ if (dy > sy || (dy == sy && dx > sx)) {
+ dy += height;
+ sy += height;
+ rev_copy = 1;
+ }
+ dst = (unsigned long *)
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
+ src = dst;
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
+ src_idx = dst_idx;
+ dst_idx += dy * par->next_line * 8 + dx;
+ src_idx += sy * par->next_line * 8 + sx;
+ if (rev_copy) {
+ while (height--) {
+ dst_idx -= par->next_line * 8;
+ src_idx -= par->next_line * 8;
+ copy_one_line_rev(info->var.bits_per_pixel,
+ par->next_plane, dst, dst_idx, src,
+ src_idx, width);
}
} else {
- custom.dmacon = DMAF_SETCLR | DMAF_RASTER | DMAF_SPRITE;
- red = red0;
- green = green0;
- blue = blue0;
- if (!IS_OCS) {
- custom.hsstrt = hsstrt2hw(par->hsstrt);
- custom.hsstop = hsstop2hw(par->hsstop);
- custom.vsstrt = vsstrt2hw(par->vsstrt);
- custom.vsstop = vsstop2hw(par->vsstop);
- custom.beamcon0 = par->beamcon0;
+ while (height--) {
+ copy_one_line(info->var.bits_per_pixel,
+ par->next_plane, dst, dst_idx, src,
+ src_idx, width);
+ dst_idx += par->next_line * 8;
+ src_idx += par->next_line * 8;
}
}
-#if defined(CONFIG_FB_AMIGA_AGA)
- if (IS_AGA) {
- custom.bplcon3 = bplcon3;
- custom.color[0] = rgb2hw8_high(red, green, blue);
- custom.bplcon3 = bplcon3 | BPC3_LOCT;
- custom.color[0] = rgb2hw8_low(red, green, blue);
- custom.bplcon3 = bplcon3;
- } else
-#endif
-#if defined(CONFIG_FB_AMIGA_ECS)
- if (par->bplcon0 & BPC0_SHRES) {
- u_short color, mask;
- int i;
-
- mask = 0x3333;
- color = rgb2hw2(red, green, blue);
- for (i = 12; i >= 0; i -= 4)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- mask <<=2; color >>= 2;
- for (i = 3; i >= 0; i--)
- custom.color[i] = ecs_palette[i] = (ecs_palette[i] & mask) | color;
- } else
-#endif
- custom.color[0] = rgb2hw4(red, green, blue);
- is_blanked = do_blank > 0 ? do_blank : 0;
}
-static int ami_get_fix_cursorinfo(struct fb_fix_cursorinfo *fix)
-{
-	struct amifb_par *par = &currentpar;
-
- fix->crsr_width = fix->crsr_xsize = par->crsr.width;
- fix->crsr_height = fix->crsr_ysize = par->crsr.height;
- fix->crsr_color1 = 17;
- fix->crsr_color2 = 18;
- return 0;
-}
-static int ami_get_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
+static inline void expand_one_line(int bpp, unsigned long next_plane,
+ unsigned long *dst, int dst_idx, u32 n,
+ const u8 *data, u32 bgcolor, u32 fgcolor)
{
-	struct amifb_par *par = &currentpar;
- register u_short *lspr, *sspr;
-#ifdef __mc68000__
- register u_long datawords asm ("d2");
-#else
- register u_long datawords;
-#endif
- register short delta;
- register u_char color;
- short height, width, bits, words;
- int size, alloc;
+ const unsigned long *src;
+ int src_idx;
- size = par->crsr.height*par->crsr.width;
- alloc = var->height*var->width;
- var->height = par->crsr.height;
- var->width = par->crsr.width;
- var->xspot = par->crsr.spot_x;
- var->yspot = par->crsr.spot_y;
- if (size > var->height*var->width)
- return -ENAMETOOLONG;
- if (!access_ok(VERIFY_WRITE, data, size))
- return -EFAULT;
- delta = 1<<par->crsr.fmode;
- lspr = lofsprite + (delta<<1);
- if (par->bplcon0 & BPC0_LACE)
- sspr = shfsprite + (delta<<1);
- else
- sspr = NULL;
- for (height = (short)var->height-1; height >= 0; height--) {
- bits = 0; words = delta; datawords = 0;
- for (width = (short)var->width-1; width >= 0; width--) {
- if (bits == 0) {
- bits = 16; --words;
-#ifdef __mc68000__
- asm volatile ("movew %1@(%3:w:2),%0 ; swap %0 ; movew %1@+,%0"
- : "=d" (datawords), "=a" (lspr) : "1" (lspr), "d" (delta));
-#else
- datawords = (*(lspr+delta) << 16) | (*lspr++);
-#endif
- }
- --bits;
-#ifdef __mc68000__
- asm volatile (
- "clrb %0 ; swap %1 ; lslw #1,%1 ; roxlb #1,%0 ; "
- "swap %1 ; lslw #1,%1 ; roxlb #1,%0"
- : "=d" (color), "=d" (datawords) : "1" (datawords));
-#else
- color = (((datawords >> 30) & 2)
- | ((datawords >> 15) & 1));
- datawords <<= 1;
-#endif
- put_user(color, data++);
- }
- if (bits > 0) {
- --words; ++lspr;
- }
- while (--words >= 0)
- ++lspr;
-#ifdef __mc68000__
- asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
- : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
-#else
- lspr += delta;
- if (sspr) {
- u_short *tmp = lspr;
- lspr = sspr;
- sspr = tmp;
- }
-#endif
+ while (1) {
+ dst += dst_idx >> SHIFT_PER_LONG;
+ dst_idx &= (BITS_PER_LONG - 1);
+ if ((bgcolor ^ fgcolor) & 1) {
+ src = (unsigned long *)
+ ((unsigned long)data & ~(BYTES_PER_LONG - 1));
+ src_idx = ((unsigned long)data & (BYTES_PER_LONG - 1)) * 8;
+ if (fgcolor & 1)
+ bitcpy(dst, dst_idx, src, src_idx, n);
+ else
+ bitcpy_not(dst, dst_idx, src, src_idx, n);
+ /* set or clear */
+ } else
+ bitfill32(dst, dst_idx, fgcolor & 1 ? ~0 : 0, n);
+ if (!--bpp)
+ break;
+ bgcolor >>= 1;
+ fgcolor >>= 1;
+ dst_idx += next_plane * 8;
}
- return 0;
}
-static int ami_set_var_cursorinfo(struct fb_var_cursorinfo *var, u_char __user *data)
+
+static void amifb_imageblit(struct fb_info *info, const struct fb_image *image)
{
-	struct amifb_par *par = &currentpar;
- register u_short *lspr, *sspr;
-#ifdef __mc68000__
- register u_long datawords asm ("d2");
-#else
- register u_long datawords;
-#endif
- register short delta;
- u_short fmode;
- short height, width, bits, words;
+ struct amifb_par *par = info->par;
+ int x2, y2;
+ unsigned long *dst;
+ int dst_idx;
+ const char *src;
+ u32 dx, dy, width, height, pitch;
- if (!var->width)
- return -EINVAL;
- else if (var->width <= 16)
- fmode = TAG_FMODE_1;
- else if (var->width <= 32)
- fmode = TAG_FMODE_2;
- else if (var->width <= 64)
- fmode = TAG_FMODE_4;
- else
- return -EINVAL;
- if (fmode > maxfmode)
- return -EINVAL;
- if (!var->height)
- return -EINVAL;
- if (!access_ok(VERIFY_READ, data, var->width*var->height))
- return -EFAULT;
- delta = 1<<fmode;
- lofsprite = shfsprite = (u_short *)spritememory;
- lspr = lofsprite + (delta<<1);
- if (par->bplcon0 & BPC0_LACE) {
- if (((var->height+4)<<fmode<<2) > SPRITEMEMSIZE)
- return -EINVAL;
- memset(lspr, 0, (var->height+4)<<fmode<<2);
- shfsprite += ((var->height+5)&-2)<<fmode;
- sspr = shfsprite + (delta<<1);
- } else {
- if (((var->height+2)<<fmode<<2) > SPRITEMEMSIZE)
- return -EINVAL;
- memset(lspr, 0, (var->height+2)<<fmode<<2);
- sspr = NULL;
- }
- for (height = (short)var->height-1; height >= 0; height--) {
- bits = 16; words = delta; datawords = 0;
- for (width = (short)var->width-1; width >= 0; width--) {
- unsigned long tdata = 0;
- get_user(tdata, data);
- data++;
-#ifdef __mc68000__
- asm volatile (
- "lsrb #1,%2 ; roxlw #1,%0 ; swap %0 ; "
- "lsrb #1,%2 ; roxlw #1,%0 ; swap %0"
- : "=d" (datawords)
- : "0" (datawords), "d" (tdata));
-#else
- datawords = ((datawords << 1) & 0xfffefffe);
- datawords |= tdata & 1;
- datawords |= (tdata & 2) << (16-1);
-#endif
- if (--bits == 0) {
- bits = 16; --words;
-#ifdef __mc68000__
- asm volatile ("swap %2 ; movew %2,%0@(%3:w:2) ; swap %2 ; movew %2,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta));
-#else
- *(lspr+delta) = (u_short) (datawords >> 16);
- *lspr++ = (u_short) (datawords & 0xffff);
-#endif
- }
- }
- if (bits < 16) {
- --words;
-#ifdef __mc68000__
- asm volatile (
- "swap %2 ; lslw %4,%2 ; movew %2,%0@(%3:w:2) ; "
- "swap %2 ; lslw %4,%2 ; movew %2,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (datawords), "d" (delta), "d" (bits));
-#else
- *(lspr+delta) = (u_short) (datawords >> (16+bits));
- *lspr++ = (u_short) ((datawords & 0x0000ffff) >> bits);
-#endif
- }
- while (--words >= 0) {
-#ifdef __mc68000__
- asm volatile ("moveql #0,%%d0 ; movew %%d0,%0@(%2:w:2) ; movew %%d0,%0@+"
- : "=a" (lspr) : "0" (lspr), "d" (delta) : "d0");
-#else
- *(lspr+delta) = 0;
- *lspr++ = 0;
-#endif
- }
-#ifdef __mc68000__
- asm volatile ("lea %0@(%4:w:2),%0 ; tstl %1 ; jeq 1f ; exg %0,%1\n1:"
- : "=a" (lspr), "=a" (sspr) : "0" (lspr), "1" (sspr), "d" (delta));
-#else
- lspr += delta;
- if (sspr) {
- u_short *tmp = lspr;
- lspr = sspr;
- sspr = tmp;
+ /*
+ * We could use hardware clipping but on many cards you get around
+ * hardware clipping by writing to framebuffer directly like we are
+ * doing here.
+ */
+ x2 = image->dx + image->width;
+ y2 = image->dy + image->height;
+ dx = image->dx;
+ dy = image->dy;
+ x2 = x2 < info->var.xres_virtual ? x2 : info->var.xres_virtual;
+ y2 = y2 < info->var.yres_virtual ? y2 : info->var.yres_virtual;
+ width = x2 - dx;
+ height = y2 - dy;
+
+ if (image->depth == 1) {
+ dst = (unsigned long *)
+ ((unsigned long)info->screen_base & ~(BYTES_PER_LONG - 1));
+ dst_idx = ((unsigned long)info->screen_base & (BYTES_PER_LONG - 1)) * 8;
+ dst_idx += dy * par->next_line * 8 + dx;
+ src = image->data;
+ pitch = (image->width + 7) / 8;
+ while (height--) {
+ expand_one_line(info->var.bits_per_pixel,
+ par->next_plane, dst, dst_idx, width,
+ src, image->bg_color,
+ image->fg_color);
+ dst_idx += par->next_line * 8;
+ src += pitch;
}
-#endif
- }
- par->crsr.height = var->height;
- par->crsr.width = var->width;
- par->crsr.spot_x = var->xspot;
- par->crsr.spot_y = var->yspot;
- par->crsr.fmode = fmode;
- if (IS_AGA) {
- par->fmode &= ~(FMODE_SPAGEM | FMODE_SPR32);
- par->fmode |= sprfetchmode[fmode];
- custom.fmode = par->fmode;
+ } else {
+ c2p_planar(info->screen_base, image->data, dx, dy, width,
+ height, par->next_line, par->next_plane,
+ image->width, info->var.bits_per_pixel);
}
- return 0;
}
-static int ami_get_cursorstate(struct fb_cursorstate *state)
+
+ /*
+ * Amiga Frame Buffer Specific ioctls
+ */
+
+static int amifb_ioctl(struct fb_info *info,
+ unsigned int cmd, unsigned long arg)
{
-	struct amifb_par *par = &currentpar;
+ union {
+ struct fb_fix_cursorinfo fix;
+ struct fb_var_cursorinfo var;
+ struct fb_cursorstate state;
+ } crsr;
+ void __user *argp = (void __user *)arg;
+ int i;
- state->xoffset = par->crsr.crsr_x;
- state->yoffset = par->crsr.crsr_y;
- state->mode = cursormode;
- return 0;
+ switch (cmd) {
+ case FBIOGET_FCURSORINFO:
+ i = ami_get_fix_cursorinfo(&crsr.fix, info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.fix,
+ sizeof(crsr.fix)) ? -EFAULT : 0;
+
+ case FBIOGET_VCURSORINFO:
+ i = ami_get_var_cursorinfo(&crsr.var,
+ ((struct fb_var_cursorinfo __user *)arg)->data,
+ info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.var,
+ sizeof(crsr.var)) ? -EFAULT : 0;
+
+ case FBIOPUT_VCURSORINFO:
+ if (copy_from_user(&crsr.var, argp, sizeof(crsr.var)))
+ return -EFAULT;
+ return ami_set_var_cursorinfo(&crsr.var,
+ ((struct fb_var_cursorinfo __user *)arg)->data,
+ info->par);
+
+ case FBIOGET_CURSORSTATE:
+ i = ami_get_cursorstate(&crsr.state, info->par);
+ if (i)
+ return i;
+ return copy_to_user(argp, &crsr.state,
+ sizeof(crsr.state)) ? -EFAULT : 0;
+
+ case FBIOPUT_CURSORSTATE:
+ if (copy_from_user(&crsr.state, argp, sizeof(crsr.state)))
+ return -EFAULT;
+ return ami_set_cursorstate(&crsr.state, info->par);
+ }
+ return -EINVAL;
}
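
For reference only, a minimal user-space sketch exercising one of these cursor ioctls; it assumes FBIOGET_FCURSORINFO and struct fb_fix_cursorinfo are exposed by <linux/fb.h> on this platform, and error handling is trimmed.

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/fb.h>

	int main(void)
	{
		struct fb_fix_cursorinfo fix;
		int fd = open("/dev/fb0", O_RDWR);

		if (fd < 0 || ioctl(fd, FBIOGET_FCURSORINFO, &fix) < 0)
			return 1;
		printf("hardware cursor: %d x %d pixels\n",
		       fix.crsr_width, fix.crsr_height);
		return 0;
	}
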
-static int ami_set_cursorstate(struct fb_cursorstate *state)
+
+ /*
+ * Flash the cursor (called by VBlank interrupt)
+ */
+
+static int flash_cursor(void)
{
-	struct amifb_par *par = &currentpar;
+ static int cursorcount = 1;
- par->crsr.crsr_x = state->xoffset;
- par->crsr.crsr_y = state->yoffset;
- if ((cursormode = state->mode) == FB_CURSOR_OFF)
- cursorstate = -1;
- do_cursor = 1;
+ if (cursormode == FB_CURSOR_FLASH) {
+ if (!--cursorcount) {
+ cursorstate = -cursorstate;
+ cursorcount = cursorrate;
+ if (!is_blanked)
+ return 1;
+ }
+ }
return 0;
}
-static void ami_set_sprite(void)
+ /*
+ * VBlank Display Interrupt
+ */
+
+static irqreturn_t amifb_interrupt(int irq, void *dev_id)
{
-	struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_short hs, vs, ve;
- u_long pl, ps, pt;
- short mx, my;
+ struct amifb_par *par = dev_id;
- cops = copdisplay.list[currentcop][0];
- copl = copdisplay.list[currentcop][1];
- ps = pl = ZTWO_PADDR(dummysprite);
- mx = par->crsr.crsr_x-par->crsr.spot_x;
- my = par->crsr.crsr_y-par->crsr.spot_y;
- if (!(par->vmode & FB_VMODE_YWRAP)) {
- mx -= par->xoffset;
- my -= par->yoffset;
+ if (do_vmode_pan || do_vmode_full)
+ ami_update_display(par);
+
+ if (do_vmode_full)
+ ami_init_display(par);
+
+ if (do_vmode_pan) {
+ flash_cursor();
+ ami_rebuild_copper(par);
+ do_cursor = do_vmode_pan = 0;
+ } else if (do_cursor) {
+ flash_cursor();
+ ami_set_sprite(par);
+ do_cursor = 0;
+ } else {
+ if (flash_cursor())
+ ami_set_sprite(par);
}
- if (!is_blanked && cursorstate > 0 && par->crsr.height > 0 &&
- mx > -(short)par->crsr.width && mx < par->xres &&
- my > -(short)par->crsr.height && my < par->yres) {
- pl = ZTWO_PADDR(lofsprite);
- hs = par->diwstrt_h + (mx<<par->clk_shift) - 4;
- vs = par->diwstrt_v + (my<<par->line_shift);
- ve = vs + (par->crsr.height<<par->line_shift);
- if (par->bplcon0 & BPC0_LACE) {
- ps = ZTWO_PADDR(shfsprite);
- lofsprite[0] = spr2hw_pos(vs, hs);
- shfsprite[0] = spr2hw_pos(vs+1, hs);
- if (mod2(vs)) {
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
- shfsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs+1, hs, ve+1);
- pt = pl; pl = ps; ps = pt;
- } else {
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve+1);
- shfsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs+1, hs, ve);
- }
- } else {
- lofsprite[0] = spr2hw_pos(vs, hs) | (IS_AGA && (par->fmode & FMODE_BSCAN2) ? 0x80 : 0);
- lofsprite[1<<par->crsr.fmode] = spr2hw_ctl(vs, hs, ve);
- }
+
+ if (do_blank) {
+ ami_do_blank(par);
+ do_blank = 0;
}
- copl[cop_spr0ptrh].w[1] = highw(pl);
- copl[cop_spr0ptrl].w[1] = loww(pl);
- if (par->bplcon0 & BPC0_LACE) {
- cops[cop_spr0ptrh].w[1] = highw(ps);
- cops[cop_spr0ptrl].w[1] = loww(ps);
+
+ if (do_vmode_full) {
+ ami_reinit_copper(par);
+ do_vmode_full = 0;
+ }
+ return IRQ_HANDLED;
+}
+
+
+static struct fb_ops amifb_ops = {
+ .owner = THIS_MODULE,
+ .fb_check_var = amifb_check_var,
+ .fb_set_par = amifb_set_par,
+ .fb_setcolreg = amifb_setcolreg,
+ .fb_blank = amifb_blank,
+ .fb_pan_display = amifb_pan_display,
+ .fb_fillrect = amifb_fillrect,
+ .fb_copyarea = amifb_copyarea,
+ .fb_imageblit = amifb_imageblit,
+ .fb_ioctl = amifb_ioctl,
+};
+
+
+ /*
+ * Allocate, Clear and Align a Block of Chip Memory
+ */
+
+static void *aligned_chipptr;
+
+static inline u_long __init chipalloc(u_long size)
+{
+ aligned_chipptr = amiga_chip_alloc(size, "amifb [RAM]");
+ if (!aligned_chipptr) {
+ pr_err("amifb: No Chip RAM for frame buffer");
+ return 0;
}
+ memset(aligned_chipptr, 0, size);
+ return (u_long)aligned_chipptr;
+}
+
+static inline void chipfree(void)
+{
+ if (aligned_chipptr)
+ amiga_chip_free(aligned_chipptr);
}
/*
- * Initialise the Copper Initialisation List
+ * Initialisation
*/
-static void __init ami_init_copper(void)
+static int __init amifb_probe(struct platform_device *pdev)
{
- copins *cop = copdisplay.init;
- u_long p;
- int i;
+ struct fb_info *info;
+ int tag, i, err = 0;
+ u_long chipptr;
+ u_int defmode;
- if (!IS_OCS) {
- (cop++)->l = CMOVE(BPC0_COLOR | BPC0_SHRES | BPC0_ECSENA, bplcon0);
- (cop++)->l = CMOVE(0x0181, diwstrt);
- (cop++)->l = CMOVE(0x0281, diwstop);
- (cop++)->l = CMOVE(0x0000, diwhigh);
- } else
- (cop++)->l = CMOVE(BPC0_COLOR, bplcon0);
- p = ZTWO_PADDR(dummysprite);
- for (i = 0; i < 8; i++) {
- (cop++)->l = CMOVE(0, spr[i].pos);
- (cop++)->l = CMOVE(highw(p), sprpt[i]);
- (cop++)->l = CMOVE2(loww(p), sprpt[i]);
+#ifndef MODULE
+ char *option = NULL;
+
+ if (fb_get_options("amifb", &option)) {
+ amifb_video_off();
+ return -ENODEV;
}
+ amifb_setup(option);
+#endif
+ custom.dmacon = DMAF_ALL | DMAF_MASTER;
- (cop++)->l = CMOVE(IF_SETCLR | IF_COPER, intreq);
- copdisplay.wait = cop;
- (cop++)->l = CEND;
- (cop++)->l = CMOVE(0, copjmp2);
- cop->l = CEND;
+ info = framebuffer_alloc(sizeof(struct amifb_par), &pdev->dev);
+ if (!info) {
+ dev_err(&pdev->dev, "framebuffer_alloc failed\n");
+ return -ENOMEM;
+ }
- custom.cop1lc = (u_short *)ZTWO_PADDR(copdisplay.init);
- custom.copjmp1 = 0;
-}
+ strcpy(info->fix.id, "Amiga ");
+ info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+ info->fix.accel = FB_ACCEL_AMIGABLITT;
-static void ami_reinit_copper(void)
-{
-	struct amifb_par *par = &currentpar;
+ switch (amiga_chipset) {
+#ifdef CONFIG_FB_AMIGA_OCS
+ case CS_OCS:
+ strcat(info->fix.id, "OCS");
+default_chipset:
+ chipset = TAG_OCS;
+ maxdepth[TAG_SHRES] = 0; /* OCS means no SHRES */
+ maxdepth[TAG_HIRES] = 4;
+ maxdepth[TAG_LORES] = 6;
+ maxfmode = TAG_FMODE_1;
+ defmode = amiga_vblank == 50 ? DEFMODE_PAL : DEFMODE_NTSC;
+ info->fix.smem_len = VIDEOMEMSIZE_OCS;
+ break;
+#endif /* CONFIG_FB_AMIGA_OCS */
- copdisplay.init[cip_bplcon0].w[1] = ~(BPC0_BPU3 | BPC0_BPU2 | BPC0_BPU1 | BPC0_BPU0) & par->bplcon0;
- copdisplay.wait->l = CWAIT(32, par->diwstrt_v-4);
-}
+#ifdef CONFIG_FB_AMIGA_ECS
+ case CS_ECS:
+ strcat(info->fix.id, "ECS");
+ chipset = TAG_ECS;
+ maxdepth[TAG_SHRES] = 2;
+ maxdepth[TAG_HIRES] = 4;
+ maxdepth[TAG_LORES] = 6;
+ maxfmode = TAG_FMODE_1;
+ if (AMIGAHW_PRESENT(AMBER_FF))
+ defmode = amiga_vblank == 50 ? DEFMODE_AMBER_PAL
+ : DEFMODE_AMBER_NTSC;
+ else
+ defmode = amiga_vblank == 50 ? DEFMODE_PAL
+ : DEFMODE_NTSC;
+ if (amiga_chip_avail() - CHIPRAM_SAFETY_LIMIT >
+ VIDEOMEMSIZE_ECS_2M)
+ info->fix.smem_len = VIDEOMEMSIZE_ECS_2M;
+ else
+ info->fix.smem_len = VIDEOMEMSIZE_ECS_1M;
+ break;
+#endif /* CONFIG_FB_AMIGA_ECS */
+
+#ifdef CONFIG_FB_AMIGA_AGA
+ case CS_AGA:
+ strcat(info->fix.id, "AGA");
+ chipset = TAG_AGA;
+ maxdepth[TAG_SHRES] = 8;
+ maxdepth[TAG_HIRES] = 8;
+ maxdepth[TAG_LORES] = 8;
+ maxfmode = TAG_FMODE_4;
+ defmode = DEFMODE_AGA;
+ if (amiga_chip_avail() - CHIPRAM_SAFETY_LIMIT >
+ VIDEOMEMSIZE_AGA_2M)
+ info->fix.smem_len = VIDEOMEMSIZE_AGA_2M;
+ else
+ info->fix.smem_len = VIDEOMEMSIZE_AGA_1M;
+ break;
+#endif /* CONFIG_FB_AMIGA_AGA */
+
+ default:
+#ifdef CONFIG_FB_AMIGA_OCS
+ printk("Unknown graphics chipset, defaulting to OCS\n");
+ strcat(info->fix.id, "Unknown");
+ goto default_chipset;
+#else /* CONFIG_FB_AMIGA_OCS */
+ err = -ENODEV;
+ goto release;
+#endif /* CONFIG_FB_AMIGA_OCS */
+ break;
+ }
/*
- * Build the Copper List
+ * Calculate the Pixel Clock Values for this Machine
*/
-static void ami_build_copper(void)
-{
-	struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_long p;
+ {
+ u_long tmp = DIVUL(200000000000ULL, amiga_eclock);
- currentcop = 1 - currentcop;
+ pixclock[TAG_SHRES] = (tmp + 4) / 8; /* SHRES: 35 ns / 28 MHz */
+ pixclock[TAG_HIRES] = (tmp + 2) / 4; /* HIRES: 70 ns / 14 MHz */
+ pixclock[TAG_LORES] = (tmp + 1) / 2; /* LORES: 140 ns / 7 MHz */
+ }
- copl = copdisplay.list[currentcop][1];
+ /*
+ * Replace the Tag Values with the Real Pixel Clock Values
+ */
- (copl++)->l = CWAIT(0, 10);
- (copl++)->l = CMOVE(par->bplcon0, bplcon0);
- (copl++)->l = CMOVE(0, sprpt[0]);
- (copl++)->l = CMOVE2(0, sprpt[0]);
+ for (i = 0; i < NUM_TOTAL_MODES; i++) {
+ struct fb_videomode *mode = &ami_modedb[i];
+ tag = mode->pixclock;
+ if (tag == TAG_SHRES || tag == TAG_HIRES || tag == TAG_LORES) {
+ mode->pixclock = pixclock[tag];
+ }
+ }
- if (par->bplcon0 & BPC0_LACE) {
- cops = copdisplay.list[currentcop][0];
+ if (amifb_hfmin) {
+ info->monspecs.hfmin = amifb_hfmin;
+ info->monspecs.hfmax = amifb_hfmax;
+ info->monspecs.vfmin = amifb_vfmin;
+ info->monspecs.vfmax = amifb_vfmax;
+ } else {
+ /*
+ * These are for a typical Amiga monitor (e.g. A1960)
+ */
+ info->monspecs.hfmin = 15000;
+ info->monspecs.hfmax = 38000;
+ info->monspecs.vfmin = 49;
+ info->monspecs.vfmax = 90;
+ }
- (cops++)->l = CWAIT(0, 10);
- (cops++)->l = CMOVE(par->bplcon0, bplcon0);
- (cops++)->l = CMOVE(0, sprpt[0]);
- (cops++)->l = CMOVE2(0, sprpt[0]);
+ info->fbops = &amifb_ops;
+ info->flags = FBINFO_DEFAULT;
+ info->device = &pdev->dev;
- (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v+1), diwstrt);
- (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v+1), diwstop);
- (cops++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
- (cops++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
- if (!IS_OCS) {
- (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v+1,
- par->diwstop_h, par->diwstop_v+1), diwhigh);
- (cops++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
- par->diwstop_h, par->diwstop_v), diwhigh);
-#if 0
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt+1), vbstrt);
- (copl++)->l = CMOVE(vbstop2hw(par->vbstop+1), vbstop);
- (cops++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (cops++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
- (cops++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
- }
-#endif
- }
- p = ZTWO_PADDR(copdisplay.list[currentcop][0]);
- (copl++)->l = CMOVE(highw(p), cop2lc);
- (copl++)->l = CMOVE2(loww(p), cop2lc);
- p = ZTWO_PADDR(copdisplay.list[currentcop][1]);
- (cops++)->l = CMOVE(highw(p), cop2lc);
- (cops++)->l = CMOVE2(loww(p), cop2lc);
- copdisplay.rebuild[0] = cops;
- } else {
- (copl++)->l = CMOVE(diwstrt2hw(par->diwstrt_h, par->diwstrt_v), diwstrt);
- (copl++)->l = CMOVE(diwstop2hw(par->diwstop_h, par->diwstop_v), diwstop);
- if (!IS_OCS) {
- (copl++)->l = CMOVE(diwhigh2hw(par->diwstrt_h, par->diwstrt_v,
- par->diwstop_h, par->diwstop_v), diwhigh);
-#if 0
- if (par->beamcon0 & BMC0_VARBEAMEN) {
- (copl++)->l = CMOVE(vtotal2hw(par->vtotal), vtotal);
- (copl++)->l = CMOVE(vbstrt2hw(par->vbstrt), vbstrt);
- (copl++)->l = CMOVE(vbstop2hw(par->vbstop), vbstop);
- }
-#endif
- }
+ if (!fb_find_mode(&info->var, info, mode_option, ami_modedb,
+ NUM_TOTAL_MODES, &ami_modedb[defmode], 4)) {
+ err = -EINVAL;
+ goto release;
}
- copdisplay.rebuild[1] = copl;
- ami_update_par();
- ami_rebuild_copper();
-}
+ fb_videomode_to_modelist(ami_modedb, NUM_TOTAL_MODES,
+ &info->modelist);
+
+ round_down_bpp = 0;
+ chipptr = chipalloc(info->fix.smem_len + SPRITEMEMSIZE +
+ DUMMYSPRITEMEMSIZE + COPINITSIZE +
+ 4 * COPLISTSIZE);
+ if (!chipptr) {
+ err = -ENOMEM;
+ goto release;
+ }
+
+ assignchunk(videomemory, u_long, chipptr, info->fix.smem_len);
+ assignchunk(spritememory, u_long, chipptr, SPRITEMEMSIZE);
+ assignchunk(dummysprite, u_short *, chipptr, DUMMYSPRITEMEMSIZE);
+ assignchunk(copdisplay.init, copins *, chipptr, COPINITSIZE);
+ assignchunk(copdisplay.list[0][0], copins *, chipptr, COPLISTSIZE);
+ assignchunk(copdisplay.list[0][1], copins *, chipptr, COPLISTSIZE);
+ assignchunk(copdisplay.list[1][0], copins *, chipptr, COPLISTSIZE);
+ assignchunk(copdisplay.list[1][1], copins *, chipptr, COPLISTSIZE);
/*
- * Rebuild the Copper List
- *
- * We only change the things that are not static
+ * access the videomem with writethrough cache
*/
+ info->fix.smem_start = (u_long)ZTWO_PADDR(videomemory);
+ videomemory = (u_long)ioremap_writethrough(info->fix.smem_start,
+ info->fix.smem_len);
+ if (!videomemory) {
+ dev_warn(&pdev->dev,
+ "Unable to map videomem cached writethrough\n");
+ info->screen_base = (char *)ZTWO_VADDR(info->fix.smem_start);
+ } else
+ info->screen_base = (char *)videomemory;
-static void ami_rebuild_copper(void)
-{
-	struct amifb_par *par = &currentpar;
- copins *copl, *cops;
- u_short line, h_end1, h_end2;
- short i;
- u_long p;
+ memset(dummysprite, 0, DUMMYSPRITEMEMSIZE);
- if (IS_AGA && maxfmode + par->clk_shift == 0)
- h_end1 = par->diwstrt_h-64;
- else
- h_end1 = par->htotal-32;
- h_end2 = par->ddfstop+64;
+ /*
+ * Make sure the Copper has something to do
+ */
+ ami_init_copper();
- ami_set_sprite();
+ /*
+ * Enable Display DMA
+ */
+ custom.dmacon = DMAF_SETCLR | DMAF_MASTER | DMAF_RASTER | DMAF_COPPER |
+ DMAF_BLITTER | DMAF_SPRITE;
- copl = copdisplay.rebuild[1];
- p = par->bplpt0;
- if (par->vmode & FB_VMODE_YWRAP) {
- if ((par->vyres-par->yoffset) != 1 || !mod2(par->diwstrt_v)) {
- if (par->yoffset > par->vyres-par->yres) {
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (copl++)->l = CMOVE(highw(p), bplpt[i]);
- (copl++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- line = par->diwstrt_v + ((par->vyres-par->yoffset)<<par->line_shift) - 1;
- while (line >= 512) {
- (copl++)->l = CWAIT(h_end1, 510);
- line -= 512;
- }
- if (line >= 510 && IS_AGA && maxfmode + par->clk_shift == 0)
- (copl++)->l = CWAIT(h_end1, line);
- else
- (copl++)->l = CWAIT(h_end2, line);
- p = par->bplpt0wrap;
- }
- } else p = par->bplpt0wrap;
- }
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (copl++)->l = CMOVE(highw(p), bplpt[i]);
- (copl++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- copl->l = CEND;
+ err = request_irq(IRQ_AMIGA_COPPER, amifb_interrupt, 0,
+ "fb vertb handler", info->par);
+ if (err)
+ goto disable_dma;
- if (par->bplcon0 & BPC0_LACE) {
- cops = copdisplay.rebuild[0];
- p = par->bplpt0;
- if (mod2(par->diwstrt_v))
- p -= par->next_line;
- else
- p += par->next_line;
- if (par->vmode & FB_VMODE_YWRAP) {
- if ((par->vyres-par->yoffset) != 1 || mod2(par->diwstrt_v)) {
- if (par->yoffset > par->vyres-par->yres+1) {
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (cops++)->l = CMOVE(highw(p), bplpt[i]);
- (cops++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- line = par->diwstrt_v + ((par->vyres-par->yoffset)<<par->line_shift) - 2;
- while (line >= 512) {
- (cops++)->l = CWAIT(h_end1, 510);
- line -= 512;
- }
- if (line > 510 && IS_AGA && maxfmode + par->clk_shift == 0)
- (cops++)->l = CWAIT(h_end1, line);
- else
- (cops++)->l = CWAIT(h_end2, line);
- p = par->bplpt0wrap;
- if (mod2(par->diwstrt_v+par->vyres-par->yoffset))
- p -= par->next_line;
- else
- p += par->next_line;
- }
- } else p = par->bplpt0wrap - par->next_line;
- }
- for (i = 0; i < (short)par->bpp; i++, p += par->next_plane) {
- (cops++)->l = CMOVE(highw(p), bplpt[i]);
- (cops++)->l = CMOVE2(loww(p), bplpt[i]);
- }
- cops->l = CEND;
- }
+ err = fb_alloc_cmap(&info->cmap, 1 << info->var.bits_per_pixel, 0);
+ if (err)
+ goto free_irq;
+
+ dev_set_drvdata(&pdev->dev, info);
+
+ err = register_framebuffer(info);
+ if (err)
+ goto unset_drvdata;
+
+ printk("fb%d: %s frame buffer device, using %dK of video memory\n",
+ info->node, info->fix.id, info->fix.smem_len>>10);
+
+ return 0;
+
+unset_drvdata:
+ dev_set_drvdata(&pdev->dev, NULL);
+ fb_dealloc_cmap(&info->cmap);
+free_irq:
+ free_irq(IRQ_AMIGA_COPPER, info->par);
+disable_dma:
+ custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ if (videomemory)
+ iounmap((void *)videomemory);
+ chipfree();
+release:
+ framebuffer_release(info);
+ return err;
}
+
static int __exit amifb_remove(struct platform_device *pdev)
{
- unregister_framebuffer(&fb_info);
- amifb_deinit(pdev);
+ struct fb_info *info = dev_get_drvdata(&pdev->dev);
+
+ unregister_framebuffer(info);
+ dev_set_drvdata(&pdev->dev, NULL);
+ fb_dealloc_cmap(&info->cmap);
+ free_irq(IRQ_AMIGA_COPPER, info->par);
+ custom.dmacon = DMAF_ALL | DMAF_MASTER;
+ if (videomemory)
+ iounmap((void *)videomemory);
+ chipfree();
+ framebuffer_release(info);
amifb_video_off();
return 0;
}
brightness = 0;
lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_VAL, brightness);
- lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR,
+ if (contrast_ctr & ATMEL_LCDC_POL_POSITIVE)
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR,
brightness ? contrast_ctr : 0);
+ else
+ lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, contrast_ctr);
bl->props.fb_blank = bl->props.power = sinfo->bl_power = power;
var->lower_margin = min_t(u32, var->lower_margin,
ATMEL_LCDC_VFP);
var->right_margin = min_t(u32, var->right_margin,
- (ATMEL_LCDC_HFP >> ATMEL_LCDC_HFP_OFFSET) + 1);
+ (ATMEL_LCDC_HFP >> ATMEL_LCDC_HFP_OFFSET) + 2);
var->hsync_len = min_t(u32, var->hsync_len,
(ATMEL_LCDC_HPW >> ATMEL_LCDC_HPW_OFFSET) + 1);
var->left_margin = min_t(u32, var->left_margin,
lcdc_writel(sinfo, ATMEL_LCDC_TIM1, value);
/* Horizontal timing */
- value = (info->var.right_margin - 1) << ATMEL_LCDC_HFP_OFFSET;
+ value = (info->var.right_margin - 2) << ATMEL_LCDC_HFP_OFFSET;
value |= (info->var.hsync_len - 1) << ATMEL_LCDC_HPW_OFFSET;
value |= (info->var.left_margin - 1);
dev_dbg(info->device, " * LCDTIM2 = %08lx\n", value);
case FB_VISUAL_PSEUDOCOLOR:
if (regno < 256) {
- val = ((red >> 11) & 0x001f);
- val |= ((green >> 6) & 0x03e0);
- val |= ((blue >> 1) & 0x7c00);
-
- /*
- * TODO: intensity bit. Maybe something like
- * ~(red[10] ^ green[10] ^ blue[10]) & 1
- */
+ if (cpu_is_at91sam9261() || cpu_is_at91sam9263()
+ || cpu_is_at91sam9rl()) {
+ /* old style I+BGR:555 */
+ val = ((red >> 11) & 0x001f);
+ val |= ((green >> 6) & 0x03e0);
+ val |= ((blue >> 1) & 0x7c00);
+
+ /*
+ * TODO: intensity bit. Maybe something like
+ * ~(red[10] ^ green[10] ^ blue[10]) & 1
+ */
+ } else {
+ /* new style BGR:565 / RGB:565 */
+ if (sinfo->lcd_wiring_mode ==
+ ATMEL_LCDC_WIRING_RGB) {
+ val = ((blue >> 11) & 0x001f);
+ val |= ((red >> 0) & 0xf800);
+ } else {
+ val = ((red >> 11) & 0x001f);
+ val |= ((blue >> 0) & 0xf800);
+ }
+
+ val |= ((green >> 5) & 0x07e0);
+ }
lcdc_writel(sinfo, ATMEL_LCDC_LUT(regno), val);
ret = 0;
#endif /* CONFIG_PCI */
#ifdef CONFIG_ZORRO
-static const struct zorro_device_id cirrusfb_zorro_table[] = {
+struct zorrocl {
+ enum cirrus_board type; /* Board type */
+ u32 regoffset; /* Offset of registers in first Zorro device */
+ u32 ramsize; /* Size of video RAM in first Zorro device */
+ /* If zero, use autoprobe on RAM device */
+ u32 ramoffset; /* Offset of video RAM in first Zorro device */
+ zorro_id ramid; /* Zorro ID of RAM device */
+ zorro_id ramid2; /* Zorro ID of optional second RAM device */
+};
+
+static const struct zorrocl zcl_sd64 __devinitconst = {
+ .type = BT_SD64,
+ .ramid = ZORRO_PROD_HELFRICH_SD64_RAM,
+};
+
+static const struct zorrocl zcl_piccolo __devinitconst = {
+ .type = BT_PICCOLO,
+ .ramid = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
+};
+
+static const struct zorrocl zcl_picasso __devinitconst = {
+ .type = BT_PICASSO,
+ .ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
+};
+
+static const struct zorrocl zcl_spectrum __devinitconst = {
+ .type = BT_SPECTRUM,
+ .ramid = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
+};
+
+static const struct zorrocl zcl_picasso4_z3 __devinitconst = {
+ .type = BT_PICASSO4,
+ .regoffset = 0x00600000,
+ .ramsize = 4 * MB_,
+ .ramoffset = 0x01000000, /* 0x02000000 for 64 MiB boards */
+};
+
+static const struct zorrocl zcl_picasso4_z2 __devinitconst = {
+ .type = BT_PICASSO4,
+ .regoffset = 0x10000,
+ .ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM1,
+ .ramid2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM2,
+};
+
+
+static const struct zorro_device_id cirrusfb_zorro_table[] __devinitconst = {
{
- .id = ZORRO_PROD_HELFRICH_SD64_RAM,
- .driver_data = BT_SD64,
+ .id = ZORRO_PROD_HELFRICH_SD64_REG,
+ .driver_data = (unsigned long)&zcl_sd64,
}, {
- .id = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
- .driver_data = BT_PICCOLO,
+ .id = ZORRO_PROD_HELFRICH_PICCOLO_REG,
+ .driver_data = (unsigned long)&zcl_piccolo,
}, {
- .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
- .driver_data = BT_PICASSO,
+ .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
+ .driver_data = (unsigned long)&zcl_picasso,
}, {
- .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
- .driver_data = BT_SPECTRUM,
+ .id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
+ .driver_data = (unsigned long)&zcl_spectrum,
}, {
.id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
- .driver_data = BT_PICASSO4,
+ .driver_data = (unsigned long)&zcl_picasso4_z3,
+ }, {
+ .id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_REG,
+ .driver_data = (unsigned long)&zcl_picasso4_z2,
},
{ 0 }
};
MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table);
-
-static const struct {
- zorro_id id2;
- unsigned long size;
-} cirrusfb_zorro_table2[] = {
- [BT_SD64] = {
- .id2 = ZORRO_PROD_HELFRICH_SD64_REG,
- .size = 0x400000
- },
- [BT_PICCOLO] = {
- .id2 = ZORRO_PROD_HELFRICH_PICCOLO_REG,
- .size = 0x200000
- },
- [BT_PICASSO] = {
- .id2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
- .size = 0x200000
- },
- [BT_SPECTRUM] = {
- .id2 = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
- .size = 0x200000
- },
- [BT_PICASSO4] = {
- .id2 = 0,
- .size = 0x400000
- }
-};
#endif /* CONFIG_ZORRO */
#ifdef CIRRUSFB_DEBUG
struct cirrusfb_info *cinfo = info->par;
struct zorro_dev *zdev = to_zorro_dev(info->device);
- zorro_release_device(zdev);
-
- if (cinfo->btype == BT_PICASSO4) {
- cinfo->regbase -= 0x600000;
- iounmap((void *)cinfo->regbase);
+ if (info->fix.smem_start > 16 * MB_)
iounmap(info->screen_base);
- } else {
- if (zorro_resource_start(zdev) > 0x01000000)
- iounmap(info->screen_base);
- }
+ if (info->fix.mmio_start > 16 * MB_)
+ iounmap(cinfo->regbase);
+
+ zorro_release_device(zdev);
}
#endif /* CONFIG_ZORRO */
static int __devinit cirrusfb_zorro_register(struct zorro_dev *z,
const struct zorro_device_id *ent)
{
- struct cirrusfb_info *cinfo;
struct fb_info *info;
+ int error;
+ const struct zorrocl *zcl;
enum cirrus_board btype;
- struct zorro_dev *z2 = NULL;
- unsigned long board_addr, board_size, size;
- int ret;
-
- btype = ent->driver_data;
- if (cirrusfb_zorro_table2[btype].id2)
- z2 = zorro_find_device(cirrusfb_zorro_table2[btype].id2, NULL);
- size = cirrusfb_zorro_table2[btype].size;
+ unsigned long regbase, ramsize, rambase;
+ struct cirrusfb_info *cinfo;
info = framebuffer_alloc(sizeof(struct cirrusfb_info), &z->dev);
if (!info) {
printk(KERN_ERR "cirrusfb: could not allocate memory\n");
- ret = -ENOMEM;
- goto err_out;
+ return -ENOMEM;
+ }
+
+ zcl = (const struct zorrocl *)ent->driver_data;
+ btype = zcl->type;
+ regbase = zorro_resource_start(z) + zcl->regoffset;
+ ramsize = zcl->ramsize;
+ if (ramsize) {
+ rambase = zorro_resource_start(z) + zcl->ramoffset;
+ if (zorro_resource_len(z) == 64 * MB_) {
+ /* Quirk for 64 MiB Picasso IV */
+ rambase += zcl->ramoffset;
+ }
+ } else {
+ struct zorro_dev *ram = zorro_find_device(zcl->ramid, NULL);
+ if (!ram || !zorro_resource_len(ram)) {
+ dev_err(info->device, "No video RAM found\n");
+ error = -ENODEV;
+ goto err_release_fb;
+ }
+ rambase = zorro_resource_start(ram);
+ ramsize = zorro_resource_len(ram);
+ if (zcl->ramid2 &&
+ (ram = zorro_find_device(zcl->ramid2, NULL))) {
+ if (zorro_resource_start(ram) != rambase + ramsize) {
+ dev_warn(info->device,
+ "Skipping non-contiguous RAM at %pR\n",
+ &ram->resource);
+ } else {
+ ramsize += zorro_resource_len(ram);
+ }
+ }
}
- dev_info(info->device, "%s board detected\n",
- cirrusfb_board_info[btype].name);
-
- cinfo = info->par;
- cinfo->btype = btype;
-
- assert(z);
- assert(btype != BT_NONE);
-
- board_addr = zorro_resource_start(z);
- board_size = zorro_resource_len(z);
- info->screen_size = size;
+ dev_info(info->device,
+ "%s board detected, REG at 0x%lx, %lu MiB RAM at 0x%lx\n",
+ cirrusfb_board_info[btype].name, regbase, ramsize / MB_,
+ rambase);
if (!zorro_request_device(z, "cirrusfb")) {
- dev_err(info->device, "cannot reserve region 0x%lx, abort\n",
- board_addr);
- ret = -EBUSY;
+ dev_err(info->device, "Cannot reserve %pR\n", &z->resource);
+ error = -EBUSY;
goto err_release_fb;
}
- ret = -EIO;
-
- if (btype == BT_PICASSO4) {
- dev_info(info->device, " REG at $%lx\n", board_addr + 0x600000);
-
- /* To be precise, for the P4 this is not the */
- /* begin of the board, but the begin of RAM. */
- /* for P4, map in its address space in 2 chunks (### TEST! ) */
- /* (note the ugly hardcoded 16M number) */
- cinfo->regbase = ioremap(board_addr, 16777216);
- if (!cinfo->regbase)
- goto err_release_region;
-
- dev_dbg(info->device, "Virtual address for board set to: $%p\n",
- cinfo->regbase);
- cinfo->regbase += 0x600000;
- info->fix.mmio_start = board_addr + 0x600000;
-
- info->fix.smem_start = board_addr + 16777216;
- info->screen_base = ioremap(info->fix.smem_start, 16777216);
- if (!info->screen_base)
- goto err_unmap_regbase;
- } else {
- dev_info(info->device, " REG at $%lx\n",
- (unsigned long) z2->resource.start);
-
- info->fix.smem_start = board_addr;
- if (board_addr > 0x01000000)
- info->screen_base = ioremap(board_addr, board_size);
- else
- info->screen_base = (caddr_t) ZTWO_VADDR(board_addr);
- if (!info->screen_base)
- goto err_release_region;
+ cinfo = info->par;
+ cinfo->btype = btype;
- /* set address for REG area of board */
- cinfo->regbase = (caddr_t) ZTWO_VADDR(z2->resource.start);
- info->fix.mmio_start = z2->resource.start;
+ info->fix.mmio_start = regbase;
+ cinfo->regbase = regbase > 16 * MB_ ? ioremap(regbase, 64 * 1024)
+ : (caddr_t)ZTWO_VADDR(regbase);
+ if (!cinfo->regbase) {
+ dev_err(info->device, "Cannot map registers\n");
+ error = -EIO;
+ goto err_release_dev;
+ }
- dev_dbg(info->device, "Virtual address for board set to: $%p\n",
- cinfo->regbase);
+ info->fix.smem_start = rambase;
+ info->screen_size = ramsize;
+ info->screen_base = rambase > 16 * MB_ ? ioremap(rambase, ramsize)
+ : (caddr_t)ZTWO_VADDR(rambase);
+ if (!info->screen_base) {
+ dev_err(info->device, "Cannot map video RAM\n");
+ error = -EIO;
+ goto err_unmap_reg;
}
+
cinfo->unmap = cirrusfb_zorro_unmap;
dev_info(info->device,
- "Cirrus Logic chipset on Zorro bus, RAM (%lu MB) at $%lx\n",
- board_size / MB_, board_addr);
-
- zorro_set_drvdata(z, info);
+ "Cirrus Logic chipset on Zorro bus, RAM (%lu MiB) at 0x%lx\n",
+ ramsize / MB_, rambase);
/* MCLK select etc. */
if (cirrusfb_board_info[btype].init_sr1f)
vga_wseq(cinfo->regbase, CL_SEQR1F,
cirrusfb_board_info[btype].sr1f);
- ret = cirrusfb_register(info);
- if (!ret)
- return 0;
+ error = cirrusfb_register(info);
+ if (error) {
+ dev_err(info->device, "Failed to register device, error %d\n",
+ error);
+ goto err_unmap_ram;
+ }
- if (btype == BT_PICASSO4 || board_addr > 0x01000000)
+ zorro_set_drvdata(z, info);
+ return 0;
+
+err_unmap_ram:
+ if (rambase > 16 * MB_)
iounmap(info->screen_base);
-err_unmap_regbase:
- if (btype == BT_PICASSO4)
- iounmap(cinfo->regbase - 0x600000);
-err_release_region:
- release_region(board_addr, board_size);
+err_unmap_reg:
+ if (regbase > 16 * MB_)
+ iounmap(cinfo->regbase);
+err_release_dev:
+ zorro_release_device(z);
err_release_fb:
framebuffer_release(info);
-err_out:
- return ret;
+ return error;
}
void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
struct fb_info *info = zorro_get_drvdata(z);
cirrusfb_cleanup(info);
+ zorro_set_drvdata(z, NULL);
}
static struct zorro_driver cirrusfb_zorro_driver = {
/* Try to pick a video mode out of NVRAM if we have one. */
#ifdef CONFIG_NVRAM
- if (default_cmode == CMODE_NVRAM){
+ if (default_cmode == CMODE_NVRAM) {
cmode = nvram_read_byte(NV_CMODE);
if(cmode < CMODE_8 || cmode > CMODE_32)
cmode = CMODE_8;
+++ /dev/null
-#
-# Display drivers configuration
-#
-
-menu "Display device support"
-
-config DISPLAY_SUPPORT
- tristate "Display panel/monitor support"
- ---help---
- This framework adds support for low-level control of a display.
- This includes support for power.
-
- Enable this to be able to choose the drivers for controlling the
- physical display panel/monitor on some platforms. This not only
- covers LCD displays for PDAs but also other types of displays
- such as CRT, TVout etc.
-
- To have support for your specific display panel you will have to
- select the proper drivers which depend on this option.
-
-comment "Display hardware drivers"
- depends on DISPLAY_SUPPORT
-
-endmenu
+++ /dev/null
-# Display drivers
-
-display-objs := display-sysfs.o
-
-obj-$(CONFIG_DISPLAY_SUPPORT) += display.o
-
+++ /dev/null
-/*
- * display-sysfs.c - Display output driver sysfs interface
- *
- * Copyright (C) 2007 James Simmons <jsimmons@infradead.org>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-#include <linux/module.h>
-#include <linux/display.h>
-#include <linux/ctype.h>
-#include <linux/idr.h>
-#include <linux/err.h>
-#include <linux/kdev_t.h>
-#include <linux/slab.h>
-
-static ssize_t display_show_name(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", dsp->name);
-}
-
-static ssize_t display_show_type(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- return snprintf(buf, PAGE_SIZE, "%s\n", dsp->type);
-}
-
-static ssize_t display_show_contrast(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t rc = -ENXIO;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver) && dsp->driver->get_contrast)
- rc = sprintf(buf, "%d\n", dsp->driver->get_contrast(dsp));
- mutex_unlock(&dsp->lock);
- return rc;
-}
-
-static ssize_t display_store_contrast(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t ret = -EINVAL, size;
- int contrast;
- char *endp;
-
- contrast = simple_strtoul(buf, &endp, 0);
- size = endp - buf;
-
- if (isspace(*endp))
- size++;
-
- if (size != count)
- return ret;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver && dsp->driver->set_contrast)) {
- pr_debug("display: set contrast to %d\n", contrast);
- dsp->driver->set_contrast(dsp, contrast);
- ret = count;
- }
- mutex_unlock(&dsp->lock);
- return ret;
-}
-
-static ssize_t display_show_max_contrast(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
- ssize_t rc = -ENXIO;
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver))
- rc = sprintf(buf, "%d\n", dsp->driver->max_contrast);
- mutex_unlock(&dsp->lock);
- return rc;
-}
-
-static struct device_attribute display_attrs[] = {
- __ATTR(name, S_IRUGO, display_show_name, NULL),
- __ATTR(type, S_IRUGO, display_show_type, NULL),
- __ATTR(contrast, S_IRUGO | S_IWUSR, display_show_contrast, display_store_contrast),
- __ATTR(max_contrast, S_IRUGO, display_show_max_contrast, NULL),
-};
-
-static int display_suspend(struct device *dev, pm_message_t state)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver->suspend))
- dsp->driver->suspend(dsp, state);
- mutex_unlock(&dsp->lock);
- return 0;
-};
-
-static int display_resume(struct device *dev)
-{
- struct display_device *dsp = dev_get_drvdata(dev);
-
- mutex_lock(&dsp->lock);
- if (likely(dsp->driver->resume))
- dsp->driver->resume(dsp);
- mutex_unlock(&dsp->lock);
- return 0;
-};
-
-static struct mutex allocated_dsp_lock;
-static DEFINE_IDR(allocated_dsp);
-static struct class *display_class;
-
-struct display_device *display_device_register(struct display_driver *driver,
- struct device *parent, void *devdata)
-{
- struct display_device *new_dev = NULL;
- int ret = -EINVAL;
-
- if (unlikely(!driver))
- return ERR_PTR(ret);
-
- mutex_lock(&allocated_dsp_lock);
- ret = idr_pre_get(&allocated_dsp, GFP_KERNEL);
- mutex_unlock(&allocated_dsp_lock);
- if (!ret)
- return ERR_PTR(ret);
-
- new_dev = kzalloc(sizeof(struct display_device), GFP_KERNEL);
- if (likely(new_dev) && unlikely(driver->probe(new_dev, devdata))) {
- // Reserve the index for this display
- mutex_lock(&allocated_dsp_lock);
- ret = idr_get_new(&allocated_dsp, new_dev, &new_dev->idx);
- mutex_unlock(&allocated_dsp_lock);
-
- if (!ret) {
- new_dev->dev = device_create(display_class, parent,
- MKDEV(0, 0), new_dev,
- "display%d", new_dev->idx);
- if (!IS_ERR(new_dev->dev)) {
- new_dev->parent = parent;
- new_dev->driver = driver;
- mutex_init(&new_dev->lock);
- return new_dev;
- }
- mutex_lock(&allocated_dsp_lock);
- idr_remove(&allocated_dsp, new_dev->idx);
- mutex_unlock(&allocated_dsp_lock);
- ret = -EINVAL;
- }
- }
- kfree(new_dev);
- return ERR_PTR(ret);
-}
-EXPORT_SYMBOL(display_device_register);
-
-void display_device_unregister(struct display_device *ddev)
-{
- if (!ddev)
- return;
- // Free device
- mutex_lock(&ddev->lock);
- device_unregister(ddev->dev);
- mutex_unlock(&ddev->lock);
- // Mark device index as available
- mutex_lock(&allocated_dsp_lock);
- idr_remove(&allocated_dsp, ddev->idx);
- mutex_unlock(&allocated_dsp_lock);
- kfree(ddev);
-}
-EXPORT_SYMBOL(display_device_unregister);
-
-static int __init display_class_init(void)
-{
- display_class = class_create(THIS_MODULE, "display");
- if (IS_ERR(display_class)) {
- printk(KERN_ERR "Failed to create display class\n");
- display_class = NULL;
- return -EINVAL;
- }
- display_class->dev_attrs = display_attrs;
- display_class->suspend = display_suspend;
- display_class->resume = display_resume;
- mutex_init(&allocated_dsp_lock);
- return 0;
-}
-
-static void __exit display_class_exit(void)
-{
- class_destroy(display_class);
-}
-
-module_init(display_class_init);
-module_exit(display_class_exit);
-
-MODULE_DESCRIPTION("Display Hardware handling");
-MODULE_AUTHOR("James Simmons <jsimmons@infradead.org>");
-MODULE_LICENSE("GPL");
-
memcmp(&info->var, var, sizeof(struct fb_var_screeninfo))) {
u32 activate = var->activate;
+ /* When using FOURCC mode, make sure the red, green, blue and
+ * transp fields are set to 0.
+ */
+ if ((info->fix.capabilities & FB_CAP_FOURCC) &&
+ var->grayscale > 1) {
+ if (var->red.offset || var->green.offset ||
+ var->blue.offset || var->transp.offset ||
+ var->red.length || var->green.length ||
+ var->blue.length || var->transp.length ||
+ var->red.msb_right || var->green.msb_right ||
+ var->blue.msb_right || var->transp.msb_right)
+ return -EINVAL;
+ }
+
if (!info->fbops->fb_check_var) {
*var = info->var;
goto done;
#include <linux/fsl-diu-fb.h>
#include "edid.h"
-#define FSL_AOI_NUM 6 /* 5 AOIs and one dummy AOI */
- /* 1 for plane 0, 2 for plane 1&2 each */
+#define NUM_AOIS 5 /* 1 for plane 0, 2 for planes 1 & 2 each */
/* HW cursor parameters */
#define MAX_CURS 32
#define INT_PARERR 0x08 /* Display parameters error interrupt */
#define INT_LS_BF_VS 0x10 /* Lines before vsync. interrupt */
-struct diu_addr {
- void *vaddr; /* Virtual address */
- dma_addr_t paddr; /* Physical address */
- __u32 offset;
-};
-
/*
* List of supported video modes
*
static DEFINE_SPINLOCK(diu_lock);
-struct fsl_diu_data {
- struct fb_info *fsl_diu_info[FSL_AOI_NUM - 1];
- /*FSL_AOI_NUM has one dummy AOI */
- struct device_attribute dev_attr;
- struct diu_ad *dummy_ad;
- void *dummy_aoi_virt;
- unsigned int irq;
- int fb_enabled;
- enum fsl_diu_monitor_port monitor_port;
- struct diu __iomem *diu_reg;
- spinlock_t reg_lock;
- struct diu_addr ad;
- struct diu_addr gamma;
- struct diu_addr pallete;
- struct diu_addr cursor;
-};
-
enum mfb_index {
PLANE0 = 0, /* Plane 0, only one AOI that fills the screen */
PLANE1_AOI0, /* Plane 1, first AOI */
u8 *edid_data;
};
+/**
+ * struct fsl_diu_data - per-DIU data structure
+ * @dma_addr: DMA address of this structure
+ * @fsl_diu_info: fb_info objects, one per AOI
+ * @dev_attr: sysfs structure
+ * @irq: IRQ
+ * @monitor_port: the monitor port this DIU is connected to
+ * @diu_reg: pointer to the DIU hardware registers
+ * @reg_lock: spinlock for register access
+ * @dummy_aoi: video buffer for the 4x4 32-bit dummy AOI
+ * @dummy_ad: DIU Area Descriptor for the dummy AOI
+ * @ad[]: Area Descriptors for each real AOI
+ * @gamma: gamma color table
+ * @cursor: hardware cursor data
+ *
+ * This data structure must be allocated with 32-byte alignment, so that the
+ * internal fields can be aligned properly.
+ */
+struct fsl_diu_data {
+ dma_addr_t dma_addr;
+ struct fb_info fsl_diu_info[NUM_AOIS];
+ struct mfb_info mfb[NUM_AOIS];
+ struct device_attribute dev_attr;
+ unsigned int irq;
+ enum fsl_diu_monitor_port monitor_port;
+ struct diu __iomem *diu_reg;
+ spinlock_t reg_lock;
+ u8 dummy_aoi[4 * 4 * 4];
+ struct diu_ad dummy_ad __aligned(8);
+ struct diu_ad ad[NUM_AOIS] __aligned(8);
+ u8 gamma[256 * 3] __aligned(32);
+ u8 cursor[MAX_CURS * MAX_CURS * 2] __aligned(32);
+} __aligned(32);
+
+/* Determine the DMA address of a member of the fsl_diu_data structure */
+#define DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct fsl_diu_data, f))
static struct mfb_info mfb_template[] = {
{
return diu_ops.valid_monitor_port(port);
}
-/**
- * fsl_diu_alloc - allocate memory for the DIU
- * @size: number of bytes to allocate
- * @param: returned physical address of memory
- *
- * This function allocates a physically-contiguous block of memory.
- */
-static void *fsl_diu_alloc(size_t size, phys_addr_t *phys)
-{
- void *virt;
-
- virt = alloc_pages_exact(size, GFP_DMA | __GFP_ZERO);
- if (virt)
- *phys = virt_to_phys(virt);
-
- return virt;
-}
-
-/**
- * fsl_diu_free - release DIU memory
- * @virt: pointer returned by fsl_diu_alloc()
- * @size: number of bytes allocated by fsl_diu_alloc()
- *
- * This function releases memory allocated by fsl_diu_alloc().
- */
-static void fsl_diu_free(void *virt, size_t size)
-{
- if (virt && size)
- free_pages_exact(virt, size);
-}
-
/*
* Workaround for failed writing desc register of planes.
* Needed with MPC5121 DIU rev 2.0 silicon.
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
struct diu_ad *ad = mfbi->ad;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
switch (mfbi->index) {
case PLANE0:
wr_reg_wa(&hw->desc[0], ad->paddr);
break;
case PLANE1_AOI0:
- cmfbi = machine_data->fsl_diu_info[2]->par;
+ cmfbi = &data->mfb[2];
if (hw->desc[1] != ad->paddr) { /* AOI0 closed */
if (cmfbi->count > 0) /* AOI1 open */
ad->next_ad =
}
break;
case PLANE2_AOI0:
- cmfbi = machine_data->fsl_diu_info[4]->par;
+ cmfbi = &data->mfb[4];
if (hw->desc[2] != ad->paddr) { /* AOI0 closed */
if (cmfbi->count > 0) /* AOI1 open */
ad->next_ad =
}
break;
case PLANE1_AOI1:
- pmfbi = machine_data->fsl_diu_info[1]->par;
+ pmfbi = &data->mfb[1];
ad->next_ad = 0;
- if (hw->desc[1] == machine_data->dummy_ad->paddr)
+ if (hw->desc[1] == data->dummy_ad.paddr)
wr_reg_wa(&hw->desc[1], ad->paddr);
else /* AOI0 open */
pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
break;
case PLANE2_AOI1:
- pmfbi = machine_data->fsl_diu_info[3]->par;
+ pmfbi = &data->mfb[3];
ad->next_ad = 0;
- if (hw->desc[2] == machine_data->dummy_ad->paddr)
+ if (hw->desc[2] == data->dummy_ad.paddr)
wr_reg_wa(&hw->desc[2], ad->paddr);
else /* AOI0 was open */
pmfbi->ad->next_ad = cpu_to_le32(ad->paddr);
{
struct mfb_info *pmfbi, *cmfbi, *mfbi = info->par;
struct diu_ad *ad = mfbi->ad;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
switch (mfbi->index) {
case PLANE0:
- if (hw->desc[0] != machine_data->dummy_ad->paddr)
- wr_reg_wa(&hw->desc[0], machine_data->dummy_ad->paddr);
+ if (hw->desc[0] != data->dummy_ad.paddr)
+ wr_reg_wa(&hw->desc[0], data->dummy_ad.paddr);
break;
case PLANE1_AOI0:
- cmfbi = machine_data->fsl_diu_info[2]->par;
+ cmfbi = &data->mfb[2];
if (cmfbi->count > 0) /* AOI1 is open */
wr_reg_wa(&hw->desc[1], cmfbi->ad->paddr);
/* move AOI1 to the first */
else /* AOI1 was closed */
- wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
/* close AOI 0 */
break;
case PLANE2_AOI0:
- cmfbi = machine_data->fsl_diu_info[4]->par;
+ cmfbi = &data->mfb[4];
if (cmfbi->count > 0) /* AOI1 is open */
wr_reg_wa(&hw->desc[2], cmfbi->ad->paddr);
/* move AOI1 to the first */
else /* AOI1 was closed */
- wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
/* close AOI 0 */
break;
case PLANE1_AOI1:
- pmfbi = machine_data->fsl_diu_info[1]->par;
+ pmfbi = &data->mfb[1];
if (hw->desc[1] != ad->paddr) {
/* AOI1 is not the first in the chain */
if (pmfbi->count > 0)
/* AOI0 is open, must be the first */
pmfbi->ad->next_ad = 0;
} else /* AOI1 is the first in the chain */
- wr_reg_wa(&hw->desc[1], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[1], data->dummy_ad.paddr);
/* close AOI 1 */
break;
case PLANE2_AOI1:
- pmfbi = machine_data->fsl_diu_info[3]->par;
+ pmfbi = &data->mfb[3];
if (hw->desc[2] != ad->paddr) {
/* AOI1 is not the first in the chain */
if (pmfbi->count > 0)
/* AOI0 is open, must be the first */
pmfbi->ad->next_ad = 0;
} else /* AOI1 is the first in the chain */
- wr_reg_wa(&hw->desc[2], machine_data->dummy_ad->paddr);
+ wr_reg_wa(&hw->desc[2], data->dummy_ad.paddr);
/* close AOI 1 */
break;
}
static void enable_lcdc(struct fb_info *info)
{
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
- if (!machine_data->fb_enabled) {
- out_be32(&hw->diu_mode, MFB_MODE1);
- machine_data->fb_enabled++;
- }
+ out_be32(&hw->diu_mode, MFB_MODE1);
}
static void disable_lcdc(struct fb_info *info)
{
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct fsl_diu_data *data = mfbi->parent;
+ struct diu __iomem *hw = data->diu_reg;
- if (machine_data->fb_enabled) {
- out_be32(&hw->diu_mode, 0);
- machine_data->fb_enabled = 0;
- }
+ out_be32(&hw->diu_mode, 0);
}
static void adjust_aoi_size_position(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct mfb_info *lower_aoi_mfbi, *upper_aoi_mfbi, *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
int available_height, upper_aoi_bottom;
enum mfb_index index = mfbi->index;
int lower_aoi_is_open, upper_aoi_is_open;
__u32 base_plane_width, base_plane_height, upper_aoi_height;
- base_plane_width = machine_data->fsl_diu_info[0]->var.xres;
- base_plane_height = machine_data->fsl_diu_info[0]->var.yres;
+ base_plane_width = data->fsl_diu_info[0].var.xres;
+ base_plane_height = data->fsl_diu_info[0].var.yres;
if (mfbi->x_aoi_d < 0)
mfbi->x_aoi_d = 0;
break;
case PLANE1_AOI0:
case PLANE2_AOI0:
- lower_aoi_mfbi = machine_data->fsl_diu_info[index+1]->par;
+ lower_aoi_mfbi = data->fsl_diu_info[index+1].par;
lower_aoi_is_open = lower_aoi_mfbi->count > 0 ? 1 : 0;
if (var->xres > base_plane_width)
var->xres = base_plane_width;
break;
case PLANE1_AOI1:
case PLANE2_AOI1:
- upper_aoi_mfbi = machine_data->fsl_diu_info[index-1]->par;
- upper_aoi_height =
- machine_data->fsl_diu_info[index-1]->var.yres;
+ upper_aoi_mfbi = data->fsl_diu_info[index-1].par;
+ upper_aoi_height = data->fsl_diu_info[index-1].var.yres;
upper_aoi_bottom = upper_aoi_mfbi->y_aoi_d + upper_aoi_height;
upper_aoi_is_open = upper_aoi_mfbi->count > 0 ? 1 : 0;
if (var->xres > base_plane_width)
{
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
struct diu __iomem *hw;
int i, j;
- char __iomem *cursor_base, *gamma_table_base;
+ u8 *gamma_table_base;
u32 temp;
- hw = machine_data->diu_reg;
+ hw = data->diu_reg;
+
+ diu_ops.set_monitor_port(data->monitor_port);
+ gamma_table_base = data->gamma;
- diu_ops.set_monitor_port(machine_data->monitor_port);
- gamma_table_base = machine_data->gamma.vaddr;
- cursor_base = machine_data->cursor.vaddr;
/* Prep for DIU init - gamma table, cursor table */
for (i = 0; i <= 2; i++)
for (j = 0; j <= 255; j++)
*gamma_table_base++ = j;
- diu_ops.set_gamma_table(machine_data->monitor_port,
- machine_data->gamma.vaddr);
+ if (diu_ops.set_gamma_table)
+ diu_ops.set_gamma_table(data->monitor_port, data->gamma);
disable_lcdc(info);
/* Program DIU registers */
- out_be32(&hw->gamma, machine_data->gamma.paddr);
- out_be32(&hw->cursor, machine_data->cursor.paddr);
+ out_be32(&hw->gamma, DMA_ADDR(data, gamma));
+ out_be32(&hw->cursor, DMA_ADDR(data, cursor));
out_be32(&hw->bgnd, 0x007F7F7F); /* BGND */
out_be32(&hw->bgnd_wb, 0); /* BGND_WB */
static int map_video_memory(struct fb_info *info)
{
- phys_addr_t phys;
u32 smem_len = info->fix.line_length * info->var.yres_virtual;
+ void *p;
- info->screen_base = fsl_diu_alloc(smem_len, &phys);
- if (info->screen_base == NULL) {
+ p = alloc_pages_exact(smem_len, GFP_DMA | __GFP_ZERO);
+ if (!p) {
dev_err(info->dev, "unable to allocate fb memory\n");
return -ENOMEM;
}
mutex_lock(&info->mm_lock);
- info->fix.smem_start = (unsigned long) phys;
+ info->screen_base = p;
+ info->fix.smem_start = virt_to_phys(info->screen_base);
info->fix.smem_len = smem_len;
mutex_unlock(&info->mm_lock);
info->screen_size = info->fix.smem_len;
static void unmap_video_memory(struct fb_info *info)
{
- fsl_diu_free(info->screen_base, info->fix.smem_len);
+ void *p = info->screen_base;
+ size_t l = info->fix.smem_len;
+
mutex_lock(&info->mm_lock);
info->screen_base = NULL;
info->fix.smem_start = 0;
info->fix.smem_len = 0;
mutex_unlock(&info->mm_lock);
+
+ if (p)
+ free_pages_exact(p, l);
}
/*
return 0;
}
+/**
+ * fsl_diu_get_pixel_format: return the pixel format for a given color depth
+ *
+ * The pixel format is a 32-bit value that determines which bits in each
+ * pixel are to be used for each color. This is the default function used
+ * if the platform does not define its own version.
+ */
+static u32 fsl_diu_get_pixel_format(unsigned int bits_per_pixel)
+{
+#define PF_BYTE_F 0x10000000
+#define PF_ALPHA_C_MASK 0x0E000000
+#define PF_ALPHA_C_SHIFT 25
+#define PF_BLUE_C_MASK 0x01800000
+#define PF_BLUE_C_SHIFT 23
+#define PF_GREEN_C_MASK 0x00600000
+#define PF_GREEN_C_SHIFT 21
+#define PF_RED_C_MASK 0x00180000
+#define PF_RED_C_SHIFT 19
+#define PF_PALETTE 0x00040000
+#define PF_PIXEL_S_MASK 0x00030000
+#define PF_PIXEL_S_SHIFT 16
+#define PF_COMP_3_MASK 0x0000F000
+#define PF_COMP_3_SHIFT 12
+#define PF_COMP_2_MASK 0x00000F00
+#define PF_COMP_2_SHIFT 8
+#define PF_COMP_1_MASK 0x000000F0
+#define PF_COMP_1_SHIFT 4
+#define PF_COMP_0_MASK 0x0000000F
+#define PF_COMP_0_SHIFT 0
+
+#define MAKE_PF(alpha, red, blue, green, size, c0, c1, c2, c3) \
+ cpu_to_le32(PF_BYTE_F | (alpha << PF_ALPHA_C_SHIFT) | \
+ (blue << PF_BLUE_C_SHIFT) | (green << PF_GREEN_C_SHIFT) | \
+ (red << PF_RED_C_SHIFT) | (c3 << PF_COMP_3_SHIFT) | \
+ (c2 << PF_COMP_2_SHIFT) | (c1 << PF_COMP_1_SHIFT) | \
+ (c0 << PF_COMP_0_SHIFT) | (size << PF_PIXEL_S_SHIFT))
+
+ switch (bits_per_pixel) {
+ case 32:
+ /* 0x88883316 */
+ return MAKE_PF(3, 2, 0, 1, 3, 8, 8, 8, 8);
+ case 24:
+ /* 0x88082219 */
+ return MAKE_PF(4, 0, 1, 2, 2, 0, 8, 8, 8);
+ case 16:
+ /* 0x65053118 */
+ return MAKE_PF(4, 2, 1, 0, 1, 5, 6, 5, 0);
+ default:
+ pr_err("fsl-diu: unsupported color depth %u\n", bits_per_pixel);
+ return 0;
+ }
+}
+
/*
* Using the fb_var_screeninfo in fb_info we set the resolution of this
* particular framebuffer. This function alters the fb_fix_screeninfo stored
unsigned long len;
struct fb_var_screeninfo *var = &info->var;
struct mfb_info *mfbi = info->par;
- struct fsl_diu_data *machine_data = mfbi->parent;
+ struct fsl_diu_data *data = mfbi->parent;
struct diu_ad *ad = mfbi->ad;
struct diu __iomem *hw;
- hw = machine_data->diu_reg;
+ hw = data->diu_reg;
set_fix(info);
mfbi->cursor_reset = 1;
}
}
- ad->pix_fmt = diu_ops.get_pixel_format(machine_data->monitor_port,
- var->bits_per_pixel);
+ if (diu_ops.get_pixel_format)
+ ad->pix_fmt = diu_ops.get_pixel_format(data->monitor_port,
+ var->bits_per_pixel);
+ else
+ ad->pix_fmt = fsl_diu_get_pixel_format(var->bits_per_pixel);
+
ad->addr = cpu_to_le32(info->fix.smem_start);
ad->src_size_g_alpha = cpu_to_le32((var->yres_virtual << 12) |
var->xres_virtual) | mfbi->g_alpha;
.fb_release = fsl_diu_release,
};
-static int init_fbinfo(struct fb_info *info)
-{
- struct mfb_info *mfbi = info->par;
-
- info->device = NULL;
- info->var.activate = FB_ACTIVATE_NOW;
- info->fbops = &fsl_diu_ops;
- info->flags = FBINFO_FLAG_DEFAULT;
- info->pseudo_palette = &mfbi->pseudo_palette;
-
- /* Allocate colormap */
- fb_alloc_cmap(&info->cmap, 16, 0);
- return 0;
-}
-
static int __devinit install_fb(struct fb_info *info)
{
int rc;
unsigned int dbsize = ARRAY_SIZE(fsl_diu_mode_db);
int has_default_mode = 1;
- if (init_fbinfo(info))
- return -EINVAL;
+ info->var.activate = FB_ACTIVATE_NOW;
+ info->fbops = &fsl_diu_ops;
+ info->flags = FBINFO_DEFAULT | FBINFO_VIRTFB | FBINFO_PARTIAL_PAN_OK |
+ FBINFO_READS_FAST;
+ info->pseudo_palette = mfbi->pseudo_palette;
+
+ rc = fb_alloc_cmap(&info->cmap, 16, 0);
+ if (rc)
+ return rc;
if (mfbi->index == PLANE0) {
if (mfbi->edid_data) {
return IRQ_NONE;
}
-static int request_irq_local(struct fsl_diu_data *machine_data)
+static int request_irq_local(struct fsl_diu_data *data)
{
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct diu __iomem *hw = data->diu_reg;
u32 ints;
int ret;
/* Read to clear the status */
in_be32(&hw->int_status);
- ret = request_irq(machine_data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
+ ret = request_irq(data->irq, fsl_diu_isr, 0, "fsl-diu-fb", hw);
if (!ret) {
ints = INT_PARERR | INT_LS_BF_VS;
#if !defined(CONFIG_NOT_COHERENT_CACHE)
return ret;
}
-static void free_irq_local(struct fsl_diu_data *machine_data)
+static void free_irq_local(struct fsl_diu_data *data)
{
- struct diu __iomem *hw = machine_data->diu_reg;
+ struct diu __iomem *hw = data->diu_reg;
/* Disable all LCDC interrupt */
out_be32(&hw->int_mask, 0x1f);
- free_irq(machine_data->irq, NULL);
+ free_irq(data->irq, NULL);
}
#ifdef CONFIG_PM
*/
static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
- machine_data = dev_get_drvdata(&ofdev->dev);
- disable_lcdc(machine_data->fsl_diu_info[0]);
+ data = dev_get_drvdata(&ofdev->dev);
+ disable_lcdc(data->fsl_diu_info[0]);
return 0;
}
static int fsl_diu_resume(struct platform_device *ofdev)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
- machine_data = dev_get_drvdata(&ofdev->dev);
- enable_lcdc(machine_data->fsl_diu_info[0]);
+ data = dev_get_drvdata(&ofdev->dev);
+ enable_lcdc(data->fsl_diu_info[0]);
return 0;
}
#define fsl_diu_resume NULL
#endif /* CONFIG_PM */
-/* Align to 64-bit(8-byte), 32-byte, etc. */
-static int allocate_buf(struct device *dev, struct diu_addr *buf, u32 size,
- u32 bytes_align)
-{
- u32 offset;
- dma_addr_t mask;
-
- buf->vaddr =
- dma_alloc_coherent(dev, size + bytes_align, &buf->paddr,
- GFP_DMA | __GFP_ZERO);
- if (!buf->vaddr)
- return -ENOMEM;
-
- mask = bytes_align - 1;
- offset = buf->paddr & mask;
- if (offset) {
- buf->offset = bytes_align - offset;
- buf->paddr = buf->paddr + offset;
- } else
- buf->offset = 0;
-
- return 0;
-}
-
-static void free_buf(struct device *dev, struct diu_addr *buf, u32 size,
- u32 bytes_align)
-{
- dma_free_coherent(dev, size + bytes_align, buf->vaddr,
- buf->paddr - buf->offset);
-}
-
static ssize_t store_monitor(struct device *device,
struct device_attribute *attr, const char *buf, size_t count)
{
enum fsl_diu_monitor_port old_monitor_port;
- struct fsl_diu_data *machine_data =
+ struct fsl_diu_data *data =
container_of(attr, struct fsl_diu_data, dev_attr);
- old_monitor_port = machine_data->monitor_port;
- machine_data->monitor_port = fsl_diu_name_to_port(buf);
+ old_monitor_port = data->monitor_port;
+ data->monitor_port = fsl_diu_name_to_port(buf);
- if (old_monitor_port != machine_data->monitor_port) {
+ if (old_monitor_port != data->monitor_port) {
		/* All AOIs need to adjust their pixel format;
		 * fsl_diu_set_par only changes the pixel format here
		 * and is unlikely to fail. */
- fsl_diu_set_par(machine_data->fsl_diu_info[0]);
- fsl_diu_set_par(machine_data->fsl_diu_info[1]);
- fsl_diu_set_par(machine_data->fsl_diu_info[2]);
- fsl_diu_set_par(machine_data->fsl_diu_info[3]);
- fsl_diu_set_par(machine_data->fsl_diu_info[4]);
+ unsigned int i;
+
+		for (i = 0; i < NUM_AOIS; i++)
+ fsl_diu_set_par(&data->fsl_diu_info[i]);
}
return count;
}
static ssize_t show_monitor(struct device *device,
struct device_attribute *attr, char *buf)
{
- struct fsl_diu_data *machine_data =
+ struct fsl_diu_data *data =
container_of(attr, struct fsl_diu_data, dev_attr);
- switch (machine_data->monitor_port) {
+ switch (data->monitor_port) {
case FSL_DIU_PORT_DVI:
return sprintf(buf, "DVI\n");
case FSL_DIU_PORT_LVDS:
{
struct device_node *np = pdev->dev.of_node;
struct mfb_info *mfbi;
- phys_addr_t dummy_ad_addr = 0;
- int ret, i, error = 0;
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
int diu_mode;
+ dma_addr_t dma_addr; /* DMA addr of fsl_diu_data struct */
+ unsigned int i;
+ int ret;
- machine_data = kzalloc(sizeof(struct fsl_diu_data), GFP_KERNEL);
- if (!machine_data)
+ data = dma_alloc_coherent(&pdev->dev, sizeof(struct fsl_diu_data),
+ &dma_addr, GFP_DMA | __GFP_ZERO);
+ if (!data)
return -ENOMEM;
+ data->dma_addr = dma_addr;
+
+ /*
+ * dma_alloc_coherent() uses a page allocator, so the address is
+ * always page-aligned. We need the memory to be 32-byte aligned,
+ * so that's good. However, if one day the allocator changes, we
+ * need to catch that. It's not worth the effort to handle unaligned
+	 * allocations now because it's highly unlikely to ever be a problem.
+ */
+ if ((unsigned long)data & 31) {
+ dev_err(&pdev->dev, "misaligned allocation");
+ ret = -ENOMEM;
+ goto error;
+ }
- spin_lock_init(&machine_data->reg_lock);
+ spin_lock_init(&data->reg_lock);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
- machine_data->fsl_diu_info[i] =
- framebuffer_alloc(sizeof(struct mfb_info), &pdev->dev);
- if (!machine_data->fsl_diu_info[i]) {
- dev_err(&pdev->dev, "cannot allocate memory\n");
- ret = -ENOMEM;
- goto error2;
- }
- mfbi = machine_data->fsl_diu_info[i]->par;
+ for (i = 0; i < NUM_AOIS; i++) {
+ struct fb_info *info = &data->fsl_diu_info[i];
+
+ info->device = &pdev->dev;
+ info->par = &data->mfb[i];
+
+ /*
+ * We store the physical address of the AD in the reserved
+ * 'paddr' field of the AD itself.
+ */
+ data->ad[i].paddr = DMA_ADDR(data, ad[i]);
+
+ info->fix.smem_start = 0;
+
+ /* Initialize the AOI data structure */
+ mfbi = info->par;
memcpy(mfbi, &mfb_template[i], sizeof(struct mfb_info));
- mfbi->parent = machine_data;
+ mfbi->parent = data;
+ mfbi->ad = &data->ad[i];
if (mfbi->index == PLANE0) {
const u8 *prop;
}
}
- machine_data->diu_reg = of_iomap(np, 0);
- if (!machine_data->diu_reg) {
+ data->diu_reg = of_iomap(np, 0);
+ if (!data->diu_reg) {
dev_err(&pdev->dev, "cannot map DIU registers\n");
ret = -EFAULT;
- goto error2;
+ goto error;
}
- diu_mode = in_be32(&machine_data->diu_reg->diu_mode);
+ diu_mode = in_be32(&data->diu_reg->diu_mode);
if (diu_mode == MFB_MODE0)
- out_be32(&machine_data->diu_reg->diu_mode, 0); /* disable DIU */
+ out_be32(&data->diu_reg->diu_mode, 0); /* disable DIU */
/* Get the IRQ of the DIU */
- machine_data->irq = irq_of_parse_and_map(np, 0);
+ data->irq = irq_of_parse_and_map(np, 0);
- if (!machine_data->irq) {
+ if (!data->irq) {
dev_err(&pdev->dev, "could not get DIU IRQ\n");
ret = -EINVAL;
goto error;
}
- machine_data->monitor_port = monitor_port;
-
- /* Area descriptor memory pool aligns to 64-bit boundary */
- if (allocate_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8))
- return -ENOMEM;
-
- /* Get memory for Gamma Table - 32-byte aligned memory */
- if (allocate_buf(&pdev->dev, &machine_data->gamma, 768, 32)) {
- ret = -ENOMEM;
- goto error;
- }
-
- /* For performance, cursor bitmap buffer aligns to 32-byte boundary */
- if (allocate_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32)) {
- ret = -ENOMEM;
- goto error;
- }
-
- i = ARRAY_SIZE(machine_data->fsl_diu_info);
- machine_data->dummy_ad = (struct diu_ad *)((u32)machine_data->ad.vaddr +
- machine_data->ad.offset) + i;
- machine_data->dummy_ad->paddr = machine_data->ad.paddr +
- i * sizeof(struct diu_ad);
- machine_data->dummy_aoi_virt = fsl_diu_alloc(64, &dummy_ad_addr);
- if (!machine_data->dummy_aoi_virt) {
- ret = -ENOMEM;
- goto error;
- }
- machine_data->dummy_ad->addr = cpu_to_le32(dummy_ad_addr);
- machine_data->dummy_ad->pix_fmt = 0x88882317;
- machine_data->dummy_ad->src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
- machine_data->dummy_ad->aoi_size = cpu_to_le32((4 << 16) | 2);
- machine_data->dummy_ad->offset_xyi = 0;
- machine_data->dummy_ad->offset_xyd = 0;
- machine_data->dummy_ad->next_ad = 0;
+ data->monitor_port = monitor_port;
+
+ /* Initialize the dummy Area Descriptor */
+ data->dummy_ad.addr = cpu_to_le32(DMA_ADDR(data, dummy_aoi));
+ data->dummy_ad.pix_fmt = 0x88882317;
+ data->dummy_ad.src_size_g_alpha = cpu_to_le32((4 << 12) | 4);
+ data->dummy_ad.aoi_size = cpu_to_le32((4 << 16) | 2);
+ data->dummy_ad.offset_xyi = 0;
+ data->dummy_ad.offset_xyd = 0;
+ data->dummy_ad.next_ad = 0;
+ data->dummy_ad.paddr = DMA_ADDR(data, dummy_ad);
/*
* Let DIU display splash screen if it was pre-initialized
* by the bootloader, set dummy area descriptor otherwise.
*/
if (diu_mode == MFB_MODE0)
- out_be32(&machine_data->diu_reg->desc[0],
- machine_data->dummy_ad->paddr);
-
- out_be32(&machine_data->diu_reg->desc[1], machine_data->dummy_ad->paddr);
- out_be32(&machine_data->diu_reg->desc[2], machine_data->dummy_ad->paddr);
-
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++) {
- machine_data->fsl_diu_info[i]->fix.smem_start = 0;
- mfbi = machine_data->fsl_diu_info[i]->par;
- mfbi->ad = (struct diu_ad *)((u32)machine_data->ad.vaddr
- + machine_data->ad.offset) + i;
- mfbi->ad->paddr =
- machine_data->ad.paddr + i * sizeof(struct diu_ad);
- ret = install_fb(machine_data->fsl_diu_info[i]);
+ out_be32(&data->diu_reg->desc[0], data->dummy_ad.paddr);
+
+ out_be32(&data->diu_reg->desc[1], data->dummy_ad.paddr);
+ out_be32(&data->diu_reg->desc[2], data->dummy_ad.paddr);
+
+ for (i = 0; i < NUM_AOIS; i++) {
+ ret = install_fb(&data->fsl_diu_info[i]);
if (ret) {
dev_err(&pdev->dev, "could not register fb %d\n", i);
goto error;
}
}
- if (request_irq_local(machine_data)) {
+ if (request_irq_local(data)) {
dev_err(&pdev->dev, "could not claim irq\n");
goto error;
}
- sysfs_attr_init(&machine_data->dev_attr.attr);
- machine_data->dev_attr.attr.name = "monitor";
- machine_data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
- machine_data->dev_attr.show = show_monitor;
- machine_data->dev_attr.store = store_monitor;
- error = device_create_file(machine_data->fsl_diu_info[0]->dev,
- &machine_data->dev_attr);
- if (error) {
+ sysfs_attr_init(&data->dev_attr.attr);
+ data->dev_attr.attr.name = "monitor";
+ data->dev_attr.attr.mode = S_IRUGO|S_IWUSR;
+ data->dev_attr.show = show_monitor;
+ data->dev_attr.store = store_monitor;
+ ret = device_create_file(&pdev->dev, &data->dev_attr);
+ if (ret) {
dev_err(&pdev->dev, "could not create sysfs file %s\n",
- machine_data->dev_attr.attr.name);
+ data->dev_attr.attr.name);
}
- dev_set_drvdata(&pdev->dev, machine_data);
+ dev_set_drvdata(&pdev->dev, data);
return 0;
error:
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- uninstall_fb(machine_data->fsl_diu_info[i]);
-
- if (machine_data->ad.vaddr)
- free_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
- if (machine_data->gamma.vaddr)
- free_buf(&pdev->dev, &machine_data->gamma, 768, 32);
- if (machine_data->cursor.vaddr)
- free_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32);
- if (machine_data->dummy_aoi_virt)
- fsl_diu_free(machine_data->dummy_aoi_virt, 64);
- iounmap(machine_data->diu_reg);
-
-error2:
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- if (machine_data->fsl_diu_info[i])
- framebuffer_release(machine_data->fsl_diu_info[i]);
- kfree(machine_data);
+ for (i = 0; i < NUM_AOIS; i++)
+ uninstall_fb(&data->fsl_diu_info[i]);
+
+ iounmap(data->diu_reg);
+
+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
+ data->dma_addr);
return ret;
}
static int fsl_diu_remove(struct platform_device *pdev)
{
- struct fsl_diu_data *machine_data;
+ struct fsl_diu_data *data;
int i;
- machine_data = dev_get_drvdata(&pdev->dev);
- disable_lcdc(machine_data->fsl_diu_info[0]);
- free_irq_local(machine_data);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- uninstall_fb(machine_data->fsl_diu_info[i]);
- if (machine_data->ad.vaddr)
- free_buf(&pdev->dev, &machine_data->ad,
- sizeof(struct diu_ad) * FSL_AOI_NUM, 8);
- if (machine_data->gamma.vaddr)
- free_buf(&pdev->dev, &machine_data->gamma, 768, 32);
- if (machine_data->cursor.vaddr)
- free_buf(&pdev->dev, &machine_data->cursor,
- MAX_CURS * MAX_CURS * 2, 32);
- if (machine_data->dummy_aoi_virt)
- fsl_diu_free(machine_data->dummy_aoi_virt, 64);
- iounmap(machine_data->diu_reg);
- for (i = 0; i < ARRAY_SIZE(machine_data->fsl_diu_info); i++)
- if (machine_data->fsl_diu_info[i])
- framebuffer_release(machine_data->fsl_diu_info[i]);
- kfree(machine_data);
+ data = dev_get_drvdata(&pdev->dev);
+ disable_lcdc(&data->fsl_diu_info[0]);
+ free_irq_local(data);
+
+ for (i = 0; i < NUM_AOIS; i++)
+ uninstall_fb(&data->fsl_diu_info[i]);
+
+ iounmap(data->diu_reg);
+
+ dma_free_coherent(&pdev->dev, sizeof(struct fsl_diu_data), data,
+ data->dma_addr);
return 0;
}
}
};
-static struct fb_fix_screeninfo grvga_fix __initdata = {
+static struct fb_fix_screeninfo grvga_fix __devinitdata = {
.id = "AG SVGACTRL",
.type = FB_TYPE_PACKED_PIXELS,
.visual = FB_VISUAL_PSEUDOCOLOR,
.fb_imageblit = cfb_imageblit
};
-static int __init grvga_parse_custom(char *options,
+static int __devinit grvga_parse_custom(char *options,
struct fb_var_screeninfo *screendata)
{
char *this_opt;
static int sync __devinitdata;
static int extvga __devinitdata;
static int dcolor __devinitdata;
-static int ddc3 __devinitdata = 2;
+static bool ddc3 __devinitdata;
/*------------------------------------------------------------*/
if (sync)
par->dev_flags |= ALWAYS_SYNC;
- par->ddc_num = ddc3;
+ par->ddc_num = (ddc3 ? 3 : 2);
if (bpp < 8)
bpp = 8;
else if (!strncmp(this_opt, "dcolor", 6))
dcolor = 1;
else if (!strncmp(this_opt, "ddc3", 4))
- ddc3 = 3;
+ ddc3 = true;
else
mode_option = this_opt;
}
39721L,48L,16L,33L,10L,
96L,2L,~0, /* No sync info */
FB_VMODE_NONINTERLACED,
- 0, {0,0,0,0,0}
};
39721L,48L,16L,33L,10L,
96L,2,0, /* no sync info */
FB_VMODE_NONINTERLACED,
- 0, {0,0,0,0,0}
};
static int matroxfb_dh_regit(const struct matrox_fb_info *minfo,
},
};
-int __devinit mbxfb_init(void)
-{
- return platform_driver_register(&mbxfb_driver);
-}
-
-static void __devexit mbxfb_exit(void)
-{
- platform_driver_unregister(&mbxfb_driver);
-}
-
-module_init(mbxfb_init);
-module_exit(mbxfb_exit);
+module_platform_driver(mbxfb_driver);
MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device");
MODULE_AUTHOR("Mike Rapoport, Compulab");
},
};
-static int __init mxsfb_init(void)
-{
- return platform_driver_register(&mxsfb_driver);
-}
-
-static void __exit mxsfb_exit(void)
-{
- platform_driver_unregister(&mxsfb_driver);
-}
-
-module_init(mxsfb_init);
-module_exit(mxsfb_exit);
+module_platform_driver(mxsfb_driver);
MODULE_DESCRIPTION("Freescale mxs framebuffer driver");
MODULE_AUTHOR("Sascha Hauer, Pengutronix");
},
};
-int __devinit nuc900fb_init(void)
-{
- return platform_driver_register(&nuc900fb_driver);
-}
-
-static void __exit nuc900fb_cleanup(void)
-{
- platform_driver_unregister(&nuc900fb_driver);
-}
-
-module_init(nuc900fb_init);
-module_exit(nuc900fb_cleanup);
+module_platform_driver(nuc900fb_driver);
MODULE_DESCRIPTION("Framebuffer driver for the NUC900");
MODULE_LICENSE("GPL");
static struct spi_driver mipid_spi_driver = {
.driver = {
.name = MIPID_MODULE_NAME,
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
static struct spi_driver acx565akm_spi_driver = {
.driver = {
.name = "acx565akm",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = acx565akm_spi_probe,
static struct spi_driver mipid_spi_driver = {
.driver = {
.name = "lcd_mipid",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = mipid_spi_probe,
.resume = nec_8048_spi_resume,
.driver = {
.name = "nec_8048_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
};
static struct spi_driver tpo_td043_spi_driver = {
.driver = {
.name = "tpo_td043mtea1_panel_spi",
- .bus = &spi_bus_type,
.owner = THIS_MODULE,
},
.probe = tpo_td043_spi_probe,
.remove = rgbfb_remove,
};
-static int __init rgbfb_init(void)
-{
- return platform_driver_register(&rgbfb_driver);
-}
-
-static void __exit rgbfb_exit(void)
-{
- platform_driver_unregister(&rgbfb_driver);
-}
-
-module_init(rgbfb_init);
-module_exit(rgbfb_exit);
+module_platform_driver(rgbfb_driver);
MODULE_LICENSE("GPL");
.resume = sdum_resume,
};
-int __init sdum_init(void)
-{
- return platform_driver_register(&sdum_driver);
-}
-
-static void __exit sdum_exit(void)
-{
- platform_driver_unregister(&sdum_driver);
-};
-
-module_init(sdum_init);
-module_exit(sdum_exit);
+module_platform_driver(sdum_driver);
MODULE_LICENSE("GPL");
.remove = __devexit_p(pxa168fb_remove),
};
-static int __init pxa168fb_init(void)
-{
- return platform_driver_register(&pxa168fb_driver);
-}
-module_init(pxa168fb_init);
-
-static void __exit pxa168fb_exit(void)
-{
- platform_driver_unregister(&pxa168fb_driver);
-}
-module_exit(pxa168fb_exit);
+module_platform_driver(pxa168fb_driver);
MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> "
"Green Wan <gwan@marvell.com>");
},
};
-static int __init
-pxa3xx_gcu_init(void)
-{
- return platform_driver_register(&pxa3xx_gcu_driver);
-}
-
-static void __exit
-pxa3xx_gcu_exit(void)
-{
- platform_driver_unregister(&pxa3xx_gcu_driver);
-}
-
-module_init(pxa3xx_gcu_init);
-module_exit(pxa3xx_gcu_exit);
+module_platform_driver(pxa3xx_gcu_driver);
MODULE_DESCRIPTION("PXA3xx graphics controller unit driver");
MODULE_LICENSE("GPL");
* @regs: The mapped hardware registers.
* @variant: Variant information for this hardware.
* @enabled: A bitmask of enabled hardware windows.
+ * @output_on: Flag indicating whether the physical output is enabled.
* @pdata: The platform configuration data passed with the device.
* @windows: The hardware windows that have been claimed.
* @irq_no: IRQ line number
struct s3c_fb_variant variant;
unsigned char enabled;
+ bool output_on;
struct s3c_fb_platdata *pdata;
struct s3c_fb_win *windows[S3C_FB_MAX_WIN];
}
}
+/**
+ * s3c_fb_enable() - Set the state of the main LCD output
+ * @sfb: The main framebuffer state.
+ * @enable: The state to set.
+ */
+static void s3c_fb_enable(struct s3c_fb *sfb, int enable)
+{
+ u32 vidcon0 = readl(sfb->regs + VIDCON0);
+
+ if (enable && !sfb->output_on)
+ pm_runtime_get_sync(sfb->dev);
+
+ if (enable) {
+ vidcon0 |= VIDCON0_ENVID | VIDCON0_ENVID_F;
+ } else {
+ /* see the note in the framebuffer datasheet about
+ * why you cannot take both of these bits down at the
+ * same time. */
+
+ if (vidcon0 & VIDCON0_ENVID) {
+ vidcon0 |= VIDCON0_ENVID;
+ vidcon0 &= ~VIDCON0_ENVID_F;
+ }
+ }
+
+ writel(vidcon0, sfb->regs + VIDCON0);
+
+ if (!enable && sfb->output_on)
+ pm_runtime_put_sync(sfb->dev);
+
+ sfb->output_on = enable;
+}
+
/**
* s3c_fb_set_par() - framebuffer request to set new framebuffer state.
* @info: The framebuffer to change.
dev_dbg(sfb->dev, "setting framebuffer parameters\n");
+ pm_runtime_get_sync(sfb->dev);
+
shadow_protect_win(win, 1);
switch (var->bits_per_pixel) {
if (sfb->variant.is_2443)
data |= (1 << 5);
- data |= VIDCON0_ENVID | VIDCON0_ENVID_F;
writel(data, regs + VIDCON0);
+ s3c_fb_enable(sfb, 1);
+
data = VIDTCON0_VBPD(var->upper_margin - 1) |
VIDTCON0_VFPD(var->lower_margin - 1) |
VIDTCON0_VSPW(var->vsync_len - 1);
}
data = WINCONx_ENWIN;
+ sfb->enabled |= (1 << win->index);
/* note, since we have to round up the bits-per-pixel, we end up
* relying on the bitfield information for r/g/b/a to work out
} else if (var->transp.length == 1)
data |= WINCON1_BPPMODE_25BPP_A1888
| WINCON1_BLD_PIX;
- else if (var->transp.length == 4)
+ else if ((var->transp.length == 4) ||
+ (var->transp.length == 8))
data |= WINCON1_BPPMODE_28BPP_A4888
| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
else
shadow_protect_win(win, 0);
+ pm_runtime_put_sync(sfb->dev);
+
return 0;
}
dev_dbg(sfb->dev, "%s: win %d: %d => rgb=%d/%d/%d\n",
__func__, win->index, regno, red, green, blue);
+ pm_runtime_get_sync(sfb->dev);
+
switch (info->fix.visual) {
case FB_VISUAL_TRUECOLOR:
/* true-colour, use pseudo-palette */
break;
default:
+ pm_runtime_put_sync(sfb->dev);
return 1; /* unknown type */
}
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
-/**
- * s3c_fb_enable() - Set the state of the main LCD output
- * @sfb: The main framebuffer state.
- * @enable: The state to set.
- */
-static void s3c_fb_enable(struct s3c_fb *sfb, int enable)
-{
- u32 vidcon0 = readl(sfb->regs + VIDCON0);
-
- if (enable)
- vidcon0 |= VIDCON0_ENVID | VIDCON0_ENVID_F;
- else {
- /* see the note in the framebuffer datasheet about
- * why you cannot take both of these bits down at the
- * same time. */
-
- if (!(vidcon0 & VIDCON0_ENVID))
- return;
-
- vidcon0 |= VIDCON0_ENVID;
- vidcon0 &= ~VIDCON0_ENVID_F;
- }
-
- writel(vidcon0, sfb->regs + VIDCON0);
-}
-
/**
* s3c_fb_blank() - blank or unblank the given window
* @blank_mode: The blank state from FB_BLANK_*
dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
+ pm_runtime_get_sync(sfb->dev);
+
wincon = readl(sfb->regs + sfb->variant.wincon + (index * 4));
switch (blank_mode) {
case FB_BLANK_NORMAL:
/* disable the DMA and display 0x0 (black) */
+ shadow_protect_win(win, 1);
writel(WINxMAP_MAP | WINxMAP_MAP_COLOUR(0x0),
sfb->regs + sfb->variant.winmap + (index * 4));
+ shadow_protect_win(win, 0);
break;
case FB_BLANK_UNBLANK:
+ shadow_protect_win(win, 1);
writel(0x0, sfb->regs + sfb->variant.winmap + (index * 4));
+ shadow_protect_win(win, 0);
wincon |= WINCONx_ENWIN;
sfb->enabled |= (1 << index);
break;
case FB_BLANK_VSYNC_SUSPEND:
case FB_BLANK_HSYNC_SUSPEND:
default:
+ pm_runtime_put_sync(sfb->dev);
return 1;
}
+ shadow_protect_win(win, 1);
writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
+ shadow_protect_win(win, 0);
/* Check the enabled state to see if we need to be running the
* main LCD interface, as if there are no active windows then
/* we're stuck with this until we can do something about overriding
* the power control using the blanking event for a single fb.
*/
- if (index == sfb->pdata->default_win)
+ if (index == sfb->pdata->default_win) {
+ shadow_protect_win(win, 1);
s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
+ shadow_protect_win(win, 0);
+ }
+
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
void __iomem *buf = sfb->regs + win->index * 8;
unsigned int start_boff, end_boff;
+ pm_runtime_get_sync(sfb->dev);
+
/* Offset in bytes to the start of the displayed area */
start_boff = var->yoffset * info->fix.line_length;
/* X offset depends on the current bpp */
break;
default:
dev_err(sfb->dev, "invalid bpp\n");
+ pm_runtime_put_sync(sfb->dev);
return -EINVAL;
}
}
shadow_protect_win(win, 0);
+ pm_runtime_put_sync(sfb->dev);
return 0;
}
if (crtc != 0)
return -ENODEV;
+ pm_runtime_get_sync(sfb->dev);
+
count = sfb->vsync_info.count;
s3c_fb_enable_irq(sfb);
ret = wait_event_interruptible_timeout(sfb->vsync_info.wait,
count != sfb->vsync_info.count,
msecs_to_jiffies(VSYNC_TIMEOUT_MSEC));
+
+ pm_runtime_put_sync(sfb->dev);
+
if (ret == 0)
return -ETIMEDOUT;
return ret;
}
-static int s3c_fb_open(struct fb_info *info, int user)
-{
- struct s3c_fb_win *win = info->par;
- struct s3c_fb *sfb = win->parent;
-
- pm_runtime_get_sync(sfb->dev);
-
- return 0;
-}
-
-static int s3c_fb_release(struct fb_info *info, int user)
-{
- struct s3c_fb_win *win = info->par;
- struct s3c_fb *sfb = win->parent;
-
- pm_runtime_put_sync(sfb->dev);
-
- return 0;
-}
-
static struct fb_ops s3c_fb_ops = {
.owner = THIS_MODULE,
- .fb_open = s3c_fb_open,
- .fb_release = s3c_fb_release,
.fb_check_var = s3c_fb_check_var,
.fb_set_par = s3c_fb_set_par,
.fb_blank = s3c_fb_blank,
dev_err(dev, "failed to create window %d\n", win);
for (; win >= 0; win--)
s3c_fb_release_win(sfb, sfb->windows[win]);
- goto err_irq;
+ goto err_pm_runtime;
}
}
return 0;
-err_irq:
+err_pm_runtime:
+ pm_runtime_put_sync(sfb->dev);
free_irq(sfb->irq_no, sfb);
err_ioremap:
release_mem_region(sfb->regs_res->start, resource_size(sfb->regs_res));
err_lcd_clk:
+ pm_runtime_disable(sfb->dev);
+
if (!sfb->variant.has_clksel) {
clk_disable(sfb->lcd_clk);
clk_put(sfb->lcd_clk);
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int s3c_fb_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
for (win_no = 0; win_no < sfb->variant.nr_windows - 1; win_no++) {
void __iomem *regs = sfb->regs + sfb->variant.keycon;
+ win = sfb->windows[win_no];
+ if (!win)
+ continue;
+ shadow_protect_win(win, 1);
regs += (win_no * 8);
writel(0xffffff, regs + WKEYCON0);
writel(0xffffff, regs + WKEYCON1);
+ shadow_protect_win(win, 0);
}
/* restore framebuffers */
return 0;
}
+#endif
+#ifdef CONFIG_PM_RUNTIME
static int s3c_fb_runtime_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c_fb *sfb = platform_get_drvdata(pdev);
- struct s3c_fb_win *win;
- int win_no;
-
- for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
- win = sfb->windows[win_no];
- if (!win)
- continue;
-
- /* use the blank function to push into power-down */
- s3c_fb_blank(FB_BLANK_POWERDOWN, win->fbinfo);
- }
if (!sfb->variant.has_clksel)
clk_disable(sfb->lcd_clk);
clk_disable(sfb->bus_clk);
+
return 0;
}
struct platform_device *pdev = to_platform_device(dev);
struct s3c_fb *sfb = platform_get_drvdata(pdev);
struct s3c_fb_platdata *pd = sfb->pdata;
- struct s3c_fb_win *win;
- int win_no;
clk_enable(sfb->bus_clk);
pd->setup_gpio();
writel(pd->vidcon1, sfb->regs + VIDCON1);
- /* zero all windows before we do anything */
- for (win_no = 0; win_no < sfb->variant.nr_windows; win_no++)
- s3c_fb_clear_win(sfb, win_no);
-
- for (win_no = 0; win_no < sfb->variant.nr_windows - 1; win_no++) {
- void __iomem *regs = sfb->regs + sfb->variant.keycon;
-
- regs += (win_no * 8);
- writel(0xffffff, regs + WKEYCON0);
- writel(0xffffff, regs + WKEYCON1);
- }
-
- /* restore framebuffers */
- for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
- win = sfb->windows[win_no];
- if (!win)
- continue;
-
- dev_dbg(&pdev->dev, "resuming window %d\n", win_no);
- s3c_fb_set_par(win->fbinfo);
- }
-
return 0;
}
-
-#else
-#define s3c_fb_suspend NULL
-#define s3c_fb_resume NULL
-#define s3c_fb_runtime_suspend NULL
-#define s3c_fb_runtime_resume NULL
#endif
-
#define VALID_BPP124 (VALID_BPP(1) | VALID_BPP(2) | VALID_BPP(4))
#define VALID_BPP1248 (VALID_BPP124 | VALID_BPP(8))
MODULE_DEVICE_TABLE(platform, s3c_fb_driver_ids);
static const struct dev_pm_ops s3cfb_pm_ops = {
- .suspend = s3c_fb_suspend,
- .resume = s3c_fb_resume,
- .runtime_suspend = s3c_fb_runtime_suspend,
- .runtime_resume = s3c_fb_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(s3c_fb_suspend, s3c_fb_resume)
+ SET_RUNTIME_PM_OPS(s3c_fb_runtime_suspend, s3c_fb_runtime_resume,
+ NULL)
};
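
The SET_SYSTEM_SLEEP_PM_OPS()/SET_RUNTIME_PM_OPS() helpers only emit their dev_pm_ops members when the matching CONFIG_PM_SLEEP/CONFIG_PM_RUNTIME option is enabled, which is why the old #else stubs that defined the callbacks to NULL can be dropped above. A rough sketch of the sleep-side helper from <linux/pm.h> (hedged; the exact expansion differs between kernel versions):

    #ifdef CONFIG_PM_SLEEP
    #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
            .suspend  = suspend_fn, \
            .resume   = resume_fn, \
            .freeze   = suspend_fn, \
            .thaw     = resume_fn, \
            .poweroff = suspend_fn, \
            .restore  = resume_fn,
    #else
    #define SET_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn)
    #endif
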
static struct platform_driver s3c_fb_driver = {
},
};
-static int __init s3c_fb_init(void)
-{
- return platform_driver_register(&s3c_fb_driver);
-}
-
-static void __exit s3c_fb_cleanup(void)
-{
- platform_driver_unregister(&s3c_fb_driver);
-}
-
-module_init(s3c_fb_init);
-module_exit(s3c_fb_cleanup);
+module_platform_driver(s3c_fb_driver);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("Samsung S3C SoC Framebuffer driver");
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/cpufreq.h>
+#include <linux/io.h>
-#include <asm/io.h>
#include <asm/div64.h>
#include <asm/mach/map.h>
#ifdef CONFIG_FB_S3C2410_DEBUG
static int debug = 1;
#else
-static int debug = 0;
+static int debug;
#endif
-#define dprintk(msg...) if (debug) { printk(KERN_DEBUG "s3c2410fb: " msg); }
+#define dprintk(msg...) if (debug) printk(KERN_DEBUG "s3c2410fb: " msg);
/* useful functions */
tpal_reg += is_s3c2412(fbi) ? S3C2412_TPAL : S3C2410_TPAL;
- if (blank_mode == FB_BLANK_POWERDOWN) {
+ if (blank_mode == FB_BLANK_POWERDOWN)
s3c2410fb_lcd_enable(fbi, 0);
- } else {
+ else
s3c2410fb_lcd_enable(fbi, 1);
- }
if (blank_mode == FB_BLANK_UNBLANK)
writel(0x0, tpal_reg);
#endif
-static char driver_name[] = "s3c2410fb";
+static const char driver_name[] = "s3c2410fb";
static int __devinit s3c24xxfb_probe(struct platform_device *pdev,
enum s3c_drv_type drv_type)
goto release_mem;
}
- info->irq_base = info->io + ((drv_type == DRV_S3C2412) ? S3C2412_LCDINTBASE : S3C2410_LCDINTBASE);
+ if (drv_type == DRV_S3C2412)
+ info->irq_base = info->io + S3C2412_LCDINTBASE;
+ else
+ info->irq_base = info->io + S3C2410_LCDINTBASE;
dprintk("devinit\n");
clk_enable(info->clk);
dprintk("got and enabled clock\n");
- msleep(1);
+ usleep_range(1000, 1000);
info->clk_rate = clk_get_rate(info->clk);
/* create device files */
ret = device_create_file(&pdev->dev, &dev_attr_debug);
- if (ret) {
+ if (ret)
printk(KERN_ERR "failed to add debug attribute\n");
- }
printk(KERN_INFO "fb%d: %s frame buffer device\n",
fbinfo->node, fbinfo->fix.id);
s3c2410fb_cpufreq_deregister(info);
s3c2410fb_lcd_enable(info, 0);
- msleep(1);
+ usleep_range(1000, 1000);
s3c2410fb_unmap_video_memory(fbinfo);
* the LCD DMA engine is not going to get back on the bus
* before the clock goes off again (bjd) */
- msleep(1);
+ usleep_range(1000, 1000);
clk_disable(info->clk);
return 0;
struct s3c2410fb_info *info = fbinfo->par;
clk_enable(info->clk);
- msleep(1);
+ usleep_range(1000, 1000);
s3c2410fb_init_registers(fbinfo);
if (par->chip == CHIP_988_VIRGE_VX) {
vga_wcrt(par->state.vgabase, 0x50, 0x00);
vga_wcrt(par->state.vgabase, 0x67, 0x50);
-
+ msleep(10); /* screen remains blank sometimes without this */
vga_wcrt(par->state.vgabase, 0x63, (mode <= 2) ? 0x90 : 0x09);
vga_wcrt(par->state.vgabase, 0x66, 0x90);
}
/* Set Data Transfer Position */
hsstart = ((info->var.xres + info->var.right_margin) * hmul) / 8;
- value = clamp((htotal + hsstart + 1) / 2, hsstart + 4, htotal + 1);
+	/* + 2 is needed for Virge/VX and does no harm on other cards */
+ value = clamp((htotal + hsstart + 1) / 2 + 2, hsstart + 4, htotal + 1);
svga_wcrt_multi(par->state.vgabase, s3_dtpc_regs, value);
memset_io(info->screen_base, 0x00, screen_size);
info->screen_size = 2 << 20;
break;
}
+ } else if (par->chip == CHIP_988_VIRGE_VX) {
+ switch ((regval & 0x60) >> 5) {
+ case 0: /* 2MB */
+ info->screen_size = 2 << 20;
+ break;
+ case 1: /* 4MB */
+ info->screen_size = 4 << 20;
+ break;
+ case 2: /* 6MB */
+ info->screen_size = 6 << 20;
+ break;
+ case 3: /* 8MB */
+ info->screen_size = 8 << 20;
+ break;
+ }
+ /* off-screen memory */
+ regval = vga_rcrt(par->state.vgabase, 0x37);
+ switch ((regval & 0x60) >> 5) {
+ case 1: /* 4MB */
+ info->screen_size -= 4 << 20;
+ break;
+ case 2: /* 2MB */
+ info->screen_size -= 2 << 20;
+ break;
+ }
} else
info->screen_size = s3_memsizes[regval >> 5] << 10;
info->fix.smem_len = info->screen_size;
map_offset = (physbase + map[i].poff) & POFF_MASK;
break;
}
- if (!map_size){
+ if (!map_size) {
page += PAGE_SIZE;
continue;
}
.remove = __devexit_p(sh7760fb_remove),
};
-static int __init sh7760fb_init(void)
-{
- return platform_driver_register(&sh7760_lcdc_driver);
-}
-
-static void __exit sh7760fb_exit(void)
-{
- platform_driver_unregister(&sh7760_lcdc_driver);
-}
-
-module_init(sh7760fb_init);
-module_exit(sh7760fb_exit);
+module_platform_driver(sh7760_lcdc_driver);
MODULE_AUTHOR("Nobuhiro Iwamatsu, Manuel Lauss");
MODULE_DESCRIPTION("FBdev for SH7760/63 integrated LCD Controller");
* published by the Free Software Foundation.
*/
+#include <linux/bitmap.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/init.h>
#define VMCTR1 0x0020
#define VMCTR2 0x0024
#define VMLEN1 0x0028
+#define VMLEN2 0x002c
#define CMTSRTREQ 0x0070
#define CMTSRTCTR 0x00d0
void __iomem *base;
void __iomem *linkbase;
struct clk *dsit_clk;
- struct clk *dsip_clk;
- struct device *dev;
+ struct platform_device *pdev;
void *next_board_data;
void (*next_display_on)(void *board_data, struct fb_info *info);
sh_mipi_dsi_enable(mipi, false);
}
-static void mipi_display_on(void *arg, struct fb_info *info)
-{
- struct sh_mipi *mipi = arg;
-
- pm_runtime_get_sync(mipi->dev);
- sh_mipi_dsi_enable(mipi, true);
-
- if (mipi->next_display_on)
- mipi->next_display_on(mipi->next_board_data, info);
-}
-
-static void mipi_display_off(void *arg)
-{
- struct sh_mipi *mipi = arg;
-
- if (mipi->next_display_off)
- mipi->next_display_off(mipi->next_board_data);
-
- sh_mipi_dsi_enable(mipi, false);
- pm_runtime_put(mipi->dev);
-}
-
static int __init sh_mipi_setup(struct sh_mipi *mipi,
struct sh_mipi_dsi_info *pdata)
{
void __iomem *base = mipi->base;
struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan;
- u32 pctype, datatype, pixfmt, linelength, vmctr2 = 0x00e00000;
+ u32 pctype, datatype, pixfmt, linelength, vmctr2;
+ u32 tmp, top, bottom, delay, div;
bool yuv;
+ int bpp;
/*
* Select data format. MIPI DSI is not hot-pluggable, so, we just use
(!yuv && ch->interface_type != RGB24))
return -EINVAL;
+ if (!pdata->lane)
+ return -EINVAL;
+
/* reset DSI link */
iowrite32(0x00000001, base + SYSCTRL);
/* Hold reset for 100 cycles of the slowest of bus, HS byte and LP clock */
/* setup DSI link */
- /*
- * Default = ULPS enable |
- * Contention detection enabled |
- * EoT packet transmission enable |
- * CRC check enable |
- * ECC check enable
- * additionally enable first two lanes
- */
- iowrite32(0x00003703, base + SYSCONF);
/*
* T_wakeup = 0x7000
* T_hs-trail = 3
iowrite32(0x0fffffff, base + TATOVSET);
/* Peripheral reset timeout, default 0xffffffff */
iowrite32(0x0fffffff, base + PRTOVSET);
- /* Enable timeout counters */
- iowrite32(0x00000f00, base + DSICTRL);
/* Interrupts not used, disable all */
iowrite32(0, base + DSIINTE);
/* DSI-Tx bias on */
iowrite32(0x00000001, base + PHYCTRL);
udelay(200);
- /* Deassert resets, power on, set multiplier */
- iowrite32(0x03070b01, base + PHYCTRL);
+ /* Deassert resets, power on */
+ iowrite32(0x03070001, base + PHYCTRL);
+
+ /*
+ * Default = ULPS enable |
+ * Contention detection enabled |
+ * EoT packet transmission enable |
+ * CRC check enable |
+ * ECC check enable
+ */
+ bitmap_fill((unsigned long *)&tmp, pdata->lane);
+ tmp |= 0x00003700;
+ iowrite32(tmp, base + SYSCONF);
/* setup l-bridge */
* Non-burst mode with sync pulses: VSE and HSE are output,
* HSA period allowed, no commands in LP
*/
+ vmctr2 = 0;
+ if (pdata->flags & SH_MIPI_DSI_VSEE)
+ vmctr2 |= 1 << 23;
+ if (pdata->flags & SH_MIPI_DSI_HSEE)
+ vmctr2 |= 1 << 22;
+ if (pdata->flags & SH_MIPI_DSI_HSAE)
+ vmctr2 |= 1 << 21;
+ if (pdata->flags & SH_MIPI_DSI_BL2E)
+ vmctr2 |= 1 << 17;
if (pdata->flags & SH_MIPI_DSI_HSABM)
- vmctr2 |= 0x20;
- if (pdata->flags & SH_MIPI_DSI_HSPBM)
- vmctr2 |= 0x10;
+ vmctr2 |= 1 << 5;
+ if (pdata->flags & SH_MIPI_DSI_HBPBM)
+ vmctr2 |= 1 << 4;
+ if (pdata->flags & SH_MIPI_DSI_HFPBM)
+ vmctr2 |= 1 << 3;
iowrite32(vmctr2, mipi->linkbase + VMCTR2);
/*
- * 0x660 = 1632 bytes per line (RGB24, 544 pixels: see
- * sh_mobile_lcdc_info.ch[0].lcd_cfg[0].xres), HSALEN = 1 - default
- * (unused if VMCTR2[HSABM] = 0)
+ * VMLEN1 = RGBLEN | HSALEN
+ *
+	 * see the "Video mode - Blanking Packet setting" description in the
+	 * hardware documentation
+ */
+ top = linelength << 16; /* RGBLEN */
+ bottom = 0x00000001;
+ if (pdata->flags & SH_MIPI_DSI_HSABM) /* HSALEN */
+ bottom = (pdata->lane * ch->lcd_cfg[0].hsync_len) - 10;
+ iowrite32(top | bottom , mipi->linkbase + VMLEN1);
+
+ /*
+ * VMLEN2 = HBPLEN | HFPLEN
+ *
+	 * see the "Video mode - Blanking Packet setting" description in the
+	 * hardware documentation
*/
- iowrite32(1 | (linelength << 16), mipi->linkbase + VMLEN1);
+ top = 0x00010000;
+ bottom = 0x00000001;
+ delay = 0;
+
+	div = 1;	/* HSbyteCLK is the calculation base
+			 * HS4divCLK = HSbyteCLK/2
+			 * HS6divCLK is not supported for now */
+ if (pdata->flags & SH_MIPI_DSI_HS4divCLK)
+ div = 2;
+
+ if (pdata->flags & SH_MIPI_DSI_HFPBM) { /* HBPLEN */
+ top = ch->lcd_cfg[0].hsync_len + ch->lcd_cfg[0].left_margin;
+ top = ((pdata->lane * top / div) - 10) << 16;
+ }
+ if (pdata->flags & SH_MIPI_DSI_HBPBM) { /* HFPLEN */
+ bottom = ch->lcd_cfg[0].right_margin;
+ bottom = (pdata->lane * bottom / div) - 12;
+ }
+
+ bpp = linelength / ch->lcd_cfg[0].xres; /* byte / pixel */
+ if ((pdata->lane / div) > bpp) {
+ tmp = ch->lcd_cfg[0].xres / bpp; /* output cycle */
+ tmp = ch->lcd_cfg[0].xres - tmp; /* (input - output) cycle */
+ delay = (pdata->lane * tmp);
+ }
+
+ iowrite32(top | (bottom + delay) , mipi->linkbase + VMLEN2);
msleep(5);
pixfmt << 4);
sh_mipi_dcs(ch->chan, MIPI_DCS_SET_DISPLAY_ON);
+ /* Enable timeout counters */
+ iowrite32(0x00000f00, base + DSICTRL);
+
return 0;
}
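
To make the VMLEN1/VMLEN2 arithmetic above concrete, here is a small stand-alone calculation using hypothetical panel timings (2 data lanes, RGB24 at 544 pixels per line, HSbyteCLK as the base clock). The numbers only illustrate the formulas in sh_mipi_setup(); they are not taken from a real board:

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical timings, for illustration of the formulas only */
            int lane = 2, div = 1;                  /* HSbyteCLK base, no /2 divider */
            int xres = 544, bytes_pp = 3;           /* RGB24                         */
            int hsync = 32, hbp = 64, hfp = 32;

            int rgblen = xres * bytes_pp;                   /* 1632 bytes per line   */
            int hsalen = lane * hsync - 10;                 /* 54                    */
            int hbplen = lane * (hsync + hbp) / div - 10;   /* 182                   */
            int hfplen = lane * hfp / div - 12;             /* 52                    */
            /* the extra 'delay' term stays 0 here: lane/div (2) <= bytes/pixel (3)  */

            printf("VMLEN1 = 0x%08x\n", (rgblen << 16) | hsalen);
            printf("VMLEN2 = 0x%08x\n", (hbplen << 16) | hfplen);
            return 0;
    }
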
+static void mipi_display_on(void *arg, struct fb_info *info)
+{
+ struct sh_mipi *mipi = arg;
+ struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
+ int ret;
+
+ pm_runtime_get_sync(&mipi->pdev->dev);
+
+ ret = pdata->set_dot_clock(mipi->pdev, mipi->base, 1);
+ if (ret < 0)
+ goto mipi_display_on_fail1;
+
+ ret = sh_mipi_setup(mipi, pdata);
+ if (ret < 0)
+ goto mipi_display_on_fail2;
+
+ sh_mipi_dsi_enable(mipi, true);
+
+ if (mipi->next_display_on)
+ mipi->next_display_on(mipi->next_board_data, info);
+
+ return;
+
+mipi_display_on_fail1:
+ pm_runtime_put_sync(&mipi->pdev->dev);
+mipi_display_on_fail2:
+ pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
+}
+
+static void mipi_display_off(void *arg)
+{
+ struct sh_mipi *mipi = arg;
+ struct sh_mipi_dsi_info *pdata = mipi->pdev->dev.platform_data;
+
+ if (mipi->next_display_off)
+ mipi->next_display_off(mipi->next_board_data);
+
+ sh_mipi_dsi_enable(mipi, false);
+
+ pdata->set_dot_clock(mipi->pdev, mipi->base, 0);
+
+ pm_runtime_put_sync(&mipi->pdev->dev);
+}
+
static int __init sh_mipi_probe(struct platform_device *pdev)
{
struct sh_mipi *mipi;
struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
unsigned long rate, f_current;
int idx = pdev->id, ret;
- char dsip_clk[] = "dsi.p_clk";
if (!res || !res2 || idx >= ARRAY_SIZE(mipi_dsi) || !pdata)
return -ENODEV;
+ if (!pdata->set_dot_clock)
+ return -EINVAL;
+
mutex_lock(&array_lock);
if (idx < 0)
for (idx = 0; idx < ARRAY_SIZE(mipi_dsi) && mipi_dsi[idx]; idx++)
goto emap2;
}
- mipi->dev = &pdev->dev;
+ mipi->pdev = pdev;
mipi->dsit_clk = clk_get(&pdev->dev, "dsit_clk");
if (IS_ERR(mipi->dsit_clk)) {
dev_dbg(&pdev->dev, "DSI-T clk %lu -> %lu\n", f_current, rate);
- sprintf(dsip_clk, "dsi%1.1dp_clk", idx);
- mipi->dsip_clk = clk_get(&pdev->dev, dsip_clk);
- if (IS_ERR(mipi->dsip_clk)) {
- ret = PTR_ERR(mipi->dsip_clk);
- goto eclkpget;
- }
-
- f_current = clk_get_rate(mipi->dsip_clk);
- /* Between 10 and 50MHz */
- rate = clk_round_rate(mipi->dsip_clk, 24000000);
- if (rate > 0 && rate != f_current)
- ret = clk_set_rate(mipi->dsip_clk, rate);
- else
- ret = rate;
- if (ret < 0)
- goto esetprate;
-
- dev_dbg(&pdev->dev, "DSI-P clk %lu -> %lu\n", f_current, rate);
-
- msleep(10);
-
ret = clk_enable(mipi->dsit_clk);
if (ret < 0)
goto eclkton;
- ret = clk_enable(mipi->dsip_clk);
- if (ret < 0)
- goto eclkpon;
-
mipi_dsi[idx] = mipi;
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
- ret = sh_mipi_setup(mipi, pdata);
- if (ret < 0)
- goto emipisetup;
-
mutex_unlock(&array_lock);
platform_set_drvdata(pdev, mipi);
return 0;
-emipisetup:
- mipi_dsi[idx] = NULL;
- pm_runtime_disable(&pdev->dev);
- clk_disable(mipi->dsip_clk);
-eclkpon:
- clk_disable(mipi->dsit_clk);
eclkton:
-esetprate:
- clk_put(mipi->dsip_clk);
-eclkpget:
esettrate:
clk_put(mipi->dsit_clk);
eclktget:
pdata->lcd_chan->board_cfg.board_data = NULL;
pm_runtime_disable(&pdev->dev);
- clk_disable(mipi->dsip_clk);
clk_disable(mipi->dsit_clk);
clk_put(mipi->dsit_clk);
- clk_put(mipi->dsip_clk);
+
iounmap(mipi->linkbase);
if (res2)
release_mem_region(res2->start, resource_size(res2));
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
+#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/ioctl.h>
#include <linux/slab.h>
struct sh_mobile_lcdc_chan ch[2];
struct notifier_block notifier;
int started;
- int forced_bpp; /* 2 channel LCDC must share bpp setting */
+ int forced_fourcc; /* 2 channel LCDC must share fourcc setting */
struct sh_mobile_meram_info *meram_dev;
};
lcdc_sys_read_data,
};
+static int sh_mobile_format_fourcc(const struct fb_var_screeninfo *var)
+{
+ if (var->grayscale > 1)
+ return var->grayscale;
+
+ switch (var->bits_per_pixel) {
+ case 16:
+ return V4L2_PIX_FMT_RGB565;
+ case 24:
+ return V4L2_PIX_FMT_BGR24;
+ case 32:
+ return V4L2_PIX_FMT_BGR32;
+ default:
+ return 0;
+ }
+}
+
+static int sh_mobile_format_is_fourcc(const struct fb_var_screeninfo *var)
+{
+ return var->grayscale > 1;
+}
+
+static bool sh_mobile_format_is_yuv(const struct fb_var_screeninfo *var)
+{
+ if (var->grayscale <= 1)
+ return false;
+
+ switch (var->grayscale) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
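
With FB_CAP_FOURCC advertised, the driver reuses fb_var_screeninfo.grayscale (values greater than 1) to carry a V4L2 fourcc, as the helpers above show. A minimal user-space sketch of requesting NV12 scan-out through the standard fbdev ioctls (hedged: mode negotiation and most error handling are omitted):

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fb.h>
    #include <linux/videodev2.h>

    /* Ask an fbdev device that advertises FB_CAP_FOURCC for NV12 scan-out. */
    int request_nv12(const char *dev)
    {
            struct fb_var_screeninfo var;
            int ret, fd = open(dev, O_RDWR);

            if (fd < 0)
                    return -1;

            ret = ioctl(fd, FBIOGET_VSCREENINFO, &var);
            if (ret == 0) {
                    var.grayscale = V4L2_PIX_FMT_NV12; /* fourcc travels in 'grayscale' */
                    var.activate = FB_ACTIVATE_NOW;
                    ret = ioctl(fd, FBIOPUT_VSCREENINFO, &var);
            }

            close(fd);
            return ret;
    }
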
static void sh_mobile_lcdc_clk_on(struct sh_mobile_lcdc_priv *priv)
{
if (atomic_inc_and_test(&priv->hw_usecnt)) {
tmp = ((display_var->xres & 7) << 24) |
((display_h_total & 7) << 16) |
((display_var->hsync_len & 7) << 8) |
- hsync_pos;
+ (hsync_pos & 7);
lcdc_write_chan(ch, LDHAJR, tmp);
}
{
struct sh_mobile_lcdc_chan *ch;
unsigned long tmp;
- int bpp = 0;
int k, m;
/* Enable LCDC channels. Read data from external memory, avoid using the
if (!ch->enabled)
continue;
- if (!bpp)
- bpp = ch->info->var.bits_per_pixel;
-
/* Power supply */
lcdc_write_chan(ch, LDPMR, 0);
sh_mobile_lcdc_geometry(ch);
- if (ch->info->var.nonstd) {
- tmp = (ch->info->var.nonstd << 16);
- switch (ch->info->var.bits_per_pixel) {
- case 12:
- tmp |= LDDFR_YF_420;
- break;
- case 16:
- tmp |= LDDFR_YF_422;
- break;
- case 24:
- default:
- tmp |= LDDFR_YF_444;
- break;
- }
- } else {
- switch (ch->info->var.bits_per_pixel) {
- case 16:
- tmp = LDDFR_PKF_RGB16;
- break;
- case 24:
- tmp = LDDFR_PKF_RGB24;
+ switch (sh_mobile_format_fourcc(&ch->info->var)) {
+ case V4L2_PIX_FMT_RGB565:
+ tmp = LDDFR_PKF_RGB16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ tmp = LDDFR_PKF_RGB24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ tmp = LDDFR_PKF_ARGB32;
+ break;
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ tmp = LDDFR_CC | LDDFR_YF_420;
+ break;
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ tmp = LDDFR_CC | LDDFR_YF_422;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ tmp = LDDFR_CC | LDDFR_YF_444;
+ break;
+ }
+
+ if (sh_mobile_format_is_yuv(&ch->info->var)) {
+ switch (ch->info->var.colorspace) {
+ case V4L2_COLORSPACE_REC709:
+ tmp |= LDDFR_CF1;
break;
- case 32:
- default:
- tmp = LDDFR_PKF_ARGB32;
+ case V4L2_COLORSPACE_JPEG:
+ tmp |= LDDFR_CF0;
break;
}
}
lcdc_write_chan(ch, LDDFR, tmp);
lcdc_write_chan(ch, LDMLSR, ch->pitch);
lcdc_write_chan(ch, LDSA1R, ch->base_addr_y);
- if (ch->info->var.nonstd)
+ if (sh_mobile_format_is_yuv(&ch->info->var))
lcdc_write_chan(ch, LDSA2R, ch->base_addr_c);
/* When using deferred I/O mode, configure the LCDC for one-shot
}
/* Word and long word swap. */
- if (priv->ch[0].info->var.nonstd)
+ switch (sh_mobile_format_fourcc(&priv->ch[0].info->var)) {
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV61:
+ case V4L2_PIX_FMT_NV42:
+ tmp = LDDDSR_LS | LDDDSR_WS;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV24:
tmp = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
- else {
- switch (bpp) {
- case 16:
- tmp = LDDDSR_LS | LDDDSR_WS;
- break;
- case 24:
- tmp = LDDDSR_LS | LDDDSR_WS | LDDDSR_BS;
- break;
- case 32:
- default:
- tmp = LDDDSR_LS;
- break;
- }
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ default:
+ tmp = LDDDSR_LS;
+ break;
}
lcdc_write(priv, _LDDDSR, tmp);
ch->meram_enabled = 0;
}
- if (!ch->info->var.nonstd)
- pixelformat = SH_MOBILE_MERAM_PF_RGB;
- else if (ch->info->var.bits_per_pixel == 24)
- pixelformat = SH_MOBILE_MERAM_PF_NV24;
- else
+ switch (sh_mobile_format_fourcc(&ch->info->var)) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
pixelformat = SH_MOBILE_MERAM_PF_NV;
+ break;
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ pixelformat = SH_MOBILE_MERAM_PF_NV24;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_BGR32:
+ default:
+ pixelformat = SH_MOBILE_MERAM_PF_RGB;
+ break;
+ }
ret = mdev->ops->meram_register(mdev, cfg, ch->pitch,
ch->info->var.yres, pixelformat,
.xpanstep = 0,
.ypanstep = 1,
.ywrapstep = 0,
+ .capabilities = FB_CAP_FOURCC,
};
static void sh_mobile_lcdc_fillrect(struct fb_info *info,
unsigned long new_pan_offset;
unsigned long base_addr_y, base_addr_c;
unsigned long c_offset;
+ bool yuv = sh_mobile_format_is_yuv(&info->var);
- if (!info->var.nonstd)
+ if (!yuv)
new_pan_offset = var->yoffset * info->fix.line_length
+ var->xoffset * (info->var.bits_per_pixel / 8);
else
/* Set the source address for the next refresh */
base_addr_y = ch->dma_handle + new_pan_offset;
- if (info->var.nonstd) {
+ if (yuv) {
/* Set y offset */
c_offset = var->yoffset * info->fix.line_length
* (info->var.bits_per_pixel - 8) / 8;
+ info->var.xres * info->var.yres_virtual
+ c_offset;
/* Set x offset */
- if (info->var.bits_per_pixel == 24)
+ if (sh_mobile_format_fourcc(&info->var) == V4L2_PIX_FMT_NV24)
base_addr_c += 2 * var->xoffset;
else
base_addr_c += var->xoffset;
ch->base_addr_c = base_addr_c;
lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y);
- if (info->var.nonstd)
+ if (yuv)
lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c);
if (lcdc_chan_is_sublcd(ch))
if (var->yres_virtual < var->yres)
var->yres_virtual = var->yres;
- if (var->bits_per_pixel <= 16) { /* RGB 565 */
- var->bits_per_pixel = 16;
- var->red.offset = 11;
- var->red.length = 5;
- var->green.offset = 5;
- var->green.length = 6;
- var->blue.offset = 0;
- var->blue.length = 5;
- var->transp.offset = 0;
- var->transp.length = 0;
- } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
- var->bits_per_pixel = 24;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 0;
- var->transp.length = 0;
- } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
- var->bits_per_pixel = 32;
- var->red.offset = 16;
- var->red.length = 8;
- var->green.offset = 8;
- var->green.length = 8;
- var->blue.offset = 0;
- var->blue.length = 8;
- var->transp.offset = 24;
- var->transp.length = 8;
- } else
- return -EINVAL;
+ if (sh_mobile_format_is_fourcc(var)) {
+ switch (var->grayscale) {
+ case V4L2_PIX_FMT_NV12:
+ case V4L2_PIX_FMT_NV21:
+ var->bits_per_pixel = 12;
+ break;
+ case V4L2_PIX_FMT_RGB565:
+ case V4L2_PIX_FMT_NV16:
+ case V4L2_PIX_FMT_NV61:
+ var->bits_per_pixel = 16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ case V4L2_PIX_FMT_NV24:
+ case V4L2_PIX_FMT_NV42:
+ var->bits_per_pixel = 24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ var->bits_per_pixel = 32;
+ break;
+ default:
+ return -EINVAL;
+ }
- var->red.msb_right = 0;
- var->green.msb_right = 0;
- var->blue.msb_right = 0;
- var->transp.msb_right = 0;
+	/* Default to RGB and JPEG color-spaces for RGB and YUV formats
+	 * respectively.
+	 */
+ if (!sh_mobile_format_is_yuv(var))
+ var->colorspace = V4L2_COLORSPACE_SRGB;
+ else if (var->colorspace != V4L2_COLORSPACE_REC709)
+ var->colorspace = V4L2_COLORSPACE_JPEG;
+ } else {
+ if (var->bits_per_pixel <= 16) { /* RGB 565 */
+ var->bits_per_pixel = 16;
+ var->red.offset = 11;
+ var->red.length = 5;
+ var->green.offset = 5;
+ var->green.length = 6;
+ var->blue.offset = 0;
+ var->blue.length = 5;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 24) { /* RGB 888 */
+ var->bits_per_pixel = 24;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 0;
+ var->transp.length = 0;
+ } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */
+ var->bits_per_pixel = 32;
+ var->red.offset = 16;
+ var->red.length = 8;
+ var->green.offset = 8;
+ var->green.length = 8;
+ var->blue.offset = 0;
+ var->blue.length = 8;
+ var->transp.offset = 24;
+ var->transp.length = 8;
+ } else
+ return -EINVAL;
+
+ var->red.msb_right = 0;
+ var->green.msb_right = 0;
+ var->blue.msb_right = 0;
+ var->transp.msb_right = 0;
+ }
/* Make sure we don't exceed our allocated memory. */
if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 >
info->fix.smem_len)
return -EINVAL;
- /* only accept the forced_bpp for dual channel configurations */
- if (p->forced_bpp && p->forced_bpp != var->bits_per_pixel)
+ /* only accept the forced_fourcc for dual channel configurations */
+ if (p->forced_fourcc &&
+ p->forced_fourcc != sh_mobile_format_fourcc(var))
return -EINVAL;
return 0;
sh_mobile_lcdc_stop(ch->lcdc);
- if (info->var.nonstd)
+ if (sh_mobile_format_is_yuv(&info->var))
info->fix.line_length = info->var.xres;
else
info->fix.line_length = info->var.xres
info->fix.line_length = line_length;
}
+ if (sh_mobile_format_is_fourcc(&info->var)) {
+ info->fix.type = FB_TYPE_FOURCC;
+ info->fix.visual = FB_VISUAL_FOURCC;
+ } else {
+ info->fix.type = FB_TYPE_PACKED_PIXELS;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ }
+
return ret;
}
for (i = 0, mode = cfg->lcd_cfg; i < cfg->num_cfg; i++, mode++) {
unsigned int size = mode->yres * mode->xres;
- /* NV12 buffers must have even number of lines */
- if ((cfg->nonstd) && cfg->bpp == 12 &&
- (mode->yres & 0x1)) {
+ /* NV12/NV21 buffers must have even number of lines */
+ if ((cfg->fourcc == V4L2_PIX_FMT_NV12 ||
+ cfg->fourcc == V4L2_PIX_FMT_NV21) && (mode->yres & 0x1)) {
dev_err(dev, "yres must be multiple of 2 for YCbCr420 "
"mode.\n");
return -EINVAL;
dev_dbg(dev, "Found largest videomode %ux%u\n",
max_mode->xres, max_mode->yres);
- /* Initialize fixed screen information. Restrict pan to 2 lines steps
- * for NV12.
- */
- info->fix = sh_mobile_lcdc_fix;
- info->fix.smem_len = max_size * 2 * cfg->bpp / 8;
- if (cfg->nonstd && cfg->bpp == 12)
- info->fix.ypanstep = 2;
-
/* Create the mode list. */
if (cfg->lcd_cfg == NULL) {
mode = &default_720p;
*/
var = &info->var;
fb_videomode_to_var(var, mode);
- var->bits_per_pixel = cfg->bpp;
var->width = cfg->lcd_size_cfg.width;
var->height = cfg->lcd_size_cfg.height;
var->yres_virtual = var->yres * 2;
var->activate = FB_ACTIVATE_NOW;
+ switch (cfg->fourcc) {
+ case V4L2_PIX_FMT_RGB565:
+ var->bits_per_pixel = 16;
+ break;
+ case V4L2_PIX_FMT_BGR24:
+ var->bits_per_pixel = 24;
+ break;
+ case V4L2_PIX_FMT_BGR32:
+ var->bits_per_pixel = 32;
+ break;
+ default:
+ var->grayscale = cfg->fourcc;
+ break;
+ }
+
+ /* Make sure the memory size check won't fail. smem_len is initialized
+ * later based on var.
+ */
+ info->fix.smem_len = UINT_MAX;
ret = sh_mobile_check_var(var, info);
if (ret)
return ret;
+ max_size = max_size * var->bits_per_pixel / 8 * 2;
+
/* Allocate frame buffer memory and color map. */
- buf = dma_alloc_coherent(dev, info->fix.smem_len, &ch->dma_handle,
- GFP_KERNEL);
+ buf = dma_alloc_coherent(dev, max_size, &ch->dma_handle, GFP_KERNEL);
if (!buf) {
dev_err(dev, "unable to allocate buffer\n");
return -ENOMEM;
ret = fb_alloc_cmap(&info->cmap, PALETTE_NR, 0);
if (ret < 0) {
dev_err(dev, "unable to allocate cmap\n");
- dma_free_coherent(dev, info->fix.smem_len,
- buf, ch->dma_handle);
+ dma_free_coherent(dev, max_size, buf, ch->dma_handle);
return ret;
}
+	/* Initialize fixed screen information. Restrict pan to 2-line steps
+	 * for NV12 and NV21.
+ */
+ info->fix = sh_mobile_lcdc_fix;
info->fix.smem_start = ch->dma_handle;
- if (var->nonstd)
+ info->fix.smem_len = max_size;
+ if (cfg->fourcc == V4L2_PIX_FMT_NV12 ||
+ cfg->fourcc == V4L2_PIX_FMT_NV21)
+ info->fix.ypanstep = 2;
+
+ if (sh_mobile_format_is_yuv(var)) {
info->fix.line_length = var->xres;
- else
- info->fix.line_length = var->xres * (cfg->bpp / 8);
+ info->fix.visual = FB_VISUAL_FOURCC;
+ } else {
+ info->fix.line_length = var->xres * var->bits_per_pixel / 8;
+ info->fix.visual = FB_VISUAL_TRUECOLOR;
+ }
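
As a worked illustration of the buffer sizing and fix-screeninfo values produced above (a hypothetical 800x480 NV12 mode, double buffered; the numbers only exercise the arithmetic, they are not from a real platform):

    #include <stdio.h>

    int main(void)
    {
            int xres = 800, yres = 480;
            int bits_per_pixel = 12;                  /* NV12, as set by check_var  */

            int smem_len = xres * yres * bits_per_pixel / 8 * 2;  /* 1152000 bytes  */
            int line_length = xres;                   /* Y-plane pitch for YUV modes */
            int ypanstep = 2;                         /* NV12/NV21 need even y steps */

            printf("smem_len=%d line_length=%d ypanstep=%d\n",
                   smem_len, line_length, ypanstep);
            return 0;
    }
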
info->screen_base = buf;
info->device = dev;
goto err1;
}
- /* for dual channel LCDC (MAIN + SUB) force shared bpp setting */
+ /* for dual channel LCDC (MAIN + SUB) force shared format setting */
if (num_channels == 2)
- priv->forced_bpp = pdata->ch[0].bpp;
+ priv->forced_fourcc = pdata->ch[0].fourcc;
priv->base = ioremap_nocache(res->start, resource_size(res));
if (!priv->base)
if (error < 0)
goto err1;
- dev_info(info->dev,
- "registered %s/%s as %dx%d %dbpp.\n",
- pdev->name,
- (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
- "mainlcd" : "sublcd",
- info->var.xres, info->var.yres,
- ch->cfg.bpp);
+ dev_info(info->dev, "registered %s/%s as %dx%d %dbpp.\n",
+ pdev->name, (ch->cfg.chan == LCDC_CHAN_MAINLCD) ?
+ "mainlcd" : "sublcd", info->var.xres, info->var.yres,
+ info->var.bits_per_pixel);
/* deferred io mode: disable clock to save power */
if (info->fbdefio || info->state == FBINFO_STATE_SUSPENDED)
.remove = sh_mobile_lcdc_remove,
};
-static int __init sh_mobile_lcdc_init(void)
-{
- return platform_driver_register(&sh_mobile_lcdc_driver);
-}
-
-static void __exit sh_mobile_lcdc_exit(void)
-{
- platform_driver_unregister(&sh_mobile_lcdc_driver);
-}
-
-module_init(sh_mobile_lcdc_init);
-module_exit(sh_mobile_lcdc_exit);
+module_platform_driver(sh_mobile_lcdc_driver);
MODULE_DESCRIPTION("SuperH Mobile LCDC Framebuffer driver");
MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
.remove = sh_mobile_meram_remove,
};
-static int __init sh_mobile_meram_init(void)
-{
- return platform_driver_register(&sh_mobile_meram_driver);
-}
-
-static void __exit sh_mobile_meram_exit(void)
-{
- platform_driver_unregister(&sh_mobile_meram_driver);
-}
-
-module_init(sh_mobile_meram_init);
-module_exit(sh_mobile_meram_exit);
+module_platform_driver(sh_mobile_meram_driver);
MODULE_DESCRIPTION("SuperH Mobile MERAM driver");
MODULE_AUTHOR("Damian Hobson-Garcia / Takanari Hayama");
},
};
-static int __devinit sm501fb_init(void)
-{
- return platform_driver_register(&sm501fb_driver);
-}
-
-static void __exit sm501fb_cleanup(void)
-{
- platform_driver_unregister(&sm501fb_driver);
-}
-
-module_init(sm501fb_init);
-module_exit(sm501fb_cleanup);
+module_platform_driver(sm501fb_driver);
module_param_named(mode, fb_mode, charp, 0);
MODULE_PARM_DESC(mode,
},
};
-static int __init vt8500lcd_init(void)
-{
- return platform_driver_register(&vt8500lcd_driver);
-}
-
-static void __exit vt8500lcd_exit(void)
-{
- platform_driver_unregister(&vt8500lcd_driver);
-}
-
-module_init(vt8500lcd_init);
-module_exit(vt8500lcd_exit);
+module_platform_driver(vt8500lcd_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>");
MODULE_DESCRIPTION("LCD controller driver for VIA VT8500");
},
};
-int __init w100fb_init(void)
-{
- return platform_driver_register(&w100fb_driver);
-}
-
-void __exit w100fb_cleanup(void)
-{
- platform_driver_unregister(&w100fb_driver);
-}
-
-module_init(w100fb_init);
-module_exit(w100fb_cleanup);
+module_platform_driver(w100fb_driver);
MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver");
MODULE_LICENSE("GPL");
},
};
-static int __init wm8505fb_init(void)
-{
- return platform_driver_register(&wm8505fb_driver);
-}
-
-static void __exit wm8505fb_exit(void)
-{
- platform_driver_unregister(&wm8505fb_driver);
-}
-
-module_init(wm8505fb_init);
-module_exit(wm8505fb_exit);
+module_platform_driver(wm8505fb_driver);
MODULE_AUTHOR("Ed Spiridonov <edo.rus@gmail.com>");
MODULE_DESCRIPTION("Framebuffer driver for WMT WM8505");
},
};
-static int __init wmt_ge_rops_init(void)
-{
- return platform_driver_register(&wmt_ge_rops_driver);
-}
-
-static void __exit wmt_ge_rops_exit(void)
-{
- platform_driver_unregister(&wmt_ge_rops_driver);
-}
-
-module_init(wmt_ge_rops_init);
-module_exit(wmt_ge_rops_exit);
+module_platform_driver(wmt_ge_rops_driver);
MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com");
MODULE_DESCRIPTION("Accelerators for raster operations using "
},
};
-
-/* ---------------------------------------------------------------------
- * Module setup and teardown
- */
-
-static int __init
-xilinxfb_init(void)
-{
- return platform_driver_register(&xilinxfb_of_driver);
-}
-
-static void __exit
-xilinxfb_cleanup(void)
-{
- platform_driver_unregister(&xilinxfb_of_driver);
-}
-
-module_init(xilinxfb_init);
-module_exit(xilinxfb_cleanup);
+module_platform_driver(xilinxfb_of_driver);
MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
MODULE_DESCRIPTION("Xilinx TFT frame buffer driver");
/*
* Get IO TLB memory from any location.
*/
- xen_io_tlb_start = alloc_bootmem(bytes);
+ xen_io_tlb_start = alloc_bootmem_pages(PAGE_ALIGN(bytes));
if (!xen_io_tlb_start) {
m = "Cannot allocate Xen-SWIOTLB buffer!\n";
goto error;
bytes,
xen_io_tlb_nslabs);
if (rc) {
- free_bootmem(__pa(xen_io_tlb_start), bytes);
+ free_bootmem(__pa(xen_io_tlb_start), PAGE_ALIGN(bytes));
m = "Failed to get contiguous memory for DMA from Xen!\n"\
"You either: don't have the permissions, do not have"\
" enough free memory under 4GB, or the hypervisor memory"\
int idle;
};
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
/*
* btrfs_start_workers uses kthread_run, which can block waiting for memory
* for a very long time. It will actually throttle on page writeback,
{
struct worker_start *start;
start = container_of(work, struct worker_start, work);
- btrfs_start_workers(start->queue, 1);
+ __btrfs_start_workers(start->queue);
kfree(start);
}
-static int start_new_worker(struct btrfs_workers *queue)
-{
- struct worker_start *start;
- int ret;
-
- start = kzalloc(sizeof(*start), GFP_NOFS);
- if (!start)
- return -ENOMEM;
-
- start->work.func = start_new_worker_func;
- start->queue = queue;
- ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
- if (ret)
- kfree(start);
- return ret;
-}
-
/*
* helper function to move a thread onto the idle list after it
* has finished some requests.
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
struct btrfs_workers *workers = worker->workers;
+ struct worker_start *start;
unsigned long flags;
rmb();
if (!workers->atomic_start_pending)
return;
+ start = kzalloc(sizeof(*start), GFP_NOFS);
+ if (!start)
+ return;
+
+ start->work.func = start_new_worker_func;
+ start->queue = workers;
+
spin_lock_irqsave(&workers->lock, flags);
if (!workers->atomic_start_pending)
goto out;
workers->num_workers_starting += 1;
spin_unlock_irqrestore(&workers->lock, flags);
- start_new_worker(workers);
+ btrfs_queue_worker(workers->atomic_worker_start, &start->work);
return;
out:
+ kfree(start);
spin_unlock_irqrestore(&workers->lock, flags);
}
run_ordered_completions(worker->workers, work);
check_pending_worker_creates(worker);
-
+ cond_resched();
}
spin_lock_irq(&worker->lock);
* starts new worker threads. This does not enforce the max worker
* count in case you need to temporarily go past it.
*/
-static int __btrfs_start_workers(struct btrfs_workers *workers,
- int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
int ret = 0;
- int i;
- for (i = 0; i < num_workers; i++) {
- worker = kzalloc(sizeof(*worker), GFP_NOFS);
- if (!worker) {
- ret = -ENOMEM;
- goto fail;
- }
+ worker = kzalloc(sizeof(*worker), GFP_NOFS);
+ if (!worker) {
+ ret = -ENOMEM;
+ goto fail;
+ }
- INIT_LIST_HEAD(&worker->pending);
- INIT_LIST_HEAD(&worker->prio_pending);
- INIT_LIST_HEAD(&worker->worker_list);
- spin_lock_init(&worker->lock);
-
- atomic_set(&worker->num_pending, 0);
- atomic_set(&worker->refs, 1);
- worker->workers = workers;
- worker->task = kthread_run(worker_loop, worker,
- "btrfs-%s-%d", workers->name,
- workers->num_workers + i);
- if (IS_ERR(worker->task)) {
- ret = PTR_ERR(worker->task);
- kfree(worker);
- goto fail;
- }
- spin_lock_irq(&workers->lock);
- list_add_tail(&worker->worker_list, &workers->idle_list);
- worker->idle = 1;
- workers->num_workers++;
- workers->num_workers_starting--;
- WARN_ON(workers->num_workers_starting < 0);
- spin_unlock_irq(&workers->lock);
+ INIT_LIST_HEAD(&worker->pending);
+ INIT_LIST_HEAD(&worker->prio_pending);
+ INIT_LIST_HEAD(&worker->worker_list);
+ spin_lock_init(&worker->lock);
+
+ atomic_set(&worker->num_pending, 0);
+ atomic_set(&worker->refs, 1);
+ worker->workers = workers;
+ worker->task = kthread_run(worker_loop, worker,
+ "btrfs-%s-%d", workers->name,
+ workers->num_workers + 1);
+ if (IS_ERR(worker->task)) {
+ ret = PTR_ERR(worker->task);
+ kfree(worker);
+ goto fail;
}
+ spin_lock_irq(&workers->lock);
+ list_add_tail(&worker->worker_list, &workers->idle_list);
+ worker->idle = 1;
+ workers->num_workers++;
+ workers->num_workers_starting--;
+ WARN_ON(workers->num_workers_starting < 0);
+ spin_unlock_irq(&workers->lock);
+
return 0;
fail:
- btrfs_stop_workers(workers);
+ spin_lock_irq(&workers->lock);
+ workers->num_workers_starting--;
+ spin_unlock_irq(&workers->lock);
return ret;
}
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
{
spin_lock_irq(&workers->lock);
- workers->num_workers_starting += num_workers;
+ workers->num_workers_starting++;
spin_unlock_irq(&workers->lock);
- return __btrfs_start_workers(workers, num_workers);
+ return __btrfs_start_workers(workers);
}
/*
struct btrfs_worker_thread *worker;
unsigned long flags;
struct list_head *fallback;
+ int ret;
again:
spin_lock_irqsave(&workers->lock, flags);
workers->num_workers_starting++;
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
- __btrfs_start_workers(workers, 1);
+ ret = __btrfs_start_workers(workers);
+ if (ret)
+ goto fallback;
goto again;
}
}
/*
* places a struct btrfs_work into the pending queue of one of the kthreads
*/
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
struct btrfs_worker_thread *worker;
unsigned long flags;
/* don't requeue something already on a list */
if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
- goto out;
+ return;
worker = find_worker(workers);
if (workers->ordered) {
if (wake)
wake_up_process(worker->task);
spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
- return 0;
}
char *name;
};
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
+int btrfs_start_workers(struct btrfs_workers *workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
struct btrfs_workers *async_starter);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
-void btrfs_dirty_inode(struct inode *inode, int flags);
+int btrfs_dirty_inode(struct inode *inode);
+int btrfs_update_time(struct file *file);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
* Now if src_rsv == delalloc_block_rsv we'll let it just steal since
* we're accounted for.
*/
- if (!trans->bytes_reserved &&
- src_rsv != &root->fs_info->delalloc_block_rsv) {
+ if (!src_rsv || (!trans->bytes_reserved &&
+ src_rsv != &root->fs_info->delalloc_block_rsv)) {
ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
/*
* Since we're under a transaction reserve_metadata_bytes could
fs_info->endio_meta_write_workers.idle_thresh = 2;
fs_info->readahead_workers.idle_thresh = 2;
- btrfs_start_workers(&fs_info->workers, 1);
- btrfs_start_workers(&fs_info->generic_worker, 1);
- btrfs_start_workers(&fs_info->submit_workers, 1);
- btrfs_start_workers(&fs_info->delalloc_workers, 1);
- btrfs_start_workers(&fs_info->fixup_workers, 1);
- btrfs_start_workers(&fs_info->endio_workers, 1);
- btrfs_start_workers(&fs_info->endio_meta_workers, 1);
- btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
- btrfs_start_workers(&fs_info->endio_write_workers, 1);
- btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
- btrfs_start_workers(&fs_info->delayed_workers, 1);
- btrfs_start_workers(&fs_info->caching_workers, 1);
- btrfs_start_workers(&fs_info->readahead_workers, 1);
+ /*
+ * btrfs_start_workers can really only fail because of ENOMEM so just
+ * return -ENOMEM if any of these fail.
+ */
+ ret = btrfs_start_workers(&fs_info->workers);
+ ret |= btrfs_start_workers(&fs_info->generic_worker);
+ ret |= btrfs_start_workers(&fs_info->submit_workers);
+ ret |= btrfs_start_workers(&fs_info->delalloc_workers);
+ ret |= btrfs_start_workers(&fs_info->fixup_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_write_workers);
+ ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
+ ret |= btrfs_start_workers(&fs_info->delayed_workers);
+ ret |= btrfs_start_workers(&fs_info->caching_workers);
+ ret |= btrfs_start_workers(&fs_info->readahead_workers);
+ if (ret) {
+ ret = -ENOMEM;
+ goto fail_sb_buffer;
+ }
fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
btrfs_release_path(path);
out:
spin_lock(&block_group->lock);
- if (!ret)
+ if (!ret && dcs == BTRFS_DC_SETUP)
block_group->cache_generation = trans->transid;
block_group->disk_cache_state = dcs;
spin_unlock(&block_group->lock);
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
u64 to_reserve = 0;
+ u64 csum_bytes;
unsigned nr_extents = 0;
+ int extra_reserve = 0;
int flush = 1;
int ret;
+	/* Need to be holding the i_mutex here if we aren't the free space cache inode */
if (btrfs_is_free_space_inode(root, inode))
flush = 0;
+ else
+ WARN_ON(!mutex_is_locked(&inode->i_mutex));
if (flush && btrfs_transaction_in_commit(root->fs_info))
schedule_timeout(1);
BTRFS_I(inode)->outstanding_extents++;
if (BTRFS_I(inode)->outstanding_extents >
- BTRFS_I(inode)->reserved_extents) {
+ BTRFS_I(inode)->reserved_extents)
nr_extents = BTRFS_I(inode)->outstanding_extents -
BTRFS_I(inode)->reserved_extents;
- BTRFS_I(inode)->reserved_extents += nr_extents;
- }
/*
* Add an item to reserve for updating the inode when we complete the
*/
if (!BTRFS_I(inode)->delalloc_meta_reserved) {
nr_extents++;
- BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ extra_reserve = 1;
}
to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
+ csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);
ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
- to_free = calc_csum_metadata_size(inode, num_bytes, 0);
- spin_unlock(&BTRFS_I(inode)->lock);
- to_free += btrfs_calc_trans_metadata_size(root, dropped);
-
/*
- * Somebody could have come in and twiddled with the
- * reservation, so if we have to free more than we would have
- * reserved from this reservation go ahead and release those
- * bytes.
+		 * If the inode's csum_bytes is the same as the original
+		 * csum_bytes then we know we haven't raced with any free()ers
+		 * so we can just reduce our inode's csum bytes and carry on.
+ * Otherwise we have to do the normal free thing to account for
+ * the case that the free side didn't free up its reserve
+ * because of this outstanding reservation.
*/
- to_free -= to_reserve;
+ if (BTRFS_I(inode)->csum_bytes == csum_bytes)
+ calc_csum_metadata_size(inode, num_bytes, 0);
+ else
+ to_free = calc_csum_metadata_size(inode, num_bytes, 0);
+ spin_unlock(&BTRFS_I(inode)->lock);
+ if (dropped)
+ to_free += btrfs_calc_trans_metadata_size(root, dropped);
+
if (to_free)
btrfs_block_rsv_release(root, block_rsv, to_free);
return ret;
}
+ spin_lock(&BTRFS_I(inode)->lock);
+ if (extra_reserve) {
+ BTRFS_I(inode)->delalloc_meta_reserved = 1;
+ nr_extents--;
+ }
+ BTRFS_I(inode)->reserved_extents += nr_extents;
+ spin_unlock(&BTRFS_I(inode)->lock);
+
block_rsv_add_bytes(block_rsv, to_reserve, 1);
return 0;
struct btrfs_root *root = orig_root->fs_info->extent_root;
struct btrfs_free_cluster *last_ptr = NULL;
struct btrfs_block_group_cache *block_group = NULL;
+ struct btrfs_block_group_cache *used_block_group;
int empty_cluster = 2 * 1024 * 1024;
int allowed_chunk_alloc = 0;
int done_chunk_alloc = 0;
struct btrfs_space_info *space_info;
- int last_ptr_loop = 0;
int loop = 0;
int index = 0;
int alloc_type = (data & BTRFS_BLOCK_GROUP_DATA) ?
ideal_cache:
block_group = btrfs_lookup_block_group(root->fs_info,
search_start);
+ used_block_group = block_group;
/*
* we don't want to use the block group if it doesn't match our
* allocation bits, or if its not cached.
u64 offset;
int cached;
+ used_block_group = block_group;
btrfs_get_block_group(block_group);
search_start = block_group->key.objectid;
spin_unlock(&block_group->free_space_ctl->tree_lock);
/*
- * Ok we want to try and use the cluster allocator, so lets look
- * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
- * have tried the cluster allocator plenty of times at this
- * point and not have found anything, so we are likely way too
- * fragmented for the clustering stuff to find anything, so lets
- * just skip it and let the allocator find whatever block it can
- * find
+		 * OK, we want to try and use the cluster allocator, so
+		 * let's look there
*/
- if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
+ if (last_ptr) {
/*
* the refill lock keeps out other
* people trying to start a new cluster
*/
spin_lock(&last_ptr->refill_lock);
- if (!last_ptr->block_group ||
- last_ptr->block_group->ro ||
- !block_group_bits(last_ptr->block_group, data))
+ used_block_group = last_ptr->block_group;
+ if (used_block_group != block_group &&
+ (!used_block_group ||
+ used_block_group->ro ||
+ !block_group_bits(used_block_group, data))) {
+ used_block_group = block_group;
goto refill_cluster;
+ }
+
+ if (used_block_group != block_group)
+ btrfs_get_block_group(used_block_group);
- offset = btrfs_alloc_from_cluster(block_group, last_ptr,
- num_bytes, search_start);
+ offset = btrfs_alloc_from_cluster(used_block_group,
+ last_ptr, num_bytes, used_block_group->key.objectid);
if (offset) {
/* we have a block, we're done */
spin_unlock(&last_ptr->refill_lock);
goto checks;
}
- spin_lock(&last_ptr->lock);
- /*
- * whoops, this cluster doesn't actually point to
- * this block group. Get a ref on the block
- * group is does point to and try again
- */
- if (!last_ptr_loop && last_ptr->block_group &&
- last_ptr->block_group != block_group &&
- index <=
- get_block_group_index(last_ptr->block_group)) {
-
- btrfs_put_block_group(block_group);
- block_group = last_ptr->block_group;
- btrfs_get_block_group(block_group);
- spin_unlock(&last_ptr->lock);
- spin_unlock(&last_ptr->refill_lock);
-
- last_ptr_loop = 1;
- search_start = block_group->key.objectid;
- /*
- * we know this block group is properly
- * in the list because
- * btrfs_remove_block_group, drops the
- * cluster before it removes the block
- * group from the list
- */
- goto have_block_group;
+ WARN_ON(last_ptr->block_group != used_block_group);
+ if (used_block_group != block_group) {
+ btrfs_put_block_group(used_block_group);
+ used_block_group = block_group;
}
- spin_unlock(&last_ptr->lock);
refill_cluster:
+ BUG_ON(used_block_group != block_group);
+ /* If we are on LOOP_NO_EMPTY_SIZE, we can't
+			 * set up a new cluster, so let's just skip it
+ * and let the allocator find whatever block
+ * it can find. If we reach this point, we
+ * will have tried the cluster allocator
+ * plenty of times and not have found
+ * anything, so we are likely way too
+ * fragmented for the clustering stuff to find
+ * anything. */
+ if (loop >= LOOP_NO_EMPTY_SIZE) {
+ spin_unlock(&last_ptr->refill_lock);
+ goto unclustered_alloc;
+ }
+
/*
* this cluster didn't work out, free it and
* start over
*/
btrfs_return_cluster_to_free_space(NULL, last_ptr);
- last_ptr_loop = 0;
-
/* allocate a cluster in this block group */
ret = btrfs_find_space_cluster(trans, root,
block_group, last_ptr,
goto loop;
}
+unclustered_alloc:
offset = btrfs_find_space_for_alloc(block_group, search_start,
num_bytes, empty_size);
/*
search_start = stripe_align(root, offset);
/* move on to the next group */
if (search_start + num_bytes >= search_end) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
/* move on to the next group */
if (search_start + num_bytes >
- block_group->key.objectid + block_group->key.offset) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ used_block_group->key.objectid + used_block_group->key.offset) {
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
ins->offset = num_bytes;
if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
+ btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
- ret = btrfs_update_reserved_bytes(block_group, num_bytes,
+ ret = btrfs_update_reserved_bytes(used_block_group, num_bytes,
alloc_type);
if (ret == -EAGAIN) {
- btrfs_add_free_space(block_group, offset, num_bytes);
+ btrfs_add_free_space(used_block_group, offset, num_bytes);
goto loop;
}
ins->offset = num_bytes;
if (offset < search_start)
- btrfs_add_free_space(block_group, offset,
+ btrfs_add_free_space(used_block_group, offset,
search_start - offset);
BUG_ON(offset > search_start);
+ if (used_block_group != block_group)
+ btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
break;
loop:
failed_cluster_refill = false;
failed_alloc = false;
BUG_ON(index != get_block_group_index(block_group));
+ if (used_block_group != block_group)
+ btrfs_put_block_group(used_block_group);
btrfs_put_block_group(block_group);
}
up_read(&space_info->groups_sem);
node = tree_search(tree, start);
if (!node) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = insert_state(tree, prealloc, start, end, &bits);
prealloc = NULL;
BUG_ON(err == -EEXIST);
*/
if (state->start < start) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = split_state(tree, state, prealloc, start);
BUG_ON(err == -EEXIST);
prealloc = NULL;
this_end = last_start - 1;
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
/*
	 * Avoid freeing 'prealloc' if it can be merged with
*/
if (state->start <= end && state->end > end) {
prealloc = alloc_extent_state_atomic(prealloc);
- if (!prealloc)
- return -ENOMEM;
+ if (!prealloc) {
+ err = -ENOMEM;
+ goto out;
+ }
err = split_state(tree, state, prealloc, end + 1);
BUG_ON(err == -EEXIST);
nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
(sizeof(struct page *)));
+ nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+ nrptrs = max(nrptrs, 8);
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;
goto out;
}
- file_update_time(file);
+ err = btrfs_update_time(file);
+ if (err) {
+ mutex_unlock(&inode->i_mutex);
+ goto out;
+ }
BTRFS_I(inode)->sequence++;
start_pos = round_down(pos, root->sectorsize);
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
+#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
- BUG_ON(ret);
+ BUG_ON(ret && ret != -EEXIST);
}
/* insert an orphan item to track subvolume contains orphan files */
if (ret && ret != -ESTALE)
goto out;
+ if (ret == -ESTALE && root == root->fs_info->tree_root) {
+ struct btrfs_root *dead_root;
+ struct btrfs_fs_info *fs_info = root->fs_info;
+ int is_dead_root = 0;
+
+ /*
+ * this is an orphan in the tree root. Currently these
+ * could come from 2 sources:
+ * a) a snapshot deletion in progress
+ * b) a free space cache inode
+ * We need to distinguish those two, as the snapshot
+ * orphan must not get deleted.
+ * find_dead_roots already ran before us, so if this
+ * is a snapshot deletion, we should find the root
+ * in the dead_roots list
+ */
+ spin_lock(&fs_info->trans_lock);
+ list_for_each_entry(dead_root, &fs_info->dead_roots,
+ root_list) {
+ if (dead_root->root_key.objectid ==
+ found_key.objectid) {
+ is_dead_root = 1;
+ break;
+ }
+ }
+ spin_unlock(&fs_info->trans_lock);
+ if (is_dead_root) {
+ /* prevent this orphan from being found again */
+ key.offset = found_key.objectid - 1;
+ continue;
+ }
+ }
/*
* Inode is already gone but the orphan item is still there,
* kill the orphan item.
continue;
}
nr_truncate++;
+ /*
+			 * Need to hold the i_mutex for reservation purposes, not
+ * a huge deal here but I have a WARN_ON in
+ * btrfs_delalloc_reserve_space to catch offenders.
+ */
+ mutex_lock(&inode->i_mutex);
ret = btrfs_truncate(inode);
+ mutex_unlock(&inode->i_mutex);
} else {
nr_unlink++;
}
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;
- trans = btrfs_start_transaction(root, 2);
+ trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
break;
cur_offset + hole_size,
&hint_byte, 1);
if (err) {
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}
0, hole_size, 0, hole_size,
0, 0, 0);
if (err) {
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);
+ btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
}
free_extent_map(em);
static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
+ struct btrfs_root *root = BTRFS_I(inode)->root;
+ struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
int ret;
return 0;
if (newsize > oldsize) {
- i_size_write(inode, newsize);
- btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
truncate_pagecache(inode, oldsize, newsize);
ret = btrfs_cont_expand(inode, oldsize, newsize);
- if (ret) {
- btrfs_setsize(inode, oldsize);
+ if (ret)
return ret;
- }
- mark_inode_dirty(inode);
+ trans = btrfs_start_transaction(root, 1);
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
+
+ i_size_write(inode, newsize);
+ btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
+ ret = btrfs_update_inode(trans, root, inode);
+ btrfs_end_transaction_throttle(trans, root);
} else {
/*
if (attr->ia_valid) {
setattr_copy(inode, attr);
- mark_inode_dirty(inode);
+ err = btrfs_dirty_inode(inode);
- if (attr->ia_valid & ATTR_MODE)
+ if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
-void btrfs_dirty_inode(struct inode *inode, int flags)
+int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;
if (BTRFS_I(inode)->dummy_inode)
- return;
+ return 0;
trans = btrfs_join_transaction(root);
- BUG_ON(IS_ERR(trans));
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
- if (IS_ERR(trans)) {
- printk_ratelimited(KERN_ERR "btrfs: fail to "
- "dirty inode %llu error %ld\n",
- (unsigned long long)btrfs_ino(inode),
- PTR_ERR(trans));
- return;
- }
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
ret = btrfs_update_inode(trans, root, inode);
- if (ret) {
- printk_ratelimited(KERN_ERR "btrfs: fail to "
- "dirty inode %llu error %d\n",
- (unsigned long long)btrfs_ino(inode),
- ret);
- }
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);
+
+ return ret;
+}
+
+/*
+ * This is a copy of file_update_time.  We need it so that errors (such as
+ * ENOSPC) from updating the inode can be returned to the file write and mmap
+ * write paths.
+ */
+int btrfs_update_time(struct file *file)
+{
+ struct inode *inode = file->f_path.dentry->d_inode;
+ struct timespec now;
+ int ret;
+ enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+
+ /* First try to exhaust all avenues to not sync */
+ if (IS_NOCMTIME(inode))
+ return 0;
+
+ now = current_fs_time(inode->i_sb);
+ if (!timespec_equal(&inode->i_mtime, &now))
+ sync_it = S_MTIME;
+
+ if (!timespec_equal(&inode->i_ctime, &now))
+ sync_it |= S_CTIME;
+
+ if (IS_I_VERSION(inode))
+ sync_it |= S_VERSION;
+
+ if (!sync_it)
+ return 0;
+
+ /* Finally allowed to write? Takes lock. */
+ if (mnt_want_write_file(file))
+ return 0;
+
+ /* Only change inode inside the lock region */
+ if (sync_it & S_VERSION)
+ inode_inc_iversion(inode);
+ if (sync_it & S_CTIME)
+ inode->i_ctime = now;
+ if (sync_it & S_MTIME)
+ inode->i_mtime = now;
+ ret = btrfs_dirty_inode(inode);
+ if (!ret)
+ mark_inode_dirty_sync(inode);
+ mnt_drop_write(file->f_path.mnt);
+ return ret;
}
/*
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+
+ inode->i_op = &btrfs_special_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
- inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
}
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
out_unlock:
u64 page_start;
u64 page_end;
+ /* Need this to keep space reservations serialized */
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
+ mutex_unlock(&inode->i_mutex);
+ if (!ret)
+ ret = btrfs_update_time(vma->vm_file);
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;
/* Just need the 1 for updating the inode */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
- err = PTR_ERR(trans);
- goto out;
+ ret = err = PTR_ERR(trans);
+ trans = NULL;
+ break;
}
}
goto out_unlock;
}
+ /*
+ * If the active LSM wants to access the inode during
+ * d_instantiate it needs these. Smack checks to see
+ * if the filesystem supports xattrs by looking at the
+ * ops vector.
+ */
+ inode->i_fop = &btrfs_file_operations;
+ inode->i_op = &btrfs_file_inode_operations;
+
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
- inode->i_fop = &btrfs_file_operations;
- inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
if (drop_inode)
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
+ .setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
+ btrfs_update_iflags(inode);
+ inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);
- btrfs_update_iflags(inode);
- inode->i_ctime = CURRENT_TIME;
btrfs_end_transaction(trans, root);
mnt_drop_write(file->f_path.mnt);
return 0;
file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode,
num_pages << PAGE_CACHE_SHIFT);
+ mutex_unlock(&inode->i_mutex);
if (ret)
return ret;
again:
index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
while (index <= last_index) {
+ mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
+ mutex_unlock(&inode->i_mutex);
if (ret)
goto out;
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
+ int ret = 0;
mutex_lock(&fs_info->scrub_lock);
if (fs_info->scrub_workers_refcnt == 0) {
btrfs_init_workers(&fs_info->scrub_workers, "scrub",
fs_info->thread_pool_size, &fs_info->generic_worker);
fs_info->scrub_workers.idle_thresh = 4;
- btrfs_start_workers(&fs_info->scrub_workers, 1);
+ ret = btrfs_start_workers(&fs_info->scrub_workers);
+ if (ret)
+ goto out;
}
++fs_info->scrub_workers_refcnt;
+out:
mutex_unlock(&fs_info->scrub_lock);
- return 0;
+ return ret;
}
static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/mnt_namespace.h>
+#include <linux/ratelimit.h>
#include "compat.h"
#include "delayed-inode.h"
#include "ctree.h"
u64 avail_space;
u64 used_space;
u64 min_stripe_size;
- int min_stripes = 1;
+ int min_stripes = 1, num_stripes = 1;
int i = 0, nr_devices;
int ret;
	/* calc min stripe number for data space allocation */
type = btrfs_get_alloc_profile(root, 1);
- if (type & BTRFS_BLOCK_GROUP_RAID0)
+ if (type & BTRFS_BLOCK_GROUP_RAID0) {
min_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID1)
+ num_stripes = nr_devices;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID1) {
min_stripes = 2;
- else if (type & BTRFS_BLOCK_GROUP_RAID10)
+ num_stripes = 2;
+ } else if (type & BTRFS_BLOCK_GROUP_RAID10) {
min_stripes = 4;
+ num_stripes = 4;
+ }
if (type & BTRFS_BLOCK_GROUP_DUP)
min_stripe_size = 2 * BTRFS_STRIPE_LEN;
i = nr_devices - 1;
avail_space = 0;
while (nr_devices >= min_stripes) {
+ if (num_stripes > nr_devices)
+ num_stripes = nr_devices;
+
if (devices_info[i].max_avail >= min_stripe_size) {
int j;
u64 alloc_size;
- avail_space += devices_info[i].max_avail * min_stripes;
+ avail_space += devices_info[i].max_avail * num_stripes;
alloc_size = devices_info[i].max_avail;
- for (j = i + 1 - min_stripes; j <= i; j++)
+ for (j = i + 1 - num_stripes; j <= i; j++)
devices_info[j].max_avail -= alloc_size;
}
i--;
return 0;
}
+static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
+{
+ int ret;
+
+ ret = btrfs_dirty_inode(inode);
+ if (ret)
+ printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
+ "error %d\n", btrfs_ino(inode), ret);
+}
+
static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,
.sync_fs = btrfs_sync_fs,
.show_options = btrfs_show_options,
.write_inode = btrfs_write_inode,
- .dirty_inode = btrfs_dirty_inode,
+ .dirty_inode = btrfs_fs_dirty_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
btrfs_requeue_work(&device->work);
goto done;
}
+ /* unplug every 64 requests just for good measure */
+ if (batch_run % 64 == 0) {
+ blk_finish_plug(&plug);
+ blk_start_plug(&plug);
+ sync_pending = 0;
+ }
}
cond_resched();
if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
return -EINVAL;
- bdev = blkdev_get_by_path(device_path, FMODE_EXCL,
+ bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
root->fs_info->bdev_holder);
if (IS_ERR(bdev))
return PTR_ERR(bdev);
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
err = -EIO;
- } else if (err) {
+ } else {
/*
* this bio is actually up to date, we didn't
* go over the max number of errors
snapc = ceph_get_snap_context(ci->i_snap_realm->cached_context);
/* dirty the head */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_head_snapc == NULL)
ci->i_head_snapc = ceph_get_snap_context(snapc);
++ci->i_wrbuffer_ref_head;
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
snapc, snapc->seq, snapc->num_snaps);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* now adjust page */
spin_lock_irq(&mapping->tree_lock);
struct ceph_snap_context *snapc = NULL;
struct ceph_cap_snap *capsnap = NULL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
capsnap->context, capsnap->dirty_pages);
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return snapc;
}
/*
* Find ceph_cap for given mds, if any.
*
- * Called with i_lock held.
+ * Called with i_ceph_lock held.
*/
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
{
struct ceph_cap *cap;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return cap;
}
int ceph_get_cap_mds(struct inode *inode)
{
+ struct ceph_inode_info *ci = ceph_inode(inode);
int mds;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
mds = __ceph_get_cap_mds(ceph_inode(inode));
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return mds;
}
/*
- * Called under i_lock.
+ * Called under i_ceph_lock.
*/
static void __insert_cap_node(struct ceph_inode_info *ci,
struct ceph_cap *new)
*
* If I_FLUSH is set, leave the inode at the front of the list.
*
- * Caller holds i_lock
+ * Caller holds i_ceph_lock
* -> we take mdsc->cap_delay_lock
*/
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
/*
* Cancel delayed work on cap.
*
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
*/
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
struct ceph_inode_info *ci)
wanted |= ceph_caps_for_mode(fmode);
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
if (new_cap) {
cap = new_cap;
new_cap = NULL;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
new_cap = get_cap(mdsc, caps_reservation);
if (new_cap == NULL)
return -ENOMEM;
if (fmode >= 0)
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
wake_up_all(&ci->i_cap_wq);
return 0;
}
struct rb_node *p;
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
cap = rb_entry(p, struct ceph_cap, ci_node);
if (__cap_is_valid(cap) &&
break;
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("ceph_caps_revoking %p %s = %d\n", inode,
ceph_cap_string(mask), ret);
return ret;
}
/*
- * called under i_lock
+ * called under i_ceph_lock
*/
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
/*
* Remove a cap. Take steps to deal with a racing iterate_session_caps.
*
- * caller should hold i_lock.
+ * caller should hold i_ceph_lock.
* caller will not hold session s_mutex if called from destroy_inode.
*/
void __ceph_remove_cap(struct ceph_cap *cap)
/*
* Queue cap releases when an inode is dropped from our cache. Since
- * inode is about to be destroyed, there is no need for i_lock.
+ * inode is about to be destroyed, there is no need for i_ceph_lock.
*/
void ceph_queue_caps_release(struct inode *inode)
{
/*
* Send a cap msg on the given inode. Update our caps state, then
- * drop i_lock and send the message.
+ * drop i_ceph_lock and send the message.
*
* Make note of max_size reported/requested from mds, revoked caps
* that have now been implemented.
* Return non-zero if delayed release, or we experienced an error
* such that the caller should requeue + retry later.
*
- * called with i_lock, then drops it.
+ * called with i_ceph_lock, then drops it.
* caller should hold snap_rwsem (read), s_mutex.
*/
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
int op, int used, int want, int retain, int flushing,
unsigned *pflush_tid)
- __releases(cap->ci->vfs_inode->i_lock)
+ __releases(cap->ci->i_ceph_lock)
{
struct ceph_inode_info *ci = cap->ci;
struct inode *inode = &ci->vfs_inode;
xattr_version = ci->i_xattrs.version;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
op, keep, want, flushing, seq, flush_tid, issue_seq, mseq,
* Unless @again is true, skip cap_snaps that were already sent to
* the MDS (i.e., during this session).
*
- * Called under i_lock. Takes s_mutex as needed.
+ * Called under i_ceph_lock. Takes s_mutex as needed.
*/
void __ceph_flush_snaps(struct ceph_inode_info *ci,
struct ceph_mds_session **psession,
int again)
- __releases(ci->vfs_inode->i_lock)
- __acquires(ci->vfs_inode->i_lock)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
struct inode *inode = &ci->vfs_inode;
int mds;
session = NULL;
}
if (!session) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
mutex_lock(&mdsc->mutex);
session = __ceph_lookup_mds_session(mdsc, mds);
mutex_unlock(&mdsc->mutex);
* deletion or migration. retry, and we'll
* get a better @mds value next time.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
goto retry;
}
list_del_init(&capsnap->flushing_item);
list_add_tail(&capsnap->flushing_item,
&session->s_cap_snaps_flushing);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("flush_snaps %p cap_snap %p follows %lld tid %llu\n",
inode, capsnap, capsnap->follows, capsnap->flush_tid);
next_follows = capsnap->follows + 1;
ceph_put_cap_snap(capsnap);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
goto retry;
}
static void ceph_flush_snaps(struct ceph_inode_info *ci)
{
- struct inode *inode = &ci->vfs_inode;
-
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_flush_snaps(ci, NULL, 0);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
* Add dirty inode to the flushing list. Assigned a seq number so we
* can wait for caps to flush without starving.
*
- * Called under i_lock.
+ * Called under i_ceph_lock.
*/
static int __mark_caps_flushing(struct inode *inode,
struct ceph_mds_session *session)
struct ceph_inode_info *ci = ceph_inode(inode);
u32 invalidating_gen = ci->i_rdcache_gen;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
invalidate_mapping_pages(&inode->i_data, 0, -1);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (inode->i_data.nrpages == 0 &&
invalidating_gen == ci->i_rdcache_gen) {
if (mdsc->stopping)
is_delayed = 1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_FLUSH)
flags |= CHECK_CAPS_FLUSH;
__ceph_flush_snaps(ci, &session, 0);
goto retry_locked;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
retry_locked:
file_wanted = __ceph_caps_file_wanted(ci);
used = __ceph_caps_used(ci);
if (mutex_trylock(&session->s_mutex) == 0) {
dout("inverting session/ino locks on %p\n",
session);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (took_snap_rwsem) {
up_read(&mdsc->snap_rwsem);
took_snap_rwsem = 0;
if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
dout("inverting snap/in locks on %p\n",
inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
down_read(&mdsc->snap_rwsem);
took_snap_rwsem = 1;
goto retry;
mds = cap->mds; /* remember mds, so we don't repeat */
sent++;
- /* __send_cap drops i_lock */
+ /* __send_cap drops i_ceph_lock */
delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, used, want,
retain, flushing, NULL);
- goto retry; /* retake i_lock and restart our cap scan. */
+ goto retry; /* retake i_ceph_lock and restart our cap scan. */
}
/*
else if (!is_delayed || force_requeue)
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (queue_invalidate)
ceph_queue_invalidate(inode);
int flushing = 0;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
goto out;
int delayed;
if (!session) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
session = cap->session;
mutex_lock(&session->s_mutex);
goto retry;
flushing = __mark_caps_flushing(inode, session);
- /* __send_cap drops i_lock */
+ /* __send_cap drops i_ceph_lock */
delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
cap->issued | cap->implemented, flushing,
flush_tid);
if (!delayed)
goto out_unlocked;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
out_unlocked:
if (session && unlock_session)
mutex_unlock(&session->s_mutex);
struct ceph_inode_info *ci = ceph_inode(inode);
int i, ret = 1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
for (i = 0; i < CEPH_CAP_BITS; i++)
if ((ci->i_flushing_caps & (1 << i)) &&
ci->i_cap_flush_tid[i] <= tid) {
ret = 0;
break;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
struct ceph_mds_client *mdsc =
ceph_sb_to_client(inode->i_sb)->mdsc;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (__ceph_caps_dirty(ci))
__cap_delay_requeue_front(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
return err;
}
struct inode *inode = &ci->vfs_inode;
struct ceph_cap *cap;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (cap && cap->session == session) {
dout("kick_flushing_caps %p cap %p capsnap %p\n", inode,
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
struct ceph_cap *cap;
int delayed = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
if (cap && cap->session == session) {
dout("kick_flushing_caps %p cap %p %s\n", inode,
cap->issued | cap->implemented,
ci->i_flushing_caps, NULL);
if (delayed) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else {
pr_err("%p auth cap %p not mds%d ???\n", inode,
cap, session->s_mds);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
}
struct ceph_cap *cap;
int delayed = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = ci->i_auth_cap;
dout("kick_flushing_inode_caps %p flushing %s flush_seq %lld\n", inode,
ceph_cap_string(ci->i_flushing_caps), ci->i_cap_flush_seq);
cap->issued | cap->implemented,
ci->i_flushing_caps, NULL);
if (delayed) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__cap_delay_requeue(mdsc, ci);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
}
* Take references to capabilities we hold, so that we don't release
* them to the MDS prematurely.
*
- * Protected by i_lock.
+ * Protected by i_ceph_lock.
*/
static void __take_cap_refs(struct ceph_inode_info *ci, int got)
{
dout("get_cap_refs %p need %s want %s\n", inode,
ceph_cap_string(need), ceph_cap_string(want));
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/* make sure file is actually open */
file_wanted = __ceph_caps_file_wanted(ci);
ceph_cap_string(have), ceph_cap_string(need));
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("get_cap_refs %p ret %d got %s\n", inode,
ret, ceph_cap_string(*got));
return ret;
int check = 0;
/* do we need to explicitly request a larger max_size? */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if ((endoff >= ci->i_max_size ||
endoff > (inode->i_size << 1)) &&
endoff > ci->i_wanted_max_size) {
ci->i_wanted_max_size = endoff;
check = 1;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (check)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
*/
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
__take_cap_refs(ci, caps);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
int last = 0, put = 0, flushsnaps = 0, wake = 0;
struct ceph_cap_snap *capsnap;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (had & CEPH_CAP_PIN)
--ci->i_pin_ref;
if (had & CEPH_CAP_FILE_RD)
}
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
last ? " last" : "", put ? " put" : "");
int found = 0;
struct ceph_cap_snap *capsnap = NULL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_wrbuffer_ref -= nr;
last = !ci->i_wrbuffer_ref;
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (last) {
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
* Handle a cap GRANT message from the MDS. (Note that a GRANT may
* actually be a revocation if it specifies a smaller cap set.)
*
- * caller holds s_mutex and i_lock, we drop both.
+ * caller holds s_mutex and i_ceph_lock, we drop both.
*
* return value:
* 0 - ok
struct ceph_mds_session *session,
struct ceph_cap *cap,
struct ceph_buffer *xattr_buf)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
}
BUG_ON(cap->issued & ~cap->implemented);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (writeback)
/*
* queue inode for writeback: we can't actually call
struct ceph_mds_caps *m,
struct ceph_mds_session *session,
struct ceph_cap *cap)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
wake_up_all(&ci->i_cap_wq);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (drop)
iput(inode);
}
dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
inode, ci, session->s_mds, follows);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
if (capsnap->follows == follows) {
if (capsnap->flush_tid != flush_tid) {
capsnap, capsnap->follows);
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (drop)
iput(inode);
}
static void handle_cap_trunc(struct inode *inode,
struct ceph_mds_caps *trunc,
struct ceph_mds_session *session)
- __releases(inode->i_lock)
+ __releases(ci->i_ceph_lock)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
inode, mds, seq, truncate_size, truncate_seq);
queue_trunc = ceph_fill_file_size(inode, issued,
truncate_seq, truncate_size, size);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (queue_trunc)
ceph_queue_vmtruncate(inode);
dout("handle_cap_export inode %p ci %p mds%d mseq %d\n",
inode, ci, mds, mseq);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/* make sure we haven't seen a higher mseq */
for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
}
/* else, we already released it */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
up_read(&mdsc->snap_rwsem);
/* make sure we re-request max_size, if necessary */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_requested_max_size = 0;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
struct ceph_mds_client *mdsc = session->s_mdsc;
struct super_block *sb = mdsc->fsc->sb;
struct inode *inode;
+ struct ceph_inode_info *ci;
struct ceph_cap *cap;
struct ceph_mds_caps *h;
int mds = session->s_mds;
/* lookup ino */
inode = ceph_find_inode(sb, vino);
+ ci = ceph_inode(inode);
dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
vino.snap, inode);
if (!inode) {
}
/* the rest require a cap */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ceph_inode(inode), mds);
if (!cap) {
dout(" no cap on %p ino %llx.%llx from mds%d\n",
inode, ceph_ino(inode), ceph_snap(inode), mds);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto flush_cap_releases;
}
- /* note that each of these drops i_lock for us */
+ /* note that each of these drops i_ceph_lock for us */
switch (op) {
case CEPH_CAP_OP_REVOKE:
case CEPH_CAP_OP_GRANT:
break;
default:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
ceph_cap_op_name(op));
}
struct inode *inode = &ci->vfs_inode;
int last = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("put_fmode %p fmode %d %d -> %d\n", inode, fmode,
ci->i_nr_by_mode[fmode], ci->i_nr_by_mode[fmode]-1);
BUG_ON(ci->i_nr_by_mode[fmode] == 0);
if (--ci->i_nr_by_mode[fmode] == 0)
last++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (last && ci->i_vino.snap == CEPH_NOSNAP)
ceph_check_caps(ci, 0, NULL);
int used, dirty;
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
inode, cap, ceph_cap_string(cap->issued));
}
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
/*
 * force a record for the directory caps if we have a dentry lease.
- * this is racy (can't take i_lock and d_lock together), but it
+ * this is racy (can't take i_ceph_lock and d_lock together), but it
* doesn't have to be perfect; the mds will revoke anything we don't
* release.
*/
}
/* can we use the dcache? */
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if ((filp->f_pos == 2 || fi->dentry) &&
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
ceph_dir_test_complete(inode) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = __dcache_readdir(filp, dirent, filldir);
if (err != -EAGAIN)
return err;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
if (fi->dentry) {
err = note_last_dentry(fi, fi->dentry->d_name.name,
* were released during the whole readdir, and we should have
* the complete dir contents in our cache.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_release_count == fi->dir_release_count) {
ceph_dir_set_complete(inode);
ci->i_max_offset = filp->f_pos;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("readdir %p filp %p done.\n", inode, filp);
return 0;
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
- spin_lock(&dir->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
fsc->mount_options->snapdir_name,
!is_root_ceph_dentry(dir, dentry) &&
ceph_dir_test_complete(dir) &&
(__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout(" dir %p complete, -ENOENT\n", dir);
d_add(dentry, NULL);
di->lease_shared_gen = ci->i_shared_gen;
return NULL;
}
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
op = ceph_snap(dir) == CEPH_SNAPDIR ?
struct ceph_inode_info *ci = ceph_inode(inode);
int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (inode->i_nlink == 1) {
drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
ci->i_ceph_flags |= CEPH_I_NODELAY;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return drop;
}
struct ceph_dentry_info *di = ceph_dentry(dentry);
int valid = 0;
- spin_lock(&dir->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_shared_gen == di->lease_shared_gen)
valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
- spin_unlock(&dir->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
dir, (unsigned)ci->i_shared_gen, dentry,
(unsigned)di->lease_shared_gen, valid);
/* trivially open snapdir */
if (ceph_snap(inode) == CEPH_SNAPDIR) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ceph_init_file(inode, file, fmode);
}
* write) or any MDS (for read). Update wanted set
* asynchronously.
*/
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (__ceph_is_any_real_caps(ci) &&
(((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
int mds_wanted = __ceph_caps_mds_wanted(ci);
inode, fmode, ceph_cap_string(wanted),
ceph_cap_string(issued));
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* adjust wanted? */
if ((issued & wanted) != wanted &&
} else if (ceph_snap(inode) != CEPH_NOSNAP &&
(ci->i_snap_caps & wanted) == wanted) {
__ceph_get_fmode(ci, fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ceph_init_file(inode, file, fmode);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
req = prepare_open_request(inode->i_sb, flags, 0);
*/
int dirty;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_put_cap_refs(ci, got);
ret = generic_file_aio_write(iocb, iov, nr_segs, pos);
if (ret >= 0) {
int dirty;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
}
mutex_lock(&inode->i_mutex);
__ceph_do_pending_vmtruncate(inode);
- if (origin != SEEK_CUR || origin != SEEK_SET) {
+
+ if (origin == SEEK_END || origin == SEEK_DATA || origin == SEEK_HOLE) {
ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE);
if (ret < 0) {
offset = ret;
dout("alloc_inode %p\n", &ci->vfs_inode);
+ spin_lock_init(&ci->i_ceph_lock);
+
ci->i_version = 0;
ci->i_time_warp_seq = 0;
ci->i_ceph_flags = 0;
iinfo->xattr_len);
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
/*
* provided version will be odd if inode value is projected,
char *sym;
BUG_ON(symlen != inode->i_size);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = -ENOMEM;
sym = kmalloc(symlen+1, GFP_NOFS);
memcpy(sym, iinfo->symlink, symlen);
sym[symlen] = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ci->i_symlink)
ci->i_symlink = sym;
else
}
no_change:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* queue truncate if we saw i_size decrease */
if (queue_trunc)
info->cap.flags,
caps_reservation);
} else {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout(" %p got snap_caps %s\n", inode,
ceph_cap_string(le32_to_cpu(info->cap.caps)));
ci->i_snap_caps |= le32_to_cpu(info->cap.caps);
if (cap_fmode >= 0)
__ceph_get_fmode(ci, cap_fmode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
} else if (cap_fmode >= 0) {
pr_warning("mds issued no caps on %llx.%llx\n",
{
struct dentry *dir = dn->d_parent;
struct inode *inode = dir->d_inode;
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_dentry_info *di;
BUG_ON(!inode);
di = ceph_dentry(dn);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ceph_dir_test_complete(inode)) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return;
}
di->offset = ceph_inode(inode)->i_max_offset++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
spin_lock(&dir->d_lock);
spin_lock_nested(&dn->d_lock, DENTRY_D_LOCK_NESTED);
struct ceph_inode_info *ci = ceph_inode(inode);
int ret = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("set_size %p %llu -> %llu\n", inode, inode->i_size, size);
inode->i_size = size;
inode->i_blocks = (size + (1 << 9) - 1) >> 9;
(ci->i_reported_size << 1) < ci->i_max_size)
ret = 1;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return ret;
}
u32 orig_gen;
int check = 0;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("invalidate_pages %p gen %d revoking %d\n", inode,
ci->i_rdcache_gen, ci->i_rdcache_revoking);
if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
/* nevermind! */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto out;
}
orig_gen = ci->i_rdcache_gen;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
truncate_inode_pages(&inode->i_data, 0);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (orig_gen == ci->i_rdcache_gen &&
orig_gen == ci->i_rdcache_revoking) {
dout("invalidate_pages %p gen %d successful\n", inode,
inode, orig_gen, ci->i_rdcache_gen,
ci->i_rdcache_revoking);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (check)
ceph_check_caps(ci, 0, NULL);
int wrbuffer_refs, wake = 0;
retry:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_truncate_pending == 0) {
dout("__do_pending_vmtruncate %p none pending\n", inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return;
}
if (ci->i_wrbuffer_ref_head < ci->i_wrbuffer_ref) {
dout("__do_pending_vmtruncate %p flushing snaps first\n",
inode);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
filemap_write_and_wait_range(&inode->i_data, 0,
inode->i_sb->s_maxbytes);
goto retry;
wrbuffer_refs = ci->i_wrbuffer_ref;
dout("__do_pending_vmtruncate %p (%d) to %lld\n", inode,
ci->i_truncate_pending, to);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
truncate_inode_pages(inode->i_mapping, to);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_truncate_pending--;
if (ci->i_truncate_pending == 0)
wake = 1;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (wrbuffer_refs == 0)
ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
if (IS_ERR(req))
return PTR_ERR(req);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
issued = __ceph_caps_issued(ci, NULL);
dout("setattr %p issued %s\n", inode, ceph_cap_string(issued));
}
release &= issued;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
__ceph_do_pending_vmtruncate(inode);
return err;
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_mdsc_put_request(req);
return err;
}
struct ceph_inode_info *ci = ceph_inode(inode);
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_nr_by_mode[fi->fmode]--;
fi->fmode |= CEPH_FILE_MODE_LAZY;
ci->i_nr_by_mode[fi->fmode]++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout("ioctl_layzio: file %p marked lazy\n", file);
ceph_check_caps(ci, 0, NULL);
}
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap = NULL;
if (mode == USE_AUTH_MDS)
cap = ci->i_auth_cap;
if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
if (!cap) {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
goto random;
}
mds = cap->session->s_mds;
dout("choose_mds %p %llx.%llx mds%d (%scap %p)\n",
inode, ceph_vinop(inode), mds,
cap == ci->i_auth_cap ? "auth " : "", cap);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return mds;
random:
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_remove_cap(cap);
if (!__ceph_is_any_real_caps(ci)) {
struct ceph_mds_client *mdsc =
}
spin_unlock(&mdsc->cap_dirty_lock);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
while (drop--)
iput(inode);
return 0;
wake_up_all(&ci->i_cap_wq);
if (arg) {
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_wanted_max_size = 0;
ci->i_requested_max_size = 0;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
return 0;
}
if (session->s_trim_caps <= 0)
return -1;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
mine = cap->issued | cap->implemented;
used = __ceph_caps_used(ci);
oissued = __ceph_caps_issued_other(ci, cap);
__ceph_remove_cap(cap);
} else {
/* try to drop referring dentries */
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
d_prune_aliases(inode);
dout("trim_caps_cb %p cap %p pruned, count now %d\n",
inode, cap, atomic_read(&inode->i_count));
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return 0;
}
i_flushing_item);
struct inode *inode = &ci->vfs_inode;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_cap_flush_seq <= want_flush_seq) {
dout("check_cap_flush still flushing %p "
"seq %lld <= %lld to mds%d\n", inode,
session->s_mds);
ret = 0;
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
mutex_unlock(&session->s_mutex);
ceph_put_mds_session(session);
pos, temp);
} else if (stop_on_nosnap && inode &&
ceph_snap(inode) == CEPH_NOSNAP) {
+ spin_unlock(&temp->d_lock);
break;
} else {
pos -= temp->d_name.len;
struct ceph_inode_info *ci = ceph_inode(inode);
dout("invalidate_dir_request %p (D_COMPLETE, lease(s))\n", inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ceph_dir_clear_complete(inode);
ci->i_release_count++;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (req->r_dentry)
ceph_invalidate_dentry_lease(req->r_dentry);
if (err)
goto out_free;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
cap->seq = 0; /* reset cap seq */
cap->issue_seq = 0; /* and issue_seq */
rec.v1.pathbase = cpu_to_le64(pathbase);
reclen = sizeof(rec.v1);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (recon_state->flock) {
int num_fcntl_locks, num_flock_locks;
*
* mdsc->snap_rwsem
*
- * inode->i_lock
+ * ci->i_ceph_lock
* mdsc->snap_flush_lock
* mdsc->cap_delay_lock
*
return;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
used = __ceph_caps_used(ci);
dirty = __ceph_caps_dirty(ci);
kfree(capsnap);
}
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
/*
*
* If capsnap can now be flushed, add to snap_flush list, and return 1.
*
- * Caller must hold i_lock.
+ * Caller must hold i_ceph_lock.
*/
int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap *capsnap)
inode = &ci->vfs_inode;
ihold(inode);
spin_unlock(&mdsc->snap_flush_lock);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__ceph_flush_snaps(ci, &session, 0);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
iput(inode);
spin_lock(&mdsc->snap_flush_lock);
}
continue;
ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (!ci->i_snap_realm)
goto skip_inode;
/*
oldrealm = ci->i_snap_realm;
ci->i_snap_realm = realm;
spin_unlock(&realm->inodes_with_caps_lock);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
ceph_get_snap_realm(mdsc, realm);
ceph_put_snap_realm(mdsc, oldrealm);
continue;
skip_inode:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
iput(inode);
}
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
seq_printf(m, ",rsize=%d", fsopt->rsize);
if (fsopt->rasize != CEPH_RASIZE_DEFAULT)
- seq_printf(m, ",rasize=%d", fsopt->rsize);
+ seq_printf(m, ",rasize=%d", fsopt->rasize);
if (fsopt->congestion_kb != default_congestion_kb())
seq_printf(m, ",write_congestion_kb=%d", fsopt->congestion_kb);
if (fsopt->caps_wanted_delay_min != CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT)
* The locking for D_COMPLETE is a bit odd:
* - we can clear it at almost any time (see ceph_d_prune)
* - it is only meaningful if:
- * - we hold dir inode i_lock
+ * - we hold dir inode i_ceph_lock
* - we hold dir FILE_SHARED caps
* - the dentry D_COMPLETE is set
*/
struct ceph_inode_info {
struct ceph_vino i_vino; /* ceph ino + snap */
+ spinlock_t i_ceph_lock;
+
u64 i_version;
u32 i_time_warp_seq;
struct ceph_inode_xattrs_info i_xattrs;
- /* capabilities. protected _both_ by i_lock and cap->session's
+ /* capabilities. protected _both_ by i_ceph_lock and cap->session's
* s_mutex. */
struct rb_root i_caps; /* cap list */
struct ceph_cap *i_auth_cap; /* authoritative cap, if any */
{
struct ceph_inode_info *ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_ceph_flags &= ~mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
static inline void ceph_i_set(struct inode *inode, unsigned mask)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
ci->i_ceph_flags |= mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
}
static inline bool ceph_i_test(struct inode *inode, unsigned mask)
struct ceph_inode_info *ci = ceph_inode(inode);
bool r;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
r = (ci->i_ceph_flags & mask) == mask;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return r;
}
static inline int ceph_caps_issued(struct ceph_inode_info *ci)
{
int issued;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
issued = __ceph_caps_issued(ci, NULL);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return issued;
}
int touch)
{
int r;
- spin_lock(&ci->vfs_inode.i_lock);
+ spin_lock(&ci->i_ceph_lock);
r = __ceph_caps_issued_mask(ci, mask, touch);
- spin_unlock(&ci->vfs_inode.i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return r;
}
extern void __ceph_remove_cap(struct ceph_cap *cap);
static inline void ceph_remove_cap(struct ceph_cap *cap)
{
- struct inode *inode = &cap->ci->vfs_inode;
- spin_lock(&inode->i_lock);
+ spin_lock(&cap->ci->i_ceph_lock);
__ceph_remove_cap(cap);
- spin_unlock(&inode->i_lock);
+ spin_unlock(&cap->ci->i_ceph_lock);
}
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
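
Across the ceph hunks above, per-inode capability state moves from the VFS inode's i_lock to a new i_ceph_lock spinlock embedded in ceph_inode_info. The sketch below is a hedged, user-space illustration of that general shape only: an fs-private lock living next to the fields it protects, recovered from the embedded VFS inode with container_of-style pointer arithmetic. All names here (fs_inode_info, i_fs_lock, fs_i_set) are made up; none of this is ceph code.

/* Minimal sketch of an embedded per-inode lock for fs-private state. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

struct inode {				/* stand-in for the VFS inode */
	unsigned long i_ino;
};

struct fs_inode_info {			/* stand-in for ceph_inode_info */
	pthread_spinlock_t i_fs_lock;	/* protects the fs-private fields */
	unsigned int i_flags;
	struct inode vfs_inode;		/* VFS inode embedded in the struct */
};

static struct fs_inode_info *fs_inode(struct inode *inode)
{
	/* container_of(): recover the private struct from the embedded inode */
	return (struct fs_inode_info *)((char *)inode -
				offsetof(struct fs_inode_info, vfs_inode));
}

static void fs_i_set(struct inode *inode, unsigned int mask)
{
	struct fs_inode_info *ci = fs_inode(inode);

	pthread_spin_lock(&ci->i_fs_lock);
	ci->i_flags |= mask;
	pthread_spin_unlock(&ci->i_fs_lock);
}

int main(void)
{
	struct fs_inode_info ci = { .i_flags = 0, .vfs_inode = { .i_ino = 1 } };

	pthread_spin_init(&ci.i_fs_lock, PTHREAD_PROCESS_PRIVATE);
	fs_i_set(&ci.vfs_inode, 0x1);
	printf("flags=%#x\n", fs_inode(&ci.vfs_inode)->i_flags);
	pthread_spin_destroy(&ci.i_fs_lock);
	return 0;
}
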
}
static int __build_xattrs(struct inode *inode)
- __releases(inode->i_lock)
- __acquires(inode->i_lock)
+ __releases(ci->i_ceph_lock)
+ __acquires(ci->i_ceph_lock)
{
u32 namelen;
u32 numattr = 0;
end = p + ci->i_xattrs.blob->vec.iov_len;
ceph_decode_32_safe(&p, end, numattr, bad);
xattr_version = ci->i_xattrs.version;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
xattrs = kcalloc(numattr, sizeof(struct ceph_xattr *),
GFP_NOFS);
goto bad_lock;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_xattrs.version != xattr_version) {
/* lost a race, retry */
for (i = 0; i < numattr; i++)
return err;
bad_lock:
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
bad:
if (xattrs) {
for (i = 0; i < numattr; i++)
if (vxattrs)
vxattr = ceph_match_vxattr(vxattrs, name);
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("getxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
(ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto get_xattr;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
/* get xattrs from mds (if we don't already have them) */
err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
if (err)
return err;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (vxattr && vxattr->readonly) {
err = vxattr->getxattr_cb(ci, value, size);
memcpy(value, xattr->val, xattr->val_len);
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return err;
}
u32 len;
int i;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
(ci->i_xattrs.index_version >= ci->i_xattrs.version)) {
goto list_xattr;
} else {
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_do_getattr(inode, CEPH_STAT_CAP_XATTR);
if (err)
return err;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
err = __build_xattrs(inode);
if (err < 0)
}
out:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
return err;
}
if (!xattr)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
retry:
issued = __ceph_caps_issued(ci, NULL);
if (!(issued & CEPH_CAP_XATTR_EXCL))
required_blob_size > ci->i_xattrs.prealloc_blob->alloc_len) {
struct ceph_buffer *blob = NULL;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
dout(" preaallocating new blob size=%d\n", required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto out;
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
if (ci->i_xattrs.prealloc_blob)
ceph_buffer_put(ci->i_xattrs.prealloc_blob);
ci->i_xattrs.prealloc_blob = blob;
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_sync_setxattr(dentry, name, value, size, flags);
out:
kfree(newname);
return -EOPNOTSUPP;
}
- spin_lock(&inode->i_lock);
+ spin_lock(&ci->i_ceph_lock);
__build_xattrs(inode);
issued = __ceph_caps_issued(ci, NULL);
dout("removexattr %p issued %s\n", inode, ceph_cap_string(issued));
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
- spin_unlock(&inode->i_lock);
+ spin_unlock(&ci->i_ceph_lock);
err = ceph_send_removexattr(dentry, name);
return err;
}
smb_msg.msg_controllen = 0;
for (total_read = 0; to_read; total_read += length, to_read -= length) {
+ try_to_freeze();
+
if (server_unresponsive(server)) {
total_read = -EAGAIN;
break;
lock->type, lock->netfid, conf_lock);
}
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (mandatory style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks or leave it the same if we can't. Returns 0 if we don't need to
+ * send a request to the server or 1 otherwise.
+ */
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
__u8 type, __u16 netfid, struct file_lock *flock)
mutex_unlock(&cinode->lock_mutex);
}
+/*
+ * Set the byte-range lock (mandatory style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if no locks prevent us but we need to send a request to the server;
+ * 3) -EACCES, if there is a lock that prevents us and wait is false.
+ */
static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
bool wait)
return rc;
}
+/*
+ * Check if there is another lock that prevents us from setting the lock
+ * (posix style). If such a lock exists, update the flock structure with
+ * its properties. Otherwise, set the flock type to F_UNLCK if we can cache
+ * brlocks, or leave it the same if we can't. Returns 0 if we don't need to
+ * send a request to the server or 1 otherwise.
+ */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
return rc;
}
+/*
+ * Set the byte-range lock (posix style). Returns:
+ * 1) 0, if we set the lock and don't need to send a request to the server;
+ * 2) 1, if we need to send a request to the server;
+ * 3) <0, if an error occurs while setting the lock.
+ */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
rc);
return rc;
}
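A hedged sketch of how a caller might act on the return contract documented above; the call site is illustrative and not part of this patch:

	rc = cifs_posix_lock_set(file, flock);
	if (rc <= 0)
		return rc;	/* lock handled locally, or an error occurred */
	/* rc == 1: fall through and send the lock request to the server */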
- cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+ cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+ cifsFile);
}
while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
cFYI(1, "calling findnext2");
rc = CIFSFindNext(xid, pTcon, cifsFile->netfid,
&cifsFile->srch_inf);
- cifs_save_resume_key(cifsFile->srch_inf.last_entry, cifsFile);
+ /* FindFirst/Next set last_entry to NULL on malformed reply */
+ if (cifsFile->srch_inf.last_entry)
+ cifs_save_resume_key(cifsFile->srch_inf.last_entry,
+ cifsFile);
if (rc)
return -ENOENT;
}
{
int rc;
int len;
- __u16 wpwd[129];
+ __le16 wpwd[129];
/* Password cannot be longer than 128 characters */
if (passwd) /* Password must be converted to NT unicode */
*wpwd = 0; /* Ensure string is null terminated */
}
- rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__u16));
- memset(wpwd, 0, 129 * sizeof(__u16));
+ rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16));
+ memset(wpwd, 0, 129 * sizeof(__le16));
return rc;
}
return bdi_init(&configfs_backing_dev_info);
}
-void __exit configfs_inode_exit(void)
+void configfs_inode_exit(void)
{
bdi_destroy(&configfs_backing_dev_info);
}
goto out;
config_kobj = kobject_create_and_add("config", kernel_kobj);
- if (!config_kobj) {
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- goto out;
- }
+ if (!config_kobj)
+ goto out2;
+
+ err = configfs_inode_init();
+ if (err)
+ goto out3;
err = register_filesystem(&configfs_fs_type);
- if (err) {
- printk(KERN_ERR "configfs: Unable to register filesystem!\n");
- kobject_put(config_kobj);
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- goto out;
- }
+ if (err)
+ goto out4;
- err = configfs_inode_init();
- if (err) {
- unregister_filesystem(&configfs_fs_type);
- kobject_put(config_kobj);
- kmem_cache_destroy(configfs_dir_cachep);
- configfs_dir_cachep = NULL;
- }
+ return 0;
+out4:
+ printk(KERN_ERR "configfs: Unable to register filesystem!\n");
+ configfs_inode_exit();
+out3:
+ kobject_put(config_kobj);
+out2:
+ kmem_cache_destroy(configfs_dir_cachep);
+ configfs_dir_cachep = NULL;
out:
return err;
}
/**
* prepend_path - Prepend path string to a buffer
* @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
* @buffer: pointer to the end of the buffer
* @buflen: pointer to buffer length
*
* Caller holds the rename_lock.
- *
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
*/
-static int prepend_path(const struct path *path, struct path *root,
+static int prepend_path(const struct path *path,
+ const struct path *root,
char **buffer, int *buflen)
{
struct dentry *dentry = path->dentry;
dentry = parent;
}
-out:
if (!error && !slash)
error = prepend(buffer, buflen, "/", 1);
+out:
br_read_unlock(vfsmount_lock);
return error;
WARN(1, "Root dentry has weird name <%.*s>\n",
(int) dentry->d_name.len, dentry->d_name.name);
}
- root->mnt = vfsmnt;
- root->dentry = dentry;
+ if (!slash)
+ error = prepend(buffer, buflen, "/", 1);
+ if (!error)
+ error = vfsmnt->mnt_ns ? 1 : 2;
goto out;
}
/**
* __d_path - return the path of a dentry
* @path: the dentry/vfsmount to report
- * @root: root vfsmnt/dentry (may be modified by this function)
+ * @root: root vfsmnt/dentry
* @buf: buffer to return value in
* @buflen: buffer length
*
*
* "buflen" should be positive.
*
- * If path is not reachable from the supplied root, then the value of
- * root is changed (without modifying refcounts).
+ * If the path is not reachable from the supplied root, return %NULL.
*/
-char *__d_path(const struct path *path, struct path *root,
+char *__d_path(const struct path *path,
+ const struct path *root,
char *buf, int buflen)
{
char *res = buf + buflen;
error = prepend_path(path, root, &res, &buflen);
write_sequnlock(&rename_lock);
- if (error)
+ if (error < 0)
+ return ERR_PTR(error);
+ if (error > 0)
+ return NULL;
+ return res;
+}
+
+char *d_absolute_path(const struct path *path,
+ char *buf, int buflen)
+{
+ struct path root = {};
+ char *res = buf + buflen;
+ int error;
+
+ prepend(&res, &buflen, "\0", 1);
+ write_seqlock(&rename_lock);
+ error = prepend_path(path, &root, &res, &buflen);
+ write_sequnlock(&rename_lock);
+
+ if (error > 1)
+ error = -EINVAL;
+ if (error < 0)
return ERR_PTR(error);
return res;
}
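A hedged usage sketch for the two conventions above: __d_path() now reports a path that is unreachable from the supplied root as NULL, while real failures still come back as ERR_PTR(); d_absolute_path() keeps the ERR_PTR() convention. The path, root, buf and buflen variables below are assumed to be supplied by the caller:

	char *p;

	p = __d_path(&path, &root, buf, buflen);
	if (IS_ERR(p))
		return PTR_ERR(p);	/* e.g. -ENAMETOOLONG */
	if (!p)
		return -EINVAL;		/* not reachable from root: skip or mark it */
	/* p points at the NUL-terminated path inside buf */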
/*
* same as __d_path but appends "(deleted)" for unlinked files.
*/
-static int path_with_deleted(const struct path *path, struct path *root,
- char **buf, int *buflen)
+static int path_with_deleted(const struct path *path,
+ const struct path *root,
+ char **buf, int *buflen)
{
prepend(buf, buflen, "\0", 1);
if (d_unlinked(path->dentry)) {
{
char *res = buf + buflen;
struct path root;
- struct path tmp;
int error;
/*
get_fs_root(current->fs, &root);
write_seqlock(&rename_lock);
- tmp = root;
- error = path_with_deleted(path, &tmp, &res, &buflen);
- if (error)
+ error = path_with_deleted(path, &root, &res, &buflen);
+ if (error < 0)
res = ERR_PTR(error);
write_sequnlock(&rename_lock);
path_put(&root);
{
char *res = buf + buflen;
struct path root;
- struct path tmp;
int error;
if (path->dentry->d_op && path->dentry->d_op->d_dname)
get_fs_root(current->fs, &root);
write_seqlock(&rename_lock);
- tmp = root;
- error = path_with_deleted(path, &tmp, &res, &buflen);
- if (!error && !path_equal(&tmp, &root))
+ error = path_with_deleted(path, &root, &res, &buflen);
+ if (error > 0)
error = prepend_unreachable(&res, &buflen);
write_sequnlock(&rename_lock);
path_put(&root);
write_seqlock(&rename_lock);
if (!d_unlinked(pwd.dentry)) {
unsigned long len;
- struct path tmp = root;
char *cwd = page + PAGE_SIZE;
int buflen = PAGE_SIZE;
prepend(&cwd, &buflen, "\0", 1);
- error = prepend_path(&pwd, &tmp, &cwd, &buflen);
+ error = prepend_path(&pwd, &root, &cwd, &buflen);
write_sequnlock(&rename_lock);
- if (error)
+ if (error < 0)
goto out;
/* Unreachable from current root */
- if (!path_equal(&tmp, &root)) {
+ if (error > 0) {
error = prepend_unreachable(&cwd, &buflen);
if (error)
goto out;
le32_to_cpu(EXT_FIRST_INDEX(neh)->ei_block),
ext4_idx_pblock(EXT_FIRST_INDEX(neh)));
- neh->eh_depth = cpu_to_le16(neh->eh_depth + 1);
+ neh->eh_depth = cpu_to_le16(le16_to_cpu(neh->eh_depth) + 1);
ext4_mark_inode_dirty(handle, inode);
out:
brelse(bh);
/* Pre-conditions */
BUG_ON(!ext4_ext_is_uninitialized(ex));
BUG_ON(!in_range(map->m_lblk, ee_block, ee_len));
- BUG_ON(map->m_lblk + map->m_len > ee_block + ee_len);
/*
* Attempt to transfer newly initialized blocks from the currently
clear_buffer_unwritten(bh);
}
- /* skip page if block allocation undone */
- if (buffer_delay(bh) || buffer_unwritten(bh))
+ /*
+ * skip page if block allocation undone and
+ * block is dirty
+ */
+ if (ext4_bh_delay_or_unwritten(NULL, bh))
skip_page = 1;
bh = bh->b_this_page;
block_start += bh->b_size;
pgoff_t index;
struct inode *inode = mapping->host;
handle_t *handle;
- loff_t page_len;
index = pos >> PAGE_CACHE_SHIFT;
*/
if (pos + len > inode->i_size)
ext4_truncate_failed_write(inode);
- } else {
- page_len = pos & (PAGE_CACHE_SIZE - 1);
- if (page_len > 0) {
- ret = ext4_discard_partial_page_buffers_no_lock(handle,
- inode, page, pos - page_len, page_len,
- EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
- }
}
if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
loff_t new_i_size;
unsigned long start, end;
int write_mode = (int)(unsigned long)fsdata;
- loff_t page_len;
if (write_mode == FALL_BACK_TO_NONDELALLOC) {
if (ext4_should_order_data(inode)) {
*/
new_i_size = pos + copied;
- if (new_i_size > EXT4_I(inode)->i_disksize) {
+ if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
if (ext4_da_should_update_i_disksize(page, end)) {
down_write(&EXT4_I(inode)->i_data_sem);
if (new_i_size > EXT4_I(inode)->i_disksize) {
}
ret2 = generic_write_end(file, mapping, pos, len, copied,
page, fsdata);
-
- page_len = PAGE_CACHE_SIZE -
- ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
-
- if (page_len > 0) {
- ret = ext4_discard_partial_page_buffers_no_lock(handle,
- inode, page, pos + copied - 1, page_len,
- EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
- }
-
copied = ret2;
if (ret2 < 0)
ret = ret2;
iocb->private, io_end->inode->i_ino, iocb, offset,
size);
+ iocb->private = NULL;
+
/* if not aio dio with unwritten extents, just free io and return */
if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
ext4_free_io_end(io_end);
- iocb->private = NULL;
out:
if (is_async)
aio_complete(iocb, ret, 0);
spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
/* queue the work to convert unwritten extents to written */
- iocb->private = NULL;
queue_work(wq, &io_end->work);
/* XXX: probably should move into the real I/O completion handler */
iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
- if (!page_has_buffers(page)) {
- /*
- * If the range to be discarded covers a partial block
- * we need to get the page buffers. This is because
- * partial blocks cannot be released and the page needs
- * to be updated with the contents of the block before
- * we write the zeros on top of it.
- */
- if ((from & (blocksize - 1)) ||
- ((from + length) & (blocksize - 1))) {
- create_empty_buffers(page, blocksize, 0);
- } else {
- /*
- * If there are no partial blocks,
- * there is nothing to update,
- * so we can return now
- */
- return 0;
- }
- }
+ if (!page_has_buffers(page))
+ create_empty_buffers(page, blocksize, 0);
/* Find the buffer that contains "offset" */
bh = page_buffers(page);
block_end = block_start + blocksize;
if (block_start >= len) {
+ /*
+ * Comments copied from block_write_full_page_endio:
+ *
+ * The page straddles i_size. It must be zeroed out on
+ * each and every writepage invocation because it may
+ * be mmapped. "A file is mapped in multiples of the
+ * page size. For a file that is not a multiple of
+ * the page size, the remaining memory is zeroed when
+ * mapped, and writes to that region are not written
+ * out to the file."
+ */
+ zero_user_segment(page, block_start, block_end);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
continue;
seq_puts(seq, ",block_validity");
if (!test_opt(sb, INIT_INODE_TABLE))
- seq_puts(seq, ",noinit_inode_table");
+ seq_puts(seq, ",noinit_itable");
else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT)
- seq_printf(seq, ",init_inode_table=%u",
+ seq_printf(seq, ",init_itable=%u",
(unsigned) sbi->s_li_wait_mult);
ext4_show_quota_options(seq, sb);
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
Opt_dioread_nolock, Opt_dioread_lock,
- Opt_discard, Opt_nodiscard,
- Opt_init_inode_table, Opt_noinit_inode_table,
+ Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
};
static const match_table_t tokens = {
{Opt_dioread_lock, "dioread_lock"},
{Opt_discard, "discard"},
{Opt_nodiscard, "nodiscard"},
- {Opt_init_inode_table, "init_itable=%u"},
- {Opt_init_inode_table, "init_itable"},
- {Opt_noinit_inode_table, "noinit_itable"},
+ {Opt_init_itable, "init_itable=%u"},
+ {Opt_init_itable, "init_itable"},
+ {Opt_noinit_itable, "noinit_itable"},
{Opt_err, NULL},
};
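For reference, the renamed options are what users pass at mount time. A hedged example; the device and mount point are placeholders:

	$ mount -o noinit_itable /dev/sdXN /mnt
	$ mount -o init_itable=10 /dev/sdXN /mnt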
case Opt_dioread_lock:
clear_opt(sb, DIOREAD_NOLOCK);
break;
- case Opt_init_inode_table:
+ case Opt_init_itable:
set_opt(sb, INIT_INODE_TABLE);
if (args[0].from) {
if (match_int(&args[0], &option))
return 0;
sbi->s_li_wait_mult = option;
break;
- case Opt_noinit_inode_table:
+ case Opt_noinit_itable:
clear_opt(sb, INIT_INODE_TABLE);
break;
default:
* bdi_start_writeback - start writeback
* @bdi: the backing device to write from
* @nr_pages: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Description:
* This does WB_SYNC_NONE opportunistic writeback. The IO is only
* writeback_inodes_sb_nr - writeback dirty inodes from given super_block
* @sb: the superblock
* @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
/**
* writeback_inodes_sb - writeback dirty inodes from given super_block
* @sb: the superblock
+ * @reason: reason why some writeback work was initiated
*
* Start writeback on some inodes on this super_block. No guarantees are made
* on how many (if any) will be written, and this function does not wait
/**
* writeback_inodes_sb_if_idle - start writeback if none underway
* @sb: the superblock
+ * @reason: reason why some writeback work was initiated
*
* Invoke writeback_inodes_sb if no writeback is currently underway.
* Returns 1 if writeback was started, 0 if not.
* writeback_inodes_sb_if_idle - start writeback if none underway
* @sb: the superblock
* @nr: the number of pages to write
+ * @reason: reason why some writeback work was initiated
*
* Invoke writeback_inodes_sb if no writeback is currently underway.
* Returns 1 if writeback was started, 0 if not.
else if (outarg->offset + num > file_size)
num = file_size - outarg->offset;
- while (num) {
+ while (num && req->num_pages < FUSE_MAX_PAGES_PER_REQ) {
struct page *page;
unsigned int this_num;
num -= this_num;
total_len += this_num;
+ index++;
}
req->misc.retrieve_in.offset = outarg->offset;
req->misc.retrieve_in.size = total_len;
struct inode *inode = file->f_path.dentry->d_inode;
mutex_lock(&inode->i_mutex);
- if (origin != SEEK_CUR || origin != SEEK_SET) {
+ if (origin != SEEK_CUR && origin != SEEK_SET) {
retval = fuse_update_attributes(inode, NULL, file, NULL);
if (retval)
goto exit;
offset += i_size_read(inode);
break;
case SEEK_CUR:
+ if (offset == 0) {
+ retval = file->f_pos;
+ goto exit;
+ }
offset += file->f_pos;
break;
case SEEK_DATA:
{
int err;
- err = register_filesystem(&fuse_fs_type);
- if (err)
- goto out;
-
- err = register_fuseblk();
- if (err)
- goto out_unreg;
-
fuse_inode_cachep = kmem_cache_create("fuse_inode",
sizeof(struct fuse_inode),
0, SLAB_HWCACHE_ALIGN,
fuse_inode_init_once);
err = -ENOMEM;
if (!fuse_inode_cachep)
- goto out_unreg2;
+ goto out;
+
+ err = register_fuseblk();
+ if (err)
+ goto out2;
+
+ err = register_filesystem(&fuse_fs_type);
+ if (err)
+ goto out3;
return 0;
- out_unreg2:
+ out3:
unregister_fuseblk();
- out_unreg:
- unregister_filesystem(&fuse_fs_type);
+ out2:
+ kmem_cache_destroy(fuse_inode_cachep);
out:
return err;
}
if (err)
goto out;
seq_putc(m, ' ');
- seq_path_root(m, &mnt_path, &root, " \t\n\\");
- if (root.mnt != p->root.mnt || root.dentry != p->root.dentry) {
- /*
- * Mountpoint is outside root, discard that one. Ugly,
- * but less so than trying to do that in iterator in a
- * race-free way (due to renames).
- */
- return SEQ_SKIP;
- }
+
+ /* mountpoints outside of the chroot jail will give SEQ_SKIP here */
+ err = seq_path_root(m, &mnt_path, &root, " \t\n\\");
+ if (err)
+ goto out;
+
seq_puts(m, mnt->mnt_flags & MNT_READONLY ? " ro" : " rw");
show_mnt_opts(m, mnt);
}
}
EXPORT_SYMBOL(kern_unmount);
+
+bool our_mnt(struct vfsmount *mnt)
+{
+ return check_mnt(mnt);
+}
error = bdi_setup_and_register(&server->bdi, "ncpfs", BDI_CAP_MAP_COPY);
if (error)
- goto out_bdi;
+ goto out_fput;
server->ncp_filp = ncp_filp;
server->ncp_sock = sock;
error = -EBADF;
server->info_filp = fget(data.info_fd);
if (!server->info_filp)
- goto out_fput;
+ goto out_bdi;
error = -ENOTSOCK;
sock_inode = server->info_filp->f_path.dentry->d_inode;
if (!S_ISSOCK(sock_inode->i_mode))
out_fput2:
if (server->info_filp)
fput(server->info_filp);
-out_fput:
- bdi_destroy(&server->bdi);
out_bdi:
+ bdi_destroy(&server->bdi);
+out_fput:
/* 23/12/1998 Marcin Dalecki <dalecki@cs.net.pl>:
*
* The previously used put_filp(ncp_filp); was bogus, since
K(i.freeswap),
K(global_page_state(NR_FILE_DIRTY)),
K(global_page_state(NR_WRITEBACK)),
- K(global_page_state(NR_ANON_PAGES)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ K(global_page_state(NR_ANON_PAGES)
+ global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
- HPAGE_PMD_NR
+ HPAGE_PMD_NR),
+#else
+ K(global_page_state(NR_ANON_PAGES)),
#endif
- ),
K(global_page_state(NR_FILE_MAPPED)),
K(global_page_state(NR_SHMEM)),
K(global_page_state(NR_SLAB_RECLAIMABLE) +
void __init proc_root_init(void)
{
- struct vfsmount *mnt;
int err;
proc_init_inodecache();
err = register_filesystem(&proc_fs_type);
if (err)
return;
- mnt = kern_mount_data(&proc_fs_type, &init_pid_ns);
- if (IS_ERR(mnt)) {
+ err = pid_ns_prepare_proc(&init_pid_ns);
+ if (err) {
unregister_filesystem(&proc_fs_type);
return;
}
- init_pid_ns.proc_mnt = mnt;
proc_symlink("mounts", NULL, "self/mounts");
proc_net_init();
void pid_ns_release_proc(struct pid_namespace *ns)
{
- mntput(ns->proc_mnt);
+ kern_unmount(ns->proc_mnt);
}
idle = kstat_cpu(cpu).cpustat.idle;
idle = cputime64_add(idle, arch_idle_time(cpu));
} else
- idle = usecs_to_cputime(idle_time);
+ idle = nsecs_to_jiffies64(1000 * idle_time);
return idle;
}
/* !NO_HZ so we can rely on cpustat.iowait */
iowait = kstat_cpu(cpu).cpustat.iowait;
else
- iowait = usecs_to_cputime(iowait_time);
+ iowait = nsecs_to_jiffies64(1000 * iowait_time);
return iowait;
}
/*
* Same as seq_path, but relative to supplied root.
- *
- * root may be changed, see __d_path().
*/
int seq_path_root(struct seq_file *m, struct path *path, struct path *root,
char *esc)
char *p;
p = __d_path(path, root, buf, size);
+ if (!p)
+ return SEQ_SKIP;
res = PTR_ERR(p);
if (!IS_ERR(p)) {
char *end = mangle_path(buf, p, esc);
}
seq_commit(m, res);
- return res < 0 ? res : 0;
+ return res < 0 && res != -ENAMETOOLONG ? res : 0;
}
/*
return -EINVAL;
}
- err = register_filesystem(&ubifs_fs_type);
- if (err) {
- ubifs_err("cannot register file system, error %d", err);
- return err;
- }
-
- err = -ENOMEM;
ubifs_inode_slab = kmem_cache_create("ubifs_inode_slab",
sizeof(struct ubifs_inode), 0,
SLAB_MEM_SPREAD | SLAB_RECLAIM_ACCOUNT,
&inode_slab_ctor);
if (!ubifs_inode_slab)
- goto out_reg;
+ return -ENOMEM;
register_shrinker(&ubifs_shrinker_info);
if (err)
goto out_compr;
+ err = register_filesystem(&ubifs_fs_type);
+ if (err) {
+ ubifs_err("cannot register file system, error %d", err);
+ goto out_dbg;
+ }
return 0;
+out_dbg:
+ dbg_debugfs_exit();
out_compr:
ubifs_compressors_exit();
out_shrinker:
unregister_shrinker(&ubifs_shrinker_info);
kmem_cache_destroy(ubifs_inode_slab);
-out_reg:
- unregister_filesystem(&ubifs_fs_type);
return err;
}
/* late_initcall to let compressors initialize first */
int count, i;
count = be32_to_cpu(aclp->acl_cnt);
+ if (count > XFS_ACL_MAX_ENTRIES)
+ return ERR_PTR(-EFSCORRUPTED);
acl = posix_acl_alloc(count, GFP_KERNEL);
if (!acl)
/*
* Query whether the requested number of additional bytes of extended
* attribute space will be able to fit inline.
+ *
* Returns zero if not, else the di_forkoff fork offset to be used in the
* literal area for attribute data once the new bytes have been added.
*
int offset;
int minforkoff; /* lower limit on valid forkoff locations */
int maxforkoff; /* upper limit on valid forkoff locations */
- int dsize;
+ int dsize;
xfs_mount_t *mp = dp->i_mount;
offset = (XFS_LITINO(mp) - bytes) >> 3; /* rounded down */
return (offset >= minforkoff) ? minforkoff : 0;
}
- if (!(mp->m_flags & XFS_MOUNT_ATTR2)) {
- if (bytes <= XFS_IFORK_ASIZE(dp))
- return dp->i_d.di_forkoff;
+ /*
+ * If the requested number of bytes is smaller than or equal to the
+ * current attribute fork size we can always proceed.
+ *
+ * Note that if_bytes in the data fork might actually be larger than
+ * the current data fork size due to delalloc extents. In that
+ * case either the extent count will go down when they are converted
+ * to real extents, or the delalloc conversion will take care of the
+ * literal area rebalancing.
+ */
+ if (bytes <= XFS_IFORK_ASIZE(dp))
+ return dp->i_d.di_forkoff;
+
+ /*
+ * For attr2 we can try to move the forkoff if there is space in the
+ * literal area, but for the old format we are done if there is no
+ * space in the fixed attribute fork.
+ */
+ if (!(mp->m_flags & XFS_MOUNT_ATTR2))
return 0;
- }
dsize = dp->i_df.if_bytes;
-
+
switch (dp->i_d.di_format) {
case XFS_DINODE_FMT_EXTENTS:
- /*
+ /*
* If there is no attr fork and the data fork is extents,
- * determine if creating the default attr fork will result
- * in the extents form migrating to btree. If so, the
- * minimum offset only needs to be the space required for
+ * determine if creating the default attr fork will result
+ * in the extents form migrating to btree. If so, the
+ * minimum offset only needs to be the space required for
* the btree root.
- */
+ */
if (!dp->i_d.di_forkoff && dp->i_df.if_bytes >
xfs_default_attroffset(dp))
dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
break;
-
case XFS_DINODE_FMT_BTREE:
/*
- * If have data btree then keep forkoff if we have one,
- * otherwise we are adding a new attr, so then we set
- * minforkoff to where the btree root can finish so we have
+ * If we have a data btree then keep forkoff if we have one,
+ * otherwise we are adding a new attr, so then we set
+ * minforkoff to where the btree root can finish so we have
* plenty of room for attrs
*/
if (dp->i_d.di_forkoff) {
- if (offset < dp->i_d.di_forkoff)
+ if (offset < dp->i_d.di_forkoff)
return 0;
- else
- return dp->i_d.di_forkoff;
- } else
- dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
+ return dp->i_d.di_forkoff;
+ }
+ dsize = XFS_BMAP_BROOT_SPACE(dp->i_df.if_broot);
break;
}
-
- /*
- * A data fork btree root must have space for at least
+
+ /*
+ * A data fork btree root must have space for at least
* MINDBTPTRS key/ptr pairs if the data fork is small or empty.
*/
minforkoff = MAX(dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
maxforkoff = maxforkoff >> 3; /* rounded down */
- if (offset >= minforkoff && offset < maxforkoff)
- return offset;
if (offset >= maxforkoff)
return maxforkoff;
+ if (offset >= minforkoff)
+ return offset;
return 0;
}
int tryagain;
int error;
+ ASSERT(ap->length);
+
mp = ap->ip->i_mount;
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
if (unlikely(align)) {
int error;
int rt;
+ ASSERT(bma->length > 0);
+
rt = (whichfork == XFS_DATA_FORK) && XFS_IS_REALTIME_INODE(bma->ip);
/*
ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
ASSERT(!(flags & XFS_BMAPI_IGSTATE));
ASSERT(tp != NULL);
+ ASSERT(len > 0);
whichfork = (flags & XFS_BMAPI_ATTRFORK) ?
XFS_ATTR_FORK : XFS_DATA_FORK;
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
- bma.length = len;
bma.offset = bno;
+ /*
+ * There's a 32/64 bit type mismatch between the
+ * allocation length request (which can be 64 bits in
+ * length) and the bma length request, which is
+ * xfs_extlen_t and therefore 32 bits. Hence we have to
+ * check for 32-bit overflows and handle them here.
+ */
+ if (len > (xfs_filblks_t)MAXEXTLEN)
+ bma.length = MAXEXTLEN;
+ else
+ bma.length = len;
+
+ ASSERT(len > 0);
+ ASSERT(bma.length > 0);
error = xfs_bmapi_allocate(&bma, flags);
if (error)
goto error0;
switch (fileid_type) {
case FILEID_INO32_GEN_PARENT:
spin_lock(&dentry->d_lock);
- fid->i32.parent_ino = dentry->d_parent->d_inode->i_ino;
+ fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
spin_unlock(&dentry->d_lock);
/*FALLTHRU*/
case FILEID_INO32_GEN:
- fid->i32.ino = inode->i_ino;
+ fid->i32.ino = XFS_I(inode)->i_ino;
fid->i32.gen = inode->i_generation;
break;
case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
spin_lock(&dentry->d_lock);
- fid64->parent_ino = dentry->d_parent->d_inode->i_ino;
+ fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino;
fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
spin_unlock(&dentry->d_lock);
/*FALLTHRU*/
case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
- fid64->ino = inode->i_ino;
+ fid64->ino = XFS_I(inode)->i_ino;
fid64->gen = inode->i_generation;
break;
}
return XFS_ERROR(EFSCORRUPTED);
}
+void
+xfs_promote_inode(
+ struct xfs_inode *ip)
+{
+ struct xfs_buf *bp;
+
+ ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
+
+ bp = xfs_incore(ip->i_mount->m_ddev_targp, ip->i_imap.im_blkno,
+ ip->i_imap.im_len, XBF_TRYLOCK);
+ if (!bp)
+ return;
+
+ if (XFS_BUF_ISDELAYWRITE(bp)) {
+ xfs_buf_delwri_promote(bp);
+ wake_up_process(ip->i_mount->m_ddev_targp->bt_task);
+ }
+
+ xfs_buf_relse(bp);
+}
+
/*
* Return a pointer to the extent record at file index idx.
*/
void xfs_iext_realloc(xfs_inode_t *, int, int);
void xfs_iunpin_wait(xfs_inode_t *);
int xfs_iflush(xfs_inode_t *, uint);
+void xfs_promote_inode(struct xfs_inode *);
void xfs_lock_inodes(xfs_inode_t **, int, uint);
void xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);
} while (head_val != old);
}
+STATIC bool
+xlog_reserveq_wake(
+ struct log *log,
+ int *free_bytes)
+{
+ struct xlog_ticket *tic;
+ int need_bytes;
+
+ list_for_each_entry(tic, &log->l_reserveq, t_queue) {
+ if (tic->t_flags & XLOG_TIC_PERM_RESERV)
+ need_bytes = tic->t_unit_res * tic->t_cnt;
+ else
+ need_bytes = tic->t_unit_res;
+
+ if (*free_bytes < need_bytes)
+ return false;
+ *free_bytes -= need_bytes;
+
+ trace_xfs_log_grant_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+
+ return true;
+}
+
+STATIC bool
+xlog_writeq_wake(
+ struct log *log,
+ int *free_bytes)
+{
+ struct xlog_ticket *tic;
+ int need_bytes;
+
+ list_for_each_entry(tic, &log->l_writeq, t_queue) {
+ ASSERT(tic->t_flags & XLOG_TIC_PERM_RESERV);
+
+ need_bytes = tic->t_unit_res;
+
+ if (*free_bytes < need_bytes)
+ return false;
+ *free_bytes -= need_bytes;
+
+ trace_xfs_log_regrant_write_wake_up(log, tic);
+ wake_up(&tic->t_wait);
+ }
+
+ return true;
+}
+
+STATIC int
+xlog_reserveq_wait(
+ struct log *log,
+ struct xlog_ticket *tic,
+ int need_bytes)
+{
+ list_add_tail(&tic->t_queue, &log->l_reserveq);
+
+ do {
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_grant_sleep(log, tic);
+
+ xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
+ trace_xfs_log_grant_wake(log, tic);
+
+ spin_lock(&log->l_grant_reserve_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ } while (xlog_space_left(log, &log->l_grant_reserve_head) < need_bytes);
+
+ list_del_init(&tic->t_queue);
+ return 0;
+shutdown:
+ list_del_init(&tic->t_queue);
+ return XFS_ERROR(EIO);
+}
+
+STATIC int
+xlog_writeq_wait(
+ struct log *log,
+ struct xlog_ticket *tic,
+ int need_bytes)
+{
+ list_add_tail(&tic->t_queue, &log->l_writeq);
+
+ do {
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ xlog_grant_push_ail(log, need_bytes);
+
+ XFS_STATS_INC(xs_sleep_logspace);
+ trace_xfs_log_regrant_write_sleep(log, tic);
+
+ xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
+ trace_xfs_log_regrant_write_wake(log, tic);
+
+ spin_lock(&log->l_grant_write_lock);
+ if (XLOG_FORCED_SHUTDOWN(log))
+ goto shutdown;
+ } while (xlog_space_left(log, &log->l_grant_write_head) < need_bytes);
+
+ list_del_init(&tic->t_queue);
+ return 0;
+shutdown:
+ list_del_init(&tic->t_queue);
+ return XFS_ERROR(EIO);
+}
+
static void
xlog_tic_reset_res(xlog_ticket_t *tic)
{
retval = xlog_grant_log_space(log, internal_ticket);
}
+ if (unlikely(retval)) {
+ /*
+ * If we are failing, make sure the ticket doesn't have any
+ * current reservations. We don't want to add this back
+ * when the ticket/transaction gets cancelled.
+ */
+ internal_ticket->t_curr_res = 0;
+ /* ungrant will give back unit_res * t_cnt. */
+ internal_ticket->t_cnt = 0;
+ }
+
return retval;
-} /* xfs_log_reserve */
+}
/*
/*
* Atomically get the log space required for a log ticket.
*
- * Once a ticket gets put onto the reserveq, it will only return after
- * the needed reservation is satisfied.
+ * Once a ticket gets put onto the reserveq, it will only return after the
+ * needed reservation is satisfied.
*
* This function is structured so that it has a lock free fast path. This is
* necessary because every new transaction reservation will come through this
* every pass.
*
* As tickets are only ever moved on and off the reserveq under the
- * l_grant_reserve_lock, we only need to take that lock if we are going
- * to add the ticket to the queue and sleep. We can avoid taking the lock if the
- * ticket was never added to the reserveq because the t_queue list head will be
- * empty and we hold the only reference to it so it can safely be checked
- * unlocked.
+ * l_grant_reserve_lock, we only need to take that lock if we are going to add
+ * the ticket to the queue and sleep. We can avoid taking the lock if the ticket
+ * was never added to the reserveq because the t_queue list head will be empty
+ * and we hold the only reference to it so it can safely be checked unlocked.
*/
STATIC int
-xlog_grant_log_space(xlog_t *log,
- xlog_ticket_t *tic)
+xlog_grant_log_space(
+ struct log *log,
+ struct xlog_ticket *tic)
{
- int free_bytes;
- int need_bytes;
+ int free_bytes, need_bytes;
+ int error = 0;
-#ifdef DEBUG
- if (log->l_flags & XLOG_ACTIVE_RECOVERY)
- panic("grant Recovery problem");
-#endif
+ ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
trace_xfs_log_grant_enter(log, tic);
+ /*
+ * If there are other waiters on the queue then give them a chance at
+ * logspace before us. Wake up the first waiters. If we do not wake
+ * up all the waiters, go to sleep waiting for more free space;
+ * otherwise try to get some space for this transaction.
+ */
need_bytes = tic->t_unit_res;
if (tic->t_flags & XFS_LOG_PERM_RESERV)
need_bytes *= tic->t_ocnt;
-
- /* something is already sleeping; insert new transaction at end */
- if (!list_empty_careful(&log->l_reserveq)) {
- spin_lock(&log->l_grant_reserve_lock);
- /* recheck the queue now we are locked */
- if (list_empty(&log->l_reserveq)) {
- spin_unlock(&log->l_grant_reserve_lock);
- goto redo;
- }
- list_add_tail(&tic->t_queue, &log->l_reserveq);
-
- trace_xfs_log_grant_sleep1(log, tic);
-
- /*
- * Gotta check this before going to sleep, while we're
- * holding the grant lock.
- */
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
- /*
- * If we got an error, and the filesystem is shutting down,
- * we'll catch it down below. So just continue...
- */
- trace_xfs_log_grant_wake1(log, tic);
- }
-
-redo:
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
-
free_bytes = xlog_space_left(log, &log->l_grant_reserve_head);
- if (free_bytes < need_bytes) {
+ if (!list_empty_careful(&log->l_reserveq)) {
spin_lock(&log->l_grant_reserve_lock);
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_reserveq);
-
- trace_xfs_log_grant_sleep2(log, tic);
-
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_reserve_lock);
-
- trace_xfs_log_grant_wake2(log, tic);
- goto redo;
- }
-
- if (!list_empty(&tic->t_queue)) {
+ if (!xlog_reserveq_wake(log, &free_bytes) ||
+ free_bytes < need_bytes)
+ error = xlog_reserveq_wait(log, tic, need_bytes);
+ spin_unlock(&log->l_grant_reserve_lock);
+ } else if (free_bytes < need_bytes) {
spin_lock(&log->l_grant_reserve_lock);
- list_del_init(&tic->t_queue);
+ error = xlog_reserveq_wait(log, tic, need_bytes);
spin_unlock(&log->l_grant_reserve_lock);
}
+ if (error)
+ return error;
- /* we've got enough space */
xlog_grant_add_space(log, &log->l_grant_reserve_head, need_bytes);
xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_grant_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
-
-error_return_unlocked:
- spin_lock(&log->l_grant_reserve_lock);
-error_return:
- list_del_init(&tic->t_queue);
- spin_unlock(&log->l_grant_reserve_lock);
- trace_xfs_log_grant_error(log, tic);
-
- /*
- * If we are failing, make sure the ticket doesn't have any
- * current reservations. We don't want to add this back when
- * the ticket/transaction gets cancelled.
- */
- tic->t_curr_res = 0;
- tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- return XFS_ERROR(EIO);
-} /* xlog_grant_log_space */
-
+}
/*
* Replenish the byte reservation required by moving the grant write head.
* free fast path.
*/
STATIC int
-xlog_regrant_write_log_space(xlog_t *log,
- xlog_ticket_t *tic)
+xlog_regrant_write_log_space(
+ struct log *log,
+ struct xlog_ticket *tic)
{
- int free_bytes, need_bytes;
+ int free_bytes, need_bytes;
+ int error = 0;
tic->t_curr_res = tic->t_unit_res;
xlog_tic_reset_res(tic);
if (tic->t_cnt > 0)
return 0;
-#ifdef DEBUG
- if (log->l_flags & XLOG_ACTIVE_RECOVERY)
- panic("regrant Recovery problem");
-#endif
+ ASSERT(!(log->l_flags & XLOG_ACTIVE_RECOVERY));
trace_xfs_log_regrant_write_enter(log, tic);
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
- /* If there are other waiters on the queue then give them a
- * chance at logspace before us. Wake up the first waiters,
- * if we do not wake up all the waiters then go to sleep waiting
- * for more free space, otherwise try to get some space for
- * this transaction.
+ /*
+ * If there are other waiters on the queue then give them a chance at
+ * logspace before us. Wake up the first waiters. If we do not wake
+ * up all the waiters, go to sleep waiting for more free space;
+ * otherwise try to get some space for this transaction.
*/
need_bytes = tic->t_unit_res;
- if (!list_empty_careful(&log->l_writeq)) {
- struct xlog_ticket *ntic;
-
- spin_lock(&log->l_grant_write_lock);
- free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- list_for_each_entry(ntic, &log->l_writeq, t_queue) {
- ASSERT(ntic->t_flags & XLOG_TIC_PERM_RESERV);
-
- if (free_bytes < ntic->t_unit_res)
- break;
- free_bytes -= ntic->t_unit_res;
- wake_up(&ntic->t_wait);
- }
-
- if (ntic != list_first_entry(&log->l_writeq,
- struct xlog_ticket, t_queue)) {
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_writeq);
- trace_xfs_log_regrant_write_sleep1(log, tic);
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
- trace_xfs_log_regrant_write_wake1(log, tic);
- } else
- spin_unlock(&log->l_grant_write_lock);
- }
-
-redo:
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return_unlocked;
-
free_bytes = xlog_space_left(log, &log->l_grant_write_head);
- if (free_bytes < need_bytes) {
+ if (!list_empty_careful(&log->l_writeq)) {
spin_lock(&log->l_grant_write_lock);
- if (list_empty(&tic->t_queue))
- list_add_tail(&tic->t_queue, &log->l_writeq);
-
- if (XLOG_FORCED_SHUTDOWN(log))
- goto error_return;
-
- xlog_grant_push_ail(log, need_bytes);
-
- XFS_STATS_INC(xs_sleep_logspace);
- trace_xfs_log_regrant_write_sleep2(log, tic);
- xlog_wait(&tic->t_wait, &log->l_grant_write_lock);
-
- trace_xfs_log_regrant_write_wake2(log, tic);
- goto redo;
- }
-
- if (!list_empty(&tic->t_queue)) {
+ if (!xlog_writeq_wake(log, &free_bytes) ||
+ free_bytes < need_bytes)
+ error = xlog_writeq_wait(log, tic, need_bytes);
+ spin_unlock(&log->l_grant_write_lock);
+ } else if (free_bytes < need_bytes) {
spin_lock(&log->l_grant_write_lock);
- list_del_init(&tic->t_queue);
+ error = xlog_writeq_wait(log, tic, need_bytes);
spin_unlock(&log->l_grant_write_lock);
}
- /* we've got enough space */
+ if (error)
+ return error;
+
xlog_grant_add_space(log, &log->l_grant_write_head, need_bytes);
trace_xfs_log_regrant_write_exit(log, tic);
xlog_verify_grant_tail(log);
return 0;
-
-
- error_return_unlocked:
- spin_lock(&log->l_grant_write_lock);
- error_return:
- list_del_init(&tic->t_queue);
- spin_unlock(&log->l_grant_write_lock);
- trace_xfs_log_regrant_write_error(log, tic);
-
- /*
- * If we are failing, make sure the ticket doesn't have any
- * current reservations. We don't want to add this back when
- * the ticket/transaction gets cancelled.
- */
- tic->t_curr_res = 0;
- tic->t_cnt = 0; /* ungrant will give back unit_res * t_cnt. */
- return XFS_ERROR(EIO);
-} /* xlog_regrant_write_log_space */
-
+}
/* The first cnt-1 times through here we don't need to
* move the grant write head because the permanent
if (!xfs_iflock_nowait(ip)) {
if (!(sync_mode & SYNC_WAIT))
goto out;
+
+ /*
+ * If we only have a single dirty inode in a cluster there is
+ * a fair chance that the AIL push may have pushed it into
+ * the buffer, but xfsbufd won't touch it until 30 seconds
+ * from now, and thus we will lock up here.
+ *
+ * Promote the inode buffer to the front of the delwri list
+ * and wake up xfsbufd now.
+ */
+ xfs_promote_inode(ip);
xfs_iflock(ip);
}
DEFINE_LOGGRANT_EVENT(xfs_log_grant_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_grant_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_exit);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_error);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake1);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep2);
-DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake2);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_sleep);
+DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_write_wake_up);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_enter);
DEFINE_LOGGRANT_EVENT(xfs_log_regrant_reserve_exit);
__SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
+#define __NR_process_vm_readv 270
+__SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
+ compat_sys_process_vm_readv)
+#define __NR_process_vm_writev 271
+__SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
+ compat_sys_process_vm_writev)
#undef __NR_syscalls
-#define __NR_syscalls 270
+#define __NR_syscalls 272
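These entries wire up the new cross-process VM I/O syscalls in the generic syscall table. A hedged userspace sketch of the read side; the libc wrapper name and its availability are assumptions (a raw syscall(__NR_process_vm_readv, ...) call follows the same argument order), and target_pid/remote_addr are hypothetical:

	#include <sys/uio.h>

	char buf[128];
	struct iovec local  = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct iovec remote = { .iov_base = (void *)remote_addr, .iov_len = sizeof(buf) };
	ssize_t n = process_vm_readv(target_pid, &local, 1, &remote, 1, 0);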
/*
* All syscalls below here should go away really,
{0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6759, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x675D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x675F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6761, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x677B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6840, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6841, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6842, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6843, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6849, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x6859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6888, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x6889, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68f9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x68fa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x68fe, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CEDAR|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_NEW_MEMMAP}, \
{0x1002, 0x7101, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R520|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9710, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
*/
extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
spinlock_t *lock, int node_id);
-extern struct request_queue *blk_init_allocated_queue_node(struct request_queue *,
- request_fn_proc *,
- spinlock_t *, int node_id);
extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
request_fn_proc *, spinlock_t *);
extern void __user *compat_alloc_user_space(unsigned long len);
+asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+asmlinkage ssize_t compat_sys_process_vm_writev(compat_pid_t pid,
+ const struct compat_iovec __user *lvec,
+ unsigned long liovcnt, const struct compat_iovec __user *rvec,
+ unsigned long riovcnt, unsigned long flags);
+
#endif /* CONFIG_COMPAT */
#endif /* _LINUX_COMPAT_H */
*/
extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
-extern char *__d_path(const struct path *path, struct path *root, char *, int);
+extern char *__d_path(const struct path *, const struct path *, char *, int);
+extern char *d_absolute_path(const struct path *, char *, int);
extern char *d_path(const struct path *, char *, int);
extern char *d_path_with_unreachable(const struct path *, char *, int);
extern char *dentry_path_raw(struct dentry *, char *, int);
+++ /dev/null
-/*
- * Copyright (C) 2006 James Simmons <jsimmons@infradead.org>
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- */
-
-#ifndef _LINUX_DISPLAY_H
-#define _LINUX_DISPLAY_H
-
-#include <linux/device.h>
-
-struct display_device;
-
-/* This structure defines all the properties of a Display. */
-struct display_driver {
- int (*set_contrast)(struct display_device *, unsigned int);
- int (*get_contrast)(struct display_device *);
- void (*suspend)(struct display_device *, pm_message_t state);
- void (*resume)(struct display_device *);
- int (*probe)(struct display_device *, void *);
- int (*remove)(struct display_device *);
- int max_contrast;
-};
-
-struct display_device {
- struct module *owner; /* Owner module */
- struct display_driver *driver;
- struct device *parent; /* This is the parent */
- struct device *dev; /* This is this display device */
- struct mutex lock;
- void *priv_data;
- char type[16];
- char *name;
- int idx;
-};
-
-extern struct display_device *display_device_register(struct display_driver *driver,
- struct device *dev, void *devdata);
-extern void display_device_unregister(struct display_device *dev);
-
-extern int probe_edid(struct display_device *dev, void *devdata);
-
-#define to_display_device(obj) container_of(obj, struct display_device, class_dev)
-
-#endif
extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
+extern int intel_iommu_enabled;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
{
}
#define dmar_disabled (1)
+#define intel_iommu_enabled (0)
#endif
#define FB_TYPE_INTERLEAVED_PLANES 2 /* Interleaved planes */
#define FB_TYPE_TEXT 3 /* Text/attributes */
#define FB_TYPE_VGA_PLANES 4 /* EGA/VGA planes */
+#define FB_TYPE_FOURCC 5 /* Type identified by a V4L2 FOURCC */
#define FB_AUX_TEXT_MDA 0 /* Monochrome text */
#define FB_AUX_TEXT_CGA 1 /* CGA/EGA/VGA Color text */
#define FB_VISUAL_PSEUDOCOLOR 3 /* Pseudo color (like atari) */
#define FB_VISUAL_DIRECTCOLOR 4 /* Direct color */
#define FB_VISUAL_STATIC_PSEUDOCOLOR 5 /* Pseudo color readonly */
+#define FB_VISUAL_FOURCC 6 /* Visual identified by a V4L2 FOURCC */
#define FB_ACCEL_NONE 0 /* no hardware accelerator */
#define FB_ACCEL_ATARIBLITT 1 /* Atari Blitter */
#define FB_ACCEL_PUV3_UNIGFX 0xa0 /* PKUnity-v3 Unigfx */
+#define FB_CAP_FOURCC 1 /* Device supports FOURCC-based formats */
+
struct fb_fix_screeninfo {
char id[16]; /* identification string eg "TT Builtin" */
unsigned long smem_start; /* Start of frame buffer mem */
__u32 mmio_len; /* Length of Memory Mapped I/O */
__u32 accel; /* Indicate to driver which */
/* specific chip/card we have */
- __u16 reserved[3]; /* Reserved for future compatibility */
+ __u16 capabilities; /* see FB_CAP_* */
+ __u16 reserved[2]; /* Reserved for future compatibility */
};
/* Interpretation of offset for color fields: All offsets are from the right,
__u32 yoffset; /* resolution */
__u32 bits_per_pixel; /* guess what */
- __u32 grayscale; /* != 0 Graylevels instead of colors */
-
+ __u32 grayscale; /* 0 = color, 1 = grayscale, */
+ /* >1 = FOURCC */
struct fb_bitfield red; /* bitfield in fb mem if true color, */
struct fb_bitfield green; /* else only length is significant */
struct fb_bitfield blue;
__u32 sync; /* see FB_SYNC_* */
__u32 vmode; /* see FB_VMODE_* */
__u32 rotate; /* angle we rotate counter clockwise */
- __u32 reserved[5]; /* Reserved for future compatibility */
+ __u32 colorspace; /* colorspace for FOURCC-based modes */
+ __u32 reserved[4]; /* Reserved for future compatibility */
};
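A hedged userspace sketch of how the FOURCC-based fields added above are meant to be driven, assuming an open framebuffer fd and a driver that advertises FB_CAP_FOURCC; the pixel format and colorspace chosen are purely illustrative:

	struct fb_fix_screeninfo fix;
	struct fb_var_screeninfo var;

	ioctl(fd, FBIOGET_FSCREENINFO, &fix);
	if (fix.capabilities & FB_CAP_FOURCC) {
		ioctl(fd, FBIOGET_VSCREENINFO, &var);
		var.grayscale  = V4L2_PIX_FMT_NV24;	 /* grayscale > 1 selects a FOURCC */
		var.colorspace = V4L2_COLORSPACE_REC709; /* illustrative choice */
		ioctl(fd, FBIOPUT_VSCREENINFO, &var);
	}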
struct fb_cmap {
#include <linux/semaphore.h>
#include <linux/fiemap.h>
#include <linux/rculist_bl.h>
-#include <linux/shrinker.h>
#include <linux/atomic.h>
+#include <linux/shrinker.h>
#include <asm/byteorder.h>
extern int statfs_by_dentry(struct dentry *, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
+extern bool our_mnt(struct vfsmount *mnt);
extern int current_umask(void);
TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT,
+ TRACE_EVENT_FL_NO_SET_FILTER_BIT,
};
enum {
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
+ TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
};
struct ftrace_event_call {
# define INIT_PERF_EVENTS(tsk)
#endif
+#define INIT_TASK_COMM "swapper"
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
.group_leader = &tsk, \
RCU_INIT_POINTER(.real_cred, &init_cred), \
RCU_INIT_POINTER(.cred, &init_cred), \
- .comm = "swapper", \
+ .comm = INIT_TASK_COMM, \
.thread = INIT_THREAD, \
.fs = &init_fs, \
.files = &init_files, \
#define rounddown_pow_of_two(n) \
( \
__builtin_constant_p(n) ? ( \
- (n == 1) ? 0 : \
(1UL << ilog2(n))) : \
__rounddown_pow_of_two(n) \
)
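Dropping the (n == 1) special case corrects the constant-folded result for n == 1. A short illustration of the values involved:

	unsigned long a = rounddown_pow_of_two(1);	/* was 0 with the special case; now 1UL << ilog2(1) == 1 */
	unsigned long b = rounddown_pow_of_two(5);	/* unchanged: 1UL << ilog2(5) == 4 */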
#include <linux/mmzone.h>
#include <linux/rbtree.h>
#include <linux/prio_tree.h>
+#include <linux/atomic.h>
#include <linux/debug_locks.h>
#include <linux/mm_types.h>
#include <linux/range.h>
#define MMC_QUIRK_INAND_CMD38 (1<<6) /* iNAND devices have broken CMD38 */
#define MMC_QUIRK_BLK_NO_CMD23 (1<<7) /* Avoid CMD23 for regular multiblock */
#define MMC_QUIRK_BROKEN_BYTE_MODE_512 (1<<8) /* Avoid sending 512 bytes in */
+#define MMC_QUIRK_LONG_READ_TIME (1<<9) /* Data read time > CSD says */
/* byte mode */
unsigned int poweroff_notify_state; /* eMMC4.5 notify feature */
#define MMC_NO_POWER_NOTIFICATION 0
return c->quirks & MMC_QUIRK_BROKEN_BYTE_MODE_512;
}
+static inline int mmc_card_long_read_time(const struct mmc_card *c)
+{
+ return c->quirks & MMC_QUIRK_LONG_READ_TIME;
+}
+
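A hedged sketch of how the core data-timeout computation might honour the new quirk when a read is set up; the 300 ms floor is an assumption for illustration and not taken from this patch:

	if ((data->flags & MMC_DATA_READ) && mmc_card_long_read_time(card))
		data->timeout_ns = max_t(unsigned int, data->timeout_ns, 300000000);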
#define mmc_card_name(c) ((c)->cid.prod_name)
#define mmc_card_id(c) (dev_name(&(c)->dev))
extern void *dev_seq_start(struct seq_file *seq, loff_t *pos);
extern void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos);
extern void dev_seq_stop(struct seq_file *seq, void *v);
+extern int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops);
#endif
extern int netdev_class_create_file(struct class_attribute *class_attr);
#define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302
#define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303
#define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304
+#define PCI_DEVICE_ID_AMD_15H_NB_F0 0x1600
+#define PCI_DEVICE_ID_AMD_15H_NB_F1 0x1601
+#define PCI_DEVICE_ID_AMD_15H_NB_F2 0x1602
#define PCI_DEVICE_ID_AMD_15H_NB_F3 0x1603
#define PCI_DEVICE_ID_AMD_15H_NB_F4 0x1604
+#define PCI_DEVICE_ID_AMD_15H_NB_F5 0x1605
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
int mmap_locked;
struct user_struct *mmap_user;
struct ring_buffer *rb;
+ struct list_head rb_entry;
/* poll related */
wait_queue_head_t waitq;
*/
struct tc_stats {
- __u64 bytes; /* NUmber of enqueues bytes */
+ __u64 bytes; /* Number of enqueued bytes */
__u32 packets; /* Number of enqueued packets */
__u32 drops; /* Packets dropped because of lack of resources */
__u32 overlimits; /* Number of throttle events when this
__u32 debug; /* debug flags */
/* stats */
- __u32 direct_pkts; /* count of non shapped packets */
+ __u32 direct_pkts; /* count of non shaped packets */
};
enum {
TCA_HTB_UNSPEC,
};
#define NETEM_LOSS_MAX (__NETEM_LOSS_MAX - 1)
-/* State transition probablities for 4 state model */
+/* State transition probabilities for 4 state model */
struct tc_netem_gimodel {
__u32 p13;
__u32 p31;
/* These are for internal use */
struct list_head list;
- long nr; /* objs pending delete */
+ atomic_long_t nr_in_batch; /* objs pending delete */
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
extern void register_shrinker(struct shrinker *);
struct sigma_firmware_header {
unsigned char magic[7];
u8 version;
- u32 crc;
+ __le32 crc;
};
enum {
struct sigma_action {
u8 instr;
u8 len_hi;
- u16 len;
- u16 addr;
+ __le16 len;
+ __be16 addr;
unsigned char payload[];
};
static inline u32 sigma_action_len(struct sigma_action *sa)
{
- return (sa->len_hi << 16) | sa->len;
-}
-
-static inline size_t sigma_action_size(struct sigma_action *sa, u32 payload_len)
-{
- return sizeof(*sa) + payload_len + (payload_len % 2);
+ return (sa->len_hi << 16) | le16_to_cpu(sa->len);
}
extern int process_sigma_firmware(struct i2c_client *client, const char *name);
#define V4L2_PIX_FMT_NV21 v4l2_fourcc('N', 'V', '2', '1') /* 12 Y/CrCb 4:2:0 */
#define V4L2_PIX_FMT_NV16 v4l2_fourcc('N', 'V', '1', '6') /* 16 Y/CbCr 4:2:2 */
#define V4L2_PIX_FMT_NV61 v4l2_fourcc('N', 'V', '6', '1') /* 16 Y/CrCb 4:2:2 */
+#define V4L2_PIX_FMT_NV24 v4l2_fourcc('N', 'V', '2', '4') /* 24 Y/CbCr 4:4:4 */
+#define V4L2_PIX_FMT_NV42 v4l2_fourcc('N', 'V', '4', '2') /* 24 Y/CrCb 4:4:4 */
/* two non contiguous planes - one Y, one Cr + Cb interleaved */
#define V4L2_PIX_FMT_NV12M v4l2_fourcc('N', 'M', '1', '2') /* 12 Y/CbCr 4:2:0 */
#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM ZORRO_ID(VILLAGE_TRONIC, 0x0B, 0)
#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG ZORRO_ID(VILLAGE_TRONIC, 0x0C, 0)
#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_SEGMENTED_MODE ZORRO_ID(VILLAGE_TRONIC, 0x0D, 0)
-#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_MEM1 ZORRO_ID(VILLAGE_TRONIC, 0x15, 0)
-#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_MEM2 ZORRO_ID(VILLAGE_TRONIC, 0x16, 0)
+#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM1 ZORRO_ID(VILLAGE_TRONIC, 0x15, 0)
+#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM2 ZORRO_ID(VILLAGE_TRONIC, 0x16, 0)
#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_REG ZORRO_ID(VILLAGE_TRONIC, 0x17, 0)
#define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3 ZORRO_ID(VILLAGE_TRONIC, 0x18, 0)
#define ZORRO_PROD_VILLAGE_TRONIC_ARIADNE ZORRO_ID(VILLAGE_TRONIC, 0xC9, 0)
static inline u32 dst_mtu(const struct dst_entry *dst)
{
- u32 mtu = dst_metric_raw(dst, RTAX_MTU);
-
- if (!mtu)
- mtu = dst->ops->default_mtu(dst);
-
- return mtu;
+ return dst->ops->mtu(dst);
}
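Since the optional ->default_mtu() becomes a mandatory ->mtu() callback, every dst_ops instance now has to supply one. A hedged sketch of a minimal callback; the fallback policy shown is illustrative only:

	static unsigned int example_mtu(const struct dst_entry *dst)
	{
		unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

		return mtu ? : dst->dev->mtu;	/* fall back to the device MTU */
	}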
/* RTT metrics are stored in milliseconds for user ABI, but used as jiffies */
int (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
unsigned int (*default_advmss)(const struct dst_entry *);
- unsigned int (*default_mtu)(const struct dst_entry *);
+ unsigned int (*mtu)(const struct dst_entry *);
u32 * (*cow_metrics)(struct dst_entry *, unsigned long);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
/** struct ip_options - IP Options
*
* @faddr - Saved first hop address
+ * @nexthop - Saved nexthop address in LSRR and SSRR
* @is_data - Options in __data, rather than skb
* @is_strictroute - Strict source route
* @srr_is_hit - Packet destination addr was our one
*/
struct ip_options {
__be32 faddr;
+ __be32 nexthop;
unsigned char optlen;
unsigned char srr;
unsigned char rr;
u32 metrics[RTAX_MAX];
u32 rate_tokens; /* rate limiting for ICMP */
+ int redirect_genid;
unsigned long rate_last;
unsigned long pmtu_expires;
u32 pmtu_orig;
int (*fcn)(unsigned int events, struct nf_ct_event *item);
};
-extern struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
-extern int nf_conntrack_register_notifier(struct nf_ct_event_notifier *nb);
-extern void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *nb);
+extern int nf_conntrack_register_notifier(struct net *net, struct nf_ct_event_notifier *nb);
+extern void nf_conntrack_unregister_notifier(struct net *net, struct nf_ct_event_notifier *nb);
extern void nf_ct_deliver_cached_events(struct nf_conn *ct);
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
- if (nf_conntrack_event_cb == NULL)
+ if (net->ct.nf_conntrack_event_cb == NULL)
return;
e = nf_ct_ecache_find(ct);
int report)
{
int ret = 0;
+ struct net *net = nf_ct_net(ct);
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
int (*fcn)(unsigned int events, struct nf_exp_event *item);
};
-extern struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
-extern int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *nb);
-extern void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *nb);
+extern int nf_ct_expect_register_notifier(struct net *net, struct nf_exp_event_notifier *nb);
+extern void nf_ct_expect_unregister_notifier(struct net *net, struct nf_exp_event_notifier *nb);
static inline void
nf_ct_expect_event_report(enum ip_conntrack_expect_events event,
u32 pid,
int report)
{
+ struct net *net = nf_ct_exp_net(exp);
struct nf_exp_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_expect_event_cb);
+ notify = rcu_dereference(net->ct.nf_expect_event_cb);
if (notify == NULL)
goto out_unlock;
struct hlist_nulls_head unconfirmed;
struct hlist_nulls_head dying;
struct ip_conntrack_stat __percpu *stat;
+ struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb;
+ struct nf_exp_event_notifier __rcu *nf_expect_event_cb;
int sysctl_events;
unsigned int sysctl_events_retry_timeout;
int sysctl_acct;
u32 qR; /* Cached random number */
unsigned long qavg; /* Average queue length: A scaled */
- psched_time_t qidlestart; /* Start of current idle period */
+ ktime_t qidlestart; /* Start of current idle period */
};
static inline u32 red_rmask(u8 Plog)
static inline int red_is_idling(struct red_parms *p)
{
- return p->qidlestart != PSCHED_PASTPERFECT;
+ return p->qidlestart.tv64 != 0;
}
static inline void red_start_of_idle_period(struct red_parms *p)
{
- p->qidlestart = psched_get_time();
+ p->qidlestart = ktime_get();
}
static inline void red_end_of_idle_period(struct red_parms *p)
{
- p->qidlestart = PSCHED_PASTPERFECT;
+ p->qidlestart.tv64 = 0;
}
static inline void red_restart(struct red_parms *p)
static inline unsigned long red_calc_qavg_from_idle_time(struct red_parms *p)
{
- psched_time_t now;
- long us_idle;
+ s64 delta = ktime_us_delta(ktime_get(), p->qidlestart);
+ long us_idle = min_t(s64, delta, p->Scell_max);
int shift;
- now = psched_get_time();
- us_idle = psched_tdiff_bounded(now, p->qidlestart, p->Scell_max);
-
/*
	 * The problem: ideally, average queue length recalculation should
* be done over constant clock intervals. This is too expensive, so
struct fib_info *fi; /* for client ref to shared metrics */
};
-static inline bool rt_is_input_route(struct rtable *rt)
+static inline bool rt_is_input_route(const struct rtable *rt)
{
return rt->rt_route_iif != 0;
}
-static inline bool rt_is_output_route(struct rtable *rt)
+static inline bool rt_is_output_route(const struct rtable *rt)
{
return rt->rt_route_iif == 0;
}
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
- SCF_SE_CMD_FAILED = 0x00000400,
+ SCF_FUA = 0x00000200,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
+ SCF_BIDI = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
+ TCM_RESERVATION_CONFLICT = 0x10,
};
struct se_obj {
u16 lu_gp_id;
int lu_gp_valid_id;
u32 lu_gp_members;
- atomic_t lu_gp_shutdown;
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
- /* Transport specific error status */
- int transport_error_status;
/* Used to signal cmd->se_tfo->check_release_cmd() usage per cmd */
- int check_release:1;
- int cmd_wait_set:1;
+ unsigned check_release:1;
+ unsigned cmd_wait_set:1;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
u32 se_ordered_id;
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_node;
- struct list_head se_ordered_node;
struct list_head se_lun_node;
struct list_head se_qf_node;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
- struct se_device *se_obj_ptr;
- struct se_device *se_orig_obj_ptr;
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
- int t_tasks_fua;
- bool t_tasks_bidi;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
struct work_struct work;
- /*
- * Used for pre-registered fabric SGL passthrough WRITE and READ
- * with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
- * and other HW target mode fabric modules.
- */
- struct scatterlist *t_task_pt_sgl;
- u32 t_task_pt_sgl_num;
-
struct scatterlist *t_data_sg;
unsigned int t_data_nents;
struct scatterlist *t_bidi_data_sg;
} ____cacheline_aligned;
struct se_session {
- int sess_tearing_down:1;
+ unsigned sess_tearing_down:1;
u64 sess_bin_isid;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
struct t10_reservation t10_pr;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
- struct list_head se_dev_node;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
} ____cacheline_aligned;
struct se_device {
- /* Set to 1 if thread is NOT sleeping on thread_sem */
- u8 thread_active;
- u8 dev_status_timer_flags;
	/* RELATIVE TARGET PORT IDENTIFIER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
u64 write_bytes;
spinlock_t stats_lock;
/* Active commands on this virtual SE device */
- atomic_t active_cmds;
atomic_t simple_cmds;
atomic_t depth_left;
atomic_t dev_ordered_id;
- atomic_t dev_tur_active;
atomic_t execute_tasks;
- atomic_t dev_status_thr_count;
- atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
atomic_t dev_qf_count;
struct se_obj dev_obj;
struct se_obj dev_export_obj;
struct se_queue_obj dev_queue_obj;
spinlock_t delayed_cmd_lock;
- spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
- spinlock_t state_task_lock;
- spinlock_t dev_alua_lock;
spinlock_t dev_reservation_lock;
- spinlock_t dev_state_lock;
spinlock_t dev_status_lock;
- spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
spinlock_t qf_cmd_lock;
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
- struct timer_list dev_status_timer;
/* Pointer to descriptor for processing thread */
struct task_struct *process_thread;
- pid_t process_thread_pid;
- struct task_struct *dev_mgmt_thread;
struct work_struct qf_work_queue;
struct list_head delayed_cmd_list;
- struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
struct list_head qf_cmd_list;
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
- /* Linked list for struct se_global->g_se_dev_list */
- struct list_head g_se_dev_list;
} ____cacheline_aligned;
struct se_hba {
u32 sep_index;
struct scsi_port_stats sep_stats;
/* Used for ALUA Target Port Groups membership */
- atomic_t sep_tg_pt_gp_active;
atomic_t sep_tg_pt_secondary_offline;
/* Used for PR ALL_TG_PT=1 */
atomic_t sep_tg_pt_ref_cnt;
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
-#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0
-#define PYX_TRANSPORT_WRITE_PENDING 1
-
-#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1
-#define PYX_TRANSPORT_HBA_QUEUE_FULL -2
-#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3
-#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4
-#define PYX_TRANSPORT_INVALID_CDB_FIELD -5
-#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6
-#define PYX_TRANSPORT_LU_COMM_FAILURE -7
-#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
-#define PYX_TRANSPORT_WRITE_PROTECTED -9
-#define PYX_TRANSPORT_RESERVATION_CONFLICT -10
-#define PYX_TRANSPORT_ILLEGAL_REQUEST -11
-#define PYX_TRANSPORT_USE_SENSE_REASON -12
-
-#ifndef SAM_STAT_RESERVATION_CONFLICT
-#define SAM_STAT_RESERVATION_CONFLICT 0x18
-#endif
-
-#define TRANSPORT_PLUGIN_FREE 0
-#define TRANSPORT_PLUGIN_REGISTERED 1
-
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
extern int transport_handle_cdb_direct(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
-extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern bool target_stop_task(struct se_task *task, unsigned long *flags);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct sh_mobile_lcdc_chan_cfg;
#define SH_MIPI_DSI_HSABM (1 << 0)
-#define SH_MIPI_DSI_HSPBM (1 << 1)
+#define SH_MIPI_DSI_HBPBM (1 << 1)
+#define SH_MIPI_DSI_HFPBM (1 << 2)
+#define SH_MIPI_DSI_BL2E (1 << 3)
+#define SH_MIPI_DSI_VSEE (1 << 4)
+#define SH_MIPI_DSI_HSEE (1 << 5)
+#define SH_MIPI_DSI_HSAE (1 << 6)
+
+#define SH_MIPI_DSI_HSbyteCLK (1 << 24)
+#define SH_MIPI_DSI_HS6divCLK (1 << 25)
+#define SH_MIPI_DSI_HS4divCLK (1 << 26)
+
+#define SH_MIPI_DSI_SYNC_PULSES_MODE (SH_MIPI_DSI_VSEE | \
+ SH_MIPI_DSI_HSEE | \
+ SH_MIPI_DSI_HSAE)
+#define SH_MIPI_DSI_SYNC_EVENTS_MODE (0)
+#define SH_MIPI_DSI_SYNC_BURST_MODE (SH_MIPI_DSI_BL2E)
struct sh_mipi_dsi_info {
enum sh_mipi_dsi_data_fmt data_format;
struct sh_mobile_lcdc_chan_cfg *lcd_chan;
+ int lane;
unsigned long flags;
u32 clksrc;
unsigned int vsynw_offset;
+ int (*set_dot_clock)(struct platform_device *pdev,
+ void __iomem *base,
+ int enable);
};
#endif
struct sh_mobile_lcdc_chan_cfg {
int chan;
- int bpp;
+ int fourcc;
+ int colorspace;
int interface_type; /* selects RGBn or SYSn I/F, see above */
int clock_divider;
unsigned long flags; /* LCDC_FLAGS_... */
struct sh_mobile_lcdc_board_cfg board_cfg;
struct sh_mobile_lcdc_bl_info bl_info;
struct sh_mobile_lcdc_sys_bus_cfg sys_bus_cfg; /* only for SYSn I/F */
- int nonstd;
struct sh_mobile_meram_cfg *meram_cfg;
};
void mq_put_mnt(struct ipc_namespace *ns)
{
- mntput(ns->mq_mnt);
+ kern_unmount(ns->mq_mnt);
}
static int __init init_mqueue_fs(void)
spin_lock_init(&mq_lock);
- init_ipc_ns.mq_mnt = kern_mount_data(&mqueue_fs_type, &init_ipc_ns);
- if (IS_ERR(init_ipc_ns.mq_mnt)) {
- error = PTR_ERR(init_ipc_ns.mq_mnt);
+ error = mq_init_ns(&init_ipc_ns);
+ if (error)
goto out_filesystem;
- }
return 0;
*/
struct ipc_namespace init_ipc_ns = {
.count = ATOMIC_INIT(1),
-#ifdef CONFIG_POSIX_MQUEUE
- .mq_queues_max = DFLT_QUEUESMAX,
- .mq_msg_max = DFLT_MSGMAX,
- .mq_msgsize_max = DFLT_MSGSIZEMAX,
-#endif
.user_ns = &init_user_ns,
};
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb);
+
void __weak perf_event_print_debug(void) { }
extern __weak const char *perf_pmu_name(void)
*/
cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- perf_event_sched_in(cpuctx, ctx, task);
+ if (ctx->nr_events)
+ cpuctx->task_ctx = ctx;
- cpuctx->task_ctx = ctx;
+ perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
struct ring_buffer *rb;
unsigned int events = POLL_HUP;
+ /*
+ * Race between perf_event_set_output() and perf_poll(): perf_poll()
+ * grabs the rb reference but perf_event_set_output() overrides it.
+ * Here is the timeline for two threads T1, T2:
+ * t0: T1, rb = rcu_dereference(event->rb)
+ * t1: T2, old_rb = event->rb
+ * t2: T2, event->rb = new rb
+ * t3: T2, ring_buffer_detach(old_rb)
+ * t4: T1, ring_buffer_attach(rb1)
+ * t5: T1, poll_wait(event->waitq)
+ *
+ * To avoid this problem, we grab mmap_mutex in perf_poll()
+ * thereby ensuring that the assignment of the new ring buffer
+ * and the detachment of the old buffer appear atomic to perf_poll()
+ */
+ mutex_lock(&event->mmap_mutex);
+
rcu_read_lock();
rb = rcu_dereference(event->rb);
- if (rb)
+ if (rb) {
+ ring_buffer_attach(event, rb);
events = atomic_xchg(&rb->poll, 0);
+ }
rcu_read_unlock();
+ mutex_unlock(&event->mmap_mutex);
+
poll_wait(file, &event->waitq, wait);
return events;
return ret;
}
+static void ring_buffer_attach(struct perf_event *event,
+ struct ring_buffer *rb)
+{
+ unsigned long flags;
+
+ if (!list_empty(&event->rb_entry))
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+ if (!list_empty(&event->rb_entry))
+ goto unlock;
+
+ list_add(&event->rb_entry, &rb->event_list);
+unlock:
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_detach(struct perf_event *event,
+ struct ring_buffer *rb)
+{
+ unsigned long flags;
+
+ if (list_empty(&event->rb_entry))
+ return;
+
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+}
+
+static void ring_buffer_wakeup(struct perf_event *event)
+{
+ struct ring_buffer *rb;
+
+ rcu_read_lock();
+ rb = rcu_dereference(event->rb);
+ list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
+ wake_up_all(&event->waitq);
+ }
+ rcu_read_unlock();
+}
+
static void rb_free_rcu(struct rcu_head *rcu_head)
{
struct ring_buffer *rb;
static void ring_buffer_put(struct ring_buffer *rb)
{
+ struct perf_event *event, *n;
+ unsigned long flags;
+
if (!atomic_dec_and_test(&rb->refcount))
return;
+ spin_lock_irqsave(&rb->event_lock, flags);
+ list_for_each_entry_safe(event, n, &rb->event_list, rb_entry) {
+ list_del_init(&event->rb_entry);
+ wake_up_all(&event->waitq);
+ }
+ spin_unlock_irqrestore(&rb->event_lock, flags);
+
call_rcu(&rb->rcu_head, rb_free_rcu);
}
atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
vma->vm_mm->pinned_vm -= event->mmap_locked;
rcu_assign_pointer(event->rb, NULL);
+ ring_buffer_detach(event, rb);
mutex_unlock(&event->mmap_mutex);
ring_buffer_put(rb);
void perf_event_wakeup(struct perf_event *event)
{
- wake_up_all(&event->waitq);
+ ring_buffer_wakeup(event);
if (event->pending_kill) {
kill_fasync(&event->fasync, SIGIO, event->pending_kill);
INIT_LIST_HEAD(&event->group_entry);
INIT_LIST_HEAD(&event->event_entry);
INIT_LIST_HEAD(&event->sibling_list);
+ INIT_LIST_HEAD(&event->rb_entry);
+
init_waitqueue_head(&event->waitq);
init_irq_work(&event->pending, perf_pending_event);
old_rb = event->rb;
rcu_assign_pointer(event->rb, rb);
+ if (old_rb)
+ ring_buffer_detach(event, old_rb);
ret = 0;
unlock:
mutex_unlock(&event->mmap_mutex);
local_t lost; /* nr records lost */
long watermark; /* wakeup watermark */
+ /* poll crap */
+ spinlock_t event_lock;
+ struct list_head event_list;
struct perf_event_mmap_page *user_page;
void *data_pages[0];
rb->writable = 1;
atomic_set(&rb->refcount, 1);
+
+ INIT_LIST_HEAD(&rb->event_list);
+ spin_lock_init(&rb->event_lock);
}
#ifndef CONFIG_PERF_USE_VMALLOC
static int irq_wait_for_interrupt(struct irqaction *action)
{
+ set_current_state(TASK_INTERRUPTIBLE);
+
while (!kthread_should_stop()) {
- set_current_state(TASK_INTERRUPTIBLE);
if (test_and_clear_bit(IRQTF_RUNTHREAD,
&action->thread_flags)) {
return 0;
}
schedule();
+ set_current_state(TASK_INTERRUPTIBLE);
}
+ __set_current_state(TASK_RUNNING);
return -1;
}
return;
jump_label_lock();
- if (atomic_add_return(1, &key->enabled) == 1)
+ if (atomic_read(&key->enabled) == 0)
jump_label_update(key, JUMP_LABEL_ENABLE);
+ atomic_inc(&key->enabled);
jump_label_unlock();
}
#include <linux/stringify.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
+#include <linux/kmemcheck.h>
#include <asm/sections.h>
void lockdep_init_map(struct lockdep_map *lock, const char *name,
struct lock_class_key *key, int subclass)
{
- memset(lock, 0, sizeof(*lock));
+ int i;
+
+ kmemcheck_mark_initialized(lock, sizeof(*lock));
+
+ for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
+ lock->class_cache[i] = NULL;
#ifdef CONFIG_LOCK_STAT
lock->cpu = raw_smp_processor_id();
raw_spin_lock(&logbuf_lock);
if (con_start != log_end)
retry = 1;
+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+
if (retry && console_trylock())
goto again;
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
if (wake_klogd)
wake_up_klogd();
}
#include <linux/ctype.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
+#include <linux/init_task.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. The timeout is in jiffies. It is not
* interruptible.
+ *
+ * The return value is 0 if timed out, and positive (at least 1, or number of
+ * jiffies left till timeout) if completed.
*/
unsigned long __sched
wait_for_completion_timeout(struct completion *x, unsigned long timeout)
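/*
 * Illustrative sketch (editor's example, not part of the patch): how a
 * caller might rely on the return convention documented above. The
 * function and completion names are hypothetical; assumes
 * <linux/completion.h>, <linux/jiffies.h> and <linux/errno.h>.
 */
static int my_wait_for_irq(struct completion *irq_done)
{
	unsigned long left;

	/* 0 => the 100 ms timeout expired; > 0 => completed with time to spare */
	left = wait_for_completion_timeout(irq_done, msecs_to_jiffies(100));
	return left ? 0 : -ETIMEDOUT;
}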
*
* This waits for completion of a specific task to be signaled. It is
* interruptible.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_interruptible(struct completion *x)
{
*
* This waits for either a completion of a specific task to be signaled or for a
* specified timeout to expire. It is interruptible. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_interruptible_timeout(struct completion *x,
*
* This waits to be signaled for completion of a specific task. It can be
* interrupted by a kill signal.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if completed.
*/
int __sched wait_for_completion_killable(struct completion *x)
{
* This waits for either a completion of a specific task to be
* signaled or for a specified timeout to expire. It can be
* interrupted by a kill signal. The timeout is in jiffies.
+ *
+ * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
+ * positive (at least 1, or number of jiffies left till timeout) if completed.
*/
long __sched
wait_for_completion_killable_timeout(struct completion *x,
*/
idle->sched_class = &idle_sched_class;
ftrace_graph_init_idle_task(idle, cpu);
+#if defined(CONFIG_SMP)
+ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu);
+#endif
}
/*
list_del_leaf_cfs_rq(cfs_rq);
}
+static inline long calc_tg_weight(struct task_group *tg, struct cfs_rq *cfs_rq)
+{
+ long tg_weight;
+
+ /*
+ * Use this CPU's actual weight instead of the last load_contribution
+ * to gain a more accurate current total weight. See
+ * update_cfs_rq_load_contribution().
+ */
+ tg_weight = atomic_read(&tg->load_weight);
+ tg_weight -= cfs_rq->load_contribution;
+ tg_weight += cfs_rq->load.weight;
+
+ return tg_weight;
+}
+
static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
{
- long load_weight, load, shares;
+ long tg_weight, load, shares;
+ tg_weight = calc_tg_weight(tg, cfs_rq);
load = cfs_rq->load.weight;
- load_weight = atomic_read(&tg->load_weight);
- load_weight += load;
- load_weight -= cfs_rq->load_contribution;
-
shares = (tg->shares * load);
- if (load_weight)
- shares /= load_weight;
+ if (tg_weight)
+ shares /= tg_weight;
if (shares < MIN_SHARES)
shares = MIN_SHARES;
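/*
 * Illustrative numbers (editor's note, not part of the patch): with
 * hypothetical values tg->shares == 1024, this cfs_rq contributing
 * load.weight == 512 and calc_tg_weight() returning 2048, the shares
 * granted to this CPU work out to 1024 * 512 / 2048 == 256.
 */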
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq)
{
- if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running)
+ if (!cfs_rq->runtime_enabled || cfs_rq->nr_running)
return;
__return_cfs_rq_runtime(cfs_rq);
* Adding load to a group doesn't make a group heavier, but can cause movement
* of group shares between cpus. Assuming the shares were perfectly aligned one
* can calculate the shift in shares.
+ *
+ * Calculate the effective load difference if @wl is added (subtracted) to @tg
+ * on this @cpu and results in a total addition (subtraction) of @wg to the
+ * total group weight.
+ *
+ * Given a runqueue weight distribution (rw_i) we can compute a shares
+ * distribution (s_i) using:
+ *
+ * s_i = rw_i / \Sum rw_j (1)
+ *
+ * Suppose we have 4 CPUs and our @tg is a direct child of the root group and
+ * has 7 equal weight tasks, distributed as below (rw_i), with the resulting
+ * shares distribution (s_i):
+ *
+ * rw_i = { 2, 4, 1, 0 }
+ * s_i = { 2/7, 4/7, 1/7, 0 }
+ *
+ * As per wake_affine() we're interested in the load of two CPUs (the CPU the
+ * task used to run on and the CPU the waker is running on), we need to
+ * compute the effect of waking a task on either CPU and, in case of a sync
+ * wakeup, compute the effect of the current task going to sleep.
+ *
+ * So for a change of @wl to the local @cpu with an overall group weight change
+ * of @wl we can compute the new shares distribution (s'_i) using:
+ *
+ * s'_i = (rw_i + @wl) / (@wg + \Sum rw_j) (2)
+ *
+ * Suppose we're interested in CPUs 0 and 1, and want to compute the load
+ * differences in waking a task to CPU 0. The additional task changes the
+ * weight and shares distributions like:
+ *
+ * rw'_i = { 3, 4, 1, 0 }
+ * s'_i = { 3/8, 4/8, 1/8, 0 }
+ *
+ * We can then compute the difference in effective weight by using:
+ *
+ * dw_i = S * (s'_i - s_i) (3)
+ *
+ * Where 'S' is the group weight as seen by its parent.
+ *
+ * Therefore the effective change in loads on CPU 0 would be 5/56 (3/8 - 2/7)
+ * times the weight of the group. The effect on CPU 1 would be -4/56 (4/8 -
+ * 4/7) times the weight of the group.
*/
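/*
 * Illustrative arithmetic (editor's note, not part of the patch), spelling
 * out the example above: dw_0 = S * (3/8 - 2/7) = S * (21 - 16)/56 = 5S/56,
 * and dw_1 = S * (4/8 - 4/7) = S * (28 - 32)/56 = -4S/56.
 */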
static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
{
struct sched_entity *se = tg->se[cpu];
- if (!tg->parent)
+ if (!tg->parent) /* the trivial, non-cgroup case */
return wl;
for_each_sched_entity(se) {
- long lw, w;
+ long w, W;
tg = se->my_q->tg;
- w = se->my_q->load.weight;
- /* use this cpu's instantaneous contribution */
- lw = atomic_read(&tg->load_weight);
- lw -= se->my_q->load_contribution;
- lw += w + wg;
+ /*
+ * W = @wg + \Sum rw_j
+ */
+ W = wg + calc_tg_weight(tg, se->my_q);
- wl += w;
+ /*
+ * w = rw_i + @wl
+ */
+ w = se->my_q->load.weight + wl;
- if (lw > 0 && wl < lw)
- wl = (wl * tg->shares) / lw;
+ /*
+ * wl = S * s'_i; see (2)
+ */
+ if (W > 0 && w < W)
+ wl = (w * tg->shares) / W;
else
wl = tg->shares;
- /* zero point is MIN_SHARES */
+ /*
+ * Per the above, wl is the new se->load.weight value; since
+ * those are clipped to [MIN_SHARES, ...) do so now. See
+ * calc_cfs_shares().
+ */
if (wl < MIN_SHARES)
wl = MIN_SHARES;
+
+ /*
+ * wl = dw_i = S * (s'_i - s_i); see (3)
+ */
wl -= se->load.weight;
+
+ /*
+ * Recursively apply this logic to all parent groups to compute
+ * the final effective load change on the root group. Since
+ * only the @tg group gets extra weight, all parent groups can
+ * only redistribute existing shares. @wl is the shift in shares
+ * resulting from this level per the above.
+ */
wg = 0;
}
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
struct sched_domain *sd;
- int i;
+ struct sched_group *sg;
+ int i, smt = 0;
/*
* If the task is going to be woken-up on this cpu and if it is
	 * Otherwise, iterate the domains and find an eligible idle cpu.
*/
rcu_read_lock();
+again:
for_each_domain(target, sd) {
- if (!(sd->flags & SD_SHARE_PKG_RESOURCES))
- break;
+ if (!smt && (sd->flags & SD_SHARE_CPUPOWER))
+ continue;
- for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) {
- if (idle_cpu(i)) {
- target = i;
- break;
+ if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) {
+ if (!smt) {
+ smt = 1;
+ goto again;
}
+ break;
}
- /*
- * Lets stop looking for an idle sibling when we reached
- * the domain that spans the current cpu and prev_cpu.
- */
- if (cpumask_test_cpu(cpu, sched_domain_span(sd)) &&
- cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
- break;
+ sg = sd->groups;
+ do {
+ if (!cpumask_intersects(sched_group_cpus(sg),
+ tsk_cpus_allowed(p)))
+ goto next;
+
+ for_each_cpu(i, sched_group_cpus(sg)) {
+ if (!idle_cpu(i))
+ goto next;
+ }
+
+ target = cpumask_first_and(sched_group_cpus(sg),
+ tsk_cpus_allowed(p));
+ goto done;
+next:
+ sg = sg->next;
+ } while (sg != sd->groups);
}
+done:
rcu_read_unlock();
return target;
}
/**
- * update_sd_lb_stats - Update sched_group's statistics for load balancing.
+ * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
* @sd: sched_domain whose statistics are to be updated.
* @this_cpu: Cpu for which load balance is currently performed.
* @idle: Idle status of this_cpu
SCHED_FEAT(TTWU_QUEUE, 1)
SCHED_FEAT(FORCE_SD_OVERLAP, 0)
+SCHED_FEAT(RT_RUNTIME_SHARE, 1)
{
int more = 0;
+ if (!sched_feat(RT_RUNTIME_SHARE))
+ return more;
+
if (rt_rq->rt_time > rt_rq->rt_runtime) {
raw_spin_unlock(&rt_rq->rt_runtime_lock);
more = do_balance_runtime(rt_rq);
struct alarm *alarm;
ktime_t expired = next->expires;
- if (expired.tv64 >= now.tv64)
+ if (expired.tv64 > now.tv64)
break;
alarm = container_of(next, struct alarm, node);
* released list and do a notify add later.
*/
if (old) {
+ old->event_handler = clockevents_handle_noop;
clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
list_del(&old->list);
list_add(&old->list, &clockevents_released);
* note a margin of 12.5% is used because this can be computed with
* a shift, versus say 10% which would require division.
*/
- return max_nsecs - (max_nsecs >> 5);
+ return max_nsecs - (max_nsecs >> 3);
}
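/*
 * Illustrative note (editor's comment, not part of the patch): a right
 * shift by 3 divides by 8, i.e. reserves the 12.5% margin described in
 * the comment above, whereas the previous shift by 5 only reserved
 * 1/32, roughly 3.1%.
 */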
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
* ~ 0.06ppm granularity for NTP. We apply the same 12.5%
* margin as we do in clocksource_max_deferment()
*/
- sec = (cs->mask - (cs->mask >> 5));
+ sec = (cs->mask - (cs->mask >> 3));
do_div(sec, freq);
do_div(sec, scale);
if (!sec)
(dev->features & CLOCK_EVT_FEAT_C3STOP))
return 0;
- clockevents_exchange_device(NULL, dev);
+ clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
tick_broadcast_device.evtdev = dev;
if (!cpumask_empty(tick_get_broadcast_mask()))
tick_broadcast_start_periodic(dev);
int pid;
rcu_read_lock();
- pid = task_tgid_vnr(current->real_parent);
+ pid = task_tgid_vnr(rcu_dereference(current->real_parent));
rcu_read_unlock();
return pid;
ftrace_pid_function = ftrace_stub;
}
-#undef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
* For those archs that do not test ftrace_trace_stop in their
if (!src->count) {
free_ftrace_hash_rcu(*dst);
rcu_assign_pointer(*dst, EMPTY_HASH);
- return 0;
+ /* still need to update the function records */
+ ret = 0;
+ goto out;
}
/*
/* First see if we did not already create this dir */
list_for_each_entry(system, &event_subsystems, list) {
if (strcmp(system->name, name) == 0) {
- __get_system(system);
system->nr_events++;
return system->entry;
}
*/
err = replace_preds(call, NULL, ps, filter_string, true);
if (err)
- goto fail;
+ call->flags |= TRACE_EVENT_FL_NO_SET_FILTER;
+ else
+ call->flags &= ~TRACE_EVENT_FL_NO_SET_FILTER;
}
list_for_each_entry(call, &ftrace_events, list) {
if (strcmp(call->class->system, system->name) != 0)
continue;
+ if (call->flags & TRACE_EVENT_FL_NO_SET_FILTER)
+ continue;
+
filter_item = kzalloc(sizeof(*filter_item), GFP_KERNEL);
if (!filter_item)
goto fail_mem;
* replace the filter for the call.
*/
filter = call->filter;
- call->filter = filter_item->filter;
+ rcu_assign_pointer(call->filter, filter_item->filter);
filter_item->filter = filter;
fail = false;
filter = call->filter;
if (!filter)
goto out_unlock;
- call->filter = NULL;
+ RCU_INIT_POINTER(call->filter, NULL);
/* Make sure the filter is not being used */
synchronize_sched();
__free_filter(filter);
* string
*/
tmp = call->filter;
- call->filter = filter;
+ rcu_assign_pointer(call->filter, filter);
if (tmp) {
/* Make sure the call is done with the filter */
synchronize_sched();
static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
{
- return ((a->dev_addr == a->dev_addr) &&
+ return ((a->dev_addr == b->dev_addr) &&
(a->dev == b->dev)) ? true : false;
}
iov_iter_count(i));
again:
-
/*
* Bring in the user page that we will copy from _first_.
* Otherwise there's a nasty deadlock on copying from the
written += copied;
balance_dirty_pages_ratelimited(mapping);
-
+ if (fatal_signal_pending(current)) {
+ status = -EINTR;
+ break;
+ }
} while (iov_iter_count(i));
return written ? written : status;
static void khugepaged_alloc_sleep(void)
{
- DEFINE_WAIT(wait);
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_alloc_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
}
#ifndef CONFIG_NUMA
if (unlikely(kthread_should_stop()))
break;
if (khugepaged_has_work()) {
- DEFINE_WAIT(wait);
if (!khugepaged_scan_sleep_millisecs)
continue;
- add_wait_queue(&khugepaged_wait, &wait);
- schedule_timeout_interruptible(
- msecs_to_jiffies(
- khugepaged_scan_sleep_millisecs));
- remove_wait_queue(&khugepaged_wait, &wait);
+ wait_event_freezable_timeout(khugepaged_wait, false,
+ msecs_to_jiffies(khugepaged_scan_sleep_millisecs));
} else if (khugepaged_enabled())
wait_event_freezable(khugepaged_wait,
khugepaged_wait_event());
__SetPageHead(page);
for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
if (anon_vma)
put_anon_vma(anon_vma);
-out:
unlock_page(hpage);
+out:
if (rc != -EAGAIN) {
list_del(&hpage->lru);
put_page(hpage);
*
* Returns @bdi's dirty limit in pages. The term "dirty" in the context of
* dirty balancing includes all PG_dirty, PG_writeback and NFS unstable pages.
- * And the "limit" in the name is not seriously taken as hard limit in
- * balance_dirty_pages().
+ *
+ * Note that balance_dirty_pages() will only seriously take it as a hard limit
+ * when sleeping for max_pause per dirtied page is not enough to keep the dirty
+ * pages under control; for example, when the device is completely stalled due
+ * to some error condition, or when there are 1000 dd tasks writing to a slow
+ * 10MB/s USB key. In other, normal situations it acts more gently, throttling
+ * the tasks more (rather than completely blocking them) when the bdi dirty
+ * pages go high.
*
* It allocates high/low dirty limits to fast/slow devices, in order to prevent
* - starving fast devices
*/
if (unlikely(bdi_thresh > thresh))
bdi_thresh = thresh;
+ /*
+	 * It's very possible that bdi_thresh is close to 0 not because the
+	 * device is slow, but because it has remained inactive for a long time.
+	 * Honour such devices with a reasonably good (hopefully IO-efficient)
+	 * threshold, so that occasional writes won't be blocked and active
+	 * writes can ramp up the threshold quickly.
+ */
bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
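/*
 * Illustrative numbers (editor's note, not part of the patch): with a
 * hypothetical global limit of 100000 pages and 20000 pages already
 * dirty, the floor above works out to (100000 - 20000) / 8 = 10000
 * pages, so even a long-idle bdi is not throttled against a near-zero
 * threshold.
 */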
/*
* scale global setpoint to bdi's:
*
* 8 serves as the safety ratio.
*/
- if (bdi_dirty)
- t = min(t, bdi_dirty * HZ / (8 * bw + 1));
+ t = min(t, bdi_dirty * HZ / (8 * bw + 1));
/*
* The pause time will be settled within range (max_pause/4, max_pause).
if (task_ratelimit)
break;
+ /*
+		 * In the case of an unresponsive NFS server whose NFS dirty
+		 * pages exceed dirty_thresh, give the other, healthy bdis a pipe
+		 * to go through, so that tasks on them still remain responsive.
+		 *
+		 * In theory 1 page is enough to keep the consumer-producer
+		 * pipe going: the flusher cleans 1 page => the task dirties 1
+		 * more page. However, bdi_dirty has accounting errors, so use
+		 * the larger and more IO-friendly bdi_stat_error.
+ */
+ if (bdi_dirty <= bdi_stat_error(bdi))
+ break;
+
if (fatal_signal_pending(current))
break;
}
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
-
__SetPageTail(p);
+ set_page_count(p, 0);
p->first_page = page;
}
}
unsigned long block_migratetype;
int reserve;
- /* Get the start pfn, end pfn and the number of blocks to reserve */
+ /*
+	 * Get the start pfn, end pfn and the number of blocks to reserve.
+ * We have to be careful to be aligned to pageblock_nr_pages to
+ * make sure that we always check pfn_valid for the first page in
+ * the block.
+ */
start_pfn = zone->zone_start_pfn;
end_pfn = start_pfn + zone->spanned_pages;
+ start_pfn = roundup(start_pfn, pageblock_nr_pages);
reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
pageblock_order;
PARTIAL_AC,
PARTIAL_L3,
EARLY,
+ LATE,
FULL
} g_cpucache_up;
{
struct cache_sizes *s = malloc_sizes;
- if (g_cpucache_up != FULL)
+ if (g_cpucache_up < LATE)
return;
for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
{
struct kmem_cache *cachep;
+ g_cpucache_up = LATE;
+
/* Annotate slab for lockdep -- annotate the malloc caches */
init_lock_keys();
goto fail;
addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller);
+ if (!addr)
+ return NULL;
/*
* In this function, newly allocated vm_struct is not added
*/
void register_shrinker(struct shrinker *shrinker)
{
- shrinker->nr = 0;
+ atomic_long_set(&shrinker->nr_in_batch, 0);
down_write(&shrinker_rwsem);
list_add_tail(&shrinker->list, &shrinker_list);
up_write(&shrinker_rwsem);
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
- unsigned long total_scan;
- unsigned long max_pass;
+ long total_scan;
+ long max_pass;
int shrink_ret = 0;
long nr;
long new_nr;
long batch_size = shrinker->batch ? shrinker->batch
: SHRINK_BATCH;
+ max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+ if (max_pass <= 0)
+ continue;
+
/*
* copy the current shrinker scan count into a local variable
* and zero it so that other concurrent shrinker invocations
* don't also do this scanning work.
*/
- do {
- nr = shrinker->nr;
- } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+ nr = atomic_long_xchg(&shrinker->nr_in_batch, 0);
total_scan = nr;
- max_pass = do_shrinker_shrink(shrinker, shrink, 0);
delta = (4 * nr_pages_scanned) / shrinker->seeks;
delta *= max_pass;
do_div(delta, lru_pages + 1);
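/*
 * Illustrative numbers (editor's note, not part of the patch): with the
 * hypothetical values nr_pages_scanned = 100, shrinker->seeks = 2
 * (DEFAULT_SEEKS), max_pass = 1000 objects and lru_pages + 1 = 10000,
 * delta = (4 * 100) / 2 = 200, then 200 * 1000 / 10000 = 20 objects'
 * worth of scan pressure is added for this shrinker.
 */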
* manner that handles concurrent updates. If we exhausted the
* scan, there is no need to do an update.
*/
- do {
- nr = shrinker->nr;
- new_nr = total_scan + nr;
- if (total_scan <= 0)
- break;
- } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+ if (total_scan > 0)
+ new_nr = atomic_long_add_return(total_scan,
+ &shrinker->nr_in_batch);
+ else
+ new_nr = atomic_long_read(&shrinker->nr_in_batch);
trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
}
if (tt_global_entry) {
/* This node is probably going to update its tt table */
tt_global_entry->orig_node->tt_poss_change = true;
- /* The global entry has to be marked as PENDING and has to be
+ /* The global entry has to be marked as ROAMING and has to be
	 * kept for consistency purposes */
- tt_global_entry->flags |= TT_CLIENT_PENDING;
+ tt_global_entry->flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+
send_roam_adv(bat_priv, tt_global_entry->addr,
tt_global_entry->orig_node);
}
const char *message, bool roaming)
{
struct tt_global_entry *tt_global_entry = NULL;
+ struct tt_local_entry *tt_local_entry = NULL;
tt_global_entry = tt_global_hash_find(bat_priv, addr);
if (!tt_global_entry)
if (tt_global_entry->orig_node == orig_node) {
if (roaming) {
- tt_global_entry->flags |= TT_CLIENT_ROAM;
- tt_global_entry->roam_at = jiffies;
- goto out;
+ /* if we are deleting a global entry due to a roam
+ * event, there are two possibilities:
+ * 1) the client roamed from node A to node B => we mark
+ * it with TT_CLIENT_ROAM, we start a timer and we
+ * wait for node B to claim it. In case of timeout
+ * the entry is purged.
+ * 2) the client roamed to us => we can directly delete
+ * the global entry, since it is useless now. */
+ tt_local_entry = tt_local_hash_find(bat_priv,
+ tt_global_entry->addr);
+ if (!tt_local_entry) {
+ tt_global_entry->flags |= TT_CLIENT_ROAM;
+ tt_global_entry->roam_at = jiffies;
+ goto out;
+ }
}
_tt_global_del(bat_priv, tt_global_entry, message);
}
out:
if (tt_global_entry)
tt_global_entry_free_ref(tt_global_entry);
+ if (tt_local_entry)
+ tt_local_entry_free_ref(tt_local_entry);
}
void tt_global_del_orig(struct bat_priv *bat_priv,
static void __bnep_link_session(struct bnep_session *s)
{
- /* It's safe to call __module_get() here because sessions are added
- by the socket layer which has to hold the reference to this module.
- */
- __module_get(THIS_MODULE);
list_add(&s->list, &bnep_session_list);
}
static void __bnep_unlink_session(struct bnep_session *s)
{
list_del(&s->list);
- module_put(THIS_MODULE);
}
static int bnep_send(struct bnep_session *s, void *data, size_t len)
up_write(&bnep_session_sem);
free_netdev(dev);
+ module_put_and_exit(0);
return 0;
}
__bnep_link_session(s);
+ __module_get(THIS_MODULE);
s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name);
if (IS_ERR(s->task)) {
/* Session thread start failed, gotta cleanup. */
+ module_put(THIS_MODULE);
unregister_netdev(dev);
__bnep_unlink_session(s);
err = PTR_ERR(s->task);
static void __cmtp_link_session(struct cmtp_session *session)
{
- __module_get(THIS_MODULE);
list_add(&session->list, &cmtp_session_list);
}
static void __cmtp_unlink_session(struct cmtp_session *session)
{
list_del(&session->list);
- module_put(THIS_MODULE);
}
static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci)
up_write(&cmtp_session_sem);
kfree(session);
+ module_put_and_exit(0);
return 0;
}
__cmtp_link_session(session);
+ __module_get(THIS_MODULE);
session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d",
session->num);
if (IS_ERR(session->task)) {
+ module_put(THIS_MODULE);
err = PTR_ERR(session->task);
goto unlink;
}
{
hci_setup_event_mask(hdev);
- if (hdev->lmp_ver > 1)
+ if (hdev->hci_ver > 1)
hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
if (hdev->features[6] & LMP_SIMPLE_PAIR) {
#include <net/sock.h>
#include "br_private.h"
+#include "br_private_stp.h"
static inline size_t br_nlmsg_size(void)
{
p->state = new_state;
br_log_state(p);
+
+ spin_lock_bh(&p->br->lock);
+ br_port_state_selection(p->br);
+ spin_unlock_bh(&p->br->lock);
+
br_ifinfo_notify(RTM_NEWLINK, p);
return 0;
struct net_bridge_port *p;
unsigned int liveports = 0;
- /* Don't change port states if userspace is handling STP */
- if (br->stp_enabled == BR_USER_STP)
- return;
-
list_for_each_entry(p, &br->port_list, list) {
if (p->state == BR_STATE_DISABLED)
continue;
- if (p->port_no == br->root_port) {
- p->config_pending = 0;
- p->topology_change_ack = 0;
- br_make_forwarding(p);
- } else if (br_is_designated_port(p)) {
- del_timer(&p->message_age_timer);
- br_make_forwarding(p);
- } else {
- p->config_pending = 0;
- p->topology_change_ack = 0;
- br_make_blocking(p);
+ /* Don't change port states if userspace is handling STP */
+ if (br->stp_enabled != BR_USER_STP) {
+ if (p->port_no == br->root_port) {
+ p->config_pending = 0;
+ p->topology_change_ack = 0;
+ br_make_forwarding(p);
+ } else if (br_is_designated_port(p)) {
+ del_timer(&p->message_age_timer);
+ br_make_forwarding(p);
+ } else {
+ p->config_pending = 0;
+ p->topology_change_ack = 0;
+ br_make_blocking(p);
+ }
}
if (p->state == BR_STATE_FORWARDING)
static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
{
- int tmp;
u16 chks;
u16 len;
+ __le16 data;
+
struct cffrml *this = container_obj(layr);
if (this->dofcs) {
chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
- tmp = cpu_to_le16(chks);
- cfpkt_add_trail(pkt, &tmp, 2);
+ data = cpu_to_le16(chks);
+ cfpkt_add_trail(pkt, &data, 2);
} else {
cfpkt_pad_trail(pkt, 2);
}
len = cfpkt_getlen(pkt);
- tmp = cpu_to_le16(len);
- cfpkt_add_head(pkt, &tmp, 2);
+ data = cpu_to_le16(len);
+ cfpkt_add_head(pkt, &data, 2);
cfpkt_info(pkt)->hdr_len += 2;
if (cfpkt_erroneous(pkt)) {
pr_err("Packet is erroneous!\n");
int i, j;
int numrep;
int firstn;
- int rc = -1;
BUG_ON(ruleno >= map->max_rules);
* that this may or may not correspond to the specific types
* referenced by the crush rule.
*/
- if (force >= 0) {
- if (force >= map->max_devices ||
- map->device_parents[force] == 0) {
- /*dprintk("CRUSH: forcefed device dne\n");*/
- rc = -1; /* force fed device dne */
- goto out;
- }
- if (!is_out(map, weight, force, x)) {
- while (1) {
- force_context[++force_pos] = force;
- if (force >= 0)
- force = map->device_parents[force];
- else
- force = map->bucket_parents[-1-force];
- if (force == 0)
- break;
- }
+ if (force >= 0 &&
+ force < map->max_devices &&
+ map->device_parents[force] != 0 &&
+ !is_out(map, weight, force, x)) {
+ while (1) {
+ force_context[++force_pos] = force;
+ if (force >= 0)
+ force = map->device_parents[force];
+ else
+ force = map->bucket_parents[-1-force];
+ if (force == 0)
+ break;
}
}
BUG_ON(1);
}
}
- rc = result_len;
-
-out:
- return rc;
+ return result_len;
}
for_each_net(net) {
for_each_netdev(net, dev) {
if (dev == last)
- break;
+ goto outroll;
if (dev->flags & IFF_UP) {
nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
}
}
+outroll:
raw_notifier_chain_unregister(&netdev_chain, nb);
goto unlock;
}
sizeof(struct dev_iter_state));
}
+int dev_seq_open_ops(struct inode *inode, struct file *file,
+ const struct seq_operations *ops)
+{
+ return seq_open_net(inode, file, ops, sizeof(struct dev_iter_state));
+}
+
static const struct file_operations dev_seq_fops = {
.owner = THIS_MODULE,
.open = dev_seq_open,
static int dev_mc_seq_open(struct inode *inode, struct file *file)
{
- return seq_open_net(inode, file, &dev_mc_seq_ops,
- sizeof(struct seq_net_private));
+ return dev_seq_open_ops(inode, file, &dev_mc_seq_ops);
}
static const struct file_operations dev_mc_seq_fops = {
struct net *net = seq_file_net(seq);
struct neigh_table *tbl = state->tbl;
- pn = pn->next;
+ do {
+ pn = pn->next;
+ } while (pn && !net_eq(pneigh_net(pn), net));
+
while (!pn) {
if (++state->bucket > PNEIGH_HASHMASK)
break;
* but then some measure against one socket starving all other sockets
* would be needed.
*
- * It was 128 by default. Experiments with real servers show, that
+ * Its minimum value is 128. Experiments with real servers show that
* it is absolutely not enough even at 100conn/sec. 256 cures most
- * of problems. This value is adjusted to 128 for very small machines
- * (<=32Mb of memory) and to 1024 on normal or better ones (>=256Mb).
+ * of problems.
+ * This value is adjusted to 128 for low-memory machines,
+ * and it increases in proportion to the machine's memory.
* Note : Dont forget somaxconn that may limit backlog too.
*/
int sysctl_max_syn_backlog = 256;
}
late_initcall(net_secret_init);
+#ifdef CONFIG_INET
static u32 seq_scale(u32 seq)
{
/*
*/
return seq + (ktime_to_ns(ktime_get_real()) >> 6);
}
+#endif
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
__u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
* @shiftlen: shift up to this many bytes
*
* Attempts to shift up to shiftlen worth of bytes, which may be less than
- * the length of the skb, from tgt to skb. Returns number bytes shifted.
+ * the length of the skb, from skb to tgt. Returns the number of bytes shifted.
* It's up to caller to free skb if everything was shifted.
*
* If @tgt runs out of frags, the whole operation is aborted.
rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
rt = NULL;
goto failure;
}
static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst);
+static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
.gc = dn_dst_gc,
.check = dn_dst_check,
.default_advmss = dn_dst_default_advmss,
- .default_mtu = dn_dst_default_mtu,
+ .mtu = dn_dst_mtu,
.cow_metrics = dst_cow_metrics_generic,
.destroy = dn_dst_destroy,
.negative_advice = dn_dst_negative_advice,
return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
}
-static unsigned int dn_dst_default_mtu(const struct dst_entry *dst)
+static unsigned int dn_dst_mtu(const struct dst_entry *dst)
{
- return dst->dev->mtu;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, const void *daddr)
void dn_start_slow_timer(struct sock *sk)
{
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
- sk->sk_timer.function = dn_slow_timer;
- sk->sk_timer.data = (unsigned long)sk;
-
- add_timer(&sk->sk_timer);
+ setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
}
void dn_stop_slow_timer(struct sock *sk)
{
- del_timer(&sk->sk_timer);
+ sk_stop_timer(sk, &sk->sk_timer);
}
static void dn_slow_timer(unsigned long arg)
struct sock *sk = (struct sock *)arg;
struct dn_scp *scp = DN_SK(sk);
- sock_hold(sk);
bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
- sk->sk_timer.expires = jiffies + HZ / 10;
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 10);
goto out;
}
scp->keepalive_fxn(sk);
}
- sk->sk_timer.expires = jiffies + SLOW_INTERVAL;
-
- add_timer(&sk->sk_timer);
+ sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
out:
bh_unlock_sock(sk);
sock_put(sk);
void __user *buffer,
size_t *lenp, loff_t *ppos)
{
+ int old_value = *(int *)ctl->data;
int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+ int new_value = *(int *)ctl->data;
if (write) {
struct ipv4_devconf *cnf = ctl->extra1;
if (cnf == net->ipv4.devconf_dflt)
devinet_copy_dflt_conf(net, i);
+ if (i == IPV4_DEVCONF_ACCEPT_LOCAL - 1)
+ if ((new_value == 0) && (old_value != 0))
+ rt_cache_flush(net, 0);
}
return ret;
if (err) {
int j;
- pmc->sfcount[sfmode]--;
+ if (!delta)
+ pmc->sfcount[sfmode]--;
for (j=0; j<i; j++)
(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
icsk->icsk_ca_ops->name);
}
- if ((ext & (1 << (INET_DIAG_TOS - 1))) && (sk->sk_family != AF_INET6))
- RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
-
r->idiag_family = sk->sk_family;
r->idiag_state = sk->sk_state;
r->idiag_timer = 0;
r->id.idiag_src[0] = inet->inet_rcv_saddr;
r->id.idiag_dst[0] = inet->inet_daddr;
+ /* IPv6 dual-stack sockets use inet->tos for IPv4 connections,
+ * hence this needs to be included regardless of socket family.
+ */
+ if (ext & (1 << (INET_DIAG_TOS - 1)))
+ RTA_PUT_U8(skb, INET_DIAG_TOS, inet->tos);
+
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (r->idiag_family == AF_INET6) {
const struct ipv6_pinfo *np = inet6_sk(sk);
+ if (ext & (1 << (INET_DIAG_TCLASS - 1)))
+ RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
+
ipv6_addr_copy((struct in6_addr *)r->id.idiag_src,
&np->rcv_saddr);
ipv6_addr_copy((struct in6_addr *)r->id.idiag_dst,
&np->daddr);
- if (ext & (1 << (INET_DIAG_TCLASS - 1)))
- RTA_PUT_U8(skb, INET_DIAG_TCLASS, np->tclass);
}
#endif
rt = skb_rtable(skb);
- if (opt->is_strictroute && ip_hdr(skb)->daddr != rt->rt_gateway)
+ if (opt->is_strictroute && opt->nexthop != rt->rt_gateway)
goto sr_failed;
if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
) {
if (srrptr + 3 > srrspace)
break;
- if (memcmp(&ip_hdr(skb)->daddr, &optptr[srrptr-1], 4) == 0)
+ if (memcmp(&opt->nexthop, &optptr[srrptr-1], 4) == 0)
break;
}
if (srrptr + 3 <= srrspace) {
opt->is_changed = 1;
ip_rt_get_source(&optptr[srrptr-1], skb, rt);
+ ip_hdr(skb)->daddr = opt->nexthop;
optptr[2] = srrptr+4;
} else if (net_ratelimit())
printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n");
}
if (srrptr <= srrspace) {
opt->srr_is_hit = 1;
- iph->daddr = nexthop;
+ opt->nexthop = nexthop;
opt->is_changed = 1;
}
return 0;
if (register_netdevice(dev) < 0)
goto failed_free;
+ strcpy(nt->parms.name, dev->name);
+
dev_hold(dev);
ipip_tunnel_link(ipn, nt);
return nt;
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
static int __net_init ipip_init_net(struct net *net)
{
struct ipip_net *ipn = net_generic(net, ipip_net_id);
+ struct ip_tunnel *t;
int err;
ipn->tunnels[0] = ipn->tunnels_wc;
if ((err = register_netdev(ipn->fb_tunnel_dev)))
goto err_reg_dev;
+ t = netdev_priv(ipn->fb_tunnel_dev);
+
+ strcpy(t->parms.name, ipn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
/* Change in oif may mean change in hh_len. */
hh_len = skb_dst(skb)->dev->hard_header_len;
if (skb_headroom(skb) < hh_len &&
- pskb_expand_head(skb, hh_len - skb_headroom(skb), 0, GFP_ATOMIC))
+ pskb_expand_head(skb, HH_DATA_ALIGN(hh_len - skb_headroom(skb)),
+ 0, GFP_ATOMIC))
return -1;
return 0;
# raw + specific targets
config IP_NF_RAW
tristate 'raw table support (required for NOTRACK/TRACE)'
- depends on NETFILTER_ADVANCED
help
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
#include <net/secure_seq.h>
#define RT_FL_TOS(oldflp4) \
- ((u32)(oldflp4->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
+ ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
#define IP_MAX_MTU 0xFFF0
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int rt_chain_length_max __read_mostly = 20;
+static int redirect_genid;
/*
* Interface to generic destination cache.
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst);
+static unsigned int ipv4_mtu(const struct dst_entry *dst);
static void ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
.gc = rt_garbage_collect,
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
- .default_mtu = ipv4_default_mtu,
+ .mtu = ipv4_mtu,
.cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
else {
struct rtable *r = v;
struct neighbour *n;
- int len;
+ int len, HHUptod;
+ rcu_read_lock();
n = dst_get_neighbour(&r->dst);
+ HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
+ rcu_read_unlock();
+
seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
"%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
r->dst.dev ? r->dst.dev->name : "*",
dst_metric(&r->dst, RTAX_RTTVAR)),
r->rt_key_tos,
-1,
- (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0,
+ HHUptod,
r->rt_spec_dst, &len);
seq_printf(seq, "%*s\n", 127 - len, "");
get_random_bytes(&shuffle, sizeof(shuffle));
atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
+ redirect_genid++;
}
/*
spin_unlock_bh(rt_hash_lock_addr(hash));
}
-static int check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
{
struct rtable *rt = (struct rtable *) dst;
__be32 orig_gw = rt->rt_gateway;
rt->rt_gateway = peer->redirect_learned.a4;
n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
- if (IS_ERR(n))
- return PTR_ERR(n);
+ if (IS_ERR(n)) {
+ rt->rt_gateway = orig_gw;
+ return;
+ }
old_n = xchg(&rt->dst._neighbour, n);
if (old_n)
neigh_release(old_n);
- if (!n || !(n->nud_state & NUD_VALID)) {
- if (n)
- neigh_event_send(n, NULL);
- rt->rt_gateway = orig_gw;
- return -EAGAIN;
+ if (!(n->nud_state & NUD_VALID)) {
+ neigh_event_send(n, NULL);
} else {
rt->rt_flags |= RTCF_REDIRECTED;
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
}
- return 0;
}
/* called in rcu_read_lock() section */
peer = rt->peer;
if (peer) {
- if (peer->redirect_learned.a4 != new_gw) {
+ if (peer->redirect_learned.a4 != new_gw ||
+ peer->redirect_genid != redirect_genid) {
peer->redirect_learned.a4 = new_gw;
+ peer->redirect_genid = redirect_genid;
atomic_inc(&__rt_peer_genid);
}
check_peer_redir(&rt->dst, peer);
}
-static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+static void ipv4_validate_peer(struct rtable *rt)
{
- struct rtable *rt = (struct rtable *) dst;
-
- if (rt_is_expired(rt))
- return NULL;
if (rt->rt_peer_genid != rt_peer_genid()) {
struct inet_peer *peer;
peer = rt->peer;
if (peer) {
- check_peer_pmtu(dst, peer);
+ check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_genid != redirect_genid)
+ peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
- peer->redirect_learned.a4 != rt->rt_gateway) {
- if (check_peer_redir(dst, peer))
- return NULL;
- }
+ peer->redirect_learned.a4 != rt->rt_gateway)
+ check_peer_redir(&rt->dst, peer);
}
rt->rt_peer_genid = rt_peer_genid();
}
+}
+
+static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
+{
+ struct rtable *rt = (struct rtable *) dst;
+
+ if (rt_is_expired(rt))
+ return NULL;
+ ipv4_validate_peer(rt);
return dst;
}
return advmss;
}
-static unsigned int ipv4_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
- unsigned int mtu = dst->dev->mtu;
+ const struct rtable *rt = (const struct rtable *) dst;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu && rt_is_output_route(rt))
+ return mtu;
+
+ mtu = dst->dev->mtu;
if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
- const struct rtable *rt = (const struct rtable *) dst;
if (rt->rt_gateway != rt->rt_dst && mtu > 576)
mtu = 576;
dst_init_metrics(&rt->dst, peer->metrics, false);
check_peer_pmtu(&rt->dst, peer);
+ if (peer->redirect_genid != redirect_genid)
+ peer->redirect_learned.a4 = 0;
if (peer->redirect_learned.a4 &&
peer->redirect_learned.a4 != rt->rt_gateway) {
rt->rt_gateway = peer->redirect_learned.a4;
rth->rt_mark == skb->mark &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
+ ipv4_validate_peer(rth);
if (noref) {
dst_use_noref(&rth->dst, jiffies);
skb_dst_set_noref(skb, &rth->dst);
static struct rtable *__mkroute_output(const struct fib_result *res,
const struct flowi4 *fl4,
__be32 orig_daddr, __be32 orig_saddr,
- int orig_oif, struct net_device *dev_out,
+ int orig_oif, __u8 orig_rtos,
+ struct net_device *dev_out,
unsigned int flags)
{
struct fib_info *fi = res->fi;
- u32 tos = RT_FL_TOS(fl4);
struct in_device *in_dev;
u16 type = res->type;
struct rtable *rth;
rth->rt_genid = rt_genid(dev_net(dev_out));
rth->rt_flags = flags;
rth->rt_type = type;
- rth->rt_key_tos = tos;
+ rth->rt_key_tos = orig_rtos;
rth->rt_dst = fl4->daddr;
rth->rt_src = fl4->saddr;
rth->rt_route_iif = 0;
static struct rtable *ip_route_output_slow(struct net *net, struct flowi4 *fl4)
{
struct net_device *dev_out = NULL;
- u32 tos = RT_FL_TOS(fl4);
+ __u8 tos = RT_FL_TOS(fl4);
unsigned int flags = 0;
struct fib_result res;
struct rtable *rth;
make_route:
rth = __mkroute_output(&res, fl4, orig_daddr, orig_saddr, orig_oif,
- dev_out, flags);
+ tos, dev_out, flags);
if (!IS_ERR(rth)) {
unsigned int hash;
(IPTOS_RT_MASK | RTO_ONLINK)) &&
net_eq(dev_net(rth->dst.dev), net) &&
!rt_is_expired(rth)) {
+ ipv4_validate_peer(rth);
dst_use(&rth->dst, jiffies);
RT_CACHE_STAT_INC(out_hit);
rcu_read_unlock_bh();
return NULL;
}
-static unsigned int ipv4_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
- return 0;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
.protocol = cpu_to_be16(ETH_P_IP),
.destroy = ipv4_dst_destroy,
.check = ipv4_blackhole_dst_check,
- .default_mtu = ipv4_blackhole_default_mtu,
+ .mtu = ipv4_blackhole_mtu,
.default_advmss = ipv4_default_advmss,
.update_pmtu = ipv4_rt_blackhole_update_pmtu,
.cow_metrics = ipv4_rt_blackhole_cow_metrics,
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *sin = (struct sockaddr_in *)msg->msg_name;
struct sk_buff *skb;
- unsigned int ulen;
+ unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
goto out;
ulen = skb->len - sizeof(struct udphdr);
- if (len > ulen)
- len = ulen;
- else if (len < ulen)
+ copied = len;
+ if (copied > ulen)
+ copied = ulen;
+ else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
/*
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov, len);
+ msg->msg_iov, copied);
else {
err = skb_copy_and_csum_datagram_iovec(skb,
sizeof(struct udphdr),
if (inet->cmsg_flags)
ip_cmsg_recv(msg, skb);
- err = len;
+ err = copied;
if (flags & MSG_TRUNC)
err = ulen;
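
The point of the change above is that the caller-supplied length must not be clobbered: 'len' keeps the size of the user buffer while a separate 'copied' records how much of the datagram is actually returned, so the MSG_TRUNC flag and the return value stay consistent. A standalone sketch of the same pattern (names are illustrative, not kernel API):

	#include <stddef.h>

	/* Return how many bytes to hand back; report truncation separately. */
	static size_t bounded_copy_len(size_t buf_len, size_t pkt_len, int *truncated)
	{
		size_t copied = buf_len;

		if (copied > pkt_len)
			copied = pkt_len;	/* buffer larger than packet: clamp */
		else if (copied < pkt_len)
			*truncated = 1;		/* packet larger than buffer: truncate */

		return copied;
	}
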
return ERR_PTR(-EACCES);
/* Add default multicast route */
- addrconf_add_mroute(dev);
+ if (!(dev->flags & IFF_LOOPBACK))
+ addrconf_add_mroute(dev);
/* Add link local route */
addrconf_add_lroute(dev);
* request_sock (formerly open request) hash tables.
*/
static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
- const u32 rnd, const u16 synq_hsize)
+ const u32 rnd, const u32 synq_hsize)
{
u32 c;
goto e_inval;
if (val > 255 || val < -1)
goto e_inval;
- np->mcast_hops = val;
+ np->mcast_hops = (val == -1 ? IPV6_DEFAULT_MCASTHOPS : val);
retv = 0;
break;
}
if (!rt->rt6i_peer)
rt6_bind_peer(rt, 1);
- if (inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
+ if (!inet_peer_xrlim_allow(rt->rt6i_peer, 1*HZ))
goto release;
if (dev->addr_len) {
config IP6_NF_RAW
tristate 'raw table support (required for TRACE)'
- depends on NETFILTER_ADVANCED
help
This option adds a `raw' table to ip6tables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
const struct in6_addr *dest);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
-static unsigned int ip6_default_mtu(const struct dst_entry *dst);
+static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
.gc_thresh = 1024,
.check = ip6_dst_check,
.default_advmss = ip6_default_advmss,
- .default_mtu = ip6_default_mtu,
+ .mtu = ip6_mtu,
.cow_metrics = ipv6_cow_metrics,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
.neigh_lookup = ip6_neigh_lookup,
};
-static unsigned int ip6_blackhole_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
- return 0;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst->dev->mtu;
}
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
.protocol = cpu_to_be16(ETH_P_IPV6),
.destroy = ip6_dst_destroy,
.check = ip6_dst_check,
- .default_mtu = ip6_blackhole_default_mtu,
+ .mtu = ip6_blackhole_mtu,
.default_advmss = ip6_default_advmss,
.update_pmtu = ip6_rt_blackhole_update_pmtu,
.cow_metrics = ip6_rt_blackhole_cow_metrics,
int attempts = !in_softirq();
if (!(rt->rt6i_flags&RTF_GATEWAY)) {
- if (rt->rt6i_dst.plen != 128 &&
+ if (ort->rt6i_dst.plen != 128 &&
ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
rt->rt6i_flags |= RTF_ANYCAST;
ipv6_addr_copy(&rt->rt6i_gateway, daddr);
return mtu;
}
-static unsigned int ip6_default_mtu(const struct dst_entry *dst)
+static unsigned int ip6_mtu(const struct dst_entry *dst)
{
- unsigned int mtu = IPV6_MIN_MTU;
struct inet6_dev *idev;
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ if (mtu)
+ return mtu;
+
+ mtu = IPV6_MIN_MTU;
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (register_netdevice(dev) < 0)
goto failed_free;
+ strcpy(nt->parms.name, dev->name);
+
dev_hold(dev);
ipip6_tunnel_link(sitn, nt);
struct ip_tunnel *tunnel = netdev_priv(dev);
tunnel->dev = dev;
- strcpy(tunnel->parms.name, dev->name);
memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
static int __net_init sit_init_net(struct net *net)
{
struct sit_net *sitn = net_generic(net, sit_net_id);
+ struct ip_tunnel *t;
int err;
sitn->tunnels[0] = sitn->tunnels_wc;
if ((err = register_netdev(sitn->fb_tunnel_dev)))
goto err_reg_dev;
+ t = netdev_priv(sitn->fb_tunnel_dev);
+
+ strcpy(t->parms.name, sitn->fb_tunnel_dev->name);
return 0;
err_reg_dev:
if (!want_cookie || tmp_opt.tstamp_ok)
TCP_ECN_create_request(req, tcp_hdr(skb));
+ treq->iif = sk->sk_bound_dev_if;
+
+ /* So that link locals have meaning */
+ if (!sk->sk_bound_dev_if &&
+ ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
+ treq->iif = inet6_iif(skb);
+
if (!isn) {
struct inet_peer *peer = NULL;
atomic_inc(&skb->users);
treq->pktopts = skb;
}
- treq->iif = sk->sk_bound_dev_if;
-
- /* So that link locals have meaning */
- if (!sk->sk_bound_dev_if &&
- ipv6_addr_type(&treq->rmt_addr) & IPV6_ADDR_LINKLOCAL)
- treq->iif = inet6_iif(skb);
if (want_cookie) {
isn = cookie_v6_init_sequence(sk, skb, &req->mss);
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
- unsigned int ulen;
+ unsigned int ulen, copied;
int peeked;
int err;
int is_udplite = IS_UDPLITE(sk);
goto out;
ulen = skb->len - sizeof(struct udphdr);
- if (len > ulen)
- len = ulen;
- else if (len < ulen)
+ copied = len;
+ if (copied > ulen)
+ copied = ulen;
+ else if (copied < ulen)
msg->msg_flags |= MSG_TRUNC;
is_udp4 = (skb->protocol == htons(ETH_P_IP));
* coverage checksum (UDP-Lite), do it before the copy.
*/
- if (len < ulen || UDP_SKB_CB(skb)->partial_cov) {
+ if (copied < ulen || UDP_SKB_CB(skb)->partial_cov) {
if (udp_lib_checksum_complete(skb))
goto csum_copy_err;
}
if (skb_csum_unnecessary(skb))
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr),
- msg->msg_iov,len);
+ msg->msg_iov, copied );
else {
err = skb_copy_and_csum_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov);
if (err == -EINVAL)
datagram_recv_ctl(sk, msg, skb);
}
- err = len;
+ err = copied;
if (flags & MSG_TRUNC)
err = ulen;
/* Get routing info from the tunnel socket */
skb_dst_drop(skb);
- skb_dst_set(skb, dst_clone(__sk_dst_get(sk)));
+ skb_dst_set(skb, dst_clone(__sk_dst_check(sk, 0)));
inet = inet_sk(sk);
fl = &inet->cork.fl;
return -ENOENT;
}
+	/* if we're already stopping, ignore any new requests to stop */
+ if (test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+ spin_unlock_bh(&sta->lock);
+ return -EALREADY;
+ }
+
if (test_bit(HT_AGG_STATE_WANT_START, &tid_tx->state)) {
/* not even started yet! */
ieee80211_assign_tid_tx(sta, tid, NULL);
return 0;
}
+ set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
+
spin_unlock_bh(&sta->lock);
#ifdef CONFIG_MAC80211_HT_DEBUG
sta->sta.addr, tid);
#endif /* CONFIG_MAC80211_HT_DEBUG */
- set_bit(HT_AGG_STATE_STOPPING, &tid_tx->state);
-
del_timer_sync(&tid_tx->addba_resp_timer);
/*
*/
clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);
+ /*
+ * There might be a few packets being processed right now (on
+ * another CPU) that have already gotten past the aggregation
+ * check when it was still OPERATIONAL and consequently have
+ * IEEE80211_TX_CTL_AMPDU set. In that case, this code might
+ * call into the driver at the same time or even before the
+	 * TX path calls into it, which could confuse the driver.
+ *
+ * Wait for all currently running TX paths to finish before
+ * telling the driver. New packets will not go through since
+ * the aggregation session is no longer OPERATIONAL.
+ */
+ synchronize_net();
+
tid_tx->stop_initiator = initiator;
tid_tx->tx_stop = tx;
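
The comment block above spells out an ordering requirement: first make new frames ineligible by clearing the OPERATIONAL bit, then wait for frames that already passed that check, and only then tell the driver the session is going away. A hedged sketch of that ordering; the primitives are real kernel calls, the notify function is invented for illustration:

	clear_bit(HT_AGG_STATE_OPERATIONAL, &tid_tx->state);

	/* wait for TX paths that saw the bit set and may still submit frames */
	synchronize_net();

	notify_driver_agg_stop(sta, tid);	/* hypothetical driver callback */
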
__release(agg_queue);
}
+/*
+ * splice packets from the STA's pending to the local pending,
+ * requires a call to ieee80211_agg_splice_finish later
+ */
+static void __acquires(agg_queue)
+ieee80211_agg_splice_packets(struct ieee80211_local *local,
+ struct tid_ampdu_tx *tid_tx, u16 tid)
+{
+ int queue = ieee80211_ac_from_tid(tid);
+ unsigned long flags;
+
+ ieee80211_stop_queue_agg(local, tid);
+
+ if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
+ " from the pending queue\n", tid))
+ return;
+
+ if (!skb_queue_empty(&tid_tx->pending)) {
+ spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
+ /* copy over remaining packets */
+ skb_queue_splice_tail_init(&tid_tx->pending,
+ &local->pending[queue]);
+ spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
+ }
+}
+
+static void __releases(agg_queue)
+ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
+{
+ ieee80211_wake_queue_agg(local, tid);
+}
+
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
struct tid_ampdu_tx *tid_tx;
tid_tx = rcu_dereference_protected_tid_tx(sta, tid);
/*
- * While we're asking the driver about the aggregation,
- * stop the AC queue so that we don't have to worry
- * about frames that came in while we were doing that,
- * which would require us to put them to the AC pending
- * afterwards which just makes the code more complex.
+ * Start queuing up packets for this aggregation session.
+ * We're going to release them once the driver is OK with
+ * that.
*/
- ieee80211_stop_queue_agg(local, tid);
-
clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);
/*
- * make sure no packets are being processed to get
- * valid starting sequence number
+ * Make sure no packets are being processed. This ensures that
+ * we have a valid starting sequence number and that in-flight
+ * packets have been flushed out and no packets for this TID
+ * will go into the driver during the ampdu_action call.
*/
synchronize_net();
" tid %d\n", tid);
#endif
spin_lock_bh(&sta->lock);
+ ieee80211_agg_splice_packets(local, tid_tx, tid);
ieee80211_assign_tid_tx(sta, tid, NULL);
+ ieee80211_agg_splice_finish(local, tid);
spin_unlock_bh(&sta->lock);
- ieee80211_wake_queue_agg(local, tid);
kfree_rcu(tid_tx, rcu_head);
return;
}
- /* we can take packets again now */
- ieee80211_wake_queue_agg(local, tid);
-
/* activate the timer for the recipient's addBA response */
mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
}
EXPORT_SYMBOL(ieee80211_start_tx_ba_session);
-/*
- * splice packets from the STA's pending to the local pending,
- * requires a call to ieee80211_agg_splice_finish later
- */
-static void __acquires(agg_queue)
-ieee80211_agg_splice_packets(struct ieee80211_local *local,
- struct tid_ampdu_tx *tid_tx, u16 tid)
-{
- int queue = ieee80211_ac_from_tid(tid);
- unsigned long flags;
-
- ieee80211_stop_queue_agg(local, tid);
-
- if (WARN(!tid_tx, "TID %d gone but expected when splicing aggregates"
- " from the pending queue\n", tid))
- return;
-
- if (!skb_queue_empty(&tid_tx->pending)) {
- spin_lock_irqsave(&local->queue_stop_reason_lock, flags);
- /* copy over remaining packets */
- skb_queue_splice_tail_init(&tid_tx->pending,
- &local->pending[queue]);
- spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
- }
-}
-
-static void __releases(agg_queue)
-ieee80211_agg_splice_finish(struct ieee80211_local *local, u16 tid)
-{
- ieee80211_wake_queue_agg(local, tid);
-}
-
static void ieee80211_agg_tx_operational(struct ieee80211_local *local,
struct sta_info *sta, u16 tid)
{
goto out;
}
- del_timer(&tid_tx->addba_resp_timer);
+ del_timer_sync(&tid_tx->addba_resp_timer);
#ifdef CONFIG_MAC80211_HT_DEBUG
printk(KERN_DEBUG "switched off addBA timer for tid %d\n", tid);
#endif
+
+ /*
+ * addba_resp_timer may have fired before we got here, and
+	 * caused WANT_STOP to be set. If that stop request has already
+	 * been processed further, STOPPING might be set as well.
+ */
+ if (test_bit(HT_AGG_STATE_WANT_STOP, &tid_tx->state) ||
+ test_bit(HT_AGG_STATE_STOPPING, &tid_tx->state)) {
+#ifdef CONFIG_MAC80211_HT_DEBUG
+ printk(KERN_DEBUG
+ "got addBA resp for tid %d but we already gave up\n",
+ tid);
+#endif
+ goto out;
+ }
+
/*
* IEEE 802.11-2007 7.3.1.14:
* In an ADDBA Response frame, when the Status Code field
PRINT_HT_CAP((htc->cap & BIT(10)), "HT Delayed Block Ack");
- PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
- "3839 bytes");
PRINT_HT_CAP(!(htc->cap & BIT(11)), "Max AMSDU length: "
+ "3839 bytes");
+ PRINT_HT_CAP((htc->cap & BIT(11)), "Max AMSDU length: "
"7935 bytes");
/*
if (!local->int_scan_req)
return -ENOMEM;
+ for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
+ if (!local->hw.wiphy->bands[band])
+ continue;
+ local->int_scan_req->rates[band] = (u32) -1;
+ }
+
/* if low-level driver supports AP, we also support VLAN */
if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
struct ieee80211_radiotap_header *rthdr;
unsigned char *pos;
- __le16 txflags;
+ u16 txflags;
rthdr = (struct ieee80211_radiotap_header *) skb_push(skb, rtap_len);
txflags = 0;
if (!(info->flags & IEEE80211_TX_STAT_ACK) &&
!is_multicast_ether_addr(hdr->addr1))
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_FAIL);
+ txflags |= IEEE80211_RADIOTAP_F_TX_FAIL;
if ((info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
(info->status.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_CTS);
+ txflags |= IEEE80211_RADIOTAP_F_TX_CTS;
else if (info->status.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS)
- txflags |= cpu_to_le16(IEEE80211_RADIOTAP_F_TX_RTS);
+ txflags |= IEEE80211_RADIOTAP_F_TX_RTS;
put_unaligned_le16(txflags, pos);
pos += 2;
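
The type change above (from __le16 to a plain u16) works because the byte-order conversion happens exactly once, inside put_unaligned_le16(); the old code additionally wrapped each flag in cpu_to_le16(), so on big-endian hosts the value was swapped twice. A minimal sketch of the corrected flow:

	u16 flags = 0;				/* kept in CPU byte order */

	flags |= IEEE80211_RADIOTAP_F_TX_FAIL;
	put_unaligned_le16(flags, pos);		/* single LE conversion on output */
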
struct ieee80211_sub_if_data,
u.ap);
- memset(&sta->sta.drv_priv, 0, hw->sta_data_size);
WARN_ON(drv_sta_add(local, sdata, &sta->sta));
}
}
config NF_CONNTRACK_NETBIOS_NS
tristate "NetBIOS name service protocol support"
- depends on NETFILTER_ADVANCED
select NF_CONNTRACK_BROADCAST
help
NetBIOS name service requests are sent as broadcast messages from an
tristate '"NOTRACK" target support'
depends on IP_NF_RAW || IP6_NF_RAW
depends on NF_CONNTRACK
- depends on NETFILTER_ADVANCED
help
The NOTRACK target allows a select rule to specify
which packets *not* to enter the conntrack/NAT
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipport4_elem data = { };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportip4_elem data = { };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 timeout = h->timeout;
bool with_ports = false;
int ret;
const struct ip_set_hash *h = set->data;
ipset_adtfn adtfn = set->variant->adt[adt];
struct hash_ipportnet4_elem data = { .cidr = HOST_MASK };
- u32 ip, ip_to, p = 0, port, port_to;
+ u32 ip, ip_to = 0, p = 0, port, port_to;
u32 ip2_from = 0, ip2_to, ip2_last, ip2;
u32 timeout = h->timeout;
bool with_ports = false;
static DEFINE_MUTEX(nf_ct_ecache_mutex);
-struct nf_ct_event_notifier __rcu *nf_conntrack_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_conntrack_event_cb);
-
-struct nf_exp_event_notifier __rcu *nf_expect_event_cb __read_mostly;
-EXPORT_SYMBOL_GPL(nf_expect_event_cb);
-
/* deliver cached events and clear cache entry - must be called with locally
* disabled softirqs */
void nf_ct_deliver_cached_events(struct nf_conn *ct)
{
+ struct net *net = nf_ct_net(ct);
unsigned long events;
struct nf_ct_event_notifier *notify;
struct nf_conntrack_ecache *e;
rcu_read_lock();
- notify = rcu_dereference(nf_conntrack_event_cb);
+ notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
if (notify == NULL)
goto out_unlock;
}
EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
-int nf_conntrack_register_notifier(struct nf_ct_event_notifier *new)
+int nf_conntrack_register_notifier(struct net *net,
+ struct nf_ct_event_notifier *new)
{
int ret = 0;
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
- RCU_INIT_POINTER(nf_conntrack_event_cb, new);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
-void nf_conntrack_unregister_notifier(struct nf_ct_event_notifier *new)
+void nf_conntrack_unregister_notifier(struct net *net,
+ struct nf_ct_event_notifier *new)
{
struct nf_ct_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_conntrack_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_conntrack_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- RCU_INIT_POINTER(nf_conntrack_event_cb, NULL);
+ RCU_INIT_POINTER(net->ct.nf_conntrack_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
-int nf_ct_expect_register_notifier(struct nf_exp_event_notifier *new)
+int nf_ct_expect_register_notifier(struct net *net,
+ struct nf_exp_event_notifier *new)
{
int ret = 0;
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_expect_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
if (notify != NULL) {
ret = -EBUSY;
goto out_unlock;
}
- RCU_INIT_POINTER(nf_expect_event_cb, new);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, new);
mutex_unlock(&nf_ct_ecache_mutex);
return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_register_notifier);
-void nf_ct_expect_unregister_notifier(struct nf_exp_event_notifier *new)
+void nf_ct_expect_unregister_notifier(struct net *net,
+ struct nf_exp_event_notifier *new)
{
struct nf_exp_event_notifier *notify;
mutex_lock(&nf_ct_ecache_mutex);
- notify = rcu_dereference_protected(nf_expect_event_cb,
+ notify = rcu_dereference_protected(net->ct.nf_expect_event_cb,
lockdep_is_held(&nf_ct_ecache_mutex));
BUG_ON(notify != new);
- RCU_INIT_POINTER(nf_expect_event_cb, NULL);
+ RCU_INIT_POINTER(net->ct.nf_expect_event_cb, NULL);
mutex_unlock(&nf_ct_ecache_mutex);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
* (C) 2001 by Jay Schulist <jschlst@samba.org>
* (C) 2002-2006 by Harald Welte <laforge@gnumonks.org>
* (C) 2003 by Patrick Mchardy <kaber@trash.net>
- * (C) 2005-2008 by Pablo Neira Ayuso <pablo@netfilter.org>
+ * (C) 2005-2011 by Pablo Neira Ayuso <pablo@netfilter.org>
*
* Initial connection tracking via netlink development funded and
* generally made possible by Network Robots, Inc. (www.networkrobots.com)
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK_EXP);
+static int __net_init ctnetlink_net_init(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ int ret;
+
+ ret = nf_conntrack_register_notifier(net, &ctnl_notifier);
+ if (ret < 0) {
+ pr_err("ctnetlink_init: cannot register notifier.\n");
+ goto err_out;
+ }
+
+ ret = nf_ct_expect_register_notifier(net, &ctnl_notifier_exp);
+ if (ret < 0) {
+		pr_err("ctnetlink_init: cannot register expect notifier.\n");
+ goto err_unreg_notifier;
+ }
+#endif
+ return 0;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+err_unreg_notifier:
+ nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+err_out:
+ return ret;
+#endif
+}
+
+static void ctnetlink_net_exit(struct net *net)
+{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ nf_ct_expect_unregister_notifier(net, &ctnl_notifier_exp);
+ nf_conntrack_unregister_notifier(net, &ctnl_notifier);
+#endif
+}
+
+static void __net_exit ctnetlink_net_exit_batch(struct list_head *net_exit_list)
+{
+ struct net *net;
+
+ list_for_each_entry(net, net_exit_list, exit_list)
+ ctnetlink_net_exit(net);
+}
+
+static struct pernet_operations ctnetlink_net_ops = {
+ .init = ctnetlink_net_init,
+ .exit_batch = ctnetlink_net_exit_batch,
+};
+
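
For reference, the pernet registration pattern used above: register_pernet_subsys() arranges for the .init callback to run for every network namespace, existing and future, and the module has to unregister the operations on exit. A minimal sketch with illustrative names (the ctnetlink code uses .exit_batch; a plain .exit is shown here for brevity):

	static int __net_init example_net_init(struct net *net)
	{
		/* per-namespace setup, e.g. registering notifiers, goes here */
		return 0;
	}

	static void __net_exit example_net_exit(struct net *net)
	{
		/* per-namespace teardown */
	}

	static struct pernet_operations example_net_ops = {
		.init = example_net_init,
		.exit = example_net_exit,
	};

	/* paired in module init/exit:
	 *   register_pernet_subsys(&example_net_ops);
	 *   unregister_pernet_subsys(&example_net_ops);
	 */
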
static int __init ctnetlink_init(void)
{
int ret;
goto err_unreg_subsys;
}
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- ret = nf_conntrack_register_notifier(&ctnl_notifier);
- if (ret < 0) {
- pr_err("ctnetlink_init: cannot register notifier.\n");
+ if (register_pernet_subsys(&ctnetlink_net_ops)) {
+ pr_err("ctnetlink_init: cannot register pernet operations\n");
goto err_unreg_exp_subsys;
}
- ret = nf_ct_expect_register_notifier(&ctnl_notifier_exp);
- if (ret < 0) {
- pr_err("ctnetlink_init: cannot expect register notifier.\n");
- goto err_unreg_notifier;
- }
-#endif
-
return 0;
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
-err_unreg_notifier:
- nf_conntrack_unregister_notifier(&ctnl_notifier);
err_unreg_exp_subsys:
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
-#endif
err_unreg_subsys:
nfnetlink_subsys_unregister(&ctnl_subsys);
err_out:
pr_info("ctnetlink: unregistering from nfnetlink.\n");
nf_ct_remove_userspace_expectations();
-#ifdef CONFIG_NF_CONNTRACK_EVENTS
- nf_ct_expect_unregister_notifier(&ctnl_notifier_exp);
- nf_conntrack_unregister_notifier(&ctnl_notifier);
-#endif
-
+ unregister_pernet_subsys(&ctnetlink_net_ops);
nfnetlink_subsys_unregister(&ctnl_exp_subsys);
nfnetlink_subsys_unregister(&ctnl_subsys);
}
struct netlbl_domaddr_map *addrmap = NULL;
struct netlbl_domaddr4_map *map4 = NULL;
struct netlbl_domaddr6_map *map6 = NULL;
- const struct in_addr *addr4, *mask4;
- const struct in6_addr *addr6, *mask6;
entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
if (entry == NULL)
INIT_LIST_HEAD(&addrmap->list6);
switch (family) {
- case AF_INET:
- addr4 = addr;
- mask4 = mask;
+ case AF_INET: {
+ const struct in_addr *addr4 = addr;
+ const struct in_addr *mask4 = mask;
map4 = kzalloc(sizeof(*map4), GFP_ATOMIC);
if (map4 == NULL)
goto cfg_unlbl_map_add_failure;
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
- case AF_INET6:
- addr6 = addr;
- mask6 = mask;
+ }
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+ case AF_INET6: {
+ const struct in6_addr *addr6 = addr;
+ const struct in6_addr *mask6 = mask;
map6 = kzalloc(sizeof(*map6), GFP_ATOMIC);
if (map6 == NULL)
goto cfg_unlbl_map_add_failure;
map6->list.addr.s6_addr32[3] &= mask6->s6_addr32[3];
ipv6_addr_copy(&map6->list.mask, mask6);
map6->list.valid = 1;
- ret_val = netlbl_af4list_add(&map4->list,
- &addrmap->list4);
+ ret_val = netlbl_af6list_add(&map6->list,
+ &addrmap->list6);
if (ret_val != 0)
goto cfg_unlbl_map_add_failure;
break;
+ }
+#endif /* IPv6 */
default:
goto cfg_unlbl_map_add_failure;
break;
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
case AF_INET:
addr_len = sizeof(struct in_addr);
break;
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
case AF_INET6:
addr_len = sizeof(struct in6_addr);
break;
+#endif /* IPv6 */
default:
return -EPFNOSUPPORT;
}
struct gred_sched_data *q;
if (table->tab[dp] == NULL) {
- table->tab[dp] = kzalloc(sizeof(*q), GFP_KERNEL);
+ table->tab[dp] = kzalloc(sizeof(*q), GFP_ATOMIC);
if (table->tab[dp] == NULL)
return -ENOMEM;
}
ctl->Plog, ctl->Scell_log,
nla_data(tb[TCA_RED_STAB]));
- if (skb_queue_empty(&sch->q))
- red_end_of_idle_period(&q->parms);
+ if (!q->qdisc->q.qlen)
+ red_start_of_idle_period(&q->parms);
sch_tree_unlock(sch);
return 0;
static int
-__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
+__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
+ struct net_device *dev, struct netdev_queue *txq,
+ struct neighbour *mn)
{
- struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, 0);
- struct teql_sched_data *q = qdisc_priv(dev_queue->qdisc);
- struct neighbour *mn = dst_get_neighbour(skb_dst(skb));
+ struct teql_sched_data *q = qdisc_priv(txq->qdisc);
struct neighbour *n = q->ncache;
if (mn->tbl == NULL)
}
static inline int teql_resolve(struct sk_buff *skb,
- struct sk_buff *skb_res, struct net_device *dev)
+ struct sk_buff *skb_res,
+ struct net_device *dev,
+ struct netdev_queue *txq)
{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);
+ struct dst_entry *dst = skb_dst(skb);
+ struct neighbour *mn;
+ int res;
+
if (txq->qdisc == &noop_qdisc)
return -ENODEV;
- if (dev->header_ops == NULL ||
- skb_dst(skb) == NULL ||
- dst_get_neighbour(skb_dst(skb)) == NULL)
+ if (!dev->header_ops || !dst)
return 0;
- return __teql_resolve(skb, skb_res, dev);
+
+ rcu_read_lock();
+ mn = dst_get_neighbour(dst);
+ res = mn ? __teql_resolve(skb, skb_res, dev, txq, mn) : 0;
+ rcu_read_unlock();
+
+ return res;
}
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
continue;
}
- switch (teql_resolve(skb, skb_res, slave)) {
+ switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
case 0:
if (__netif_tx_trylock(slave_txq)) {
unsigned int length = qdisc_pkt_len(skb);
struct sctp_auth_bytes *key;
/* Verify that we are not going to overflow INT_MAX */
- if ((INT_MAX - key_len) < sizeof(struct sctp_auth_bytes))
+ if (key_len > (INT_MAX - sizeof(struct sctp_auth_bytes)))
return NULL;
/* Allocate the shared key */
if ((UNIXCB(skb).pid != siocb->scm->pid) ||
(UNIXCB(skb).cred != siocb->scm->cred)) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
} else {
chunk = min_t(unsigned int, skb->len, size);
if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
if (copied == 0)
copied = -EFAULT;
break;
/* put the skb back if we didn't use it up.. */
if (skb->len) {
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
/* put message back and return */
skb_queue_head(&sk->sk_receive_queue, skb);
+ sk->sk_data_ready(sk, skb->len);
break;
}
} while (size);
[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
[NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 },
- [NL80211_ATTR_MAC] = { .type = NLA_BINARY, .len = ETH_ALEN },
- [NL80211_ATTR_PREV_BSSID] = { .type = NLA_BINARY, .len = ETH_ALEN },
+ [NL80211_ATTR_MAC] = { .len = ETH_ALEN },
+ [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN },
[NL80211_ATTR_KEY] = { .type = NLA_NESTED, },
[NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY,
#define REG_DBG_PRINT(args...)
#endif
+static struct regulatory_request core_request_world = {
+ .initiator = NL80211_REGDOM_SET_BY_CORE,
+ .alpha2[0] = '0',
+ .alpha2[1] = '0',
+ .intersect = false,
+ .processed = true,
+ .country_ie_env = ENVIRON_ANY,
+};
+
/* Receipt of information from last regulatory request */
-static struct regulatory_request *last_request;
+static struct regulatory_request *last_request = &core_request_world;
/* To trigger userspace events */
static struct platform_device *reg_pdev;
module_param(ieee80211_regdom, charp, 0444);
MODULE_PARM_DESC(ieee80211_regdom, "IEEE 802.11 regulatory domain code");
-static void reset_regdomains(void)
+static void reset_regdomains(bool full_reset)
{
/* avoid freeing static information or freeing something twice */
if (cfg80211_regdomain == cfg80211_world_regdom)
cfg80211_world_regdom = &world_regdom;
cfg80211_regdomain = NULL;
+
+ if (!full_reset)
+ return;
+
+ if (last_request != &core_request_world)
+ kfree(last_request);
+ last_request = &core_request_world;
}
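
The reason for the static core_request_world object above: last_request can now never be NULL, and because the default is static it must never be handed to kfree(). Every path that replaces or drops last_request therefore checks for the sentinel first, as sketched here with illustrative names:

	static struct regulatory_request default_request;	/* static, never freed */
	static struct regulatory_request *last_request = &default_request;

	static void drop_last_request(void)
	{
		if (last_request != &default_request)
			kfree(last_request);
		last_request = &default_request;
	}
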
/*
{
BUG_ON(!last_request);
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_world_regdom = rd;
cfg80211_regdomain = rd;
}
new_request:
- kfree(last_request);
+ if (last_request != &core_request_world)
+ kfree(last_request);
last_request = pending_request;
last_request->intersect = intersect;
{
struct regulatory_request *request;
- kfree(last_request);
- last_request = NULL;
-
request = kzalloc(sizeof(struct regulatory_request),
GFP_KERNEL);
if (!request)
mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);
- reset_regdomains();
+ reset_regdomains(true);
restore_alpha2(alpha2, reset_user);
/*
}
request_wiphy = wiphy_idx_to_wiphy(last_request->wiphy_idx);
+ if (!request_wiphy &&
+ (last_request->initiator == NL80211_REGDOM_SET_BY_DRIVER ||
+ last_request->initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE)) {
+		schedule_delayed_work(&reg_timeout, 0);
+ return -ENODEV;
+ }
if (!last_request->intersect) {
int r;
if (last_request->initiator != NL80211_REGDOM_SET_BY_DRIVER) {
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = rd;
return 0;
}
if (r)
return r;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = rd;
return 0;
}
rd = NULL;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = intersected_rd;
return 0;
kfree(rd);
rd = NULL;
- reset_regdomains();
+ reset_regdomains(false);
cfg80211_regdomain = intersected_rd;
return 0;
mutex_lock(&cfg80211_mutex);
	mutex_lock(&reg_mutex);
- reset_regdomains();
-
- kfree(last_request);
+ reset_regdomains(true);
- last_request = NULL;
	dev_set_uevent_suppress(&reg_pdev->dev, true);
platform_device_unregister(reg_pdev);
return dst_metric_advmss(dst->path);
}
-static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
+static unsigned int xfrm_mtu(const struct dst_entry *dst)
{
- return dst_mtu(dst->path);
+ unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+
+ return mtu ? : dst_mtu(dst->path);
}
static struct neighbour *xfrm_neigh_lookup(const struct dst_entry *dst, const void *daddr)
dst_ops->check = xfrm_dst_check;
if (likely(dst_ops->default_advmss == NULL))
dst_ops->default_advmss = xfrm_default_advmss;
- if (likely(dst_ops->default_mtu == NULL))
- dst_ops->default_mtu = xfrm_default_mtu;
+ if (likely(dst_ops->mtu == NULL))
+ dst_ops->mtu = xfrm_mtu;
if (likely(dst_ops->negative_advice == NULL))
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))
static int d_namespace_path(struct path *path, char *buf, int buflen,
char **name, int flags)
{
- struct path root, tmp;
char *res;
- int connected, error = 0;
+ int error = 0;
+ int connected = 1;
+
+ if (path->mnt->mnt_flags & MNT_INTERNAL) {
+ /* it's not mounted anywhere */
+ res = dentry_path(path->dentry, buf, buflen);
+ *name = res;
+ if (IS_ERR(res)) {
+ *name = buf;
+ return PTR_ERR(res);
+ }
+ if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+ strncmp(*name, "/sys/", 5) == 0) {
+ /* TODO: convert over to using a per namespace
+ * control instead of hard coded /proc
+ */
+ return prepend(name, *name - buf, "/proc", 5);
+ }
+ return 0;
+ }
- /* Get the root we want to resolve too, released below */
+	/* resolve paths relative to chroot? */
if (flags & PATH_CHROOT_REL) {
- /* resolve paths relative to chroot */
+ struct path root;
get_fs_root(current->fs, &root);
- } else {
- /* resolve paths relative to namespace */
- root.mnt = current->nsproxy->mnt_ns->root;
- root.dentry = root.mnt->mnt_root;
- path_get(&root);
+ res = __d_path(path, &root, buf, buflen);
+ if (res && !IS_ERR(res)) {
+ /* everything's fine */
+ *name = res;
+ path_put(&root);
+ goto ok;
+ }
+ path_put(&root);
+ connected = 0;
}
- tmp = root;
- res = __d_path(path, &tmp, buf, buflen);
+ res = d_absolute_path(path, buf, buflen);
*name = res;
/* handle error conditions - and still allow a partial path to
*name = buf;
goto out;
}
+ if (!our_mnt(path->mnt))
+ connected = 0;
+ok:
/* Handle two cases:
* 1. A deleted dentry && profile is not allowing mediation of deleted
* 2. On some filesystems, newly allocated dentries appear to the
goto out;
}
- /* Determine if the path is connected to the expected root */
- connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt;
-
- /* If the path is not connected,
+ /* If the path is not connected to the expected root,
* check if it is a sysctl and handle specially else remove any
* leading / that __d_path may have returned.
* Unless
* namespace root.
*/
if (!connected) {
- /* is the disconnect path a sysctl? */
- if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
- strncmp(*name, "/sys/", 5) == 0) {
- /* TODO: convert over to using a per namespace
- * control instead of hard coded /proc
- */
- error = prepend(name, *name - buf, "/proc", 5);
- } else if (!(flags & PATH_CONNECT_PATH) &&
+ if (!(flags & PATH_CONNECT_PATH) &&
!(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
- (tmp.mnt == current->nsproxy->mnt_ns->root &&
- tmp.dentry == tmp.mnt->mnt_root))) {
+ our_mnt(path->mnt))) {
/* disconnected path, don't return pathname starting
* with '/'
*/
}
out:
- path_put(&root);
-
return error;
}
{
char *pos = ERR_PTR(-ENOMEM);
if (buflen >= 256) {
- struct path ns_root = { };
/* go to whatever namespace root we are under */
- pos = __d_path(path, &ns_root, buffer, buflen - 1);
+ pos = d_absolute_path(path, buffer, buflen - 1);
if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
struct inode *inode = path->dentry->d_inode;
if (inode && S_ISDIR(inode->i_mode)) {
pos = tomoyo_get_local_path(path->dentry, buf,
buf_len - 1);
/* Get absolute name for the rest. */
- else
+ else {
pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+ /*
+ * Fall back to local name if absolute name is not
+ * available.
+ */
+ if (pos == ERR_PTR(-EINVAL))
+ pos = tomoyo_get_local_path(path->dentry, buf,
+ buf_len - 1);
+ }
encode:
if (IS_ERR(pos))
continue;
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS 1101HA", POS_FIX_LPIB),
SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
- SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
/* SCH */
{ PCI_DEVICE(0x8086, 0x811b),
.driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP |
- AZX_DCAPS_BUFSIZE},
+ AZX_DCAPS_BUFSIZE | AZX_DCAPS_POSFIX_LPIB }, /* Poulsbo */
+ /* ICH */
{ PCI_DEVICE(0x8086, 0x2668),
.driver_data = AZX_DRIVER_ICH | AZX_DCAPS_OLD_SSYNC |
AZX_DCAPS_BUFSIZE }, /* ICH6 */
imux = &spec->input_mux[mux_idx];
if (!imux->num_items && mux_idx > 0)
imux = &spec->input_mux[0];
+ if (!imux->num_items)
+ return 0;
if (idx >= imux->num_items)
idx = imux->num_items - 1;
case AUTO_PIN_SPEAKER_OUT:
if (cfg->line_outs == 1)
return "Speaker";
+ if (cfg->line_outs == 2)
+ return ch ? "Bass Speaker" : "Speaker";
break;
case AUTO_PIN_HP_OUT:
/* for multi-io case, only the primary out */
if (!nid)
continue;
if (found_in_nid_list(nid, spec->multiout.dac_nids,
- spec->multiout.num_dacs))
+ ARRAY_SIZE(spec->private_dac_nids)))
continue;
if (found_in_nid_list(nid, spec->multiout.hp_out_nid,
ARRAY_SIZE(spec->multiout.hp_out_nid)))
return 0;
}
+/* return 0 if no possible DAC is found, 1 if one or more found */
static int alc_auto_fill_extra_dacs(struct hda_codec *codec, int num_outs,
const hda_nid_t *pins, hda_nid_t *dacs)
{
if (!dacs[i])
dacs[i] = alc_auto_look_for_dac(codec, pins[i]);
}
- return 0;
+ return 1;
}
static int alc_auto_fill_multi_ios(struct hda_codec *codec,
static int alc_auto_fill_dac_nids(struct hda_codec *codec)
{
struct alc_spec *spec = codec->spec;
- const struct auto_pin_cfg *cfg = &spec->autocfg;
+ struct auto_pin_cfg *cfg = &spec->autocfg;
bool redone = false;
int i;
spec->multiout.extra_out_nid[0] = 0;
memset(spec->private_dac_nids, 0, sizeof(spec->private_dac_nids));
spec->multiout.dac_nids = spec->private_dac_nids;
+ spec->multi_ios = 0;
/* fill hard-wired DACs first */
if (!redone) {
for (i = 0; i < cfg->line_outs; i++) {
if (spec->private_dac_nids[i])
spec->multiout.num_dacs++;
- else
+ else {
memmove(spec->private_dac_nids + i,
spec->private_dac_nids + i + 1,
sizeof(hda_nid_t) * (cfg->line_outs - i - 1));
+ spec->private_dac_nids[cfg->line_outs - 1] = 0;
+ }
}
if (cfg->line_outs == 1 && cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
if (cfg->line_out_type != AUTO_PIN_HP_OUT)
alc_auto_fill_extra_dacs(codec, cfg->hp_outs, cfg->hp_pins,
spec->multiout.hp_out_nid);
- if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT)
- alc_auto_fill_extra_dacs(codec, cfg->speaker_outs, cfg->speaker_pins,
- spec->multiout.extra_out_nid);
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ int err = alc_auto_fill_extra_dacs(codec, cfg->speaker_outs,
+ cfg->speaker_pins,
+ spec->multiout.extra_out_nid);
+ /* if no speaker volume is assigned, try again as the primary
+ * output
+ */
+ if (!err && cfg->speaker_outs > 0 &&
+ cfg->line_out_type == AUTO_PIN_HP_OUT) {
+ cfg->hp_outs = cfg->line_outs;
+ memcpy(cfg->hp_pins, cfg->line_out_pins,
+ sizeof(cfg->hp_pins));
+ cfg->line_outs = cfg->speaker_outs;
+ memcpy(cfg->line_out_pins, cfg->speaker_pins,
+ sizeof(cfg->speaker_pins));
+ cfg->speaker_outs = 0;
+ memset(cfg->speaker_pins, 0, sizeof(cfg->speaker_pins));
+ cfg->line_out_type = AUTO_PIN_SPEAKER_OUT;
+ redone = false;
+ goto again;
+ }
+ }
return 0;
}
}
static int alc_auto_create_extra_out(struct hda_codec *codec, hda_nid_t pin,
- hda_nid_t dac, const char *pfx)
+ hda_nid_t dac, const char *pfx,
+ int cidx)
{
struct alc_spec *spec = codec->spec;
hda_nid_t sw, vol;
if (is_ctl_used(spec->sw_ctls, val))
return 0; /* already created */
mark_ctl_usage(spec->sw_ctls, val);
- return add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, val);
+ return __add_pb_sw_ctrl(spec, ALC_CTL_WIDGET_MUTE, pfx, cidx, val);
}
sw = alc_look_for_out_mute_nid(codec, pin, dac);
vol = alc_look_for_out_vol_nid(codec, pin, dac);
- err = alc_auto_add_stereo_vol(codec, pfx, 0, vol);
+ err = alc_auto_add_stereo_vol(codec, pfx, cidx, vol);
if (err < 0)
return err;
- err = alc_auto_add_stereo_sw(codec, pfx, 0, sw);
+ err = alc_auto_add_stereo_sw(codec, pfx, cidx, sw);
if (err < 0)
return err;
return 0;
hda_nid_t dac = *dacs;
if (!dac)
dac = spec->multiout.dac_nids[0];
- return alc_auto_create_extra_out(codec, *pins, dac, pfx);
+ return alc_auto_create_extra_out(codec, *pins, dac, pfx, 0);
}
if (dacs[num_pins - 1]) {
/* OK, we have a multi-output system with individual volumes */
for (i = 0; i < num_pins; i++) {
- snprintf(name, sizeof(name), "%s %s",
- pfx, channel_name[i]);
- err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
- name);
+ if (num_pins >= 3) {
+ snprintf(name, sizeof(name), "%s %s",
+ pfx, channel_name[i]);
+ err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+ name, 0);
+ } else {
+ err = alc_auto_create_extra_out(codec, pins[i], dacs[i],
+ pfx, i);
+ }
if (err < 0)
return err;
}
unsigned int gpio_mute;
unsigned int gpio_led;
unsigned int gpio_led_polarity;
+ unsigned int vref_mute_led_nid; /* pin NID for mute-LED vref control */
unsigned int vref_led;
/* stream */
spec->eapd_switch = val;
get_int_hint(codec, "gpio_led_polarity", &spec->gpio_led_polarity);
if (get_int_hint(codec, "gpio_led", &spec->gpio_led)) {
- if (spec->gpio_led <= 8) {
- spec->gpio_mask |= spec->gpio_led;
- spec->gpio_dir |= spec->gpio_led;
- if (spec->gpio_led_polarity)
- spec->gpio_data |= spec->gpio_led;
- }
+ spec->gpio_mask |= spec->gpio_led;
+ spec->gpio_dir |= spec->gpio_led;
+ if (spec->gpio_led_polarity)
+ spec->gpio_data |= spec->gpio_led;
}
}
int pinctl, def_conf;
/* power on when no jack detection is available */
- if (!spec->hp_detect) {
+ /* or when the VREF is used for controlling LED */
+ if (!spec->hp_detect ||
+ spec->vref_mute_led_nid == nid) {
stac_toggle_power_map(codec, nid, 1);
continue;
}
if (sscanf(dev->name, "HP_Mute_LED_%d_%x",
&spec->gpio_led_polarity,
&spec->gpio_led) == 2) {
- if (spec->gpio_led < 4)
+ unsigned int max_gpio;
+ max_gpio = snd_hda_param_read(codec, codec->afg,
+ AC_PAR_GPIO_CAP);
+ max_gpio &= AC_GPIO_IO_COUNT;
+ if (spec->gpio_led < max_gpio)
spec->gpio_led = 1 << spec->gpio_led;
+ else
+ spec->vref_mute_led_nid = spec->gpio_led;
return 1;
}
if (sscanf(dev->name, "HP_Mute_LED_%d",
set_hp_led_gpio(codec);
return 1;
}
+ /* BIOS bug: unfilled OEM string */
+ if (strstr(dev->name, "HP_Mute_LED_P_G")) {
+ set_hp_led_gpio(codec);
+ spec->gpio_led_polarity = 1;
+ return 1;
+ }
}
/*
struct sigmatel_spec *spec = codec->spec;
/* sync mute LED */
- if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
- stac_gpio_set(codec, spec->gpio_mask,
- spec->gpio_dir, spec->gpio_data);
- } else {
- stac_vrefout_set(codec,
- spec->gpio_led, spec->vref_led);
- }
- }
- return 0;
-}
-
-static int stac92xx_post_suspend(struct hda_codec *codec)
-{
- struct sigmatel_spec *spec = codec->spec;
- if (spec->gpio_led > 8) {
- /* with vref-out pin used for mute led control
- * codec AFG is prevented from D3 state, but on
- * system suspend it can (and should) be used
- */
- snd_hda_codec_read(codec, codec->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
- }
+ if (spec->vref_mute_led_nid)
+ stac_vrefout_set(codec, spec->vref_mute_led_nid,
+ spec->vref_led);
+ else if (spec->gpio_led)
+ stac_gpio_set(codec, spec->gpio_mask,
+ spec->gpio_dir, spec->gpio_data);
return 0;
}
struct sigmatel_spec *spec = codec->spec;
if (power_state == AC_PWRST_D3) {
- if (spec->gpio_led > 8) {
+ if (spec->vref_mute_led_nid) {
/* with vref-out pin used for mute led control
* codec AFG is prevented from D3 state
*/
}
}
/*polarity defines *not* muted state level*/
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
if (muted)
spec->gpio_data &= ~spec->gpio_led; /* orange */
else
muted_lvl = spec->gpio_led_polarity ?
AC_PINCTL_VREF_GRD : AC_PINCTL_VREF_HIZ;
spec->vref_led = muted ? muted_lvl : notmtd_lvl;
- stac_vrefout_set(codec, spec->gpio_led, spec->vref_led);
+ stac_vrefout_set(codec, spec->vref_mute_led_nid,
+ spec->vref_led);
}
return 0;
}
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
spec->gpio_dir |= spec->gpio_led;
spec->gpio_data |= spec->gpio_led;
} else {
codec->patch_ops.set_power_state =
stac92xx_set_power_state;
- codec->patch_ops.post_suspend =
- stac92xx_post_suspend;
}
codec->patch_ops.pre_resume = stac92xx_pre_resume;
codec->patch_ops.check_power_status =
#ifdef CONFIG_SND_HDA_POWER_SAVE
if (spec->gpio_led) {
- if (spec->gpio_led <= 8) {
+ if (!spec->vref_mute_led_nid) {
spec->gpio_mask |= spec->gpio_led;
spec->gpio_dir |= spec->gpio_led;
spec->gpio_data |= spec->gpio_led;
} else {
codec->patch_ops.set_power_state =
stac92xx_set_power_state;
- codec->patch_ops.post_suspend =
- stac92xx_post_suspend;
}
codec->patch_ops.pre_resume = stac92xx_pre_resume;
codec->patch_ops.check_power_status =
static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */
static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */
static int enable = 1;
+static int codecs = 1;
module_param(index, int, 0444);
MODULE_PARM_DESC(index, "Index value for SiS7019 Audio Accelerator.");
MODULE_PARM_DESC(id, "ID string for SiS7019 Audio Accelerator.");
module_param(enable, bool, 0444);
MODULE_PARM_DESC(enable, "Enable SiS7019 Audio Accelerator.");
+module_param(codecs, int, 0444);
+MODULE_PARM_DESC(codecs, "Bitmask of codecs expected to be present (default 1, primary codec only)");
static DEFINE_PCI_DEVICE_TABLE(snd_sis7019_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x7019) },
dma_addr_t silence_dma_addr;
};
+/* These values are also used by the module param 'codecs' to indicate
+ * which codecs should be present.
+ */
#define SIS_PRIMARY_CODEC_PRESENT 0x0001
#define SIS_SECONDARY_CODEC_PRESENT 0x0002
#define SIS_TERTIARY_CODEC_PRESENT 0x0004
{
unsigned long io = sis->ioport;
void __iomem *ioaddr = sis->ioaddr;
+ unsigned long timeout;
u16 status;
int count;
int i;
while ((inw(io + SIS_AC97_STATUS) & SIS_AC97_STATUS_BUSY) && --count)
udelay(1);
+ /* Command complete, we can let go of the semaphore now.
+ */
+ outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
+ if (!count)
+ return -EIO;
+
/* Now that we've finished the reset, find out what's attached.
+ * There are some codec/board combinations that take an extremely
+ * long time to come up. 350+ ms has been observed in the field,
+ * so we'll give them up to 500ms.
*/
- status = inl(io + SIS_AC97_STATUS);
- if (status & SIS_AC97_STATUS_CODEC_READY)
- sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
- if (status & SIS_AC97_STATUS_CODEC2_READY)
- sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
- if (status & SIS_AC97_STATUS_CODEC3_READY)
- sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
-
- /* All done, let go of the semaphore, and check for errors
+ sis->codecs_present = 0;
+ timeout = msecs_to_jiffies(500) + jiffies;
+ while (time_before_eq(jiffies, timeout)) {
+ status = inl(io + SIS_AC97_STATUS);
+ if (status & SIS_AC97_STATUS_CODEC_READY)
+ sis->codecs_present |= SIS_PRIMARY_CODEC_PRESENT;
+ if (status & SIS_AC97_STATUS_CODEC2_READY)
+ sis->codecs_present |= SIS_SECONDARY_CODEC_PRESENT;
+ if (status & SIS_AC97_STATUS_CODEC3_READY)
+ sis->codecs_present |= SIS_TERTIARY_CODEC_PRESENT;
+
+ if (sis->codecs_present == codecs)
+ break;
+
+ msleep(1);
+ }
+
+ /* All done, check for errors.
*/
- outl(SIS_AC97_SEMA_RELEASE, io + SIS_AC97_SEMA);
- if (!sis->codecs_present || !count)
+ if (!sis->codecs_present) {
+ printk(KERN_ERR "sis7019: could not find any codecs\n");
return -EIO;
+ }
+
+ if (sis->codecs_present != codecs) {
+ printk(KERN_WARNING "sis7019: missing codecs, found %0x, expected %0x\n",
+ sis->codecs_present, codecs);
+ }
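
The loop above is a bounded poll: keep re-reading the codec-ready bits until either every codec named in the 'codecs' bitmask has appeared or a 500 ms jiffies deadline passes. The same idiom in isolation, with a hypothetical ready test:

	unsigned long timeout = jiffies + msecs_to_jiffies(500);

	while (time_before_eq(jiffies, timeout)) {
		if (device_is_ready())		/* hypothetical ready check */
			break;
		msleep(1);
	}
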
/* Let the hardware know that the audio driver is alive,
* and enable PCM slots on the AC-link for L/R playback (3 & 4) and
if (!enable)
goto error_out;
+ /* The user can specify which codecs should be present so that we
+ * can wait for them to show up if they are slow to recover from
+ * the AC97 cold reset. We default to a single codec, the primary.
+ *
+ * We assume that SIS_PRIMARY_*_PRESENT matches bits 0-2.
+ */
+ codecs &= SIS_PRIMARY_CODEC_PRESENT | SIS_SECONDARY_CODEC_PRESENT |
+ SIS_TERTIARY_CODEC_PRESENT;
+ if (!codecs)
+ codecs = SIS_PRIMARY_CODEC_PRESENT;
+
rc = snd_card_create(index, id, THIS_MODULE, sizeof(*sis), &card);
if (rc < 0)
goto error_out;
config SND_ATMEL_SOC
tristate "SoC Audio for the Atmel System-on-Chip"
- depends on ARCH_AT91 || AVR32
+ depends on ARCH_AT91
help
Say Y or M if you want to add support for codecs attached to
the ATMEL SSC interface. You will also need
Say Y if you want to add support for SoC audio on WM8731-based
AT91sam9g20 evaluation board.
-config SND_AT32_SOC_PLAYPAQ
- tristate "SoC Audio support for PlayPaq with WM8510"
- depends on SND_ATMEL_SOC && BOARD_PLAYPAQ && AT91_PROGRAMMABLE_CLOCKS
- select SND_ATMEL_SOC_SSC
- select SND_SOC_WM8510
- help
- Say Y or M here if you want to add support for SoC audio
- on the LRS PlayPaq.
-
-config SND_AT32_SOC_PLAYPAQ_SLAVE
- bool "Run CODEC on PlayPaq in slave mode"
- depends on SND_AT32_SOC_PLAYPAQ
- default n
- help
- Say Y if you want to run with the AT32 SSC generating the BCLK
- and FRAME signals on the PlayPaq. Unless you want to play
- with the AT32 as the SSC master, you probably want to say N here,
- as this will give you better sound quality.
-
config SND_AT91_SOC_AFEB9260
tristate "SoC Audio support for AFEB9260 board"
depends on ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
# AT91 Machine Support
snd-soc-sam9g20-wm8731-objs := sam9g20_wm8731.o
-# AT32 Machine Support
-snd-soc-playpaq-objs := playpaq_wm8510.o
-
obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
-obj-$(CONFIG_SND_AT32_SOC_PLAYPAQ) += snd-soc-playpaq.o
obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
+++ /dev/null
-/* sound/soc/at32/playpaq_wm8510.c
- * ASoC machine driver for PlayPaq using WM8510 codec
- *
- * Copyright (C) 2008 Long Range Systems
- * Geoffrey Wossum <gwossum@acm.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This code is largely inspired by sound/soc/at91/eti_b1_wm8731.c
- *
- * NOTE: If you don't have the AT32 enhanced portmux configured (which
- * isn't currently in the mainline or Atmel patched kernel), you will
- * need to set the MCLK pin (PA30) to peripheral A in your board initialization
- * code. Something like:
- * at32_select_periph(GPIO_PIN_PA(30), GPIO_PERIPH_A, 0);
- *
- */
-
-/* #define DEBUG */
-
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/clk.h>
-#include <linux/timer.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-
-#include <sound/core.h>
-#include <sound/pcm.h>
-#include <sound/pcm_params.h>
-#include <sound/soc.h>
-
-#include <mach/at32ap700x.h>
-#include <mach/portmux.h>
-
-#include "../codecs/wm8510.h"
-#include "atmel-pcm.h"
-#include "atmel_ssc_dai.h"
-
-
-/*-------------------------------------------------------------------------*\
- * constants
-\*-------------------------------------------------------------------------*/
-#define MCLK_PIN GPIO_PIN_PA(30)
-#define MCLK_PERIPH GPIO_PERIPH_A
-
-
-/*-------------------------------------------------------------------------*\
- * data types
-\*-------------------------------------------------------------------------*/
-/* SSC clocking data */
-struct ssc_clock_data {
- /* CMR div */
- unsigned int cmr_div;
-
- /* Frame period (as needed by xCMR.PERIOD) */
- unsigned int period;
-
- /* The SSC clock rate these settings where calculated for */
- unsigned long ssc_rate;
-};
-
-
-/*-------------------------------------------------------------------------*\
- * module data
-\*-------------------------------------------------------------------------*/
-static struct clk *_gclk0;
-static struct clk *_pll0;
-
-#define CODEC_CLK (_gclk0)
-
-
-/*-------------------------------------------------------------------------*\
- * Sound SOC operations
-\*-------------------------------------------------------------------------*/
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
-static struct ssc_clock_data playpaq_wm8510_calc_ssc_clock(
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *cpu_dai)
-{
- struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
- struct ssc_device *ssc = ssc_p->ssc;
- struct ssc_clock_data cd;
- unsigned int rate, width_bits, channels;
- unsigned int bitrate, ssc_div;
- unsigned actual_rate;
-
-
- /*
- * Figure out required bitrate
- */
- rate = params_rate(params);
- channels = params_channels(params);
- width_bits = snd_pcm_format_physical_width(params_format(params));
- bitrate = rate * width_bits * channels;
-
-
- /*
- * Figure out required SSC divider and period for required bitrate
- */
- cd.ssc_rate = clk_get_rate(ssc->clk);
- ssc_div = cd.ssc_rate / bitrate;
- cd.cmr_div = ssc_div / 2;
- if (ssc_div & 1) {
- /* round cmr_div up */
- cd.cmr_div++;
- }
- cd.period = width_bits - 1;
-
-
- /*
- * Find actual rate, compare to requested rate
- */
- actual_rate = (cd.ssc_rate / (cd.cmr_div * 2)) / (2 * (cd.period + 1));
- pr_debug("playpaq_wm8510: Request rate = %u, actual rate = %u\n",
- rate, actual_rate);
-
-
- return cd;
-}
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
-
-static int playpaq_wm8510_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params)
-{
- struct snd_soc_pcm_runtime *rtd = substream->private_data;
- struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
- struct at32_ssc_info *ssc_p = snd_soc_dai_get_drvdata(cpu_dai);
- struct ssc_device *ssc = ssc_p->ssc;
- unsigned int pll_out = 0, bclk = 0, mclk_div = 0;
- int ret;
-
-
- /* Due to difficulties with getting the correct clocks from the AT32's
- * PLL0, we're going to let the CODEC be in charge of all the clocks
- */
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBM_CFM);
-#else
- struct ssc_clock_data cd;
- const unsigned int fmt = (SND_SOC_DAIFMT_I2S |
- SND_SOC_DAIFMT_NB_NF |
- SND_SOC_DAIFMT_CBS_CFS);
-#endif
-
- if (ssc == NULL) {
- pr_warning("playpaq_wm8510_hw_params: ssc is NULL!\n");
- return -EINVAL;
- }
-
-
- /*
- * Figure out PLL and BCLK dividers for WM8510
- */
- switch (params_rate(params)) {
- case 48000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_2;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 44100:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_2;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 22050:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_4;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 16000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_6;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 11025:
- pll_out = 22579200;
- mclk_div = WM8510_MCLKDIV_8;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- case 8000:
- pll_out = 24576000;
- mclk_div = WM8510_MCLKDIV_12;
- bclk = WM8510_BCLKDIV_8;
- break;
-
- default:
- pr_warning("playpaq_wm8510: Unsupported sample rate %d\n",
- params_rate(params));
- return -EINVAL;
- }
-
-
- /*
- * set CPU and CODEC DAI configuration
- */
- ret = snd_soc_dai_set_fmt(codec_dai, fmt);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CODEC DAI format (%d)\n",
- ret);
- return ret;
- }
- ret = snd_soc_dai_set_fmt(cpu_dai, fmt);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CPU DAI format (%d)\n",
- ret);
- return ret;
- }
-
-
- /*
- * Set CPU clock configuration
- */
-#if defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- cd = playpaq_wm8510_calc_ssc_clock(params, cpu_dai);
- pr_debug("playpaq_wm8510: cmr_div = %d, period = %d\n",
- cd.cmr_div, cd.period);
- ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_CMR_DIV, cd.cmr_div);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CPU CMR_DIV (%d)\n",
- ret);
- return ret;
- }
- ret = snd_soc_dai_set_clkdiv(cpu_dai, AT32_SSC_TCMR_PERIOD,
- cd.period);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: "
- "Failed to set CPU transmit period (%d)\n",
- ret);
- return ret;
- }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
- /*
- * Set CODEC clock configuration
- */
- pr_debug("playpaq_wm8510: "
- "pll_in = %ld, pll_out = %u, bclk = %x, mclk = %x\n",
- clk_get_rate(CODEC_CLK), pll_out, bclk, mclk_div);
-
-
-#if !defined CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE
- ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_BCLKDIV, bclk);
- if (ret < 0) {
- pr_warning
- ("playpaq_wm8510: Failed to set CODEC DAI BCLKDIV (%d)\n",
- ret);
- return ret;
- }
-#endif /* CONFIG_SND_AT32_SOC_PLAYPAQ_SLAVE */
-
-
- ret = snd_soc_dai_set_pll(codec_dai, 0, 0,
- clk_get_rate(CODEC_CLK), pll_out);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CODEC DAI PLL (%d)\n",
- ret);
- return ret;
- }
-
-
- ret = snd_soc_dai_set_clkdiv(codec_dai, WM8510_MCLKDIV, mclk_div);
- if (ret < 0) {
- pr_warning("playpaq_wm8510: Failed to set CODEC MCLKDIV (%d)\n",
- ret);
- return ret;
- }
-
-
- return 0;
-}
-
-
-
-static struct snd_soc_ops playpaq_wm8510_ops = {
- .hw_params = playpaq_wm8510_hw_params,
-};
-
-
-
-static const struct snd_soc_dapm_widget playpaq_dapm_widgets[] = {
- SND_SOC_DAPM_MIC("Int Mic", NULL),
- SND_SOC_DAPM_SPK("Ext Spk", NULL),
-};
-
-
-
-static const struct snd_soc_dapm_route intercon[] = {
- /* speaker connected to SPKOUT */
- {"Ext Spk", NULL, "SPKOUTP"},
- {"Ext Spk", NULL, "SPKOUTN"},
-
- {"Mic Bias", NULL, "Int Mic"},
- {"MICN", NULL, "Mic Bias"},
- {"MICP", NULL, "Mic Bias"},
-};
-
-
-
-static int playpaq_wm8510_init(struct snd_soc_pcm_runtime *rtd)
-{
- struct snd_soc_codec *codec = rtd->codec;
- struct snd_soc_dapm_context *dapm = &codec->dapm;
- int i;
-
- /*
- * Add DAPM widgets
- */
- for (i = 0; i < ARRAY_SIZE(playpaq_dapm_widgets); i++)
- snd_soc_dapm_new_control(dapm, &playpaq_dapm_widgets[i]);
-
-
-
- /*
- * Setup audio path interconnects
- */
- snd_soc_dapm_add_routes(dapm, intercon, ARRAY_SIZE(intercon));
-
-
-
- /* always connected pins */
- snd_soc_dapm_enable_pin(dapm, "Int Mic");
- snd_soc_dapm_enable_pin(dapm, "Ext Spk");
-
-
-
- /* Make CSB show PLL rate */
- snd_soc_dai_set_clkdiv(rtd->codec_dai, WM8510_OPCLKDIV,
- WM8510_OPCLKDIV_1 | 4);
-
- return 0;
-}
-
-
-
-static struct snd_soc_dai_link playpaq_wm8510_dai = {
- .name = "WM8510",
- .stream_name = "WM8510 PCM",
- .cpu_dai_name= "atmel-ssc-dai.0",
- .platform_name = "atmel-pcm-audio",
- .codec_name = "wm8510-codec.0-0x1a",
- .codec_dai_name = "wm8510-hifi",
- .init = playpaq_wm8510_init,
- .ops = &playpaq_wm8510_ops,
-};
-
-
-
-static struct snd_soc_card snd_soc_playpaq = {
- .name = "LRS_PlayPaq_WM8510",
- .dai_link = &playpaq_wm8510_dai,
- .num_links = 1,
-};
-
-static struct platform_device *playpaq_snd_device;
-
-
-static int __init playpaq_asoc_init(void)
-{
- int ret = 0;
-
- /*
- * Configure MCLK for WM8510
- */
- _gclk0 = clk_get(NULL, "gclk0");
- if (IS_ERR(_gclk0)) {
- _gclk0 = NULL;
- ret = PTR_ERR(_gclk0);
- goto err_gclk0;
- }
- _pll0 = clk_get(NULL, "pll0");
- if (IS_ERR(_pll0)) {
- _pll0 = NULL;
- ret = PTR_ERR(_pll0);
- goto err_pll0;
- }
- ret = clk_set_parent(_gclk0, _pll0);
- if (ret) {
- pr_warning("snd-soc-playpaq: "
- "Failed to set PLL0 as parent for DAC clock\n");
- goto err_set_clk;
- }
- clk_set_rate(CODEC_CLK, 12000000);
- clk_enable(CODEC_CLK);
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
- at32_select_periph(MCLK_PIN, MCLK_PERIPH, 0);
-#endif
-
-
- /*
- * Create and register platform device
- */
- playpaq_snd_device = platform_device_alloc("soc-audio", 0);
- if (playpaq_snd_device == NULL) {
- ret = -ENOMEM;
- goto err_device_alloc;
- }
-
- platform_set_drvdata(playpaq_snd_device, &snd_soc_playpaq);
-
- ret = platform_device_add(playpaq_snd_device);
- if (ret) {
- pr_warning("playpaq_wm8510: platform_device_add failed (%d)\n",
- ret);
- goto err_device_add;
- }
-
- return 0;
-
-
-err_device_add:
- if (playpaq_snd_device != NULL) {
- platform_device_put(playpaq_snd_device);
- playpaq_snd_device = NULL;
- }
-err_device_alloc:
-err_set_clk:
- if (_pll0 != NULL) {
- clk_put(_pll0);
- _pll0 = NULL;
- }
-err_pll0:
- if (_gclk0 != NULL) {
- clk_put(_gclk0);
- _gclk0 = NULL;
- }
- return ret;
-}
-
-
-static void __exit playpaq_asoc_exit(void)
-{
- if (_gclk0 != NULL) {
- clk_put(_gclk0);
- _gclk0 = NULL;
- }
- if (_pll0 != NULL) {
- clk_put(_pll0);
- _pll0 = NULL;
- }
-
-#if defined CONFIG_AT32_ENHANCED_PORTMUX
- at32_free_pin(MCLK_PIN);
-#endif
-
- platform_device_unregister(playpaq_snd_device);
- playpaq_snd_device = NULL;
-}
-
-module_init(playpaq_asoc_init);
-module_exit(playpaq_asoc_exit);
-
-MODULE_AUTHOR("Geoffrey Wossum <gwossum@acm.org>");
-MODULE_DESCRIPTION("ASoC machine driver for LRS PlayPaq");
-MODULE_LICENSE("GPL");
select SND_SOC_CX20442
select SND_SOC_DA7210 if I2C
select SND_SOC_DFBMCS320
- select SND_SOC_JZ4740_CODEC if SOC_JZ4740
+ select SND_SOC_JZ4740_CODEC
select SND_SOC_LM4857 if I2C
select SND_SOC_MAX98088 if I2C
select SND_SOC_MAX98095 if I2C
#define AD1836_ADC_CTRL2 13
#define AD1836_ADC_WORD_LEN_MASK 0x30
-#define AD1836_ADC_WORD_OFFSET 5
+#define AD1836_ADC_WORD_OFFSET 4
#define AD1836_ADC_SERFMT_MASK (7 << 6)
#define AD1836_ADC_SERFMT_PCK256 (0x4 << 6)
#define AD1836_ADC_SERFMT_PCK128 (0x5 << 6)
static int cs4270_soc_resume(struct snd_soc_codec *codec)
{
struct cs4270_private *cs4270 = snd_soc_codec_get_drvdata(codec);
- struct i2c_client *i2c_client = to_i2c_client(codec->dev);
int reg;
regulator_bulk_enable(ARRAY_SIZE(cs4270->supplies),
ndelay(500);
/* first restore the entire register cache ... */
- for (reg = CS4270_FIRSTREG; reg <= CS4270_LASTREG; reg++) {
- u8 val = snd_soc_read(codec, reg);
-
- if (i2c_smbus_write_byte_data(i2c_client, reg, val)) {
- dev_err(codec->dev, "i2c write failed\n");
- return -EIO;
- }
- }
+ snd_soc_cache_sync(codec);
/* ... then disable the power-down bits */
reg = snd_soc_read(codec, CS4270_PWRCTL);
static struct snd_soc_codec_driver soc_codec_device_cs42l51 = {
.probe = cs42l51_probe,
- .reg_cache_size = CS42L51_NUMREGS,
+ .reg_cache_size = CS42L51_NUMREGS + 1,
.reg_word_size = sizeof(u8),
};
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/io.h>
#include <linux/delay.h>
unsigned int mask = mc->max;
unsigned int val = (ucontrol->value.integer.value[0] & mask);
unsigned int val2 = (ucontrol->value.integer.value[1] & mask);
- unsigned int change = 1;
+ unsigned int change = 0;
- if (((max9877_regs[reg] >> shift) & mask) == val)
- change = 0;
+ if (((max9877_regs[reg] >> shift) & mask) != val)
+ change = 1;
- if (((max9877_regs[reg2] >> shift) & mask) == val2)
- change = 0;
+ if (((max9877_regs[reg2] >> shift) & mask) != val2)
+ change = 1;
if (change) {
max9877_regs[reg] &= ~(mask << shift);
static int __init uda1380_modinit(void)
{
- int ret;
+ int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&uda1380_i2c_driver);
if (ret != 0)
pr_err("Failed to register UDA1380 I2C driver: %d\n", ret);
#endif
- return 0;
+ return ret;
}
module_init(uda1380_modinit);
}
if (memcmp(fw->data, "WMFW", 4) != 0) {
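+		/* decode the unexpected magic bytes so they can be reported in the error below */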
+ memcpy(&data32, fw->data, sizeof(data32));
+ data32 = be32_to_cpu(data32);
dev_err(codec->dev, "%s: firmware has bad file magic %08x\n",
name, data32);
goto err;
};
static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = {
-SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
- adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
-SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
- adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
+SND_SOC_DAPM_VIRT_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux,
+ adc_mux_ev, SND_SOC_DAPM_PRE_PMU),
};
static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = {
-SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
-SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux),
+SND_SOC_DAPM_VIRT_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux),
};
static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = {
bclk |= best << WM8994_AIF1_BCLK_DIV_SHIFT;
lrclk = bclk_rate / params_rate(params);
+ if (!lrclk) {
+ dev_err(dai->dev, "Unable to generate LRCLK from %dHz BCLK\n",
+ bclk_rate);
+ return -EINVAL;
+ }
dev_dbg(dai->dev, "Using LRCLK rate %d for actual LRCLK %dHz\n",
lrclk, bclk_rate / lrclk);
switch (wm8994->revision) {
case 0:
case 1:
+ case 2:
+ case 3:
wm8994->hubs.dcs_codes_l = -9;
wm8994->hubs.dcs_codes_r = -5;
break;
break;
case 24576000:
ratediv = WM8996_SYSCLK_DIV;
+ wm8996->sysclk /= 2;
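+		/* deliberate fall through: the halved SYSCLK is then configured as the 12.288MHz case */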
case 12288000:
snd_soc_update_bits(codec, WM8996_AIF_RATE,
WM8996_SYSCLK_RATE, WM8996_SYSCLK_RATE);
}
if (strcasecmp(sprop, "i2s-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
}
machine_data->clk_frequency = be32_to_cpup(iprop);
} else if (strcasecmp(sprop, "i2s-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_I2S;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "lj-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "lj-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_LEFT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_LEFT_J | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "rj-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "rj-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_RIGHT_J;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_RIGHT_J | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else if (strcasecmp(sprop, "ac97-slave") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBM_CFM;
machine_data->codec_clk_direction = SND_SOC_CLOCK_OUT;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_IN;
} else if (strcasecmp(sprop, "ac97-master") == 0) {
- machine_data->dai_format = SND_SOC_DAIFMT_AC97;
+ machine_data->dai_format =
+ SND_SOC_DAIFMT_AC97 | SND_SOC_DAIFMT_CBS_CFS;
machine_data->codec_clk_direction = SND_SOC_CLOCK_IN;
machine_data->cpu_clk_direction = SND_SOC_CLOCK_OUT;
} else {
config SND_SOC_MX27VIS_AIC32X4
tristate "SoC audio support for Visstrim M10 boards"
- depends on MACH_IMX27_VISSTRIM_M10
+ depends on MACH_IMX27_VISSTRIM_M10 && I2C
select SND_SOC_TLV320AIC32X4
select SND_MXC_SOC_MX2
help
config SND_KIRKWOOD_SOC_OPENRD
tristate "SoC Audio support for Kirkwood Openrd Client"
depends on SND_KIRKWOOD_SOC && (MACH_OPENRD_CLIENT || MACH_OPENRD_ULTIMATE)
+ depends on I2C
select SND_KIRKWOOD_SOC_I2S
select SND_SOC_CS42L51
help
config SND_KIRKWOOD_SOC_T5325
tristate "SoC Audio support for HP t5325"
- depends on SND_KIRKWOOD_SOC && MACH_T5325
+ depends on SND_KIRKWOOD_SOC && MACH_T5325 && I2C
select SND_KIRKWOOD_SOC_I2S
select SND_SOC_ALC5623
help
platform_driver_unregister(&mxs_pcm_driver);
}
module_exit(snd_mxs_pcm_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-pcm-audio");
MODULE_AUTHOR("Freescale Semiconductor, Inc.");
MODULE_DESCRIPTION("MXS ALSA SoC Machine driver");
MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-sgtl5000");
config SND_SOC_RAUMFELD
tristate "SoC Audio support Raumfeld audio adapter"
depends on SND_PXA2XX_SOC && (MACH_RAUMFELD_SPEAKER || MACH_RAUMFELD_CONNECTOR)
+ depends on I2C && SPI_MASTER
select SND_PXA_SOC_SSP
select SND_SOC_CS4270
select SND_SOC_AK4104
config SND_PXA2XX_SOC_HX4700
tristate "SoC Audio support for HP iPAQ hx4700"
- depends on SND_PXA2XX_SOC && MACH_H4700
+ depends on SND_PXA2XX_SOC && MACH_H4700 && I2C
select SND_PXA2XX_SOC_I2S
select SND_SOC_AK4641
help
snd_soc_card_hx4700.dev = &pdev->dev;
ret = snd_soc_register_card(&snd_soc_card_hx4700);
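+	/* on registration failure, release the audio GPIOs requested earlier in probe */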
if (ret)
- return ret;
+ gpio_free_array(hx4700_audio_gpios,
+ ARRAY_SIZE(hx4700_audio_gpios));
- return 0;
+ return ret;
}
static int __devexit hx4700_audio_remove(struct platform_device *pdev)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
- int err;
/* These endpoints are not being used. */
snd_soc_dapm_nc_pin(dapm, "LINPUT2");
.dai_link = &jive_dai,
.num_links = 1,
- .dapm_widgtets = wm8750_dapm_widgets,
+ .dapm_widgets = wm8750_dapm_widgets,
.num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets),
.dapm_routes = audio_map,
.num_dapm_routes = ARRAY_SIZE(audio_map),
*
*/
+#include <linux/module.h>
#include <sound/soc.h>
static struct snd_soc_card smdk2443;
#include "../codecs/wm8994.h"
#include <sound/pcm_params.h>
+#include <linux/module.h>
/*
* Default CFG switch settings to use this driver:
snd_soc_dapm_ignore_suspend(&card->dapm, "Headset Mic");
snd_soc_dapm_ignore_suspend(&card->dapm, "Main AMIC");
snd_soc_dapm_ignore_suspend(&card->dapm, "Main DMIC");
- snd_soc_dapm_ignore_suspend(&card->dapm, "Speaker");
+ snd_soc_dapm_ignore_suspend(&card->dapm, "Main Speaker");
snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Output");
snd_soc_dapm_ignore_suspend(&card->dapm, "WM1250 Input");
struct snd_soc_card *card = dev_get_drvdata(dev);
int i, ac97_control = 0;
+ /* If the initialization of this soc device failed, there is no codec
+ * associated with it. Just bail out in this case.
+ */
+ if (list_empty(&card->codec_dev_list))
+ return 0;
+
/* AC97 devices might have other drivers hanging off them so
* need to resume immediately. Other drivers don't have that
* problem and may take a substantial amount of time to resume
}
EXPORT_SYMBOL_GPL(snd_soc_params_to_bclk);
-static struct snd_soc_platform_driver dummy_platform;
+static const struct snd_pcm_hardware dummy_dma_hardware = {
+ .formats = 0xffffffff,
+ .channels_min = 1,
+ .channels_max = UINT_MAX,
+
+ /* Random values to keep userspace happy when checking constraints */
+ .info = SNDRV_PCM_INFO_INTERLEAVED |
+ SNDRV_PCM_INFO_BLOCK_TRANSFER,
+ .buffer_bytes_max = 128*1024,
+ .period_bytes_min = PAGE_SIZE,
+ .period_bytes_max = PAGE_SIZE*2,
+ .periods_min = 2,
+ .periods_max = 128,
+};
+
+static int dummy_dma_open(struct snd_pcm_substream *substream)
+{
+ snd_soc_set_runtime_hwparams(substream, &dummy_dma_hardware);
+
+ return 0;
+}
+
+static struct snd_pcm_ops dummy_dma_ops = {
+ .open = dummy_dma_open,
+ .ioctl = snd_pcm_lib_ioctl,
+};
+
+static struct snd_soc_platform_driver dummy_platform = {
+ .ops = &dummy_dma_ops,
+};
static __devinit int snd_soc_dummy_probe(struct platform_device *pdev)
{
}
}
},
+{
+ /* Roland GAIA SH-01 */
+ USB_DEVICE(0x0582, 0x0111),
+ .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+ .vendor_name = "Roland",
+ .product_name = "GAIA",
+ .ifnum = QUIRK_ANY_INTERFACE,
+ .type = QUIRK_COMPOSITE,
+ .data = (const struct snd_usb_audio_quirk[]) {
+ {
+ .ifnum = 0,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 1,
+ .type = QUIRK_AUDIO_STANDARD_INTERFACE
+ },
+ {
+ .ifnum = 2,
+ .type = QUIRK_MIDI_FIXED_ENDPOINT,
+ .data = &(const struct snd_usb_midi_endpoint_info) {
+ .out_cables = 0x0003,
+ .in_cables = 0x0003
+ }
+ },
+ {
+ .ifnum = -1
+ }
+ }
+ }
+},
{
USB_DEVICE(0x0582, 0x0113),
.driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
- if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
+ if (errno == EINVAL || errno == ENOSYS ||
+ errno == ENOENT || errno == EOPNOTSUPP) {
if (verbose)
ui__warning("%s event is not supported by the kernel.\n",
event_name(counter));
return size;
}
+static void hists__init(struct hists *hists)
+{
+ memset(hists, 0, sizeof(*hists));
+ hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
+ hists->entries_in = &hists->entries_in_array[0];
+ hists->entries_collapsed = RB_ROOT;
+ hists->entries = RB_ROOT;
+ pthread_mutex_init(&hists->lock, NULL);
+}
+
void perf_evsel__init(struct perf_evsel *evsel,
struct perf_event_attr *attr, int idx)
{
/*
* write event string as passed on cmdline
*/
- ret = do_write_string(fd, attr->name);
+ ret = do_write_string(fd, event_name(attr));
if (ret < 0)
return ret;
/*
return ret;
}
-
-void hists__init(struct hists *hists)
-{
- memset(hists, 0, sizeof(*hists));
- hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
- hists->entries_in = &hists->entries_in_array[0];
- hists->entries_collapsed = RB_ROOT;
- hists->entries = RB_ROOT;
- pthread_mutex_init(&hists->lock, NULL);
-}
struct callchain_cursor callchain_cursor;
};
-void hists__init(struct hists *hists);
-
struct hist_entry *__hists__add_entry(struct hists *self,
struct addr_location *al,
struct symbol *parent, u64 period);
}
map = cpu_map__new(cpu_list);
+ if (map == NULL) {
+ pr_err("Invalid cpu_list\n");
+ return -1;
+ }
for (i = 0; i < map->nr; i++) {
int cpu = map->map[i];
field = malloc_or_die(sizeof(*field));
type = process_arg(event, field, &token);
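+	/* consume any operator tokens that follow the argument before expecting the ',' delimiter */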
+ while (type == EVENT_OP)
+ type = process_op(event, field, &token);
if (test_type_token(type, token, EVENT_DELIM, ","))
goto out_free;