[KVM,ARM] Trap guest accesses to GICv3 common
system registers
+ kvm-arm.vgic_v4_enable=
+ [KVM,ARM] Allow use of GICv4 for direct injection of
+ LPIs.
+
kvm-intel.ept= [KVM,Intel] Disable extended page tables
(virtualized MMU) support on capable Intel chips.
Default is 1 (enabled)
instead using the legacy FADT method
profile= [KNL] Enable kernel profiling via /proc/profile
- Format: [schedule,]<number>
+ Format: [<profiletype>,]<number>
+ Param: <profiletype>: "schedule", "sleep", or "kvm"
+ [defaults to kernel profiling]
Param: "schedule" - profile schedule points.
- Param: <number> - step/bucket size as a power of 2 for
- statistical time based profiling.
Param: "sleep" - profile D-state sleeping (millisecs).
Requires CONFIG_SCHEDSTATS
Param: "kvm" - profile VM exits.
+ Param: <number> - step/bucket size as a power of 2 for
+ statistical time based profiling.
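+			Example: "profile=schedule,2" (hypothetical
+			values) profiles schedule points with a bucket
+			step size of 2^2.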
prompt_ramdisk= [RAM] List of RAM disks to prompt for floppy disk
before loading.
printk("Read : CPU %d, count %ld\n", cpu,
local_read(&per_cpu(counters, cpu)));
}
- del_timer(&test_timer);
- test_timer.expires = jiffies + 1000;
- add_timer(&test_timer);
+ mod_timer(&test_timer, jiffies + 1000);
}
static int __init test_init(void)
{
/* initialize the timer that will increment the counter */
- init_timer(&test_timer);
- test_timer.function = do_test_timer;
- test_timer.expires = jiffies + 1;
- add_timer(&test_timer);
+ timer_setup(&test_timer, do_test_timer, 0);
+ mod_timer(&test_timer, jiffies + 1);
return 0;
}
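(For reference, a sketch of the callback under the new API — with
timer_setup(), the handler receives the timer_list pointer rather than an
unsigned long cookie; the names follow the sample above:)

	static void do_test_timer(struct timer_list *unused)
	{
		/* ... read/increment the per-cpu counters as above ... */
		mod_timer(&test_timer, jiffies + 1000);	/* re-arm */
	}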
example:
-display@di0 {
+disp0 {
compatible = "fsl,imx-parallel-display";
edid = [edid-data];
interface-pix-fmt = "rgb24";
5. make initrd for Dom0/DomU
# make -C linux-2.6.18-xen.hg ARCH=ia64 modules_install \
- O=$(/bin/pwd)/build-linux-2.6.18-xen_ia64
+ O=$(pwd)/build-linux-2.6.18-xen_ia64
# mkinitrd -f /boot/efi/efi/redhat/initrd-2.6.18.8-xen.img \
2.6.18.8-xen --builtin mptspi --builtin mptbase \
--builtin mptscsih --builtin uhci-hcd --builtin ohci-hcd \
:Author: Randy Dunlap <rdunlap@infradead.org>
:Author: Andrew Murray <amurray@mpc-data.co.uk>
-
Integer types
=============
Raw pointer value SHOULD be printed with %p. The kernel supports
the following extended format specifiers for pointer types:
+Pointer Types
+=============
+
+Pointers printed without a specifier extension (i.e. unadorned %p) are
+hashed to give a unique identifier without leaking kernel addresses to user
+space. On 64-bit machines the first 32 bits are zeroed. If you _really_
+want the address see %px below.
+
+::
+
+ %p abcdef12 or 00000000abcdef12
+
Symbols/Function Pointers
=========================
printk("Faulted at %pS\n", (void *)regs->ip);
printk(" %s%pB\n", (reliable ? "" : "? "), (void *)*stack);
-
Kernel Pointers
===============
::
- %pK 0x01234567 or 0x0123456789abcdef
+ %pK 01234567 or 0123456789abcdef
For printing kernel pointers which should be hidden from unprivileged
users. The behaviour of ``%pK`` depends on the ``kptr_restrict`` sysctl - see
Documentation/sysctl/kernel.txt for more details.
+Unmodified Addresses
+====================
+
+::
+
+ %px 01234567 or 0123456789abcdef
+
+For printing pointers when you _really_ want to print the address. Please
+consider whether or not you are leaking sensitive information about the
+kernel layout in memory before printing pointers with %px. %px is
+functionally equivalent to %lx. %px is preferred to %lx because it is more
+uniquely grep'able. If, in the future, we need to modify the way the kernel
+handles printing pointers it will be nice to be able to find the call
+sites.
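+
+A hedged illustration (pointer values made up)::
+
+	pr_info("%p\n", ptr);	/* hashed, e.g. 00000000abcdef12 */
+	pr_info("%px\n", ptr);	/* raw, e.g. ffff888012345678 */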
+
Struct Resources
================
which can be found in Documentation/process/submitting-patches.rst. Code without a
proper signoff cannot be merged into the mainline.
+ - Co-Developed-by: states that the patch was also created by another developer
+   along with the original author. This is useful at times when multiple
+   people work on a single patch. Note that this person also needs to have
+   a Signed-off-by: line in the patch.
+
- Acked-by: indicates an agreement by another developer (often a
maintainer of the relevant code) that the patch is appropriate for
inclusion into the kernel.
It does so by decrementing the runtime of the executing task Ti at a pace equal
to
- dq = -max{ Ui, (1 - Uinact) } dt
+ dq = -max{ Ui / Umax, (1 - Uinact - Uextra) } dt
- where Uinact is the inactive utilization, computed as (this_bq - running_bw),
- and Ui is the bandwidth of task Ti.
+ where:
+
+ - Ui is the bandwidth of task Ti;
+ - Umax is the maximum reclaimable utilization (subjected to RT throttling
+ limits);
+ - Uinact is the (per runqueue) inactive utilization, computed as
+   (this_bw - running_bw);
+ - Uextra is the (per runqueue) extra reclaimable utilization
+ (subjected to RT throttling limits).
Let's now see a trivial example of two deadline tasks with runtime equal
defined key type will return its data as is. If a key type does not
implement this function, error EOPNOTSUPP will result.
- As much of the data as can be fitted into the buffer will be copied to
- userspace if the buffer pointer is not NULL.
-
- On a successful return, the function will always return the amount of data
- available rather than the amount copied.
+ If the specified buffer is too small, then the size of the buffer required
+ will be returned. Note that in this case, the contents of the buffer may
+ have been overwritten in some undefined way.
+ Otherwise, on success, the function will return the amount of data copied
+ into the buffer.
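+
+     A sketch of the resulting two-step userspace pattern (using the
+     keyctl_read() wrapper from libkeyutils; buffer handling illustrative
+     only)::
+
+	long len = keyctl_read(key, NULL, 0);	/* size required */
+	char *buf = malloc(len);
+	long n = keyctl_read(key, buf, len);	/* amount copied on success */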
* Instantiate a partially constructed key::
<name-of-detected-video-adapter> tells which video adapter Linux detected
-- it's either a generic adapter name (MDA, CGA, HGC, EGA, VGA, VESA VGA [a VGA
with VESA-compliant BIOS]) or a chipset name (e.g., Trident). Direct detection
-of chipsets is turned off by default (see CONFIG_VIDEO_SVGA in chapter 4 to see
-how to enable it if you really want) as it's inherently unreliable due to
+of chipsets is turned off by default as it's inherently unreliable due to
absolutely insane PC design.
"0 0F00 80x25" means that the first menu item (the menu items are numbered
0x0f05 VGA 80x30 (480 scans, 16-point font)
0x0f06 VGA 80x34 (480 scans, 14-point font)
0x0f07 VGA 80x60 (480 scans, 8-point font)
- 0x0f08 Graphics hack (see the CONFIG_VIDEO_HACK paragraph below)
+ 0x0f08 Graphics hack (see the VIDEO_GFX_HACK paragraph below)
0x1000 to 0x7fff - modes specified by resolution. The code has a "0xRRCC"
form where RR is a number of rows and CC is a number of columns.
Options
~~~~~~~
-Some options can be set in the source text (in arch/i386/boot/video.S).
-All of them are simple #define's -- change them to #undef's when you want to
-switch them off. Currently supported:
-
-CONFIG_VIDEO_SVGA - enables autodetection of SVGA cards. This is switched
-off by default as it's a bit unreliable due to terribly bad PC design. If you
-really want to have the adapter autodetected (maybe in case the ``scan`` feature
-doesn't work on your machine), switch this on and don't cry if the results
-are not completely sane. In case you really need this feature, please drop me
-a mail as I think of removing it some day.
-
-CONFIG_VIDEO_VESA - enables autodetection of VESA modes. If it doesn't work
-on your machine (or displays a "Error: Scanning of VESA modes failed" message),
-you can switch it off and report as a bug.
-
-CONFIG_VIDEO_COMPACT - enables compacting of the video mode list. If there
-are more modes with the same screen size, only the first one is kept (see above
-for more info on mode ordering). However, in very strange cases it's possible
-that the first "version" of the mode doesn't work although some of the others
-do -- in this case turn this switch off to see the rest.
-
-CONFIG_VIDEO_RETAIN - enables retaining of screen contents when switching
-video modes. Works only with some boot loaders which leave enough room for the
-buffer. (If you have old LILO, you can adjust heap_end_ptr and loadflags
-in setup.S, but it's better to upgrade the boot loader...)
-
-CONFIG_VIDEO_LOCAL - enables inclusion of "local modes" in the list. The
-local modes are added automatically to the beginning of the list not depending
-on hardware configuration. The local modes are listed in the source text after
-the "local_mode_table:" line. The comment before this line describes the format
-of the table (which also includes a video card name to be displayed on the
-top of the menu).
-
-CONFIG_VIDEO_400_HACK - force setting of 400 scan lines for standard VGA
-modes. This option is intended to be used on certain buggy BIOSes which draw
-some useless logo using font download and then fail to reset the correct mode.
-Don't use unless needed as it forces resetting the video card.
-
-CONFIG_VIDEO_GFX_HACK - includes special hack for setting of graphics modes
-to be used later by special drivers (e.g., 800x600 on IBM ThinkPad -- see
-ftp://ftp.phys.keio.ac.jp/pub/XFree86/800x600/XF86Configs/XF86Config.IBM_TP560).
+Build options for arch/x86/boot/* are selected by the kernel kconfig
+utility and the kernel .config file.
+
+VIDEO_GFX_HACK - includes a special hack for setting graphics modes
+to be used later by special drivers.
Allows setting _any_ BIOS mode, including graphics modes, and forcing a specific
text screen resolution instead of peeking it from BIOS variables. Don't use
unless you think you know what you're doing. To activate this setup, use
-mode number 0x0f08 (see section 3).
+mode number 0x0f08 (see the Mode IDs section above).
Still doesn't work?
~~~~~~~~~~~~~~~~~~~
When the mode detection doesn't work (e.g., the mode list is incorrect or
the machine hangs instead of displaying the menu), try to switch off some of
-the configuration options listed in section 4. If it fails, you can still use
+the configuration options listed under "Options". If that fails, you can still use
your kernel with the video mode set directly via the kernel parameter.
In either case, please send me a bug report containing what _exactly_
end setting". Adding 0x8000 to the mode ID might fix the problem. Unfortunately,
this must be done manually -- no autodetection mechanisms are available.
-If you have a VGA card and your display still looks as on EGA, your BIOS
-is probably broken and you need to set the CONFIG_VIDEO_400_HACK switch to
-force setting of the correct mode.
-
History
~~~~~~~
value lower than this limit will be ignored and the old configuration will be
retained.
-Note: the value of dirty_bytes also must be set greater than
-dirty_background_bytes or the amount of memory corresponding to
-dirty_background_ratio.
-
==============================================================
dirty_expire_centisecs
The total available memory is not equal to total system memory.
-Note: dirty_ratio must be set greater than dirty_background_ratio or
-ratio corresponding to dirty_background_bytes.
-
==============================================================
dirty_writeback_centisecs
- SMP barrier pairing.
- Examples of memory barrier sequences.
- Read memory barriers vs load speculation.
- - Transitivity
+ - Multicopy atomicity.
(*) Explicit kernel barriers.
is ensured.
+Note well that the ordering provided by a data dependency is local to
+the CPU containing it.  See the section on "Multicopy atomicity" for
+more information.
+
+
The data dependency barrier is very important to the RCU system, for example.
See rcu_assign_pointer() and rcu_dereference() in include/linux/rcupdate.h.
Here, the data dependency barrier permits the current target of an RCU'd
apply only to the then-clause and else-clause of the if-statement in question
(and to the functions invoked within those two clauses), not to code
following that if-statement.
-Finally, control dependencies do -not- provide transitivity.  This is
-demonstrated by two related examples, with the initial values of 'x' and
-'y' both being zero:
-
-	CPU 0                     CPU 1
-	=======================   =======================
-	r1 = READ_ONCE(x);        r2 = READ_ONCE(y);
-	if (r1 > 0)               if (r2 > 0)
-	  WRITE_ONCE(y, 1);         WRITE_ONCE(x, 1);
-
-	assert(!(r1 == 1 && r2 == 1));
-
-In this two-CPU example, the assert() condition will always be true.
-And if control dependencies guaranteed transitivity (which they do
-not), then adding the following CPU would keep the assertion below true:
-	CPU 2
-	=====================
-	WRITE_ONCE(x, 2);
+Note well that the ordering provided by a control dependency is local
+to the CPU containing it.  See the section on "Multicopy atomicity"
+for more information.
-	assert(!(r1 == 2 && r2 == 1 && x == 2)); /* FAILS!!! */
-
-But because control dependencies do -not- provide transitivity, the
-above assertion can fail after the three-CPU example completes.  If you
-need the three-CPU example to provide ordering, you will need smp_mb()
-between the loads and stores in the CPU 0 and CPU 1 code fragments,
-that is, just after the "if" statements.  Furthermore, the original
-two-CPU example is very fragile and should be avoided.
-
-These two examples are the LB and WWC litmus tests from this paper:
-http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf and this
-site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
In summary:
  (*) Control dependencies normally pair with other types of barriers.
- (*) Control dependencies do -not- provide transitivity.  If you need
-     transitivity, use smp_mb().
+ (*) Control dependencies do -not- provide multicopy atomicity.  If you
+     need all the CPUs to see a given store at the same time, use smp_mb().
(*) Compilers do not understand control dependencies.  It is therefore your
    job to ensure that they do not break your code.
Certain types of memory barrier should always be paired when dealing with
CPU-CPU interactions.  A lack of appropriate pairing is almost certainly an
error.
-General barriers pair with each other, though they also pair with most
-other types of barriers, albeit without transitivity.  An acquire barrier
-pairs with a release barrier, but both may also pair with other barriers,
-including of course general barriers.  A write barrier pairs with a data
-dependency barrier, a control dependency, an acquire barrier, a release
-barrier, a read barrier, or a general barrier.  Similarly a read barrier,
-control dependency, or a data dependency barrier pairs with a write
-barrier, an acquire barrier, a release barrier, or a general barrier:
+General barriers pair with each other, though they also pair with most
+other types of barriers, albeit without multicopy atomicity.  An acquire
+barrier pairs with a release barrier, but both may also pair with other
+barriers, including of course general barriers.  A write barrier pairs
+with a data dependency barrier, a control dependency, an acquire barrier,
+a release barrier, a read barrier, or a general barrier.  Similarly a
+read barrier, control dependency, or a data dependency barrier pairs
+with a write barrier, an acquire barrier, a release barrier, or a
+general barrier:
CPU 1 CPU 2
=============== ===============
=============== ===============================
r1 = READ_ONCE(y);
<general barrier>
- WRITE_ONCE(y, 1); if (r2 = READ_ONCE(x)) {
+ WRITE_ONCE(x, 1); if (r2 = READ_ONCE(x)) {
<implicit control dependency>
WRITE_ONCE(y, 1);
}
: : +-------+
-TRANSITIVITY
-------------
+MULTICOPY ATOMICITY
+-------------------
-Transitivity is a deeply intuitive notion about ordering that is not
-always provided by real computer systems.  The following example
-demonstrates transitivity:
+Multicopy atomicity is a deeply intuitive notion about ordering that is
+not always provided by real computer systems, namely that a given store
+becomes visible at the same time to all CPUs, or, alternatively, that all
+CPUs agree on the order in which all stores become visible.  However,
+support of full multicopy atomicity would rule out valuable hardware
+optimizations, so a weaker form called ``other multicopy atomicity''
+instead guarantees only that a given store becomes visible at the same
+time to all -other- CPUs.  The remainder of this document discusses this
+weaker form, but for brevity will call it simply ``multicopy atomicity''.
+
+The following example demonstrates multicopy atomicity:
CPU 1 CPU 2 CPU 3
======================= ======================= =======================
{ X = 0, Y = 0 }
- STORE X=1 LOAD X STORE Y=1
- <general barrier> <general barrier>
- LOAD Y LOAD X
-
-Suppose that CPU 2's load from X returns 1 and its load from Y returns 0.
-This indicates that CPU 2's load from X in some sense follows CPU 1's
-store to X and that CPU 2's load from Y in some sense preceded CPU 3's
-store to Y.  The question is then "Can CPU 3's load from X return 0?"
-
-Because CPU 2's load from X in some sense came after CPU 1's store, it is
-natural to expect that CPU 3's load from X must therefore return 1.  This
-expectation is an example of transitivity: if a load executing on CPU A
-follows a load from the same variable executing on CPU B, then CPU A's
-load must either return the same value that CPU B's load did, or must
-return some later value.
-
-In the Linux kernel, use of general memory barriers guarantees
-transitivity.  Therefore, in the above example, if CPU 2's load from X
-returns 1 and its load from Y returns 0, then CPU 3's load from X must
-also return 1.
-
-However, transitivity is -not- guaranteed for read or write barriers.
-For example, suppose that CPU 2's general barrier in the above example
-is changed to a read barrier as shown below:
+ STORE X=1 r1=LOAD X (reads 1) LOAD Y (reads 1)
+ <general barrier> <read barrier>
+ STORE Y=r1 LOAD X
+
+Suppose that CPU 2's load from X returns 1, which it then stores to Y,
+and that CPU 3's load from Y returns 1.  This indicates that CPU 1's
+store to X precedes CPU 2's load from X and that CPU 2's store to Y
+precedes CPU 3's load from Y.  In addition, the memory barriers here
+guarantee that CPU 2 executes its load before its store, and that CPU 3
+loads from Y before it loads from X.  The question is then "Can CPU 3's
+load from X return 0?"
+
+Because CPU 3's load from X in some sense comes after CPU 2's load, it
+is natural to expect that CPU 3's load from X must therefore return 1.
+This expectation follows from multicopy atomicity: if a load executing
+on CPU B follows a load from the same variable executing on CPU A (and
+CPU A did not originally store the value which it read), then on
+multicopy-atomic systems, CPU B's load must return either the same value
+that CPU A's load did or some later value.  However, the Linux kernel
+does not require systems to be multicopy atomic.
+
+The use of a general memory barrier in the example above compensates
+for any lack of multicopy atomicity.  In the example, if CPU 2's load
+from X returns 1 and CPU 3's load from Y returns 1, then CPU 3's load
+from X must indeed return 1.
+
+However, dependencies, read barriers, and write barriers are not always
+able to compensate for non-multicopy atomicity.  For example, suppose
+that CPU 2's general barrier is removed from the above example, leaving
+only the data dependency shown below:
CPU 1 CPU 2 CPU 3
======================= ======================= =======================
{ X = 0, Y = 0 }
- STORE X=1 LOAD X STORE Y=1
- <read barrier> <general barrier>
- LOAD Y LOAD X
-
-This code fragment is not transitive: in this example, it is perfectly
-legal for CPU 2's load from X to return 1, its load from Y to return 0,
-and CPU 3's load from X to return 0.
-
-The key point is that although CPU 2's read barrier orders its pair of
-loads, it does not guarantee to order CPU 1's store.  Therefore, if this
-example runs on a system where CPUs 1 and 2 share a store buffer or a
-level of cache, CPU 2 might have early access to CPU 1's writes.  General
-barriers are therefore required to ensure that all CPUs agree on the
-combined order of CPU 1's and CPU 2's accesses.
-
-General barriers provide "global transitivity", so that all CPUs will
-agree on the order of operations.  In contrast, a chain of release-acquire
-pairs provides only "local transitivity", so that only those CPUs on the
-chain are guaranteed to agree on the combined order of the accesses.
-For example, switching to C code in deference to the venerable Herman
-Hollerith:
+ STORE X=1 r1=LOAD X (reads 1) LOAD Y (reads 1)
+ <data dependency> <read barrier>
+ STORE Y=r1 LOAD X (reads 0)
+
+This substitution allows non-multicopy atomicity to run rampant: in this
+example, it is perfectly legal for CPU 2's load from X to return 1,
+CPU 3's load from Y to return 1, and its load from X to return 0.
+
+The key point is that although CPU 2's data dependency orders its load
+and store, it does not guarantee to order CPU 1's store.  Therefore, if
+this example runs on a non-multicopy-atomic system where CPUs 1 and 2
+share a store buffer or a level of cache, CPU 2 might have early access
+to CPU 1's writes.  General barriers are therefore required to ensure
+that all CPUs agree on the combined order of multiple accesses.
+
+General barriers can compensate not only for non-multicopy atomicity,
+but can also generate additional ordering that can ensure that -all-
+CPUs will perceive the same order of -all- operations.  In contrast, a
+chain of release-acquire pairs does not provide this additional ordering,
+which means that only those CPUs on the chain are guaranteed to agree
+on the combined order of the accesses.  For example, switching to C code
+in deference to the venerable Herman Hollerith:
int u, v, x, y, z;
}
Because cpu0(), cpu1(), and cpu2() participate in a chain of
-smp_store_release()/smp_load_acquire() pairs linked by local transitivity,
-the following outcome is prohibited:
+smp_store_release()/smp_load_acquire() pairs, the following outcome is
+prohibited:
r0 == 1 && r1 == 1 && r2 == 1
r1 == 1 && r5 == 0
-However, release-acquire transitivity applies only to the participating
-CPUs and does not apply to cpu3().  Therefore, the following outcome is
-possible:
+However, the ordering provided by release-acquire applies only to the CPUs
+participating in that chain and does not apply to cpu3(), at least aside
+from stores.  Therefore, the following outcome is possible:
r0 == 0 && r1 == 1 && r2 == 1 && r3 == 0 && r4 == 0
Note that this outcome can happen even on a mythical sequentially
consistent system where nothing is ever reordered.
-Again, if your code requires global transitivity, use general barriers
-throughout.
+Again, if your code requires full ordering of all operations, use general
+barriers throughout.
==================
Chapter 7.1: Memory-Access Ordering
Chapter 7.4: Buffering and Combining Memory Writes
+ARM Architecture Reference Manual (ARMv8, for ARMv8-A architecture profile)
+ Chapter B2: The AArch64 Application Level Memory Model
+
IA-32 Intel Architecture Software Developer's Manual, Volume 3:
System Programming Guide
Chapter 7.1: Locked Atomic Operations
Appendix D: Formal Specification of the Memory Models
Appendix J: Programming with the Memory Models
+Storage in the PowerPC (Stone and Fitzgerald)
+
UltraSPARC Programmer Reference Manual
Chapter 5: Memory Accesses and Cacheability
Chapter 15: Sparc-V9 Memory Models
-EINVAL: Inconsistent restored data
-EFAULT: Invalid guest ram access
-EBUSY: One or more VCPUS are running
+ -EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the
+ state is not available
KVM_DEV_ARM_VGIC_GRP_ITS_REGS
Attributes:
-Memory Protection Keys for Userspace (PKU aka PKEYs) is a CPU feature
-which will be found on future Intel CPUs.
+Memory Protection Keys for Userspace (PKU aka PKEYs) is a feature
+which is found on Intel's Skylake "Scalable Processor" Server CPUs.
+It will be available in future non-server parts.
+
+For anyone wishing to test or use this feature, it is available in
+Amazon's EC2 C5 instances and is known to work there using an Ubuntu
+17.04 image.
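+
+A minimal userspace sketch (assuming the glibc wrappers for the pkey_*
+syscalls; error handling omitted):
+
+	int pkey = pkey_alloc(0, PKEY_DISABLE_WRITE);
+	pkey_mprotect(ptr, size, PROT_READ | PROT_WRITE, pkey);
+	/* writes to ptr now fault until the access rights are changed */
+	pkey_free(pkey);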
Memory Protection Keys provides a mechanism for enforcing page-based
protections, but without requiring modification of the page tables
F: Documentation/devicetree/bindings/display/mxsfb-drm.txt
MYRICOM MYRI-10G 10GbE DRIVER (MYRI10GE)
-M: Hyong-Youb Kim <hykim@myri.com>
+M: Chris Lee <christopher.lee@cspi.com>
L: netdev@vger.kernel.org
-W: https://www.myricom.com/support/downloads/myri10ge.html
+W: https://www.cspi.com/ethernet-products/support/downloads/
S: Supported
F: drivers/net/ethernet/myricom/myri10ge/
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
-PATCHLEVEL = 14
+PATCHLEVEL = 15
SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc1
NAME = Fearless Coyote
# *DOCUMENTATION*
# check that the output directory actually exists
saved-output := $(KBUILD_OUTPUT)
KBUILD_OUTPUT := $(shell mkdir -p $(KBUILD_OUTPUT) && cd $(KBUILD_OUTPUT) \
- && /bin/pwd)
+ && pwd)
$(if $(KBUILD_OUTPUT),, \
$(error failed to create output directory "$(saved-output)"))
$(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
endif
+ifeq ($(cc-name),clang)
+ifneq ($(CROSS_COMPILE),)
+CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
+GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
+endif
+ifneq ($(GCC_TOOLCHAIN),)
+CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
+endif
+KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
+KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
+KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
+KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
+# Quiet clang warning: comparison of unsigned expression < 0 is always false
+KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
+# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
+# source of a reference will be _MergedGlobals and not one of the whitelisted names.
+# See modpost pattern 2
+KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
+KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
+KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
+KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
+else
+
+# These warnings generated too much noise in a regular build.
+# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
+KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
+endif
+
ifeq ($(config-targets),1)
# ===========================================================================
# *config targets only - make sure prerequisites are updated, and descend
endif
KBUILD_CFLAGS += $(stackp-flag)
-ifeq ($(cc-name),clang)
-ifneq ($(CROSS_COMPILE),)
-CLANG_TARGET := --target=$(notdir $(CROSS_COMPILE:%-=%))
-GCC_TOOLCHAIN := $(realpath $(dir $(shell which $(LD)))/..)
-endif
-ifneq ($(GCC_TOOLCHAIN),)
-CLANG_GCC_TC := --gcc-toolchain=$(GCC_TOOLCHAIN)
-endif
-KBUILD_CFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_AFLAGS += $(CLANG_TARGET) $(CLANG_GCC_TC)
-KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, format-invalid-specifier)
-KBUILD_CFLAGS += $(call cc-disable-warning, gnu)
-KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
-# Quiet clang warning: comparison of unsigned expression < 0 is always false
-KBUILD_CFLAGS += $(call cc-disable-warning, tautological-compare)
-# CLANG uses a _MergedGlobals as optimization, but this breaks modpost, as the
-# source of a reference will be _MergedGlobals and not on of the whitelisted names.
-# See modpost pattern 2
-KBUILD_CFLAGS += $(call cc-option, -mno-global-merge,)
-KBUILD_CFLAGS += $(call cc-option, -fcatch-undefined-behavior)
-KBUILD_CFLAGS += $(call cc-option, -no-integrated-as)
-KBUILD_AFLAGS += $(call cc-option, -no-integrated-as)
-else
-
-# These warnings generated too much noise in a regular build.
-# Use make W=1 to enable them (see scripts/Makefile.extrawarn)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-but-set-variable)
-KBUILD_CFLAGS += $(call cc-disable-warning, unused-const-variable)
-endif
-
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
else
PHONY += $(vmlinux-dirs)
$(vmlinux-dirs): prepare scripts
- $(Q)$(MAKE) $(build)=$@
+ $(Q)$(MAKE) $(build)=$@ need-builtin=1
define filechk_kernel.release
echo "$(KERNELVERSION)$$($(CONFIG_SHELL) $(srctree)/scripts/setlocalversion $(srctree))"
$(Q)$(MAKE) $(build)=$(package-dir) $@
%pkg: include/config/kernel.release FORCE
$(Q)$(MAKE) $(build)=$(package-dir) $@
-rpm: include/config/kernel.release FORCE
- $(Q)$(MAKE) $(build)=$(package-dir) $@
+rpm: rpm-pkg
+ @echo " WARNING: \"rpm\" target will be removed after Linux 4.18"
+ @echo " Please use \"rpm-pkg\" instead."
# Brief documentation of the typical targets used
$(call cmd,rmdirs)
$(call cmd,rmfiles)
@find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
+ \( -name '*.[aios]' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '*.ko.*' -o -name '*.dtb' -o -name '*.dtb.S' \
- -o -name '*.dwo' \
+ -o -name '*.dwo' -o -name '*.lst' \
-o -name '*.su' \
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
-o -name '*.symtypes' -o -name 'modules.order' \
}
static void
-srmcons_receive_chars(unsigned long data)
+srmcons_receive_chars(struct timer_list *t)
{
- struct srmcons_private *srmconsp = (struct srmcons_private *)data;
+ struct srmcons_private *srmconsp = from_timer(srmconsp, t, timer);
struct tty_port *port = &srmconsp->port;
unsigned long flags;
int incr = 10;
static int __init
srmcons_init(void)
{
- setup_timer(&srmcons_singleton.timer, srmcons_receive_chars,
- (unsigned long)&srmcons_singleton);
+ timer_setup(&srmcons_singleton.timer, srmcons_receive_chars, 0);
if (srm_is_registered_console) {
struct tty_driver *driver;
int err;
select OF
select OF_EARLY_FLATTREE
select OF_RESERVED_MEM
- select PERF_USE_VMALLOC
+ select PERF_USE_VMALLOC if ARC_CACHE_VIPT_ALIASING
select HAVE_DEBUG_STACKOVERFLOW
select HAVE_GENERIC_DMA_COHERENT
select HAVE_KERNEL_GZIP
ranges = <0x00000000 0x0 0xe0000000 0x10000000>;
interrupt-parent = <&mb_intc>;
+ creg_rst: reset-controller@11220 {
+ compatible = "snps,axs10x-reset";
+ #reset-cells = <1>;
+ reg = <0x11220 0x4>;
+ };
+
i2sclk: i2sclk@100a0 {
compatible = "snps,axs10x-i2s-pll-clock";
reg = <0x100a0 0x10>;
clocks = <&apbclk>;
clock-names = "stmmaceth";
max-speed = <100>;
+ resets = <&creg_rst 5>;
+ reset-names = "stmmaceth";
};
ehci@0x40000 {
/* Build Configuration Registers */
#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */
+#define ARC_REG_ERP_CTRL 0x3F /* ARCv2 Error protection control */
#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */
#define ARC_REG_CRC_BCR 0x62
#define ARC_REG_VECBASE_BCR 0x68
#define ARC_REG_PERIBASE_BCR 0x69
#define ARC_REG_FP_BCR 0x6B /* ARCompact: Single-Precision FPU */
#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
+#define ARC_REG_ERP_BUILD 0xc7 /* ARCv2 Error protection Build: ECC/Parity */
#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */
#define ARC_REG_SLC_BCR 0xce
#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */
#define ARC_REG_D_UNCACH_BCR 0x6A
#define ARC_REG_BPU_BCR 0xc0
#define ARC_REG_ISA_CFG_BCR 0xc1
+#define ARC_REG_LPB_BUILD 0xE9 /* ARCv2 Loop Buffer Build */
#define ARC_REG_RTT_BCR 0xF2
#define ARC_REG_IRQ_BCR 0xF3
+#define ARC_REG_MICRO_ARCH_BCR 0xF9 /* ARCv2 Product revision */
#define ARC_REG_SMART_BCR 0xFF
#define ARC_REG_CLUSTER_BCR 0xcf
#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */
+#define ARC_REG_LPB_CTRL 0x488 /* ARCv2 Loop Buffer control */
/* Common for ARCompact and ARCv2 status register */
#define ARC_REG_STATUS32 0x0A
#endif
};
+/* Error Protection Build: ECC/Parity */
+struct bcr_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad3:5, mmu:3, pad2:4, ic:3, dc:3, pad1:6, ver:8;
+#else
+ unsigned int ver:8, pad1:6, dc:3, ic:3, pad2:4, mmu:3, pad3:5;
+#endif
+};
+
+/* Error Protection Control */
+struct ctl_erp {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:27, mpd:1, pad1:2, dpd:1, dpi:1;
+#else
+ unsigned int dpi:1, dpd:1, pad1:2, mpd:1, pad2:27;
+#endif
+};
+
+struct bcr_lpb {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:16, entries:8, ver:8;
+#else
+ unsigned int ver:8, entries:8, pad:16;
+#endif
+};
+
struct bcr_generic {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int info:24, ver:8;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, swape:1, pad1:2,
- fpu_sp:1, fpu_dp:1, dual_iss_enb:1, dual_iss_exist:1, pad2:4,
+ fpu_sp:1, fpu_dp:1, dual:1, dual_enb:1, pad2:4,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
- if (__test_and_set_bit(idx, pmu_cpu->used_mask)) {
- idx = find_first_zero_bit(pmu_cpu->used_mask,
- arc_pmu->n_counters);
- if (idx == arc_pmu->n_counters)
- return -EAGAIN;
-
- __set_bit(idx, pmu_cpu->used_mask);
- hwc->idx = idx;
- }
+ idx = ffz(pmu_cpu->used_mask[0]);
+ if (idx == arc_pmu->n_counters)
+ return -EAGAIN;
+
+ __set_bit(idx, pmu_cpu->used_mask);
+ hwc->idx = idx;
write_aux_reg(ARC_REG_PCT_INDEX, idx);
struct perf_sample_data data;
struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
struct pt_regs *regs;
- int active_ints;
+ unsigned int active_ints;
int idx;
arc_pmu_disable(&arc_pmu->pmu);
active_ints = read_aux_reg(ARC_REG_PCT_INT_ACT);
+ if (!active_ints)
+ goto done;
regs = get_irq_regs();
- for (idx = 0; idx < arc_pmu->n_counters; idx++) {
- struct perf_event *event = pmu_cpu->act_counter[idx];
+ do {
+ struct perf_event *event;
struct hw_perf_event *hwc;
- if (!(active_ints & (1 << idx)))
- continue;
+ idx = __ffs(active_ints);
/* Reset interrupt flag by writing of 1 */
write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
write_aux_reg(ARC_REG_PCT_INT_CTRL,
read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+ event = pmu_cpu->act_counter[idx];
hwc = &event->hw;
WARN_ON_ONCE(hwc->idx != idx);
arc_perf_event_update(event, &event->hw, event->hw.idx);
perf_sample_data_init(&data, 0, hwc->last_period);
- if (!arc_pmu_event_set_period(event))
- continue;
+ if (arc_pmu_event_set_period(event)) {
+ if (perf_event_overflow(event, &data, regs))
+ arc_pmu_stop(event, 0);
+ }
- if (perf_event_overflow(event, &data, regs))
- arc_pmu_stop(event, 0);
- }
+ active_ints &= ~(1U << idx);
+ } while (active_ints);
+done:
arc_pmu_enable(&arc_pmu->pmu);
return IRQ_HANDLED;
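(The rewritten handler visits only the set bits of active_ints; the
bit-scan idiom in isolation, as a sketch:)

	while (active_ints) {
		idx = __ffs(active_ints);	/* index of lowest set bit */
		/* ... service overflowed counter "idx" ... */
		active_ints &= ~(1U << idx);	/* clear it and continue */
	}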
pr_err("This core does not have performance counters!\n");
return -ENODEV;
}
+ BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
unsigned int exec_ctrl;
READ_BCR(AUX_EXEC_CTRL, exec_ctrl);
- cpu->extn.dual_iss_exist = 1;
- cpu->extn.dual_iss_enb = exec_ctrl & 1;
+ cpu->extn.dual_enb = exec_ctrl & 1;
+
+ /* dual issue always present for this core */
+ cpu->extn.dual = 1;
}
}
cpu_id, cpu->name, cpu->details,
is_isa_arcompact() ? "ARCompact" : "ARCv2",
IS_AVAIL1(cpu->isa.be, "[Big-Endian]"),
- IS_AVAIL3(cpu->extn.dual_iss_exist, cpu->extn.dual_iss_enb, " Dual-Issue"));
+ IS_AVAIL3(cpu->extn.dual, cpu->extn.dual_enb, " Dual-Issue "));
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s%s%s\nISA Extn\t: ",
IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
if (cpu->bpu.ver)
n += scnprintf(buf + n, len - n,
- "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
+ "BPU\t\t: %s%s match, cache:%d, Predict Table:%d",
IS_AVAIL1(cpu->bpu.full, "full"),
IS_AVAIL1(!cpu->bpu.full, "partial"),
cpu->bpu.num_cache, cpu->bpu.num_pred);
+ if (is_isa_arcv2()) {
+ struct bcr_lpb lpb;
+
+ READ_BCR(ARC_REG_LPB_BUILD, lpb);
+ if (lpb.ver) {
+ unsigned int ctl;
+ ctl = read_aux_reg(ARC_REG_LPB_CTRL);
+
+ n += scnprintf(buf + n, len - n, " Loop Buffer:%d %s",
+ lpb.entries,
+ IS_DISABLED_RUN(!ctl));
+ }
+ }
+
+ n += scnprintf(buf + n, len - n, "\n");
return buf;
}
cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
+ if (is_isa_arcv2()) {
+
+ /* Error Protection: ECC/Parity */
+ struct bcr_erp erp;
+ READ_BCR(ARC_REG_ERP_BUILD, erp);
+
+ if (erp.ver) {
+ struct ctl_erp ctl;
+ READ_BCR(ARC_REG_ERP_CTRL, ctl);
+
+ /* inverted bits: 0 means enabled */
+ n += scnprintf(buf + n, len - n, "Extn [ECC]\t: %s%s%s%s%s%s\n",
+ IS_AVAIL3(erp.ic, !ctl.dpi, "IC "),
+ IS_AVAIL3(erp.dc, !ctl.dpd, "DC "),
+ IS_AVAIL3(erp.mmu, !ctl.mpd, "MMU "));
+ }
+ }
+
n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
EF_ARC_OSABI_CURRENT >> 8,
EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
tmp = read_aux_reg(ARC_REG_MMU_BCR);
mmu->ver = (tmp >> 24);
- if (mmu->ver <= 2) {
- mmu2 = (struct bcr_mmu_1_2 *)&tmp;
- mmu->pg_sz_k = TO_KB(0x2000);
- mmu->sets = 1 << mmu2->sets;
- mmu->ways = 1 << mmu2->ways;
- mmu->u_dtlb = mmu2->u_dtlb;
- mmu->u_itlb = mmu2->u_itlb;
- } else if (mmu->ver == 3) {
- mmu3 = (struct bcr_mmu_3 *)&tmp;
- mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
- mmu->sets = 1 << mmu3->sets;
- mmu->ways = 1 << mmu3->ways;
- mmu->u_dtlb = mmu3->u_dtlb;
- mmu->u_itlb = mmu3->u_itlb;
- mmu->sasid = mmu3->sasid;
+ if (is_isa_arcompact()) {
+ if (mmu->ver <= 2) {
+ mmu2 = (struct bcr_mmu_1_2 *)&tmp;
+ mmu->pg_sz_k = TO_KB(0x2000);
+ mmu->sets = 1 << mmu2->sets;
+ mmu->ways = 1 << mmu2->ways;
+ mmu->u_dtlb = mmu2->u_dtlb;
+ mmu->u_itlb = mmu2->u_itlb;
+ } else {
+ mmu3 = (struct bcr_mmu_3 *)&tmp;
+ mmu->pg_sz_k = 1 << (mmu3->pg_sz - 1);
+ mmu->sets = 1 << mmu3->sets;
+ mmu->ways = 1 << mmu3->ways;
+ mmu->u_dtlb = mmu3->u_dtlb;
+ mmu->u_itlb = mmu3->u_itlb;
+ mmu->sasid = mmu3->sasid;
+ }
} else {
mmu4 = (struct bcr_mmu_4 *)&tmp;
mmu->pg_sz_k = 1 << (mmu4->sz0 - 1);
void arc_mmu_init(void)
{
- char str[256];
struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+ char str[256];
+ int compat = 0;
pr_info("%s", arc_mmu_mumbojumbo(0, str, sizeof(str)));
*/
BUILD_BUG_ON(!IS_ALIGNED(STACK_TOP, PMD_SIZE));
- /* For efficiency sake, kernel is compile time built for a MMU ver
- * This must match the hardware it is running on.
- * Linux built for MMU V2, if run on MMU V1 will break down because V1
- * hardware doesn't understand cmds such as WriteNI, or IVUTLB
- * On the other hand, Linux built for V1 if run on MMU V2 will do
- * un-needed workarounds to prevent memcpy thrashing.
- * Similarly MMU V3 has new features which won't work on older MMU
+ /*
+ * Ensure that MMU features assumed by kernel exist in hardware.
+ * For older ARC700 cpus, it has to be an exact match, since the MMU
+ * revisions were not backwards compatible (MMUv3 TLB layout changed,
+ * so even a kernel built for v2 that didn't use any new v3 commands
+ * would still not work).
+ * For HS cpus, MMUv4 was baseline and v5 is backwards compatible
+ * (will run older software).
*/
- if (mmu->ver != CONFIG_ARC_MMU_VER) {
+ if (is_isa_arcompact() && mmu->ver == CONFIG_ARC_MMU_VER)
+ compat = 1;
+ else if (is_isa_arcv2() && mmu->ver >= CONFIG_ARC_MMU_VER)
+ compat = 1;
+
+ if (!compat) {
panic("MMU ver %d doesn't match kernel built for %d...\n",
mmu->ver, CONFIG_ARC_MMU_VER);
}
select MIGHT_HAVE_PCI
select GENERIC_IRQ_CHIP
select GPIOLIB
+ select AXS101 if ISA_ARCOMPACT
+ select AXS103 if ISA_ARCV2
help
Support for the ARC AXS10x Software Development Platforms.
axs10x_enable_gpio_intc_wire();
- /*
- * Reset ethernet IP core.
- * TODO: get rid of this quirk after axs10x reset driver (or simple
- * reset driver) will be available in upstream.
- */
- iowrite32((1 << 5), (void __iomem *) CREG_MB_SW_RESET);
-
scnprintf(mb, 32, "MainBoard v%d", mb_rev);
axs10x_print_board_ver(CREG_MB_VER, mb);
}
default y if ARCH_EBSA110 || DEBUG_FOOTBRIDGE_COM1 || DEBUG_GEMINI || ARCH_RPC
config DEBUG_UNCOMPRESS
- bool
+ bool "Enable decompressor debugging via DEBUG_LL output"
depends on ARCH_MULTIPLATFORM || PLAT_SAMSUNG || ARM_SINGLE_ARMV7M
- default y if DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
+ depends on DEBUG_LL && !DEBUG_OMAP2PLUS_UART && \
(!DEBUG_TEGRA_UART || !ZBOOT_ROM) && \
!DEBUG_BRCMSTB_UART
help
#endif
.endm
+ .macro bug, msg, line
+#ifdef CONFIG_THUMB2_KERNEL
+1: .inst 0xde02
+#else
+1: .inst 0xe7f001f2
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+ .pushsection .rodata.str, "aMS", %progbits, 1
+2: .asciz "\msg"
+ .popsection
+ .pushsection __bug_table, "aw"
+ .align 2
+ .word 1b, 2b
+ .hword \line
+ .popsection
+#endif
+ .endm
+
#endif /* __ASM_ASSEMBLER_H__ */
}
#define __HAVE_ARCH_PTE_SPECIAL
-#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) (pmd_isclear((pmd), L_PMD_SECT_RDONLY))
#define pmd_dirty(pmd) (pmd_isset((pmd), L_PMD_SECT_DIRTY))
#define pud_page(pud) pmd_page(__pmd(pud_val(pud)))
#define pte_valid_user(pte) \
(pte_valid(pte) && pte_isset((pte), L_PTE_USER) && pte_young(pte))
+static inline bool pte_access_permitted(pte_t pte, bool write)
+{
+ pteval_t mask = L_PTE_PRESENT | L_PTE_USER;
+ pteval_t needed = mask;
+
+ if (write)
+ mask |= L_PTE_RDONLY;
+
+ return (pte_val(pte) & mask) == needed;
+}
+#define pte_access_permitted pte_access_permitted
+
#if __LINUX_ARM_ARCH__ < 6
static inline void __sync_icache_dcache(pte_t pteval)
{
mov r2, sp
ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
ldr lr, [r2, #\offset + S_PC]! @ get pc
+ tst r1, #0xcf
+ bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
@ after ldm {}^
add sp, sp, #\offset + PT_REGS_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
+1: bug "Returning to usermode but unexpected PSR bits set?", \@
#elif defined(CONFIG_CPU_V7M)
@ V7M restore.
@ Note that we don't need to do clrex here as clearing the local
ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
ldr lr, [sp, #\offset + S_PC] @ get pc
add sp, sp, #\offset + S_SP
+ tst r1, #0xcf
+ bne 1f
msr spsr_cxsf, r1 @ save in spsr_svc
@ We must avoid clrex due to Cortex-A15 erratum #830321
.endif
add sp, sp, #PT_REGS_SIZE - S_SP
movs pc, lr @ return & move spsr_svc into cpsr
+1: bug "Returning to usermode but unexpected PSR bits set?", \@
#endif /* !CONFIG_THUMB2_KERNEL */
.endm
#
source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
select PREEMPT_NOTIFIERS
select ANON_INODES
select ARM_GIC
+ select ARM_GIC_V3
+ select ARM_GIC_V3_ITS
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_MSI
+ select IRQ_BYPASS_MANAGER
+ select HAVE_KVM_IRQ_BYPASS
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
obj-y += $(KVM)/arm/vgic/vgic-v2.o
obj-y += $(KVM)/arm/vgic/vgic-v3.o
+obj-y += $(KVM)/arm/vgic/vgic-v4.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
static struct timer_list power_button_poll_timer;
-static void power_button_poll(unsigned long dummy)
+static void power_button_poll(struct timer_list *unused)
{
if (gpio_get_value(N2100_POWER_BUTTON) == 0) {
ctrl_alt_del();
pr_err("could not set power GPIO as input\n");
}
/* Set up power button poll timer */
- init_timer(&power_button_poll_timer);
- power_button_poll_timer.function = power_button_poll;
+ timer_setup(&power_button_poll_timer, power_button_poll, 0);
power_button_poll_timer.expires = jiffies + (HZ / 10);
add_timer(&power_button_poll_timer);
return 0;
/* Must hold the button down for at least this many counts to be processed */
#define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */
-static void dsmg600_power_handler(unsigned long data);
+static void dsmg600_power_handler(struct timer_list *unused);
static DEFINE_TIMER(dsmg600_power_timer, dsmg600_power_handler);
-static void dsmg600_power_handler(unsigned long data)
+static void dsmg600_power_handler(struct timer_list *unused)
{
/* This routine is called twice per second to check the
* state of the power button.
/* Must hold the button down for at least this many counts to be processed */
#define PBUTTON_HOLDDOWN_COUNT 4 /* 2 secs */
-static void nas100d_power_handler(unsigned long data);
+static void nas100d_power_handler(struct timer_list *unused);
static DEFINE_TIMER(nas100d_power_timer, nas100d_power_handler);
-static void nas100d_power_handler(unsigned long data)
+static void nas100d_power_handler(struct timer_list *unused)
{
/* This routine is called twice per second to check the
* state of the power button.
static void __iomem *db88f5281_7seg;
static struct timer_list db88f5281_timer;
-static void db88f5281_7seg_event(unsigned long data)
+static void db88f5281_7seg_event(struct timer_list *unused)
{
static int count = 0;
writel(0, db88f5281_7seg + (count << 4));
printk(KERN_ERR "Failed to ioremap db88f5281_7seg\n");
return -EIO;
}
- setup_timer(&db88f5281_timer, db88f5281_7seg_event, 0);
+ timer_setup(&db88f5281_timer, db88f5281_7seg_event, 0);
mod_timer(&db88f5281_timer, jiffies + 2 * HZ);
}
.val = PMD_SECT_USER,
.set = "USR",
}, {
- .mask = L_PMD_SECT_RDONLY,
- .val = L_PMD_SECT_RDONLY,
+ .mask = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
+ .val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
.set = "ro",
.clear = "RW",
#elif __LINUX_ARM_ARCH__ >= 6
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
- .mask = ~L_PMD_SECT_RDONLY,
- .prot = L_PMD_SECT_RDONLY,
+ .mask = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
+ .prot = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
-#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
#define pmd_mkhuge(pmd) (__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))
#
source "virt/kvm/Kconfig"
+source "virt/lib/Kconfig"
menuconfig VIRTUALIZATION
bool "Virtualization"
select HAVE_KVM_MSI
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
+ select IRQ_BYPASS_MANAGER
+ select HAVE_KVM_IRQ_BYPASS
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v3.o
+kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-v4.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
return 1;
}
-static void nmi_wdt_timer(unsigned long data)
+static void nmi_wdt_timer(struct timer_list *unused)
{
if (check_nmi_wdt_touched())
nmi_wdt_keepalive();
nmi_wdt_start();
nmi_active = true;
- init_timer(&ntimer);
- ntimer.function = nmi_wdt_timer;
+ timer_setup(&ntimer, nmi_wdt_timer, 0);
ntimer.expires = jiffies + NMI_CHECK_TIMEOUT;
add_timer(&ntimer);
#endif
}
-static void nosound( unsigned long ignored );
+static void nosound(struct timer_list *unused);
static DEFINE_TIMER(sound_timer, nosound);
void amiga_mksound( unsigned int hz, unsigned int ticks )
}
-static void nosound( unsigned long ignored )
+static void nosound(struct timer_list *unused)
{
/* turn off DMA for audio channel 2 */
custom.dmacon = DMAF_AUD2;
* some function protos
*/
static void mac_init_asc( void );
-static void mac_nosound( unsigned long );
+static void mac_nosound(struct timer_list *);
static void mac_quadra_start_bell( unsigned int, unsigned int, unsigned int );
-static void mac_quadra_ring_bell( unsigned long );
+static void mac_quadra_ring_bell(struct timer_list *);
static void mac_av_start_bell( unsigned int, unsigned int, unsigned int );
static void ( *mac_special_bell )( unsigned int, unsigned int, unsigned int );
/*
* regular ASC: stop whining ..
*/
-static void mac_nosound( unsigned long ignored )
+static void mac_nosound(struct timer_list *unused)
{
mac_asc_regs[ ASC_ENABLE ] = 0;
}
* already load the wave table, or at least call this one...
* This piece keeps reloading the wave table until done.
*/
-static void mac_quadra_ring_bell( unsigned long ignored )
+static void mac_quadra_ring_bell(struct timer_list *unused)
{
int i, count = mac_asc_samplespersec / HZ;
unsigned long flags;
#include <linux/atomic.h>
#include <linux/mm_types.h>
+#include <linux/sched.h>
#include <asm/bitops.h>
#include <asm/mmu.h>
bcm97435svmb.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb
obj-$(CONFIG_MACH_PISTACHIO) += pistachio_marduk.dtb.o
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_JZ4780_CI20) += ci20.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_LEGACY_BOARD_SEAD3) += sead3.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_DT_XLP_RVP) += xlp_rvp.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_FIT_IMAGE_FDT_NI169445) += 169445.dtb
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
pic32mzda_sk.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_ATH79) += ar9331_dragino_ms14.dtb
dtb-$(CONFIG_ATH79) += ar9331_omega.dtb
dtb-$(CONFIG_ATH79) += ar9331_tl_mr3020.dtb
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_DTB_VOCORE2) += vocore2.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
dtb-$(CONFIG_FIT_IMAGE_FDT_XILFPGA) += nexys4ddr.dtb
obj-y += $(patsubst %.dtb, %.dtb.o, $(dtb-y))
-
-# Force kbuild to make empty built-in.o if necessary
-obj- += dummy.o
extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd);
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return !!(pmd_val(pmd) & _PAGE_WRITE);
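(Defining pmd_write to itself replaces the old __HAVE_ARCH_PMD_WRITE flag,
letting generic headers test the macro name directly. A sketch of the
generic fallback this enables, assuming it sits in
include/asm-generic/pgtable.h:)

	#ifndef pmd_write
	static inline int pmd_write(pmd_t pmd)
	{
		BUG();		/* only arches with THP should reach this */
		return 0;
	}
	#endif /* pmd_write */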
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r = -EINTR;
- sigset_t sigsaved;
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);
if (vcpu->mmio_needed) {
if (!vcpu->mmio_is_write)
local_irq_enable();
out:
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);
return r;
}
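(A sketch of what the new helpers presumably factor out — not verbatim; the
authoritative versions live in virt/kvm/kvm_main.c. Saving the old mask in
the task's real_blocked field lets the state persist across the call pair:)

	void kvm_sigset_activate(struct kvm_vcpu *vcpu)
	{
		if (vcpu->sigset_active)
			sigprocmask(SIG_SETMASK, &vcpu->sigset,
				    &current->real_blocked);
	}

	void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
	{
		if (vcpu->sigset_active) {
			sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
			sigemptyset(&current->real_blocked);
		}
	}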
.write = pvc_scroll_proc_write,
};
-void pvc_proc_timerfunc(unsigned long data)
+void pvc_proc_timerfunc(struct timer_list *unused)
{
if (scroll_dir < 0)
pvc_move(DISPLAY|RIGHT);
if (proc_entry == NULL)
goto error;
- setup_timer(&timer, pvc_proc_timerfunc, 0UL);
+ timer_setup(&timer, pvc_proc_timerfunc, 0);
return 0;
error:
}
}
-static void scroll_display_message(unsigned long unused);
+static void scroll_display_message(struct timer_list *unused);
static DEFINE_TIMER(mips_scroll_timer, scroll_display_message);
-static void scroll_display_message(unsigned long unused)
+static void scroll_display_message(struct timer_list *unused)
{
mips_display_message(&display_string[display_count++]);
if (display_count == max_display_count)
#define PDC_CONS_POLL_DELAY (30 * HZ / 1000)
-static void pdc_console_poll(unsigned long unused);
+static void pdc_console_poll(struct timer_list *unused);
static DEFINE_TIMER(pdc_console_timer, pdc_console_poll);
static struct tty_port tty_port;
.chars_in_buffer = pdc_console_tty_chars_in_buffer,
};
-static void pdc_console_poll(unsigned long unused)
+static void pdc_console_poll(struct timer_list *unused)
{
int data, count = 0;
}
#endif /* CONFIG_NUMA_BALANCING */
-#define __HAVE_ARCH_PMD_WRITE
#define pmd_write(pmd) pte_write(pmd_pte(pmd))
#define __pmd_write(pmd) __pte_write(pmd_pte(pmd))
#define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd))
#include <linux/io.h>
#include <asm/opal.h>
-/*
- * For static allocation of some of the structures.
- */
-#define IMC_MAX_PMUS 32
-
/*
* Compatibility macros for IMC devices
*/
extern int init_imc_pmu(struct device_node *parent,
struct imc_pmu *pmu_ptr, int pmu_id);
extern void thread_imc_disable(void);
+extern int get_max_nest_dev(void);
#endif /* __ASM_POWERPC_IMC_PMU_H */
struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
+extern void kvmppc_setup_partition_table(struct kvm *kvm);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
struct kvm_create_spapr_tce_64 *args);
*/
if ((version & 0xffffff00) == 0x004e0100)
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD1;
- else if ((version & 0xffffefff) == 0x004e0200)
- cur_cpu_spec->cpu_features &= ~CPU_FTR_POWER9_DD2_1;
+ else if ((version & 0xffffefff) == 0x004e0201)
+ cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
}
static void __init cpufeatures_setup_finished(void)
local_irq_restore(flags);
}
-static void tau_timeout_smp(unsigned long unused)
+static void tau_timeout_smp(struct timer_list *unused)
{
/* schedule ourselves to be run again */
/* first, set up the window shrinking timer */
- setup_timer(&tau_timer, tau_timeout_smp, 0UL);
+ timer_setup(&tau_timer, tau_timeout_smp, 0);
tau_timer.expires = jiffies + shrink_timer;
add_timer(&tau_timer);
unsigned long vpte, rpte, guest_rpte;
int ret;
struct revmap_entry *rev;
- unsigned long apsize, psize, avpn, pteg, hash;
+ unsigned long apsize, avpn, pteg, hash;
unsigned long new_idx, new_pteg, replace_vpte;
+ int pshift;
hptep = (__be64 *)(old->virt + (idx << 4));
goto out;
rpte = be64_to_cpu(hptep[1]);
- psize = hpte_base_page_size(vpte, rpte);
- avpn = HPTE_V_AVPN_VAL(vpte) & ~((psize - 1) >> 23);
+ pshift = kvmppc_hpte_base_page_shift(vpte, rpte);
+ avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23);
pteg = idx / HPTES_PER_GROUP;
if (vpte & HPTE_V_SECONDARY)
pteg = ~pteg;
offset = (avpn & 0x1f) << 23;
vsid = avpn >> 5;
/* We can find more bits from the pteg value */
- if (psize < (1ULL << 23))
- offset |= ((vsid ^ pteg) & old_hash_mask) * psize;
+ if (pshift < 23)
+ offset |= ((vsid ^ pteg) & old_hash_mask) << pshift;
- hash = vsid ^ (offset / psize);
+ hash = vsid ^ (offset >> pshift);
} else {
unsigned long offset, vsid;
/* We only have 40 - 23 bits of seg_off in avpn */
offset = (avpn & 0x1ffff) << 23;
vsid = avpn >> 17;
- if (psize < (1ULL << 23))
- offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) * psize;
+ if (pshift < 23)
+ offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift;
- hash = vsid ^ (vsid << 25) ^ (offset / psize);
+ hash = vsid ^ (vsid << 25) ^ (offset >> pshift);
}
new_pteg = hash & new_hash_mask;
ssize_t nb;
long int err, ret;
int mmu_ready;
+ int pshift;
if (!access_ok(VERIFY_READ, buf, count))
return -EFAULT;
err = -EINVAL;
if (!(v & HPTE_V_VALID))
goto out;
+ pshift = kvmppc_hpte_base_page_shift(v, r);
+ if (pshift <= 0)
+ goto out;
lbuf += 2;
nb += HPTE_SIZE;
goto out;
}
if (!mmu_ready && is_vrma_hpte(v)) {
- unsigned long psize = hpte_base_page_size(v, r);
- unsigned long senc = slb_pgsize_encoding(psize);
- unsigned long lpcr;
+ unsigned long senc, lpcr;
+ senc = slb_pgsize_encoding(1ul << pshift);
kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
(VRMA_VSID << SLB_VSID_SHIFT_1T);
- lpcr = senc << (LPCR_VRMASD_SH - 4);
- kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
+ if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+ lpcr = senc << (LPCR_VRMASD_SH - 4);
+ kvmppc_update_lpcr(kvm, lpcr,
+ LPCR_VRMASD);
+ } else {
+ kvmppc_setup_partition_table(kvm);
+ }
mmu_ready = 1;
}
++i;
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
-static void kvmppc_setup_partition_table(struct kvm *kvm);
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
int *ip)
return;
}
-static void kvmppc_setup_partition_table(struct kvm *kvm)
+void kvmppc_setup_partition_table(struct kvm *kvm)
{
unsigned long dw0, dw1;
spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
-void kvmppc_watchdog_func(unsigned long data)
+void kvmppc_watchdog_func(struct timer_list *t)
{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
+ struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
u32 tsr, new_tsr;
int final;
{
/* setup watchdog timer once */
spin_lock_init(&vcpu->arch.wdt_lock);
- setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
- (unsigned long)vcpu);
+ timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);
/*
* Clear DBSR.MRR to avoid guest debug interrupt as
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r;
- sigset_t sigsaved;
if (vcpu->mmio_needed) {
vcpu->mmio_needed = 0;
#endif
}
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);
if (run->immediate_exit)
r = -EINTR;
else
r = kvmppc_vcpu_run(run, vcpu);
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);
return r;
}
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
+#include <asm/setup.h>
static int __patch_instruction(unsigned int *addr, unsigned int instr)
{
* During early early boot patch_instruction is called
* when text_poke_area is not ready, but we still need
* to allow patching. We just do the plain old patching
- * We use slab_is_available and per cpu read * via this_cpu_read
- * of text_poke_area. Per-CPU areas might not be up early
- * this can create problems with just using this_cpu_read()
*/
- if (!slab_is_available() || !this_cpu_read(text_poke_area))
+ if (!this_cpu_read(*PTRRELOC(&text_poke_area)))
return __patch_instruction(addr, instr);
local_irq_save(flags);
return !slice_area_is_free(mm, start, end - start);
}
-static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret)
+static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
+ unsigned long high_limit)
{
unsigned long i;
if (!slice_low_has_vma(mm, i))
ret->low_slices |= 1u << i;
- if (mm->context.slb_addr_limit <= SLICE_LOW_TOP)
+ if (high_limit <= SLICE_LOW_TOP)
return;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++)
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
if (!slice_high_has_vma(mm, i))
__set_bit(i, ret->high_slices);
}
-static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret)
+static void slice_mask_for_size(struct mm_struct *mm, int psize, struct slice_mask *ret,
+ unsigned long high_limit)
{
unsigned char *hpsizes;
int index, mask_index;
if (((lpsizes >> (i * 4)) & 0xf) == psize)
ret->low_slices |= 1u << i;
+ if (high_limit <= SLICE_LOW_TOP)
+ return;
+
hpsizes = mm->context.high_slices_psize;
- for (i = 0; i < GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit); i++) {
+ for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++) {
mask_index = i & 0x1;
index = i >> 1;
if (((hpsizes[index] >> (mask_index * 4)) & 0xf) == psize)
struct slice_mask mask, struct slice_mask available)
{
DECLARE_BITMAP(result, SLICE_NUM_HIGH);
+ /*
+ * Make sure we compare bits only up to the max addr limit,
+ * not across the full bitmap size.
+ */
unsigned long slice_count = GET_HIGH_SLICE_INDEX(mm->context.slb_addr_limit);
bitmap_and(result, mask.high_slices,
/* First make up a "good" mask of slices that have the right size
* already
*/
- slice_mask_for_size(mm, psize, &good_mask);
+ slice_mask_for_size(mm, psize, &good_mask, high_limit);
slice_print_mask(" good_mask", good_mask);
/*
#ifdef CONFIG_PPC_64K_PAGES
/* If we support combo pages, we can allow 64k pages in 4k slices */
if (psize == MMU_PAGE_64K) {
- slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
if (fixed)
slice_or_mask(&good_mask, &compat_mask);
}
return newaddr;
}
}
-
- /* We don't fit in the good mask, check what other slices are
+ /*
+ * We don't fit in the good mask, check what other slices are
* empty and thus can be converted
*/
- slice_mask_for_free(mm, &potential_mask);
+ slice_mask_for_free(mm, &potential_mask, high_limit);
slice_or_mask(&potential_mask, &good_mask);
slice_print_mask(" potential", potential_mask);
{
struct slice_mask mask, available;
unsigned int psize = mm->context.user_psize;
+ unsigned long high_limit = mm->context.slb_addr_limit;
if (radix_enabled())
return 0;
slice_range_to_mask(addr, len, &mask);
- slice_mask_for_size(mm, psize, &available);
+ slice_mask_for_size(mm, psize, &available, high_limit);
#ifdef CONFIG_PPC_64K_PAGES
/* We need to account for 4k slices too */
if (psize == MMU_PAGE_64K) {
struct slice_mask compat_mask;
- slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask);
+ slice_mask_for_size(mm, MMU_PAGE_4K, &compat_mask, high_limit);
slice_or_mask(&available, &compat_mask);
}
#endif
* This routine will alternate loading the virtual counters for
* virtual CPUs
*/
-static void cell_virtual_cntr(unsigned long data)
+static void cell_virtual_cntr(struct timer_list *unused)
{
int i, prev_hdw_thread, next_hdw_thread;
u32 cpu;
static void start_virt_cntrs(void)
{
- setup_timer(&timer_virt_cntr, cell_virtual_cntr, 0UL);
+ timer_setup(&timer_virt_cntr, cell_virtual_cntr, 0);
timer_virt_cntr.expires = jiffies + HZ / 10;
add_timer(&timer_virt_cntr);
}
* periodically based on kernel timer to switch which SPU is
 * being monitored in a round robin fashion.
*/
-static void spu_evnt_swap(unsigned long data)
+static void spu_evnt_swap(struct timer_list *unused)
{
int node;
int cur_phys_spu, nxt_phys_spu, cur_spu_evnt_phys_spu_indx;
static void start_spu_event_swap(void)
{
- setup_timer(&timer_spu_event_swap, spu_evnt_swap, 0UL);
+ timer_setup(&timer_spu_event_swap, spu_evnt_swap, 0);
timer_spu_event_swap.expires = jiffies + HZ / 25;
add_timer(&timer_spu_event_swap);
}
*/
static DEFINE_MUTEX(nest_init_lock);
static DEFINE_PER_CPU(struct imc_pmu_ref *, local_nest_imc_refc);
-static struct imc_pmu *per_nest_pmu_arr[IMC_MAX_PMUS];
+static struct imc_pmu **per_nest_pmu_arr;
static cpumask_t nest_imc_cpumask;
struct imc_pmu_ref *nest_imc_refc;
static int nest_pmus;
static void nest_change_cpu_context(int old_cpu, int new_cpu)
{
struct imc_pmu **pn = per_nest_pmu_arr;
- int i;
if (old_cpu < 0 || new_cpu < 0)
return;
- for (i = 0; *pn && i < IMC_MAX_PMUS; i++, pn++)
+ while (*pn) {
perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
+ pn++;
+ }
}
static int ppc_nest_imc_cpu_offline(unsigned int cpu)
* Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
 * Get the base memory address for this cpu.
*/
- chip_id = topology_physical_package_id(event->cpu);
+ chip_id = cpu_to_chip_id(event->cpu);
pcni = pmu->mem_info;
do {
if (pcni->id == chip_id) {
*/
static int core_imc_mem_init(int cpu, int size)
{
- int phys_id, rc = 0, core_id = (cpu / threads_per_core);
+ int nid, rc = 0, core_id = (cpu / threads_per_core);
struct imc_mem_info *mem_info;
/*
* alloc_pages_node() will allocate memory for core in the
* local node only.
*/
- phys_id = topology_physical_package_id(cpu);
+ nid = cpu_to_node(cpu);
mem_info = &core_imc_pmu->mem_info[core_id];
mem_info->id = core_id;
/* We need only vbase for core counters */
- mem_info->vbase = page_address(alloc_pages_node(phys_id,
+ mem_info->vbase = page_address(alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!mem_info->vbase)
static int thread_imc_mem_alloc(int cpu_id, int size)
{
u64 ldbar_value, *local_mem = per_cpu(thread_imc_mem, cpu_id);
- int phys_id = topology_physical_package_id(cpu_id);
+ int nid = cpu_to_node(cpu_id);
if (!local_mem) {
/*
 * This case could happen only once at start, since we don't
* free the memory in cpu offline path.
*/
- local_mem = page_address(alloc_pages_node(phys_id,
+ local_mem = page_address(alloc_pages_node(nid,
GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE |
__GFP_NOWARN, get_order(size)));
if (!local_mem)
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);
kfree(pmu_ptr);
+ kfree(per_nest_pmu_arr);
return;
}
return -ENOMEM;
/* Needed for hotplug/migration */
+ if (!per_nest_pmu_arr) {
+ per_nest_pmu_arr = kcalloc(get_max_nest_dev() + 1,
+ sizeof(struct imc_pmu *),
+ GFP_KERNEL);
+ if (!per_nest_pmu_arr)
+ return -ENOMEM;
+ }
per_nest_pmu_arr[pmu_index] = pmu_ptr;
break;
case IMC_DOMAIN_CORE:
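
A note on the sizing above: kcalloc() zero-fills the allocation, so the extra +1 slot always stays NULL and serves as the sentinel that ends the pointer walk in nest_change_cpu_context():

    /* kcalloc(n + 1, ...) zero-fills, so *pn eventually hits a NULL slot */
    while (*pn) {
            perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);
            pn++;
    }
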
CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
-static void spusched_wake(unsigned long data)
+static void spusched_wake(struct timer_list *unused)
{
mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
wake_up_process(spusched_task);
}
-static void spuloadavg_wake(unsigned long data)
+static void spuloadavg_wake(struct timer_list *unused)
{
mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
spu_calc_load();
}
spin_lock_init(&spu_prio->runq_lock);
- setup_timer(&spusched_timer, spusched_wake, 0);
- setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);
+ timer_setup(&spusched_timer, spusched_wake, 0);
+ timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);
spusched_task = kthread_run(spusched_thread, NULL, "spusched");
if (IS_ERR(spusched_task)) {
return IRQ_HANDLED;
}
-static void kw_i2c_timeout(unsigned long data)
+static void kw_i2c_timeout(struct timer_list *t)
{
- struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data;
+ struct pmac_i2c_host_kw *host = from_timer(host, t, timeout_timer);
unsigned long flags;
spin_lock_irqsave(&host->lock, flags);
mutex_init(&host->mutex);
init_completion(&host->complete);
spin_lock_init(&host->lock);
- setup_timer(&host->timeout_timer, kw_i2c_timeout, (unsigned long)host);
+ timer_setup(&host->timeout_timer, kw_i2c_timeout, 0);
psteps = of_get_property(np, "AAPL,address-step", NULL);
steps = psteps ? (*psteps) : 0x10;
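
Where the old callback received its object through the unsigned long data cookie, the converted code recovers it with from_timer(), which is a container_of() over the embedded struct timer_list. A minimal sketch of the pattern (the struct and names are illustrative, not from this patch):

    struct my_host {
            struct timer_list timeout_timer;
            /* ... */
    };

    static void my_timeout(struct timer_list *t)
    {
            /* recover the containing object; replaces the 'data' cookie */
            struct my_host *host = from_timer(host, t, timeout_timer);

            /* ... handle the timeout for this host ... */
    }

    /* setup no longer takes a data argument */
    timer_setup(&host->timeout_timer, my_timeout, 0);
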
put_online_cpus();
}
+int get_max_nest_dev(void)
+{
+ struct device_node *node;
+ u32 pmu_units = 0, type;
+
+ for_each_compatible_node(node, NULL, IMC_DTB_UNIT_COMPAT) {
+ if (of_property_read_u32(node, "type", &type))
+ continue;
+
+ if (type == IMC_TYPE_CHIP)
+ pmu_units++;
+ }
+
+ return pmu_units;
+}
+
static int opal_imc_counters_probe(struct platform_device *pdev)
{
struct device_node *imc_dev = pdev->dev.of_node;
break;
}
- if (!imc_pmu_create(imc_dev, pmu_count, domain))
- pmu_count++;
+ if (!imc_pmu_create(imc_dev, pmu_count, domain)) {
+ if (domain == IMC_DOMAIN_NEST)
+ pmu_count++;
+ }
}
return 0;
}
return -1;
}
+EXPORT_SYMBOL(chip_to_vas_id);
static int vas_probe(struct platform_device *pdev)
{
+# SPDX-License-Identifier: GPL-2.0
#
# s390/Makefile
#
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture
#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
# Copyright (C) 1994 by Linus Torvalds
#
+// SPDX-License-Identifier: GPL-2.0
/*
* Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
* Exports appldata_register_ops() and appldata_unregister_ops() for the
+// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects data related to memory management.
+// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects accumulated network statistics (Packets received/transmitted,
+// SPDX-License-Identifier: GPL-2.0
/*
* Data gathering module for Linux-VM Monitor Stream, Stage 1.
* Collects misc. OS related data (CPU utilization, running processes).
#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
#
# arch/s390x/boot/install.sh
#
-# This file is subject to the terms and conditions of the GNU General Public
-# License. See the file "COPYING" in the main directory of this archive
-# for more details.
-#
# Copyright (C) 1995 by Linus Torvalds
#
# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* Harald Freudenberger <freude@de.ibm.com>
*
* Derived from "crypto/aes_generic.c"
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#define KMSG_COMPONENT "aes_s390"
+// SPDX-License-Identifier: GPL-2.0
/*
* s390 arch random implementation.
*
* Copyright IBM Corp. 2017
* Author(s): Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
*/
#include <linux/kernel.h>
+// SPDX-License-Identifier: GPL-2.0
/*
* Crypto-API module for CRC-32 algorithms implemented with the
* z/Architecture Vector Extension Facility.
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* Copyright IBM Corp. 2003, 2011
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
*/
#include <linux/init.h>
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Cryptographic API.
*
* Copyright IBM Corp. 2017
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
* Harald Freudenberger <freude@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
*/
#define KMSG_COMPONENT "paes_s390"
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2006, 2015
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Cryptographic API.
*
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#ifndef _CRYPTO_ARCH_S390_SHA_H
#define _CRYPTO_ARCH_S390_SHA_H
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
* s390 Version:
* Copyright IBM Corp. 2005, 2011
* Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#include <crypto/internal/hash.h>
#include <crypto/sha.h>
+// SPDX-License-Identifier: GPL-2.0+
/*
* Cryptographic API.
*
*
* Copyright IBM Corp. 2007
* Author(s): Jan Glauber (jang@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
*/
#include <crypto/internal/hash.h>
+// SPDX-License-Identifier: GPL-1.0+
/*
* Hypervisor filesystem for Linux on s390.
*
* Copyright IBM Corp. 2006, 2008
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
- * License: GPL
*/
#define KMSG_COMPONENT "hypfs"
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* CPU-measurement facilities
*
* Copyright IBM Corp. 2012
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
* Jan Glauber <jang@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#ifndef _ASM_S390_CPU_MF_H
#define _ASM_S390_CPU_MF_H
#define CORE_DUMP_USE_REGSET
#define ELF_EXEC_PAGESIZE PAGE_SIZE
-/*
- * This is the base location for PIE (ET_DYN with INTERP) loads. On
- * 64-bit, this is raised to 4GB to leave the entire 32-bit address
- * space open for things that want to use the area for 32-bit pointers.
- */
-#define ELF_ET_DYN_BASE (is_compat_task() ? 0x000400000UL : \
- 0x100000000UL)
+/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ use of this is to invoke "./ld.so someprog" to test out a new version of
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. 64-bit
+ tasks are aligned to 4GB. */
+#define ELF_ET_DYN_BASE (is_compat_task() ? \
+ (STACK_TOP / 3 * 2) : \
+ (STACK_TOP / 3 * 2) & ~((1UL << 32) - 1))
/* This yields a mask that user programs can use to figure out what
instruction set this CPU supports. */
+/* SPDX-License-Identifier: GPL-2.0+ */
#ifndef _ASM_S390_KPROBES_H
#define _ASM_S390_KPROBES_H
/*
* Kernel Probes (KProbes)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright IBM Corp. 2002, 2006
*
* 2002-Oct Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* definition for kernel virtual machines on s390
*
* Copyright IBM Corp. 2008, 2009
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
*/
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* definition for paravirtual devices on s390
*
* Copyright IBM Corp. 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*/
/*
*
* Copyright IBM Corp. 2007,2008
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.
*/
#ifndef __S390_KVM_PARA_H
#define __S390_KVM_PARA_H
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* livepatch.h - s390-specific Kernel Live Patching Core
*
* Jiri Slaby
*/
-/*
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- */
-
#ifndef ASM_LIVEPATCH_H
#define ASM_LIVEPATCH_H
#ifdef CONFIG_PGSTE
mm->context.alloc_pgste = page_table_allocate_pgste ||
test_thread_flag(TIF_PGSTE) ||
- current->mm->context.alloc_pgste;
+ (current->mm && current->mm->context.alloc_pgste);
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
mm->context.use_cmma = 0;
return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
}
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
return pud;
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
+}
+
static inline pud_t pud_mkclean(pud_t pud)
{
if (pud_large(pud)) {
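
The __HAVE_ARCH_PMD_WRITE/pmd_write hunks here and in the sparc, tile and x86 fragments below all switch to the self-defining macro idiom: the architecture defines the inline plus a same-named macro, and the generic header only supplies its stub under #ifndef. Roughly (the generic side is paraphrased, not quoted):

    /* arch header: the macro doubles as the "arch implements this" flag */
    #define pmd_write pmd_write
    static inline int pmd_write(pmd_t pmd)
    {
            return pmd_flags(pmd) & _PAGE_RW;   /* arch-specific bit test */
    }

    /* generic header, roughly */
    #ifndef pmd_write
    static inline int pmd_write(pmd_t pmd)
    {
            BUG();          /* callers must be guarded on such configs */
            return 0;
    }
    #endif
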
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Access to user system call parameters and results
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#ifndef _ASM_SYSCALL_H
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* definition for store system information stsi
*
* Copyright IBM Corp. 2001, 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Ulrich Weigand <weigand@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
static inline void topology_init_early(void) { }
static inline void topology_schedule_update(void) { }
static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline int topology_cpu_dedicated(int cpu_nr) { return 0; }
static inline void topology_expect_change(void) { }
#endif /* CONFIG_SCHED_TOPOLOGY */
*
* Copyright IBM Corp. 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Carsten Otte <cotte@de.ibm.com>
* Christian Borntraeger <borntraeger@de.ibm.com>
*/
*
* Copyright IBM Corp. 2008
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Christian Borntraeger <borntraeger@de.ibm.com>
*/
*
* Copyright 2014 IBM Corp.
* Author(s): Alexander Yarygin <yarygin@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#ifndef __LINUX_KVM_PERF_S390_H
*
* Copyright IBM Corp. 2013
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*/
#ifndef __KVM_VIRTIO_CCW_H
* Eric Rossman (edrossma@us.ibm.com)
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __ASM_S390_ZCRYPT_H
else
except_str = "-";
caller = (unsigned long) entry->caller;
- rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p ",
+ rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %pK ",
area, sec, usec, level, except_str,
entry->id.fields.cpuid, (void *)caller);
return rc;
+// SPDX-License-Identifier: GPL-2.0
/*
* Disassemble s390 instructions.
*
unsigned char opfrag;
int i;
+ /* Search the opcode offset table to find an entry which
+ * matches the beginning of the opcode. If there is no match
+ * the last entry will be used, which is the default entry for
+ * unknown instructions as well as 1-byte opcode instructions.
+ */
for (i = 0; i < ARRAY_SIZE(opcode_offset); i++) {
entry = &opcode_offset[i];
- if (entry->opcode == code[0] || entry->opcode == 0)
+ if (entry->opcode == code[0])
break;
}
start += opsize;
pr_cont("%s", buffer);
ptr = buffer;
- ptr += sprintf(ptr, "\n\t ");
+ ptr += sprintf(ptr, "\n ");
hops++;
}
pr_cont("\n");
+// SPDX-License-Identifier: GPL-2.0
/*
* Stack dumping functions
*
*/
ENTRY(__switch_to)
stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
- lgr %r1,%r2
- aghi %r1,__TASK_thread # thread_struct of prev task
- lg %r5,__TASK_stack(%r3) # start of kernel stack of next
- stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
- lgr %r1,%r3
- aghi %r1,__TASK_thread # thread_struct of next task
+ lghi %r4,__TASK_stack
+ lghi %r1,__TASK_thread
+ lg %r5,0(%r4,%r3) # start of kernel stack of next
+ stg %r15,__THREAD_ksp(%r1,%r2) # store kernel stack of prev
lgr %r15,%r5
aghi %r15,STACK_INIT # end of kernel stack of next
stg %r3,__LC_CURRENT # store task struct of next
stg %r15,__LC_KERNEL_STACK # store end of kernel stack
- lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
- mvc __LC_CURRENT_PID(4,%r0),__TASK_pid(%r3) # store pid of next
+ lg %r15,__THREAD_ksp(%r1,%r3) # load kernel stack of next
+ aghi %r3,__TASK_pid
+ mvc __LC_CURRENT_PID(4,%r0),0(%r3) # store pid of next
lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
TSTMSK __LC_MACHINE_FLAGS,MACHINE_FLAG_LPP
bzr %r14
+// SPDX-License-Identifier: GPL-2.0
/*
* ipl/reipl/dump support for Linux on s390.
*
+// SPDX-License-Identifier: GPL-2.0+
/*
* Kernel Probes (KProbes)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
* Copyright IBM Corp. 2002, 2006
*
* s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux Guest Relocation (LGR) detection
*
+// SPDX-License-Identifier: GPL-2.0+
/*
* Kernel module help for s390.
*
*
* based on i386 version
* Copyright (C) 2001 Rusty Russell.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/elf.h>
+// SPDX-License-Identifier: GPL-2.0
/*
* Machine check handler
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for s390x - CPU-measurement Counter Facility
*
* Copyright IBM Corp. 2012, 2017
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#define KMSG_COMPONENT "cpum_cf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for the System z CPU-measurement Sampling Facility
*
* Copyright IBM Corp. 2013
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#define KMSG_COMPONENT "cpum_sf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+// SPDX-License-Identifier: GPL-2.0
/*
* Performance event support for s390x
*
* Copyright IBM Corp. 2012, 2013
* Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#define KMSG_COMPONENT "perf"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
.get = s390_gs_cb_get,
.set = s390_gs_cb_set,
},
+ {
+ .core_note_type = NT_S390_GS_BC,
+ .n = sizeof(struct gs_cb) / sizeof(__u64),
+ .size = sizeof(__u64),
+ .align = sizeof(__u64),
+ .get = s390_gs_bc_get,
+ .set = s390_gs_bc_set,
+ },
{
.core_note_type = NT_S390_RI_CB,
.n = sizeof(struct runtime_instr_cb) / sizeof(__u64),
+// SPDX-License-Identifier: GPL-2.0
/*
* S390 version
* Copyright IBM Corp. 1999, 2012
#include <asm/sigp.h>
#include <asm/idle.h>
#include <asm/nmi.h>
+#include <asm/topology.h>
#include "entry.h"
enum {
+// SPDX-License-Identifier: GPL-2.0
/*
* Stack trace management functions
*
+// SPDX-License-Identifier: GPL-2.0
/*
* store hypervisor information instruction emulation functions.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Copyright IBM Corp. 2016
* Author(s): Janosch Frank <frankja@linux.vnet.ibm.com>
*/
+// SPDX-License-Identifier: GPL-2.0
/*
* Time of day based timer functions.
*
}
}
-static void stp_timeout(unsigned long dummy)
+static void stp_timeout(struct timer_list *unused)
{
queue_work(time_sync_wq, &stp_work);
}
{
if (!test_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags))
return 0;
- setup_timer(&stp_timer, stp_timeout, 0UL);
+ timer_setup(&stp_timer, stp_timeout, 0);
time_init_wq();
if (!stp_online)
return 0;
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+// SPDX-License-Identifier: GPL-2.0
/*
* vdso setup for s390
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <linux/init.h>
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_getres() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_gettime() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of gettimeofday() for 32 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_getres() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of clock_gettime() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Userland implementation of gettimeofday() for 64 bits processes in a
* s390 kernel for use in the vDSO
*
* Copyright IBM Corp. 2008
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
*/
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
+// SPDX-License-Identifier: GPL-2.0
/*
* Virtual cpu timer based timer functions.
*
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
int rc;
- sigset_t sigsaved;
if (kvm_run->immediate_exit)
return -EINTR;
return 0;
}
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);
if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
kvm_s390_vcpu_start(vcpu);
disable_cpu_timer_accounting(vcpu);
store_regs(vcpu, kvm_run);
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);
vcpu->stat.exit_userspace++;
return rc;
+// SPDX-License-Identifier: GPL-2.0
/*
* Collaborative memory management interface.
*
static struct task_struct *cmm_thread_ptr;
static DECLARE_WAIT_QUEUE_HEAD(cmm_thread_wait);
-static DEFINE_TIMER(cmm_timer, NULL);
-static void cmm_timer_fn(unsigned long);
+static void cmm_timer_fn(struct timer_list *);
static void cmm_set_timer(void);
+static DEFINE_TIMER(cmm_timer, cmm_timer_fn);
static long cmm_alloc_pages(long nr, long *counter,
struct cmm_page_array **list)
if (mod_timer(&cmm_timer, jiffies + cmm_timeout_seconds*HZ))
return;
}
- cmm_timer.function = cmm_timer_fn;
- cmm_timer.data = 0;
cmm_timer.expires = jiffies + cmm_timeout_seconds*HZ;
add_timer(&cmm_timer);
}
-static void cmm_timer_fn(unsigned long ignored)
+static void cmm_timer_fn(struct timer_list *unused)
{
long nr;
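
DEFINE_TIMER lost its data and expires arguments in the same API change; it now takes only the timer name and the callback, and the expiry is supplied when the timer is armed, as cmm_set_timer() does above. A minimal sketch (names illustrative):

    static void demo_timer_fn(struct timer_list *unused);
    static DEFINE_TIMER(demo_timer, demo_timer_fn);

    static void demo_timer_fn(struct timer_list *unused)
    {
            /* periodic work; re-arm with mod_timer() if needed */
    }

    /* arming supplies the expiry */
    mod_timer(&demo_timer, jiffies + 5 * HZ);
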
+// SPDX-License-Identifier: GPL-2.0
/*
* KVM guest address space mapping code
*
+// SPDX-License-Identifier: GPL-2.0+
/*
* flexible mmap layout support
*
* Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
* All Rights Reserved.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- *
* Started by Ingo Molnar <mingo@elte.hu>
*/
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2011
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012,2015
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
*
+// SPDX-License-Identifier: GPL-2.0
/*
* s390 specific pci instructions
*
}
}
-static void heartbeat_timer(unsigned long data)
+static void heartbeat_timer(struct timer_list *t)
{
- struct heartbeat_data *hd = (struct heartbeat_data *)data;
+ struct heartbeat_data *hd = from_timer(hd, t, timer);
static unsigned bit = 0, up = 1;
heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED);
}
}
- setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
+ timer_setup(&hd->timer, heartbeat_timer, 0);
platform_set_drvdata(pdev, hd);
return mod_timer(&hd->timer, jiffies + 1);
return cap66 > 0;
}
-static void pcibios_enable_err(unsigned long __data)
+static void pcibios_enable_err(struct timer_list *t)
{
- struct pci_channel *hose = (struct pci_channel *)__data;
+ struct pci_channel *hose = from_timer(hose, t, err_timer);
del_timer(&hose->err_timer);
printk(KERN_DEBUG "PCI: re-enabling error IRQ.\n");
enable_irq(hose->err_irq);
}
-static void pcibios_enable_serr(unsigned long __data)
+static void pcibios_enable_serr(struct timer_list *t)
{
- struct pci_channel *hose = (struct pci_channel *)__data;
+ struct pci_channel *hose = from_timer(hose, t, serr_timer);
del_timer(&hose->serr_timer);
printk(KERN_DEBUG "PCI: re-enabling system error IRQ.\n");
void pcibios_enable_timers(struct pci_channel *hose)
{
if (hose->err_irq) {
- init_timer(&hose->err_timer);
- hose->err_timer.data = (unsigned long)hose;
- hose->err_timer.function = pcibios_enable_err;
+ timer_setup(&hose->err_timer, pcibios_enable_err, 0);
}
if (hose->serr_irq) {
- init_timer(&hose->serr_timer);
- hose->serr_timer.data = (unsigned long)hose;
- hose->serr_timer.function = pcibios_enable_serr;
+ timer_setup(&hose->serr_timer, pcibios_enable_serr, 0);
}
}
}
static DEVICE_ATTR(switch, S_IRUGO, switch_show, NULL);
-static void switch_timer(unsigned long data)
+static void switch_timer(struct timer_list *t)
{
- struct push_switch *psw = (struct push_switch *)data;
+ struct push_switch *psw = from_timer(psw, t, debounce);
schedule_work(&psw->work);
}
}
INIT_WORK(&psw->work, switch_work_handler);
- init_timer(&psw->debounce);
-
- psw->debounce.function = switch_timer;
- psw->debounce.data = (unsigned long)psw;
+ timer_setup(&psw->debounce, switch_timer, 0);
/* Workqueue API brain-damage */
psw->pdev = pdev;
return pte_pfn(pte);
}
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline unsigned long pmd_write(pmd_t pmd)
{
pte_t pte = __pte(pmd_val(pmd));
lib-$(CONFIG_SPARC64) += multi3.o
lib-$(CONFIG_SPARC64) += fls.o
lib-$(CONFIG_SPARC64) += fls64.o
-obj-$(CONFIG_SPARC64) += NG4fls.o
+lib-$(CONFIG_SPARC64) += NG4fls.o
lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
if (!(pmd_val(pmd) & _PAGE_VALID))
return 0;
- if (write && !pmd_write(pmd))
+ if (!pmd_access_permitted(pmd, write))
return 0;
refs = 0;
if (!(pud_val(pud) & _PAGE_VALID))
return 0;
- if (write && !pud_write(pud))
+ if (!pud_access_permitted(pud, write))
return 0;
refs = 0;
#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
-#define __HAVE_ARCH_PMD_WRITE
#define pfn_pmd(pfn, pgprot) pte_pmd(pfn_pte((pfn), (pgprot)))
#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
If unsure, say Y.
config X86_INTEL_UMIP
- def_bool n
+ def_bool y
depends on CPU_SUP_INTEL
prompt "Intel User Mode Instruction Prevention" if EXPERT
---help---
The User Mode Instruction Prevention (UMIP) is a security
feature in newer Intel processors. If enabled, a general
- protection fault is issued if the instructions SGDT, SLDT,
- SIDT, SMSW and STR are executed in user mode.
+ protection fault is issued if the SGDT, SLDT, SIDT, SMSW
+ or STR instructions are executed in user mode. These instructions
+ unnecessarily expose information about the hardware state.
+
+ The vast majority of applications do not use these instructions.
+ For the very few that do, software emulation is provided in
+ specific cases in protected and virtual-8086 modes. Emulated
+ results are dummy values.
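
The effect is easy to observe from user space. A trivial probe (plain C, illustrative only; on a UMIP-enforcing kernel this either receives SIGSEGV or, in the emulated cases, prints dummy data):

    #include <stdio.h>

    int main(void)
    {
            unsigned long msw = 0;

            /* SMSW exposes the low bits of CR0 on pre-UMIP systems */
            asm volatile("smsw %0" : "=r" (msw));
            printf("smsw: %#lx\n", msw);
            return 0;
    }
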
config X86_INTEL_MPX
prompt "Intel MPX (Memory Protection Extensions)"
static void mem_avoid_memmap(char *str)
{
static int i;
- int rc;
if (i >= MAX_MEMMAP_REGIONS)
return;
return 0;
tmp_cmdline = malloc(len + 1);
- if (!tmp_cmdline )
+ if (!tmp_cmdline)
error("Failed to allocate space for tmp_cmdline");
memcpy(tmp_cmdline, args, len);
cmd_line |= boot_params->hdr.cmd_line_ptr;
/* Calculate size of cmd_line. */
ptr = (char *)(unsigned long)cmd_line;
- for (cmd_line_size = 0; ptr[cmd_line_size++]; )
+ for (cmd_line_size = 0; ptr[cmd_line_size++];)
;
mem_avoid[MEM_AVOID_CMDLINE].start = cmd_line;
mem_avoid[MEM_AVOID_CMDLINE].size = cmd_line_size;
END(native_usergs_sysret64)
#endif /* CONFIG_PARAVIRT */
-.macro TRACE_IRQS_IRETQ
+.macro TRACE_IRQS_FLAGS flags:req
#ifdef CONFIG_TRACE_IRQFLAGS
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ bt $9, \flags /* interrupts off? */
jnc 1f
TRACE_IRQS_ON
1:
#endif
.endm
+.macro TRACE_IRQS_IRETQ
+ TRACE_IRQS_FLAGS EFLAGS(%rsp)
+.endm
+
/*
* When dynamic function tracer is enabled it will add a breakpoint
* to all locations that it is about to modify, sync CPUs, update
movq %rsp, PER_CPU_VAR(rsp_scratch)
movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
- TRACE_IRQS_OFF
-
/* Construct struct pt_regs on stack */
pushq $__USER_DS /* pt_regs->ss */
pushq PER_CPU_VAR(rsp_scratch) /* pt_regs->sp */
sub $(6*8), %rsp /* pt_regs->bp, bx, r12-15 not saved */
UNWIND_HINT_REGS extra=0
+ TRACE_IRQS_OFF
+
/*
* If we need to do entry work or if we guess we'll need to do
* exit work, go straight to the slow path.
FRAME_BEGIN
pushfq
DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
+ TRACE_IRQS_OFF
SWAPGS
.Lgs_change:
movl %edi, %gs
2: ALTERNATIVE "", "mfence", X86_BUG_SWAPGS_FENCE
SWAPGS
+ TRACE_IRQS_FLAGS (%rsp)
popfq
FRAME_END
ret
EVENT_ATTR_STR(cycles-ct, cycles_ct, "event=0x3c,in_tx=1,in_tx_cp=1");
static struct attribute *hsw_events_attrs[] = {
+ EVENT_PTR(mem_ld_hsw),
+ EVENT_PTR(mem_st_hsw),
+ EVENT_PTR(td_slots_issued),
+ EVENT_PTR(td_slots_retired),
+ EVENT_PTR(td_fetch_bubbles),
+ EVENT_PTR(td_total_slots),
+ EVENT_PTR(td_total_slots_scale),
+ EVENT_PTR(td_recovery_bubbles),
+ EVENT_PTR(td_recovery_bubbles_scale),
+ NULL
+};
+
+static struct attribute *hsw_tsx_events_attrs[] = {
EVENT_PTR(tx_start),
EVENT_PTR(tx_commit),
EVENT_PTR(tx_abort),
EVENT_PTR(el_conflict),
EVENT_PTR(cycles_t),
EVENT_PTR(cycles_ct),
- EVENT_PTR(mem_ld_hsw),
- EVENT_PTR(mem_st_hsw),
- EVENT_PTR(td_slots_issued),
- EVENT_PTR(td_slots_retired),
- EVENT_PTR(td_fetch_bubbles),
- EVENT_PTR(td_total_slots),
- EVENT_PTR(td_total_slots_scale),
- EVENT_PTR(td_recovery_bubbles),
- EVENT_PTR(td_recovery_bubbles_scale),
NULL
};
+static __init struct attribute **get_hsw_events_attrs(void)
+{
+ return boot_cpu_has(X86_FEATURE_RTM) ?
+ merge_attr(hsw_events_attrs, hsw_tsx_events_attrs) :
+ hsw_events_attrs;
+}
+
static ssize_t freeze_on_smi_show(struct device *cdev,
struct device_attribute *attr,
char *buf)
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
x86_pmu.lbr_double_abort = true;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
x86_pmu.hw_config = hsw_hw_config;
x86_pmu.get_event_constraints = hsw_get_event_constraints;
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
x86_pmu.limit_period = bdw_limit_period;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
hsw_format_attr : nhm_format_attr;
extra_attr = merge_attr(extra_attr, skl_format_attr);
- x86_pmu.cpu_events = hsw_events_attrs;
+ x86_pmu.cpu_events = get_hsw_events_attrs();
intel_pmu_pebs_data_source_skl(
boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
pr_cont("Skylake events, ");
int i, phys_id, pkg;
phys_id = uncore_pcibus_to_physid(pdev->bus);
- pkg = topology_phys_to_logical_pkg(phys_id);
box = pci_get_drvdata(pdev);
if (!box) {
+ pkg = topology_phys_to_logical_pkg(phys_id);
for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) {
if (uncore_extra_pci_dev[pkg].dev[i] == pdev) {
uncore_extra_pci_dev[pkg].dev[i] = NULL;
return;
pci_set_drvdata(pdev, NULL);
- pmu->boxes[pkg] = NULL;
+ pmu->boxes[box->pkgid] = NULL;
if (atomic_dec_return(&pmu->activeboxes) == 0)
uncore_pmu_unregister(pmu);
uncore_box_exit(box);
struct intel_uncore_box {
int pci_phys_id;
- int pkgid;
+ int pkgid; /* Logical package ID */
int n_active; /* number of active events */
int n_events;
int cpu; /* cpu to collect events */
if (reg1->idx != EXTRA_REG_NONE) {
int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
- int pkg = topology_phys_to_logical_pkg(box->pci_phys_id);
+ int pkg = box->pkgid;
struct pci_dev *filter_pdev = uncore_extra_pci_dev[pkg].dev[idx];
if (filter_pdev) {
NULL,
};
+/* Bit 7 'Use Occupancy' is not available for counter 0 on BDX */
+static struct event_constraint bdx_uncore_pcu_constraints[] = {
+ EVENT_CONSTRAINT(0x80, 0xe, 0x80),
+ EVENT_CONSTRAINT_END
+};
+
void bdx_uncore_cpu_init(void)
{
if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
uncore_msr_uncores = bdx_msr_uncores;
+
+ hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints;
}
static struct intel_uncore_type bdx_uncore_ha = {
extern unsigned long task_size_32bit(void);
extern unsigned long task_size_64bit(int full_addr_space);
extern unsigned long get_mmap_base(int is_legacy);
+extern bool mmap_address_hint_valid(unsigned long addr, unsigned long len);
#ifdef CONFIG_X86_32
void *dmar_data;
};
#endif
-#ifdef CONFIG_HT_IRQ
- struct {
- int ht_pos;
- int ht_idx;
- struct pci_dev *ht_dev;
- void *ht_update;
- };
-#endif
#ifdef CONFIG_X86_UV
struct {
int uv_limit;
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_X86_HYPERTRANSPORT_H
-#define _ASM_X86_HYPERTRANSPORT_H
-
-/*
- * Constants for x86 Hypertransport Interrupts.
- */
-
-#define HT_IRQ_LOW_BASE 0xf8000000
-
-#define HT_IRQ_LOW_VECTOR_SHIFT 16
-#define HT_IRQ_LOW_VECTOR_MASK 0x00ff0000
-#define HT_IRQ_LOW_VECTOR(v) \
- (((v) << HT_IRQ_LOW_VECTOR_SHIFT) & HT_IRQ_LOW_VECTOR_MASK)
-
-#define HT_IRQ_LOW_DEST_ID_SHIFT 8
-#define HT_IRQ_LOW_DEST_ID_MASK 0x0000ff00
-#define HT_IRQ_LOW_DEST_ID(v) \
- (((v) << HT_IRQ_LOW_DEST_ID_SHIFT) & HT_IRQ_LOW_DEST_ID_MASK)
-
-#define HT_IRQ_LOW_DM_PHYSICAL 0x0000000
-#define HT_IRQ_LOW_DM_LOGICAL 0x0000040
-
-#define HT_IRQ_LOW_RQEOI_EDGE 0x0000000
-#define HT_IRQ_LOW_RQEOI_LEVEL 0x0000020
-
-
-#define HT_IRQ_LOW_MT_FIXED 0x0000000
-#define HT_IRQ_LOW_MT_ARBITRATED 0x0000004
-#define HT_IRQ_LOW_MT_SMI 0x0000008
-#define HT_IRQ_LOW_MT_NMI 0x000000c
-#define HT_IRQ_LOW_MT_INIT 0x0000010
-#define HT_IRQ_LOW_MT_STARTUP 0x0000014
-#define HT_IRQ_LOW_MT_EXTINT 0x0000018
-#define HT_IRQ_LOW_MT_LINT1 0x000008c
-#define HT_IRQ_LOW_MT_LINT0 0x0000098
-
-#define HT_IRQ_LOW_IRQ_MASKED 0x0000001
-
-
-#define HT_IRQ_HIGH_DEST_ID_SHIFT 0
-#define HT_IRQ_HIGH_DEST_ID_MASK 0x00ffffff
-#define HT_IRQ_HIGH_DEST_ID(v) \
- ((((v) >> 8) << HT_IRQ_HIGH_DEST_ID_SHIFT) & HT_IRQ_HIGH_DEST_ID_MASK)
-
-#endif /* _ASM_X86_HYPERTRANSPORT_H */
void __user *insn_get_addr_ref(struct insn *insn, struct pt_regs *regs);
int insn_get_modrm_rm_off(struct insn *insn, struct pt_regs *regs);
unsigned long insn_get_seg_base(struct pt_regs *regs, int seg_reg_idx);
-char insn_get_code_seg_params(struct pt_regs *regs);
+int insn_get_code_seg_params(struct pt_regs *regs);
#endif /* _ASM_X86_INSN_EVAL_H */
#endif
+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
+extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
+extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
+
/**
* virt_to_phys - map virtual addresses to physical
* @address: address to remap
static inline void arch_init_msi_domain(struct irq_domain *domain) { }
#endif
-#ifdef CONFIG_HT_IRQ
-extern void arch_init_htirq_domain(struct irq_domain *domain);
-#else
-static inline void arch_init_htirq_domain(struct irq_domain *domain) { }
-#endif
-
#endif
static inline int emulate_instruction(struct kvm_vcpu *vcpu,
int emulation_type)
{
- return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
+ return x86_emulate_instruction(vcpu, 0,
+ emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
}
void kvm_enable_efer_bits(u64);
unsigned long address, pmd_t *pmdp);
-#define __HAVE_ARCH_PMD_WRITE
+#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_RW;
clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return pud_flags(pud) & _PAGE_RW;
+}
+
/*
* clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
*
/* Index into per_cpu list: */
u16 cpu_index;
u32 microcode;
+ unsigned initialized : 1;
} __randomize_layout;
struct cpuid_regs {
#ifdef CONFIG_X86_IO_APIC
#define MP_ISA_BUS 0
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+ u8 trigger, u32 gsi);
+
static void __init mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger,
u32 gsi)
{
- int ioapic;
- int pin;
- struct mpc_intsrc mp_irq;
-
/*
* Check bus_irq boundary.
*/
return;
}
- /*
- * Convert 'gsi' to 'ioapic.pin'.
- */
- ioapic = mp_find_ioapic(gsi);
- if (ioapic < 0)
- return;
- pin = mp_find_ioapic_pin(ioapic, gsi);
-
/*
* TBD: This check is for faulty timer entries, where the override
* erroneously sets the trigger to level, resulting in a HUGE
if ((bus_irq == 0) && (trigger == 3))
trigger = 1;
- mp_irq.type = MP_INTSRC;
- mp_irq.irqtype = mp_INT;
- mp_irq.irqflag = (trigger << 2) | polarity;
- mp_irq.srcbus = MP_ISA_BUS;
- mp_irq.srcbusirq = bus_irq; /* IRQ */
- mp_irq.dstapic = mpc_ioapic_id(ioapic); /* APIC ID */
- mp_irq.dstirq = pin; /* INTIN# */
-
- mp_save_irq(&mp_irq);
-
+ if (mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi) < 0)
+ return;
/*
* Reset default identity mapping if gsi is also an legacy IRQ,
* otherwise there will be more than one entry with the same GSI
return 0;
}
+static int __init mp_register_ioapic_irq(u8 bus_irq, u8 polarity,
+ u8 trigger, u32 gsi)
+{
+ struct mpc_intsrc mp_irq;
+ int ioapic, pin;
+
+ /* Convert 'gsi' to 'ioapic.pin'(INTIN#) */
+ ioapic = mp_find_ioapic(gsi);
+ if (ioapic < 0) {
+ pr_warn("Failed to find ioapic for gsi: %u\n", gsi);
+ return ioapic;
+ }
+
+ pin = mp_find_ioapic_pin(ioapic, gsi);
+
+ mp_irq.type = MP_INTSRC;
+ mp_irq.irqtype = mp_INT;
+ mp_irq.irqflag = (trigger << 2) | polarity;
+ mp_irq.srcbus = MP_ISA_BUS;
+ mp_irq.srcbusirq = bus_irq;
+ mp_irq.dstapic = mpc_ioapic_id(ioapic);
+ mp_irq.dstirq = pin;
+
+ mp_save_irq(&mp_irq);
+
+ return 0;
+}
+
static int __init
acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
{
if (acpi_sci_flags & ACPI_MADT_POLARITY_MASK)
polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK;
- mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ if (bus_irq < NR_IRQS_LEGACY)
+ mp_override_legacy_irq(bus_irq, polarity, trigger, gsi);
+ else
+ mp_register_ioapic_irq(bus_irq, polarity, trigger, gsi);
+
acpi_penalize_sci_irq(bus_irq, trigger, polarity);
/*
obj-$(CONFIG_X86_IO_APIC) += io_apic.o
obj-$(CONFIG_PCI_MSI) += msi.o
-obj-$(CONFIG_HT_IRQ) += htirq.o
obj-$(CONFIG_SMP) += ipi.o
ifeq ($(CONFIG_X86_64),y)
+++ /dev/null
-/*
- * Support Hypertransport IRQ
- *
- * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
- * Moved from arch/x86/kernel/apic/io_apic.c.
- * Jiang Liu <jiang.liu@linux.intel.com>
- * Add support of hierarchical irqdomain
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/mm.h>
-#include <linux/interrupt.h>
-#include <linux/init.h>
-#include <linux/device.h>
-#include <linux/pci.h>
-#include <linux/htirq.h>
-#include <asm/irqdomain.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/hypertransport.h>
-
-static struct irq_domain *htirq_domain;
-
-/*
- * Hypertransport interrupt support
- */
-static int
-ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
-{
- struct irq_data *parent = data->parent_data;
- int ret;
-
- ret = parent->chip->irq_set_affinity(parent, mask, force);
- if (ret >= 0) {
- struct ht_irq_msg msg;
- struct irq_cfg *cfg = irqd_cfg(data);
-
- fetch_ht_irq_msg(data->irq, &msg);
- msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK |
- HT_IRQ_LOW_DEST_ID_MASK);
- msg.address_lo |= HT_IRQ_LOW_VECTOR(cfg->vector) |
- HT_IRQ_LOW_DEST_ID(cfg->dest_apicid);
- msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
- msg.address_hi |= HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
- write_ht_irq_msg(data->irq, &msg);
- }
-
- return ret;
-}
-
-static struct irq_chip ht_irq_chip = {
- .name = "PCI-HT",
- .irq_mask = mask_ht_irq,
- .irq_unmask = unmask_ht_irq,
- .irq_ack = irq_chip_ack_parent,
- .irq_set_affinity = ht_set_affinity,
- .irq_retrigger = irq_chip_retrigger_hierarchy,
- .flags = IRQCHIP_SKIP_SET_WAKE,
-};
-
-static int htirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs, void *arg)
-{
- struct ht_irq_cfg *ht_cfg;
- struct irq_alloc_info *info = arg;
- struct pci_dev *dev;
- irq_hw_number_t hwirq;
- int ret;
-
- if (nr_irqs > 1 || !info)
- return -EINVAL;
-
- dev = info->ht_dev;
- hwirq = (info->ht_idx & 0xFF) |
- PCI_DEVID(dev->bus->number, dev->devfn) << 8 |
- (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 24;
- if (irq_find_mapping(domain, hwirq) > 0)
- return -EEXIST;
-
- ht_cfg = kmalloc(sizeof(*ht_cfg), GFP_KERNEL);
- if (!ht_cfg)
- return -ENOMEM;
-
- ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
- if (ret < 0) {
- kfree(ht_cfg);
- return ret;
- }
-
- /* Initialize msg to a value that will never match the first write. */
- ht_cfg->msg.address_lo = 0xffffffff;
- ht_cfg->msg.address_hi = 0xffffffff;
- ht_cfg->dev = info->ht_dev;
- ht_cfg->update = info->ht_update;
- ht_cfg->pos = info->ht_pos;
- ht_cfg->idx = 0x10 + (info->ht_idx * 2);
- irq_domain_set_info(domain, virq, hwirq, &ht_irq_chip, ht_cfg,
- handle_edge_irq, ht_cfg, "edge");
-
- return 0;
-}
-
-static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
- unsigned int nr_irqs)
-{
- struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
-
- BUG_ON(nr_irqs != 1);
- kfree(irq_data->chip_data);
- irq_domain_free_irqs_top(domain, virq, nr_irqs);
-}
-
-static int htirq_domain_activate(struct irq_domain *domain,
- struct irq_data *irq_data, bool early)
-{
- struct ht_irq_msg msg;
- struct irq_cfg *cfg = irqd_cfg(irq_data);
-
- msg.address_hi = HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
- msg.address_lo =
- HT_IRQ_LOW_BASE |
- HT_IRQ_LOW_DEST_ID(cfg->dest_apicid) |
- HT_IRQ_LOW_VECTOR(cfg->vector) |
- ((apic->irq_dest_mode == 0) ?
- HT_IRQ_LOW_DM_PHYSICAL :
- HT_IRQ_LOW_DM_LOGICAL) |
- HT_IRQ_LOW_RQEOI_EDGE |
- ((apic->irq_delivery_mode != dest_LowestPrio) ?
- HT_IRQ_LOW_MT_FIXED :
- HT_IRQ_LOW_MT_ARBITRATED) |
- HT_IRQ_LOW_IRQ_MASKED;
- write_ht_irq_msg(irq_data->irq, &msg);
- return 0;
-}
-
-static void htirq_domain_deactivate(struct irq_domain *domain,
- struct irq_data *irq_data)
-{
- struct ht_irq_msg msg;
-
- memset(&msg, 0, sizeof(msg));
- write_ht_irq_msg(irq_data->irq, &msg);
-}
-
-static const struct irq_domain_ops htirq_domain_ops = {
- .alloc = htirq_domain_alloc,
- .free = htirq_domain_free,
- .activate = htirq_domain_activate,
- .deactivate = htirq_domain_deactivate,
-};
-
-void __init arch_init_htirq_domain(struct irq_domain *parent)
-{
- struct fwnode_handle *fn;
-
- if (disable_apic)
- return;
-
- fn = irq_domain_alloc_named_fwnode("PCI-HT");
- if (!fn)
- goto warn;
-
- htirq_domain = irq_domain_create_tree(fn, &htirq_domain_ops, NULL);
- irq_domain_free_fwnode(fn);
- if (!htirq_domain)
- goto warn;
-
- htirq_domain->parent = parent;
- return;
-
-warn:
- pr_warn("Failed to initialize irqdomain for HTIRQ.\n");
-}
-
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
- ht_irq_update_t *update)
-{
- struct irq_alloc_info info;
-
- if (!htirq_domain)
- return -ENOSYS;
-
- init_irq_alloc_info(&info, NULL);
- info.ht_idx = idx;
- info.ht_pos = pos;
- info.ht_dev = dev;
- info.ht_update = update;
-
- return irq_domain_alloc_irqs(htirq_domain, 1, dev_to_node(&dev->dev),
- &info);
-}
-
-void arch_teardown_ht_irq(unsigned int irq)
-{
- irq_domain_free_irqs(irq, 1);
-}
/*
- * Local APIC related interfaces to support IOAPIC, MSI, HT_IRQ etc.
+ * Local APIC related interfaces to support IOAPIC, MSI, etc.
*
* Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
* Moved from arch/x86/kernel/apic/io_apic.c.
nr_irqs = NR_VECTORS * nr_cpu_ids;
nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids;
-#if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
+#if defined(CONFIG_PCI_MSI)
/*
 * for MSI dyn irq
*/
irq_set_default_host(x86_vector_domain);
arch_init_msi_domain(x86_vector_domain);
- arch_init_htirq_domain(x86_vector_domain);
BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL));
cr4_set_bits(X86_CR4_UMIP);
+ pr_info("x86/cpu: Activated the Intel User Mode Instruction Prevention (UMIP) CPU feature\n");
+
return;
out:
}
static unsigned long mpf_base;
+static bool mpf_found;
static unsigned long __init get_mpc_size(unsigned long physptr)
{
if (!smp_found_config)
return;
- if (!mpf_base)
+ if (!mpf_found)
return;
if (acpi_lapic && early)
smp_found_config = 1;
#endif
mpf_base = base;
+ mpf_found = true;
pr_info("found SMP MP-table at [mem %#010lx-%#010lx] mapped at [%p]\n",
base, base + sizeof(*mpf) - 1, mpf);
if (!enable_update_mptable)
return 0;
- if (!mpf_base)
+ if (!mpf_found)
return 0;
mpf = early_memremap(mpf_base, sizeof(*mpf));
EXPORT_PER_CPU_SYMBOL(cpu_info);
/* Logical package management. We might want to allocate that dynamically */
-static int *physical_to_logical_pkg __read_mostly;
-static unsigned long *physical_package_map __read_mostly;;
-static unsigned int max_physical_pkg_id __read_mostly;
unsigned int __max_logical_packages __read_mostly;
EXPORT_SYMBOL(__max_logical_packages);
static unsigned int logical_packages __read_mostly;
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
+/**
+ * topology_phys_to_logical_pkg - Map a physical package id to a logical package id
+ *
+ * Returns logical package id or -1 if not found
+ */
+int topology_phys_to_logical_pkg(unsigned int phys_pkg)
+{
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+ if (c->initialized && c->phys_proc_id == phys_pkg)
+ return c->logical_proc_id;
+ }
+ return -1;
+}
+EXPORT_SYMBOL(topology_phys_to_logical_pkg);
+
/**
* topology_update_package_map - Update the physical to logical package map
* @pkg: The physical package id as retrieved via CPUID
*/
int topology_update_package_map(unsigned int pkg, unsigned int cpu)
{
- unsigned int new;
+ int new;
- /* Called from early boot ? */
- if (!physical_package_map)
- return 0;
-
- if (pkg >= max_physical_pkg_id)
- return -EINVAL;
-
- /* Set the logical package id */
- if (test_and_set_bit(pkg, physical_package_map))
+ /* Already available somewhere? */
+ new = topology_phys_to_logical_pkg(pkg);
+ if (new >= 0)
goto found;
- if (logical_packages >= __max_logical_packages) {
- pr_warn("Package %u of CPU %u exceeds BIOS package data %u.\n",
- logical_packages, cpu, __max_logical_packages);
- return -ENOSPC;
- }
-
new = logical_packages++;
if (new != pkg) {
pr_info("CPU %u Converting physical %u to logical package %u\n",
cpu, pkg, new);
}
- physical_to_logical_pkg[pkg] = new;
-
found:
- cpu_data(cpu).logical_proc_id = physical_to_logical_pkg[pkg];
+ cpu_data(cpu).logical_proc_id = new;
return 0;
}
-/**
- * topology_phys_to_logical_pkg - Map a physical package id to a logical
- *
- * Returns logical package id or -1 if not found
- */
-int topology_phys_to_logical_pkg(unsigned int phys_pkg)
-{
- if (phys_pkg >= max_physical_pkg_id)
- return -1;
- return physical_to_logical_pkg[phys_pkg];
-}
-EXPORT_SYMBOL(topology_phys_to_logical_pkg);
-
-static void __init smp_init_package_map(struct cpuinfo_x86 *c, unsigned int cpu)
-{
- unsigned int ncpus;
- size_t size;
-
- /*
- * Today neither Intel nor AMD support heterogenous systems. That
- * might change in the future....
- *
- * While ideally we'd want '* smp_num_siblings' in the below @ncpus
- * computation, this won't actually work since some Intel BIOSes
- * report inconsistent HT data when they disable HT.
- *
- * In particular, they reduce the APIC-IDs to only include the cores,
- * but leave the CPUID topology to say there are (2) siblings.
- * This means we don't know how many threads there will be until
- * after the APIC enumeration.
- *
- * By not including this we'll sometimes over-estimate the number of
- * logical packages by the amount of !present siblings, but this is
- * still better than MAX_LOCAL_APIC.
- *
- * We use total_cpus not nr_cpu_ids because nr_cpu_ids can be limited
- * on the command line leading to a similar issue as the HT disable
- * problem because the hyperthreads are usually enumerated after the
- * primary cores.
- */
- ncpus = boot_cpu_data.x86_max_cores;
- if (!ncpus) {
- pr_warn("x86_max_cores == zero !?!?");
- ncpus = 1;
- }
-
- __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
- logical_packages = 0;
-
- /*
- * Possibly larger than what we need as the number of apic ids per
- * package can be smaller than the actual used apic ids.
- */
- max_physical_pkg_id = DIV_ROUND_UP(MAX_LOCAL_APIC, ncpus);
- size = max_physical_pkg_id * sizeof(unsigned int);
- physical_to_logical_pkg = kmalloc(size, GFP_KERNEL);
- memset(physical_to_logical_pkg, 0xff, size);
- size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
- physical_package_map = kzalloc(size, GFP_KERNEL);
-
- pr_info("Max logical packages: %u\n", __max_logical_packages);
-
- topology_update_package_map(c->phys_proc_id, cpu);
-}
-
void __init smp_store_boot_cpu_info(void)
{
int id = 0; /* CPU 0 */
*c = boot_cpu_data;
c->cpu_index = id;
- smp_init_package_map(c, id);
+ topology_update_package_map(c->phys_proc_id, id);
+ c->initialized = true;
}
/*
{
struct cpuinfo_x86 *c = &cpu_data(id);
- *c = boot_cpu_data;
+ /* Copy boot_cpu_data only on the first bringup */
+ if (!c->initialized)
+ *c = boot_cpu_data;
c->cpu_index = id;
/*
* During boot time, CPU0 has this setup already. Save the info when
* bringing up AP or offlined CPU0.
*/
identify_secondary_cpu(c);
+ c->initialized = true;
}
static bool
void __init native_smp_cpus_done(unsigned int max_cpus)
{
+ int ncpus;
+
pr_debug("Boot done\n");
+ /*
+	 * Today neither Intel nor AMD support heterogeneous systems so
+ * extrapolate the boot cpu's data to all packages.
+ */
+ ncpus = cpu_data(0).booted_cores * smp_num_siblings;
+ __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
+ pr_info("Max logical packages: %u\n", __max_logical_packages);
if (x86_has_numa_in_package)
set_sched_topology(x86_numa_in_package_topology);
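As a worked example of the extrapolation above (hypothetical topology, not taken from the patch): on a two-socket box with 8 booted cores per socket and SMT 2, the boot CPU reports booted_cores = 8 and smp_num_siblings = 2, so with nr_cpu_ids = 32:

	int ncpus = 8 * 2;                          /* threads per package */
	int max_pkgs = DIV_ROUND_UP(32, ncpus);     /* = 2 logical packages */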
if (len > TASK_SIZE)
return -ENOMEM;
+ /* No address checking. See comment at mmap_address_hint_valid() */
if (flags & MAP_FIXED)
return addr;
/* requesting a specific address */
if (addr) {
- addr = PAGE_ALIGN(addr);
+ addr &= PAGE_MASK;
+ if (!mmap_address_hint_valid(addr, len))
+ goto get_unmapped_area;
+
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
}
+get_unmapped_area:
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
#define UMIP_INST_SGDT 0 /* 0F 01 /0 */
#define UMIP_INST_SIDT 1 /* 0F 01 /1 */
-#define UMIP_INST_SMSW 3 /* 0F 01 /4 */
+#define UMIP_INST_SMSW 2 /* 0F 01 /4 */
+#define UMIP_INST_SLDT 3 /* 0F 00 /0 */
+#define UMIP_INST_STR 4 /* 0F 00 /1 */
+
+const char * const umip_insns[5] = {
+ [UMIP_INST_SGDT] = "SGDT",
+ [UMIP_INST_SIDT] = "SIDT",
+ [UMIP_INST_SMSW] = "SMSW",
+ [UMIP_INST_SLDT] = "SLDT",
+ [UMIP_INST_STR] = "STR",
+};
+
+#define umip_pr_err(regs, fmt, ...) \
+ umip_printk(regs, KERN_ERR, fmt, ##__VA_ARGS__)
+#define umip_pr_warning(regs, fmt, ...) \
+ umip_printk(regs, KERN_WARNING, fmt, ##__VA_ARGS__)
+
+/**
+ * umip_printk() - Print a rate-limited message
+ * @regs: Register set with the context in which the warning is printed
+ * @log_level: Kernel log level to print the message
+ * @fmt: The text string to print
+ *
+ * Print the text contained in @fmt. The print rate is limited to bursts of 5
+ * messages every two minutes. The purpose of this customized version of
+ * printk() is to print messages when user space processes use any of the
+ * UMIP-protected instructions. Thus, the printed text is prepended with the
+ * task name and process ID number of the current task as well as the
+ * instruction and stack pointers in @regs as seen when entering kernel mode.
+ *
+ * Returns:
+ *
+ * None.
+ */
+static __printf(3, 4)
+void umip_printk(const struct pt_regs *regs, const char *log_level,
+ const char *fmt, ...)
+{
+ /* Bursts of 5 messages every two minutes */
+ static DEFINE_RATELIMIT_STATE(ratelimit, 2 * 60 * HZ, 5);
+ struct task_struct *tsk = current;
+ struct va_format vaf;
+ va_list args;
+
+ if (!__ratelimit(&ratelimit))
+ return;
+
+ va_start(args, fmt);
+ vaf.fmt = fmt;
+ vaf.va = &args;
+ printk("%s" pr_fmt("%s[%d] ip:%lx sp:%lx: %pV"), log_level, tsk->comm,
+ task_pid_nr(tsk), regs->ip, regs->sp, &vaf);
+ va_end(args);
+}
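A hypothetical call site for the wrappers above (the message text here is illustrative only, not from the patch):

	/* Rate-limited to bursts of 5 messages every two minutes */
	umip_pr_warning(regs, "emulating %s for this process\n",
			umip_insns[umip_inst]);
	/* Output resembles "app[1234] ip:8048100 sp:bf900000: emulating
	 * SGDT for this process", with the file's pr_fmt() prefix prepended. */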
/**
* identify_insn() - Identify a UMIP-protected instruction
default:
return -EINVAL;
}
+ } else if (insn->opcode.bytes[1] == 0x0) {
+ if (X86_MODRM_REG(insn->modrm.value) == 0)
+ return UMIP_INST_SLDT;
+ else if (X86_MODRM_REG(insn->modrm.value) == 1)
+ return UMIP_INST_STR;
+ else
+ return -EINVAL;
+ } else {
+ return -EINVAL;
}
-
- /* SLDT AND STR are not emulated */
- return -EINVAL;
}
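The two-byte opcodes above are disambiguated by the reg field of the ModRM byte, i.e. bits 5:3. A minimal sketch of that extraction, matching the kernel's X86_MODRM_REG() definition:

	unsigned char modrm = 0xc8;        /* mod=11 reg=001 rm=000 */
	int reg = (modrm & 0x38) >> 3;     /* 1, so 0F 00 /1 -> STR */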
/**
if (!(show_unhandled_signals && unhandled_signal(tsk, SIGSEGV)))
return;
- pr_err_ratelimited("%s[%d] umip emulation segfault ip:%lx sp:%lx error:%x in %lx\n",
- tsk->comm, task_pid_nr(tsk), regs->ip,
- regs->sp, X86_PF_USER | X86_PF_WRITE,
- regs->ip);
+ umip_pr_err(regs, "segfault in emulation. error%x\n",
+ X86_PF_USER | X86_PF_WRITE);
}
/**
unsigned char buf[MAX_INSN_SIZE];
void __user *uaddr;
struct insn insn;
- char seg_defs;
+ int seg_defs;
if (!regs)
return false;
- /* Do not emulate 64-bit processes. */
- if (user_64bit_mode(regs))
- return false;
-
/*
* If not in user-space long mode, a custom code segment could be in
* use. This is true in protected mode (if the process defined a local
if (umip_inst < 0)
return false;
+ umip_pr_warning(regs, "%s instruction cannot be used by applications.\n",
+ umip_insns[umip_inst]);
+
+ /* Do not emulate SLDT, STR or user long mode processes. */
+ if (umip_inst == UMIP_INST_STR || umip_inst == UMIP_INST_SLDT || user_64bit_mode(regs))
+ return false;
+
+ umip_pr_warning(regs, "For now, expensive software emulation returns the result.\n");
+
if (emulate_umip_insn(&insn, umip_inst, dummy_data, &dummy_data_size))
return false;
[CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
[CPUID_1_ECX] = { 1, 0, CPUID_ECX},
[CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
- [CPUID_8000_0001_ECX] = {0xc0000001, 0, CPUID_ECX},
+ [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
[CPUID_7_0_EBX] = { 7, 0, CPUID_EBX},
[CPUID_D_1_EAX] = { 0xd, 1, CPUID_EAX},
[CPUID_F_0_EDX] = { 0xf, 0, CPUID_EDX},
fxstate_size(ctxt));
}
+/*
+ * FXRSTOR might restore XMM registers not provided by the guest. Fill
+ * in the host registers (via FXSAVE) instead, so they won't be modified.
+ * (preemption has to stay disabled until FXRSTOR).
+ *
+ * Use noinline to keep the stack for other functions called by callers small.
+ */
+static noinline int fxregs_fixup(struct fxregs_state *fx_state,
+ const size_t used_size)
+{
+ struct fxregs_state fx_tmp;
+ int rc;
+
+ rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_tmp));
+ memcpy((void *)fx_state + used_size, (void *)&fx_tmp + used_size,
+ __fxstate_size(16) - used_size);
+
+ return rc;
+}
+
static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
{
struct fxregs_state fx_state;
if (rc != X86EMUL_CONTINUE)
return rc;
+ size = fxstate_size(ctxt);
+ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+
ctxt->ops->get_fpu(ctxt);
- size = fxstate_size(ctxt);
if (size < __fxstate_size(16)) {
- rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
+ rc = fxregs_fixup(&fx_state, size);
if (rc != X86EMUL_CONTINUE)
goto out;
}
- rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
- if (rc != X86EMUL_CONTINUE)
- goto out;
-
if (fx_state.mxcsr >> 16) {
rc = emulate_gp(ctxt, 0);
goto out;
bool op_prefix = false;
bool has_seg_override = false;
struct opcode opcode;
+ u16 dummy;
+ struct desc_struct desc;
ctxt->memop.type = OP_NONE;
ctxt->memopp = NULL;
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
+ def_op_bytes = def_ad_bytes = 2;
+ ctxt->ops->get_segment(ctxt, &dummy, &desc, NULL, VCPU_SREG_CS);
+ if (desc.d)
+ def_op_bytes = def_ad_bytes = 4;
+ break;
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
old_irr = ioapic->irr;
ioapic->irr |= mask;
- if (edge)
+ if (edge) {
ioapic->irr_delivered &= ~mask;
- if ((edge && old_irr == ioapic->irr) ||
- (!edge && entry.fields.remote_irr)) {
- ret = 0;
- goto out;
+ if (old_irr == ioapic->irr) {
+ ret = 0;
+ goto out;
+ }
}
ret = ioapic_service(ioapic, irq, line_status);
index == RTC_GSI) {
if (kvm_apic_match_dest(vcpu, NULL, 0,
e->fields.dest_id, e->fields.dest_mode) ||
- (e->fields.trig_mode == IOAPIC_EDGE_TRIG &&
- kvm_apic_pending_eoi(vcpu, e->fields.vector)))
+ kvm_apic_pending_eoi(vcpu, e->fields.vector))
__set_bit(e->fields.vector,
ioapic_handled_vectors);
}
{
unsigned index;
bool mask_before, mask_after;
+ int old_remote_irr, old_delivery_status;
union kvm_ioapic_redirect_entry *e;
switch (ioapic->ioregsel) {
return;
e = &ioapic->redirtbl[index];
mask_before = e->fields.mask;
+ /* Preserve read-only fields */
+ old_remote_irr = e->fields.remote_irr;
+ old_delivery_status = e->fields.delivery_status;
if (ioapic->ioregsel & 1) {
e->bits &= 0xffffffff;
e->bits |= (u64) val << 32;
} else {
e->bits &= ~0xffffffffULL;
e->bits |= (u32) val;
- e->fields.remote_irr = 0;
}
+ e->fields.remote_irr = old_remote_irr;
+ e->fields.delivery_status = old_delivery_status;
+
+ /*
+ * Some OSes (Linux, Xen) assume that Remote IRR bit will
+ * be cleared by IOAPIC hardware when the entry is configured
+ * as edge-triggered. This behavior is used to simulate an
+ * explicit EOI on IOAPICs that don't have the EOI register.
+ */
+ if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
+ e->fields.remote_irr = 0;
+
mask_after = e->fields.mask;
if (mask_before != mask_after)
kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
struct kvm_lapic_irq irqe;
int ret;
- if (entry->fields.mask)
+ if (entry->fields.mask ||
+ (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
+ entry->fields.remote_irr))
return -1;
ioapic_debug("dest=%x dest_mode=%x delivery_mode=%x "
recalculate_apic_map(apic->vcpu->kvm);
}
+static inline u32 kvm_apic_calc_x2apic_ldr(u32 id)
+{
+ return ((id >> 4) << 16) | (1 << (id & 0xf));
+}
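A worked value for the helper above: the x2APIC LDR packs a cluster id into bits 31:16 and a one-hot in-cluster position into bits 15:0, so for APIC id 0x25:

	u32 id  = 0x25;
	u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
	/* ldr == 0x00020020: cluster 2, logical bit 5 within the cluster */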
+
static inline void kvm_apic_set_x2apic_id(struct kvm_lapic *apic, u32 id)
{
- u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+ u32 ldr = kvm_apic_calc_x2apic_ldr(id);
WARN_ON_ONCE(id != apic->vcpu->vcpu_id);
{
if (apic_x2apic_mode(vcpu->arch.apic)) {
u32 *id = (u32 *)(s->regs + APIC_ID);
+ u32 *ldr = (u32 *)(s->regs + APIC_LDR);
if (vcpu->kvm->arch.x2apic_format) {
if (*id != vcpu->vcpu_id)
else
*id <<= 24;
}
+
+ /* In x2APIC mode, the LDR is fixed and based on the id */
+ if (set)
+ *ldr = kvm_apic_calc_x2apic_ldr(*id);
}
return 0;
{
struct vmcb_control_area *c, *h;
struct nested_state *g;
+ u32 h_intercept_exceptions;
mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
h = &svm->nested.hsave->control;
g = &svm->nested;
+ /* No need to intercept #UD if L1 doesn't intercept it */
+ h_intercept_exceptions =
+ h->intercept_exceptions & ~(1U << UD_VECTOR);
+
c->intercept_cr = h->intercept_cr | g->intercept_cr;
c->intercept_dr = h->intercept_dr | g->intercept_dr;
- c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
+ c->intercept_exceptions =
+ h_intercept_exceptions | g->intercept_exceptions;
c->intercept = h->intercept | g->intercept;
}
{
int er;
+ WARN_ON_ONCE(is_guest_mode(&svm->vcpu));
er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
+ if (er == EMULATE_USER_EXIT)
+ return 0;
if (er != EMULATE_DONE)
kvm_queue_exception(&svm->vcpu, UD_VECTOR);
return 1;
u32 ecx = msr->index;
u64 data = msr->data;
switch (ecx) {
+ case MSR_IA32_CR_PAT:
+ if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
+ return 1;
+ vcpu->arch.pat = data;
+ svm->vmcb->save.g_pat = data;
+ mark_dirty(svm->vmcb, VMCB_NPT);
+ break;
case MSR_IA32_TSC:
kvm_write_tsc(vcpu, msr);
break;
static bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
+static bool __read_mostly enable_vnmi = 1;
+module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
+
static bool __read_mostly flexpriority_enabled = 1;
module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
bool nmi_known_unmasked;
unsigned long vmcs_host_cr3; /* May not match real cr3 */
unsigned long vmcs_host_cr4; /* May not match real cr4 */
+ /* Support for vnmi-less CPUs */
+ int soft_vnmi_blocked;
+ ktime_t entry_time;
+ s64 vnmi_blocked_time;
struct list_head loaded_vmcss_on_cpu_link;
};
SECONDARY_EXEC_ENABLE_INVPCID;
}
+static inline bool cpu_has_virtual_nmis(void)
+{
+ return vmcs_config.pin_based_exec_ctrl & PIN_BASED_VIRTUAL_NMIS;
+}
+
static inline bool cpu_has_vmx_wbinvd_exit(void)
{
return vmcs_config.cpu_based_2nd_exec_ctrl &
(vmcs12->secondary_vm_exec_control & bit);
}
-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
-{
- return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
-}
-
static inline bool nested_cpu_has_preemption_timer(struct vmcs12 *vmcs12)
{
return vmcs12->pin_based_vm_exec_control &
{
u32 eb;
- eb = (1u << PF_VECTOR) | (1u << UD_VECTOR) | (1u << MC_VECTOR) |
+ eb = (1u << PF_VECTOR) | (1u << MC_VECTOR) |
(1u << DB_VECTOR) | (1u << AC_VECTOR);
if ((vcpu->guest_debug &
(KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP)) ==
*/
if (is_guest_mode(vcpu))
eb |= get_vmcs12(vcpu)->exception_bitmap;
+ else
+ eb |= 1u << UD_VECTOR;
vmcs_write32(EXCEPTION_BITMAP, eb);
}
&_vmexit_control) < 0)
return -EIO;
- min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING |
- PIN_BASED_VIRTUAL_NMIS;
- opt = PIN_BASED_POSTED_INTR | PIN_BASED_VMX_PREEMPTION_TIMER;
+ min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+ opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR |
+ PIN_BASED_VMX_PREEMPTION_TIMER;
if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
&_pin_based_exec_control) < 0)
return -EIO;
if (!kvm_vcpu_apicv_active(&vmx->vcpu))
pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+
+ if (!enable_vnmi)
+ pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
+
/* Enable the preemption timer dynamically */
pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
return pin_based_exec_ctrl;
vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
}
- vmcs_writel(GUEST_RFLAGS, 0x02);
+ kvm_set_rflags(vcpu, X86_EFLAGS_FIXED);
kvm_rip_write(vcpu, 0xfff0);
vmcs_writel(GUEST_GDTR_BASE, 0);
static void enable_nmi_window(struct kvm_vcpu *vcpu)
{
- if (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
+ if (!enable_vnmi ||
+ vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
enable_irq_window(vcpu);
return;
}
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
+ if (!enable_vnmi) {
+ /*
+ * Tracking the NMI-blocked state in software is built upon
+ * finding the next open IRQ window. This, in turn, depends on
+ * well-behaving guests: They have to keep IRQs disabled at
+ * least as long as the NMI handler runs. Otherwise we may
+ * cause NMI nesting, maybe breaking the guest. But as this is
+ * highly unlikely, we can live with the residual risk.
+ */
+ vmx->loaded_vmcs->soft_vnmi_blocked = 1;
+ vmx->loaded_vmcs->vnmi_blocked_time = 0;
+ }
+
++vcpu->stat.nmi_injections;
vmx->loaded_vmcs->nmi_known_unmasked = false;
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool masked;
+ if (!enable_vnmi)
+ return vmx->loaded_vmcs->soft_vnmi_blocked;
if (vmx->loaded_vmcs->nmi_known_unmasked)
return false;
masked = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_NMI;
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx->loaded_vmcs->nmi_known_unmasked = !masked;
- if (masked)
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- else
- vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
+ if (!enable_vnmi) {
+ if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
+ vmx->loaded_vmcs->soft_vnmi_blocked = masked;
+ vmx->loaded_vmcs->vnmi_blocked_time = 0;
+ }
+ } else {
+ vmx->loaded_vmcs->nmi_known_unmasked = !masked;
+ if (masked)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ else
+ vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ }
}
static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
if (to_vmx(vcpu)->nested.nested_run_pending)
return 0;
+ if (!enable_vnmi &&
+ to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+ return 0;
+
return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
(GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
| GUEST_INTR_STATE_NMI));
return 1; /* already handled by vmx_vcpu_run() */
if (is_invalid_opcode(intr_info)) {
- if (is_guest_mode(vcpu)) {
- kvm_queue_exception(vcpu, UD_VECTOR);
- return 1;
- }
+ WARN_ON_ONCE(is_guest_mode(vcpu));
er = emulate_instruction(vcpu, EMULTYPE_TRAP_UD);
+ if (er == EMULATE_USER_EXIT)
+ return 0;
if (er != EMULATE_DONE)
kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
* AAK134, BY25.
*/
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ enable_vnmi &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
static int handle_nmi_window(struct kvm_vcpu *vcpu)
{
+ WARN_ON_ONCE(!enable_vnmi);
vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
CPU_BASED_VIRTUAL_NMI_PENDING);
++vcpu->stat.nmi_window_exits;
if (kvm_test_request(KVM_REQ_EVENT, vcpu))
return 1;
- err = emulate_instruction(vcpu, EMULTYPE_NO_REEXECUTE);
+ err = emulate_instruction(vcpu, 0);
if (err == EMULATE_USER_EXIT) {
++vcpu->stat.mmio_exits;
if (!cpu_has_vmx_flexpriority())
flexpriority_enabled = 0;
+ if (!cpu_has_virtual_nmis())
+ enable_vnmi = 0;
+
/*
* set_apic_access_page_addr() is used to reload apic access
* page upon invalidation. No need to do anything if not
}
/* Create a new VMCS */
- item = kmalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
+ item = kzalloc(sizeof(struct vmcs02_list), GFP_KERNEL);
if (!item)
return NULL;
item->vmcs02.vmcs = alloc_vmcs();
*/
static void free_nested(struct vcpu_vmx *vmx)
{
- if (!vmx->nested.vmxon)
+ if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
return;
vmx->nested.vmxon = false;
+ vmx->nested.smm.vmxon = false;
free_vpid(vmx->nested.vpid02);
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
* "blocked by NMI" bit has to be set before next VM entry.
*/
if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
+ enable_vnmi &&
(exit_qualification & INTR_INFO_UNBLOCK_NMI))
vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
GUEST_INTR_STATE_NMI);
return 0;
}
+ if (unlikely(!enable_vnmi &&
+ vmx->loaded_vmcs->soft_vnmi_blocked)) {
+ if (vmx_interrupt_allowed(vcpu)) {
+ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+ } else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
+ vcpu->arch.nmi_pending) {
+ /*
+			 * This CPU doesn't support us in finding the end of an
+ * NMI-blocked window if the guest runs with IRQs
+ * disabled. So we pull the trigger after 1 s of
+ * futile waiting, but inform the user about this.
+ */
+ printk(KERN_WARNING "%s: Breaking out of NMI-blocked "
+ "state on VCPU %d after 1 s timeout\n",
+ __func__, vcpu->vcpu_id);
+ vmx->loaded_vmcs->soft_vnmi_blocked = 0;
+ }
+ }
+
if (exit_reason < kvm_vmx_max_exit_handlers
&& kvm_vmx_exit_handlers[exit_reason])
return kvm_vmx_exit_handlers[exit_reason](vcpu);
idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
- if (vmx->loaded_vmcs->nmi_known_unmasked)
- return;
- /*
- * Can't use vmx->exit_intr_info since we're not sure what
- * the exit reason is.
- */
- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
- unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
- vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
- /*
- * SDM 3: 27.7.1.2 (September 2008)
- * Re-set bit "block by NMI" before VM entry if vmexit caused by
- * a guest IRET fault.
- * SDM 3: 23.2.2 (September 2008)
- * Bit 12 is undefined in any of the following cases:
- * If the VM exit sets the valid bit in the IDT-vectoring
- * information field.
- * If the VM exit is due to a double fault.
- */
- if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
- vector != DF_VECTOR && !idtv_info_valid)
- vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
- GUEST_INTR_STATE_NMI);
- else
- vmx->loaded_vmcs->nmi_known_unmasked =
- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
- & GUEST_INTR_STATE_NMI);
+ if (enable_vnmi) {
+ if (vmx->loaded_vmcs->nmi_known_unmasked)
+ return;
+ /*
+ * Can't use vmx->exit_intr_info since we're not sure what
+ * the exit reason is.
+ */
+ exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
+ vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
+ /*
+ * SDM 3: 27.7.1.2 (September 2008)
+ * Re-set bit "block by NMI" before VM entry if vmexit caused by
+ * a guest IRET fault.
+ * SDM 3: 23.2.2 (September 2008)
+ * Bit 12 is undefined in any of the following cases:
+ * If the VM exit sets the valid bit in the IDT-vectoring
+ * information field.
+ * If the VM exit is due to a double fault.
+ */
+ if ((exit_intr_info & INTR_INFO_VALID_MASK) && unblock_nmi &&
+ vector != DF_VECTOR && !idtv_info_valid)
+ vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
+ GUEST_INTR_STATE_NMI);
+ else
+ vmx->loaded_vmcs->nmi_known_unmasked =
+ !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO)
+ & GUEST_INTR_STATE_NMI);
+ } else if (unlikely(vmx->loaded_vmcs->soft_vnmi_blocked))
+ vmx->loaded_vmcs->vnmi_blocked_time +=
+ ktime_to_ns(ktime_sub(ktime_get(),
+ vmx->loaded_vmcs->entry_time));
}
static void __vmx_complete_interrupts(struct kvm_vcpu *vcpu,
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long debugctlmsr, cr3, cr4;
+ /* Record the guest's net vcpu time for enforced NMI injections. */
+ if (unlikely(!enable_vnmi &&
+ vmx->loaded_vmcs->soft_vnmi_blocked))
+ vmx->loaded_vmcs->entry_time = ktime_get();
+
/* Don't enter VMX if guest state is invalid, let the exit handler
start emulation until we arrive back to a valid state */
if (vmx->emulation_required)
cr4_fixed1_update(X86_CR4_SMEP, ebx, bit(X86_FEATURE_SMEP));
cr4_fixed1_update(X86_CR4_SMAP, ebx, bit(X86_FEATURE_SMAP));
cr4_fixed1_update(X86_CR4_PKE, ecx, bit(X86_FEATURE_PKU));
- /* TODO: Use X86_CR4_UMIP and X86_FEATURE_UMIP macros */
- cr4_fixed1_update(bit(11), ecx, bit(2));
+ cr4_fixed1_update(X86_CR4_UMIP, ecx, bit(X86_FEATURE_UMIP));
#undef cr4_fixed1_update
}
return 1;
}
+ if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
+ (is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu) ||
+ (vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD)))
+ return 1;
+
return 0;
}
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qual;
-
- if (kvm_event_needs_reinjection(vcpu))
- return -EBUSY;
+ bool block_nested_events =
+ vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
if (vcpu->arch.exception.pending &&
nested_vmx_check_exception(vcpu, &exit_qual)) {
- if (vmx->nested.nested_run_pending)
+ if (block_nested_events)
return -EBUSY;
nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
vcpu->arch.exception.pending = false;
if (nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
vmx->nested.preemption_timer_expired) {
- if (vmx->nested.nested_run_pending)
+ if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
return 0;
}
if (vcpu->arch.nmi_pending && nested_exit_on_nmi(vcpu)) {
- if (vmx->nested.nested_run_pending)
+ if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
NMI_VECTOR | INTR_TYPE_NMI_INTR |
if ((kvm_cpu_has_interrupt(vcpu) || external_intr) &&
nested_exit_on_intr(vcpu)) {
- if (vmx->nested.nested_run_pending)
+ if (block_nested_events)
return -EBUSY;
nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
return 0;
kvm_clear_interrupt_queue(vcpu);
}
+static void load_vmcs12_mmu_host_state(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ u32 entry_failure_code;
+
+ nested_ept_uninit_mmu_context(vcpu);
+
+ /*
+ * Only PDPTE load can fail as the value of cr3 was checked on entry and
+ * couldn't have changed.
+ */
+ if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
+ nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
+
+ if (!enable_ept)
+ vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+}
+
/*
 * A part of what we need to do when the nested L2 guest exits and we want to
* run its L1 parent, is to reset L1's guest state to the host state specified
struct vmcs12 *vmcs12)
{
struct kvm_segment seg;
- u32 entry_failure_code;
if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
vcpu->arch.efer = vmcs12->host_ia32_efer;
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
vmx_set_cr4(vcpu, vmcs12->host_cr4);
- nested_ept_uninit_mmu_context(vcpu);
-
- /*
- * Only PDPTE load can fail as the value of cr3 was checked on entry and
- * couldn't have changed.
- */
- if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &entry_failure_code))
- nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
-
- if (!enable_ept)
- vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
+ load_vmcs12_mmu_host_state(vcpu, vmcs12);
if (enable_vpid) {
/*
* accordingly.
*/
nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
+
+ load_vmcs12_mmu_host_state(vcpu, vmcs12);
+
/*
* The emulated instruction was already skipped in
* nested_vmx_run, but the updated RIP was never
static bool __read_mostly ignore_msrs = 0;
module_param(ignore_msrs, bool, S_IRUGO | S_IWUSR);
+static bool __read_mostly report_ignored_msrs = true;
+module_param(report_ignored_msrs, bool, S_IRUGO | S_IWUSR);
+
unsigned int min_timer_period_us = 500;
module_param(min_timer_period_us, uint, S_IRUGO | S_IWUSR);
/* both __this_cpu_read() and rdtsc() should be on the same cpu */
get_cpu();
- kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
- &hv_clock.tsc_shift,
- &hv_clock.tsc_to_system_mul);
- ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+ if (__this_cpu_read(cpu_tsc_khz)) {
+ kvm_get_time_scale(NSEC_PER_SEC, __this_cpu_read(cpu_tsc_khz) * 1000LL,
+ &hv_clock.tsc_shift,
+ &hv_clock.tsc_to_system_mul);
+ ret = __pvclock_read_cycles(&hv_clock, rdtsc());
+ } else
+ ret = ktime_get_boot_ns() + ka->kvmclock_offset;
put_cpu();
*/
BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+ if (guest_hv_clock.version & 1)
+ ++guest_hv_clock.version; /* first time write, random junk */
+
vcpu->hv_clock.version = guest_hv_clock.version + 1;
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
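The odd/even fixup above exists because pvclock versions follow a seqcount-like protocol: the host leaves the version odd while the shared page is being updated and even when it is consistent. A sketch of the matching guest-side read loop (illustrative, not part of this patch):

	u32 version;
	do {
		version = pvclock->version;     /* odd: update in progress */
		smp_rmb();
		/* ... read tsc_timestamp, system_time, mul, shift ... */
		smp_rmb();
	} while ((version & 1) || version != pvclock->version);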
/* Drop writes to this legacy MSR -- see rdmsr
* counterpart for further detail.
*/
- vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n", msr, data);
+ if (report_ignored_msrs)
+ vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
+ msr, data);
break;
case MSR_AMD64_OSVW_ID_LENGTH:
if (!guest_cpuid_has(vcpu, X86_FEATURE_OSVW))
msr, data);
return 1;
} else {
- vcpu_unimpl(vcpu, "ignored wrmsr: 0x%x data 0x%llx\n",
- msr, data);
+ if (report_ignored_msrs)
+ vcpu_unimpl(vcpu,
+ "ignored wrmsr: 0x%x data 0x%llx\n",
+ msr, data);
break;
}
}
msr_info->index);
return 1;
} else {
- vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n", msr_info->index);
+ if (report_ignored_msrs)
+ vcpu_unimpl(vcpu, "ignored rdmsr: 0x%x\n",
+ msr_info->index);
msr_info->data = 0;
}
break;
vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
vcpu->run->internal.ndata = 0;
- r = EMULATE_FAIL;
+ r = EMULATE_USER_EXIT;
}
kvm_queue_exception(vcpu, UD_VECTOR);
if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
emulation_type))
return EMULATE_DONE;
+ if (ctxt->have_exception && inject_emulated_exception(vcpu))
+ return EMULATE_DONE;
if (emulation_type & EMULTYPE_SKIP)
return EMULATE_FAIL;
return handle_emulation_failure(vcpu);
{
struct fpu *fpu = ¤t->thread.fpu;
int r;
- sigset_t sigsaved;
fpu__initialize(fpu);
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);
if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
if (kvm_run->immediate_exit) {
out:
post_kvm_run_save(vcpu);
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);
return r;
}
*
* Returns:
*
- * A signed 8-bit value containing the default parameters on success.
+ * An int containing ORed-in default parameters on success.
*
* -EINVAL on error.
*/
-char insn_get_code_seg_params(struct pt_regs *regs)
+int insn_get_code_seg_params(struct pt_regs *regs)
{
struct desc_struct *desc;
short sel;
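Callers unpack the widened return value nibble by nibble; a hedged sketch, assuming INSN_CODE_SEG_PARAMS() packs the default operand size in the low nibble and the default address size in the next one:

	int params = insn_get_code_seg_params(regs);
	if (params >= 0) {
		int oper_sz = params & 0xf;          /* default operand size, bytes */
		int addr_sz = (params >> 4) & 0xf;   /* default address size, bytes */
	}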
GrpTable: Grp3_1
0: TEST Eb,Ib
-1:
+1: TEST Eb,Ib
2: NOT Eb
3: NEG Eb
4: MUL AL,Eb
if (len > TASK_SIZE)
return -ENOMEM;
+ /* No address checking. See comment at mmap_address_hint_valid() */
if (flags & MAP_FIXED) {
if (prepare_hugepage_range(file, addr, len))
return -EINVAL;
}
if (addr) {
- addr = ALIGN(addr, huge_page_size(h));
+ addr &= huge_page_mask(h);
+ if (!mmap_address_hint_valid(addr, len))
+ goto get_unmapped_area;
+
vma = find_vma(mm, addr);
- if (TASK_SIZE - len >= addr &&
- (!vma || addr + len <= vm_start_gap(vma)))
+ if (!vma || addr + len <= vm_start_gap(vma))
return addr;
}
+
+get_unmapped_area:
if (mm->get_unmapped_area == arch_get_unmapped_area)
return hugetlb_get_unmapped_area_bottomup(file, addr, len,
pgoff, flags);
#include <linux/compat.h>
#include <asm/elf.h>
+#include "physaddr.h"
+
struct va_alignment __read_mostly va_align = {
.flags = -1,
};
return "[mpx]";
return NULL;
}
+
+/**
+ * mmap_address_hint_valid - Validate the address hint of mmap
+ * @addr: Address hint
+ * @len: Mapping length
+ *
+ * Check whether @addr and @addr + @len result in a valid mapping.
+ *
+ * On 32bit this only checks whether @addr + @len is <= TASK_SIZE.
+ *
+ * On 64bit with 5-level page tables another sanity check is required
+ * because mappings requested by mmap(@addr, 0) which cross the 47-bit
+ * virtual address boundary can cause the following theoretical issue:
+ *
+ * An application calls mmap(addr, 0), i.e. without MAP_FIXED, where @addr
+ * is below the border of the 47-bit address space and @addr + @len is
+ * above the border.
+ *
+ * With 4-level paging this request succeeds, but the resulting mapping
+ * address will always be within the 47-bit virtual address space, because
+ * the hint address does not result in a valid mapping and is
+ * ignored. Hence applications which are not prepared to handle virtual
+ * addresses above 47-bit work correctly.
+ *
+ * With 5-level paging this request would be granted and result in a
+ * mapping which crosses the border of the 47-bit virtual address
+ * space. If the application cannot handle addresses above 47-bit this
+ * will lead to misbehaviour and hard-to-diagnose failures.
+ *
+ * Therefore ignore address hints which would result in a mapping crossing
+ * the 47-bit virtual address boundary.
+ *
+ * Note that in the same scenario with MAP_FIXED the behaviour is
+ * different. The request with @addr < 47-bit and @addr + @len > 47-bit
+ * fails on a 4-level paging machine but succeeds on a 5-level paging
+ * machine. It is reasonable to expect that an application does not rely on
+ * the failure of such a fixed mapping request, so the restriction is not
+ * applied.
+ */
+bool mmap_address_hint_valid(unsigned long addr, unsigned long len)
+{
+ if (TASK_SIZE - len < addr)
+ return false;
+
+ return (addr > DEFAULT_MAP_WINDOW) == (addr + len > DEFAULT_MAP_WINDOW);
+}
+
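To make the boundary rule concrete (illustrative values; that DEFAULT_MAP_WINDOW sits just below the 47-bit boundary on x86-64 is an assumption of this sketch):

	unsigned long addr = DEFAULT_MAP_WINDOW - PAGE_SIZE;
	unsigned long len  = 4 * PAGE_SIZE;
	/* addr is below the window but addr + len reaches above it,
	 * so the hint is rejected and the kernel picks the address. */
	bool ok = mmap_address_hint_valid(addr, len);    /* false */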
+/* Can we access it for direct reading/writing? Must be RAM: */
+int valid_phys_addr_range(phys_addr_t addr, size_t count)
+{
+ return addr + count <= __pa(high_memory);
+}
+
+/* Can we access it through mmap? Must be a valid physical address: */
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
+{
+ phys_addr_t addr = (phys_addr_t)pfn << PAGE_SHIFT;
+
+ return phys_addr_valid(addr + count - 1);
+}
struct bio *bio_split(struct bio *bio, int sectors,
gfp_t gfp, struct bio_set *bs)
{
- struct bio *split = NULL;
+ struct bio *split;
BUG_ON(sectors <= 0);
BUG_ON(sectors >= bio_sectors(bio));
wake_up_all(&q->mq_freeze_wq);
}
-static void blk_rq_timed_out_timer(unsigned long data)
+static void blk_rq_timed_out_timer(struct timer_list *t)
{
- struct request_queue *q = (struct request_queue *)data;
+ struct request_queue *q = from_timer(q, t, timeout);
kblockd_schedule_work(&q->timeout_work);
}
q->backing_dev_info->name = "block";
q->node = node_id;
- setup_timer(&q->backing_dev_info->laptop_mode_wb_timer,
- laptop_mode_timer_fn, (unsigned long) q);
- setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
+ timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
+ laptop_mode_timer_fn, 0);
+ timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
INIT_WORK(&q->timeout_work, NULL);
INIT_LIST_HEAD(&q->queue_head);
INIT_LIST_HEAD(&q->timeout_list);
rcu_read_unlock();
}
-static void blk_stat_timer_fn(unsigned long data)
+static void blk_stat_timer_fn(struct timer_list *t)
{
- struct blk_stat_callback *cb = (void *)data;
+ struct blk_stat_callback *cb = from_timer(cb, t, timer);
unsigned int bucket;
int cpu;
cb->bucket_fn = bucket_fn;
cb->data = data;
cb->buckets = buckets;
- setup_timer(&cb->timer, blk_stat_timer_fn, (unsigned long)cb);
+ timer_setup(&cb->timer, blk_stat_timer_fn, 0);
return cb;
}
ret = wbt_init(q);
if (ret)
return ret;
-
- rwb = q->rq_wb;
- if (!rwb)
- return -EINVAL;
}
+ rwb = q->rq_wb;
if (val == -1)
rwb->min_lat_nsec = wbt_default_latency_nsec(q);
else if (val >= 0)
bool track_bio_latency;
};
-static void throtl_pending_timer_fn(unsigned long arg);
+static void throtl_pending_timer_fn(struct timer_list *t);
static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
{
INIT_LIST_HEAD(&sq->queued[0]);
INIT_LIST_HEAD(&sq->queued[1]);
sq->pending_tree = RB_ROOT;
- setup_timer(&sq->pending_timer, throtl_pending_timer_fn,
- (unsigned long)sq);
+ timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
}
static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
* the top-level service_tree is reached, throtl_data->dispatch_work is
 * kicked so that the ready bios are issued.
*/
-static void throtl_pending_timer_fn(unsigned long arg)
+static void throtl_pending_timer_fn(struct timer_list *t)
{
- struct throtl_service_queue *sq = (void *)arg;
+ struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
struct throtl_grp *tg = sq_to_tg(sq);
struct throtl_data *td = sq_to_td(sq);
struct request_queue *q = td->queue;
if (wbt_is_read(stat))
wb_timestamp(rwb, &rwb->last_comp);
- wbt_clear_state(stat);
} else {
WARN_ON_ONCE(stat == rwb->sync_cookie);
__wbt_done(rwb, wbt_stat_to_mask(stat));
- wbt_clear_state(stat);
}
+ wbt_clear_state(stat);
}
/*
/*
* At this point we know it's a buffered write. If this is
- * kswapd trying to free memory, or REQ_SYNC is set, set, then
+ * kswapd trying to free memory, or REQ_SYNC is set, then
* it's WB_SYNC_ALL writeback, and we'll use the max limit for
* that. If the write is marked as a background write, then use
* the idle limit, or go to normal if we haven't had competing
init_waitqueue_head(&rwb->rq_wait[i].wait);
}
- rwb->wc = 1;
- rwb->queue_depth = RWB_DEF_DEPTH;
rwb->last_comp = rwb->last_issue = jiffies;
rwb->queue = q;
rwb->win_nsec = RWB_WINDOW_NSEC;
disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO;
disk->flags |= GENHD_FL_NO_PART_SCAN;
} else {
+ int ret;
+
/* Register BDI before referencing it from bdev */
disk_to_dev(disk)->devt = devt;
- bdi_register_owner(disk->queue->backing_dev_info,
- disk_to_dev(disk));
+ ret = bdi_register_owner(disk->queue->backing_dev_info,
+ disk_to_dev(disk));
+ WARN_ON(ret);
blk_register_region(disk_devt(disk), disk->minors, NULL,
exact_match, exact_lock, disk);
}
if (minors > DISK_MAX_PARTS) {
printk(KERN_ERR
- "block: can't allocated more than %d partitions\n",
+ "block: can't allocate more than %d partitions\n",
DISK_MAX_PARTS);
minors = DISK_MAX_PARTS;
}
}
EXPORT_SYMBOL_GPL(af_alg_sendpage);
+/**
+ * af_alg_free_resources - release resources required for crypto request
+ */
+void af_alg_free_resources(struct af_alg_async_req *areq)
+{
+ struct sock *sk = areq->sk;
+
+ af_alg_free_areq_sgls(areq);
+ sock_kfree_s(sk, areq, areq->areqlen);
+}
+EXPORT_SYMBOL_GPL(af_alg_free_resources);
+
/**
* af_alg_async_cb - AIO callback handler
*
struct kiocb *iocb = areq->iocb;
unsigned int resultlen;
- lock_sock(sk);
-
/* Buffer size written by crypto operation. */
resultlen = areq->outlen;
- af_alg_free_areq_sgls(areq);
- sock_kfree_s(sk, areq, areq->areqlen);
- __sock_put(sk);
+ af_alg_free_resources(areq);
+ sock_put(sk);
iocb->ki_complete(iocb, err ? err : resultlen, 0);
-
- release_sock(sk);
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_skcipher *null_tfm = aeadc->null_tfm;
- unsigned int as = crypto_aead_authsize(tfm);
+ unsigned int i, as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
- struct af_alg_tsgl *tsgl;
- struct scatterlist *src;
+ struct af_alg_tsgl *tsgl, *tmp;
+ struct scatterlist *rsgl_src, *tsgl_src = NULL;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
size_t outlen = 0; /* [out] RX bufs produced by kernel */
}
processed = used + ctx->aead_assoclen;
- tsgl = list_first_entry(&ctx->tsgl_list, struct af_alg_tsgl, list);
+ list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+ for (i = 0; i < tsgl->cur; i++) {
+ struct scatterlist *process_sg = tsgl->sg + i;
+
+ if (!(process_sg->length) || !sg_page(process_sg))
+ continue;
+ tsgl_src = process_sg;
+ break;
+ }
+ if (tsgl_src)
+ break;
+ }
+ if (processed && !tsgl_src) {
+ err = -EFAULT;
+ goto free;
+ }
/*
* Copy of AAD from source to destination
*/
/* Use the RX SGL as source (and destination) for crypto op. */
- src = areq->first_rsgl.sgl.sg;
+ rsgl_src = areq->first_rsgl.sgl.sg;
if (ctx->enc) {
/*
* v v
* RX SGL: AAD || PT || Tag
*/
- err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+ err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
areq->first_rsgl.sgl.sg, processed);
if (err)
goto free;
*/
/* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl->sg,
+ err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
areq->first_rsgl.sgl.sg, outlen);
if (err)
goto free;
areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
- src = areq->tsgl;
+ rsgl_src = areq->tsgl;
}
/* Initialize the crypto operation */
- aead_request_set_crypt(&areq->cra_u.aead_req, src,
+ aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
areq->first_rsgl.sgl.sg, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
+ sock_hold(sk);
areq->iocb = msg->msg_iocb;
aead_request_set_callback(&areq->cra_u.aead_req,
CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_async_cb, areq);
err = ctx->enc ? crypto_aead_encrypt(&areq->cra_u.aead_req) :
crypto_aead_decrypt(&areq->cra_u.aead_req);
+
+ /* AIO operation in progress */
+ if (err == -EINPROGRESS || err == -EBUSY) {
+ /* Remember output size that will be generated. */
+ areq->outlen = outlen;
+
+ return -EIOCBQUEUED;
+ }
+
+ sock_put(sk);
} else {
/* Synchronous operation */
aead_request_set_callback(&areq->cra_u.aead_req,
&ctx->wait);
}
- /* AIO operation in progress */
- if (err == -EINPROGRESS) {
- sock_hold(sk);
-
- /* Remember output size that will be generated. */
- areq->outlen = outlen;
-
- return -EIOCBQUEUED;
- }
free:
- af_alg_free_areq_sgls(areq);
- sock_kfree_s(sk, areq, areq->areqlen);
+ af_alg_free_resources(areq);
return err ? err : outlen;
}
if (msg->msg_iocb && !is_sync_kiocb(msg->msg_iocb)) {
/* AIO operation */
+ sock_hold(sk);
areq->iocb = msg->msg_iocb;
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
CRYPTO_TFM_REQ_MAY_SLEEP,
err = ctx->enc ?
crypto_skcipher_encrypt(&areq->cra_u.skcipher_req) :
crypto_skcipher_decrypt(&areq->cra_u.skcipher_req);
+
+ /* AIO operation in progress */
+ if (err == -EINPROGRESS || err == -EBUSY) {
+ /* Remember output size that will be generated. */
+ areq->outlen = len;
+
+ return -EIOCBQUEUED;
+ }
+
+ sock_put(sk);
} else {
/* Synchronous operation */
skcipher_request_set_callback(&areq->cra_u.skcipher_req,
&ctx->wait);
}
- /* AIO operation in progress */
- if (err == -EINPROGRESS) {
- sock_hold(sk);
-
- /* Remember output size that will be generated. */
- areq->outlen = len;
-
- return -EIOCBQUEUED;
- }
free:
- af_alg_free_areq_sgls(areq);
- sock_kfree_s(sk, areq, areq->areqlen);
+ af_alg_free_resources(areq);
return err ? err : len;
}
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("PKCS#7 testing key type");
+MODULE_AUTHOR("Red Hat, Inc.");
static unsigned pkcs7_usage;
module_param_named(usage, pkcs7_usage, uint, S_IWUSR | S_IRUGO);
#define pr_fmt(fmt) "PKCS7: "fmt
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include "pkcs7_parser.h"
#include "pkcs7-asn1.h"
+MODULE_DESCRIPTION("PKCS#7 parser");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
struct pkcs7_parse_context {
struct pkcs7_message *msg; /* Message being constructed */
struct pkcs7_signed_info *sinfo; /* SignedInfo being constructed */
#include <crypto/public_key.h>
#include <crypto/akcipher.h>
+MODULE_DESCRIPTION("In-software asymmetric public-key subtype");
+MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
/*
module_exit(x509_key_exit);
MODULE_DESCRIPTION("X.509 certificate parser");
+MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);
+ scatterwalk_done(&walk->in, 0, walk->total);
+ scatterwalk_done(&walk->out, 0, walk->total);
+
walk->iv = req->iv;
walk->oiv = req->iv;
int count;
struct acpi_hardware_id *id;
+	/* Avoid unnecessarily loading modules for non-present devices. */
+ if (!acpi_device_is_present(acpi_dev))
+ return 0;
+
/*
* Since we skip ACPI_DT_NAMESPACE_HID from the modalias below, 0 should
* be returned if ACPI_DT_NAMESPACE_HID is the only ACPI/PNP ID in the
{
struct acpi_ec *ec = NULL;
int ret;
+ bool is_ecdt = false;
+ acpi_status status;
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
- ec = acpi_ec_alloc();
- if (!ec)
- return -ENOMEM;
- if (ec_parse_device(device->handle, 0, ec, NULL) !=
- AE_CTRL_TERMINATE) {
+ if (!strcmp(acpi_device_hid(device), ACPI_ECDT_HID)) {
+ is_ecdt = true;
+ ec = boot_ec;
+ } else {
+ ec = acpi_ec_alloc();
+ if (!ec)
+ return -ENOMEM;
+ status = ec_parse_device(device->handle, 0, ec, NULL);
+ if (status != AE_CTRL_TERMINATE) {
ret = -EINVAL;
goto err_alloc;
+ }
}
if (acpi_is_boot_ec(ec)) {
- boot_ec_is_ecdt = false;
- /*
- * Trust PNP0C09 namespace location rather than ECDT ID.
- *
- * But trust ECDT GPE rather than _GPE because of ASUS quirks,
- * so do not change boot_ec->gpe to ec->gpe.
- */
- boot_ec->handle = ec->handle;
- acpi_handle_debug(ec->handle, "duplicated.\n");
- acpi_ec_free(ec);
- ec = boot_ec;
- ret = acpi_config_boot_ec(ec, ec->handle, true, false);
+ boot_ec_is_ecdt = is_ecdt;
+ if (!is_ecdt) {
+ /*
+ * Trust PNP0C09 namespace location rather than
+ * ECDT ID. But trust ECDT GPE rather than _GPE
+ * because of ASUS quirks, so do not change
+ * boot_ec->gpe to ec->gpe.
+ */
+ boot_ec->handle = ec->handle;
+ acpi_handle_debug(ec->handle, "duplicated.\n");
+ acpi_ec_free(ec);
+ ec = boot_ec;
+ }
+ ret = acpi_config_boot_ec(ec, ec->handle, true, is_ecdt);
} else
ret = acpi_ec_setup(ec, true);
if (ret)
ret = !!request_region(ec->command_addr, 1, "EC cmd");
WARN(!ret, "Could not request EC cmd io port 0x%lx", ec->command_addr);
- /* Reprobe devices depending on the EC */
- acpi_walk_dep_device_list(ec->handle);
+ if (!is_ecdt) {
+ /* Reprobe devices depending on the EC */
+ acpi_walk_dep_device_list(ec->handle);
+ }
acpi_handle_debug(ec->handle, "enumerated.\n");
return 0;
static const struct acpi_device_id ec_device_ids[] = {
{"PNP0C09", 0},
+ {ACPI_ECDT_HID, 0},
{"", 0},
};
* Note: ec->handle can be valid if this function is called after
* acpi_ec_add(), hence the fast path.
*/
- if (boot_ec->handle != ACPI_ROOT_OBJECT)
- handle = boot_ec->handle;
- else if (!acpi_ec_ecdt_get_handle(&handle))
- return -ENODEV;
- return acpi_config_boot_ec(boot_ec, handle, true, true);
+ if (boot_ec->handle == ACPI_ROOT_OBJECT) {
+ if (!acpi_ec_ecdt_get_handle(&handle))
+ return -ENODEV;
+ boot_ec->handle = handle;
+ }
+
+ /* Register to ACPI bus with PM ops attached */
+ return acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
}
#if 0
/* Drivers must be started after acpi_ec_query_init() */
dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
+ /*
+ * Register ECDT to ACPI bus only when PNP0C09 probe fails. This is
+ * useful for platforms (confirmed on ASUS X550ZE) with valid ECDT
+ * settings but invalid DSDT settings.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=196847
+ */
ecdt_fail = acpi_ec_ecdt_start();
return ecdt_fail && dsdt_fail ? -ENODEV : 0;
}
bool acpi_device_is_battery(struct acpi_device *adev);
bool acpi_device_is_first_physical_node(struct acpi_device *adev,
const struct device *dev);
+int acpi_bus_register_early_device(int type);
/* --------------------------------------------------------------------------
Device Matching and Notification
case ACPI_BUS_TYPE_SLEEP_BUTTON:
strcpy(device->pnp.bus_id, "SLPF");
break;
+ case ACPI_BUS_TYPE_ECDT_EC:
+ strcpy(device->pnp.bus_id, "ECDT");
+ break;
default:
acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
/* Clean up trailing underscores (if any) */
case ACPI_BUS_TYPE_SLEEP_BUTTON:
acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
break;
+ case ACPI_BUS_TYPE_ECDT_EC:
+ acpi_add_id(pnp, ACPI_ECDT_HID);
+ break;
}
}
}
EXPORT_SYMBOL_GPL(acpi_bus_trim);
+int acpi_bus_register_early_device(int type)
+{
+ struct acpi_device *device = NULL;
+ int result;
+
+ result = acpi_add_single_object(&device, NULL,
+ type, ACPI_STA_DEFAULT);
+ if (result)
+ return result;
+
+ device->flags.match_driver = true;
+ return device_attach(&device->dev);
+}
+EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
+
static int acpi_bus_scan_fixed(void)
{
int result = 0;
*/
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
/********** globals **********/
static unsigned short debug = 0;
};
/********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
- amb_dev * dev = (amb_dev *) arg;
+static void do_housekeeping (struct timer_list *t) {
+ amb_dev * dev = from_timer(dev, t, housekeeping);
// could collect device-specific (not driver/atm-linux) stats here
PRINTD (DBG_INFO, "registered Madge ATM adapter (no. %d) (%p) at %p",
dev->atm_dev->number, dev, dev->atm_dev);
- dev->atm_dev->dev_data = (void *) dev;
+ dev->atm_dev->dev_data = (void *) dev;
// register our address
amb_esi (dev, dev->atm_dev->esi);
dev->atm_dev->ci_range.vpi_bits = NUM_VPI_BITS;
dev->atm_dev->ci_range.vci_bits = NUM_VCI_BITS;
- setup_timer(&dev->housekeeping, do_housekeeping,
- (unsigned long)dev);
+ timer_setup(&dev->housekeeping, do_housekeeping, 0);
mod_timer(&dev->housekeeping, jiffies);
// enable host interrupts
#ifdef FS_POLL_FREQ
-static void fs_poll (unsigned long data)
+static void fs_poll (struct timer_list *t)
{
- struct fs_dev *dev = (struct fs_dev *) data;
+ struct fs_dev *dev = from_timer(dev, t, timer);
fs_irq (0, dev);
dev->timer.expires = jiffies + FS_POLL_FREQ;
}
#ifdef FS_POLL_FREQ
- init_timer (&dev->timer);
- dev->timer.data = (unsigned long) dev;
- dev->timer.function = fs_poll;
+ timer_setup(&dev->timer, fs_poll, 0);
dev->timer.expires = jiffies + FS_POLL_FREQ;
add_timer (&dev->timer);
#endif
ASSERT(fore200e_vcc);
len = sprintf(page,
- " %08x %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
- (u32)(unsigned long)vcc,
+ " %pK %03d %05d %1d %09lu %05d/%05d %09lu %05d/%05d\n",
+ vcc,
vcc->vpi, vcc->vci, fore200e_atm2fore_aal(vcc->qos.aal),
fore200e_vcc->tx_pdu,
fore200e_vcc->tx_min_pdu > 0xFFFF ? 0 : fore200e_vcc->tx_min_pdu,
/********** globals **********/
-static void do_housekeeping (unsigned long arg);
+static void do_housekeeping (struct timer_list *t);
static unsigned short debug = 0;
static unsigned short vpi_bits = 0;
/********** housekeeping **********/
-static void do_housekeeping (unsigned long arg) {
+static void do_housekeeping (struct timer_list *t) {
// just stats at the moment
- hrz_dev * dev = (hrz_dev *) arg;
+ hrz_dev * dev = from_timer(dev, t, housekeeping);
// collect device-specific (not driver/atm-linux) stats here
dev->tx_cell_count += rd_regw (dev, TX_CELL_COUNT_OFF);
dev->atm_dev->ci_range.vpi_bits = vpi_bits;
dev->atm_dev->ci_range.vci_bits = 10-vpi_bits;
- setup_timer(&dev->housekeeping, do_housekeeping, (unsigned long) dev);
+ timer_setup(&dev->housekeeping, do_housekeeping, 0);
mod_timer(&dev->housekeeping, jiffies);
out:
#define PUT(val,reg) dev->ops->phy_put(dev,val,IDT77105_##reg)
#define GET(reg) dev->ops->phy_get(dev,IDT77105_##reg)
-static void idt77105_stats_timer_func(unsigned long);
-static void idt77105_restart_timer_func(unsigned long);
+static void idt77105_stats_timer_func(struct timer_list *);
+static void idt77105_restart_timer_func(struct timer_list *);
static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func);
* a separate copy of the stats allows implementation of
* an ioctl which gathers the stats *without* zero'ing them.
*/
-static void idt77105_stats_timer_func(unsigned long dummy)
+static void idt77105_stats_timer_func(struct timer_list *unused)
{
struct idt77105_priv *walk;
struct atm_dev *dev;
* interrupts need to be disabled when the cable is pulled out
* to avoid lots of spurious cell error interrupts.
*/
-static void idt77105_restart_timer_func(unsigned long dummy)
+static void idt77105_restart_timer_func(struct timer_list *unused)
{
struct idt77105_priv *walk;
struct atm_dev *dev;
static void
-tst_timer(unsigned long data)
+tst_timer(struct timer_list *t)
{
- struct idt77252_dev *card = (struct idt77252_dev *)data;
+ struct idt77252_dev *card = from_timer(card, t, tst_timer);
unsigned long base, idle, jump;
unsigned long flags;
u32 pc;
spin_lock_init(&card->cmd_lock);
spin_lock_init(&card->tst_lock);
- setup_timer(&card->tst_timer, tst_timer, (unsigned long)card);
+ timer_setup(&card->tst_timer, tst_timer, 0);
/* Do the I/O remapping... */
card->membase = ioremap(membase, 1024);
static IADEV *ia_dev[8];
static struct atm_dev *_ia_dev[8];
static int iadev_count;
-static void ia_led_timer(unsigned long arg);
+static void ia_led_timer(struct timer_list *unused);
static DEFINE_TIMER(ia_timer, ia_led_timer);
static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ;
static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ;
return;
}
-static void ia_led_timer(unsigned long arg) {
+static void ia_led_timer(struct timer_list *unused) {
unsigned long flags;
static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0};
u_char i;
lanai->pci);
if (unlikely(lanai->service.start == NULL))
return -ENOMEM;
- DPRINTK("allocated service buffer at 0x%08lX, size %zu(%d)\n",
- (unsigned long) lanai->service.start,
+ DPRINTK("allocated service buffer at %p, size %zu(%d)\n",
+ lanai->service.start,
lanai_buf_size(&lanai->service),
lanai_buf_size_cardorder(&lanai->service));
/* Clear ServWrite register to be safe */
}
#endif /* !DEBUG_RW */
-static void lanai_timed_poll(unsigned long arg)
+static void lanai_timed_poll(struct timer_list *t)
{
- struct lanai_dev *lanai = (struct lanai_dev *) arg;
+ struct lanai_dev *lanai = from_timer(lanai, t, timer);
#ifndef DEBUG_RW
unsigned long flags;
#ifdef USE_POWERDOWN
static inline void lanai_timed_poll_start(struct lanai_dev *lanai)
{
- init_timer(&lanai->timer);
+ timer_setup(&lanai->timer, lanai_timed_poll, 0);
lanai->timer.expires = jiffies + LANAI_POLL_PERIOD;
- lanai->timer.data = (unsigned long) lanai;
- lanai->timer.function = lanai_timed_poll;
add_timer(&lanai->timer);
}
#endif
memcpy(atmdev->esi, eeprom_mac(lanai), ESI_LEN);
lanai_timed_poll_start(lanai);
- printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=0x%lx, irq=%u "
+ printk(KERN_NOTICE DEV_LABEL "(itf %d): rev.%d, base=%p, irq=%u "
"(%pMF)\n", lanai->number, (int) lanai->pci->revision,
- (unsigned long) lanai->base, lanai->pci->irq, atmdev->esi);
+ lanai->base, lanai->pci->irq, atmdev->esi);
printk(KERN_NOTICE DEV_LABEL "(itf %d): LANAI%s, serialno=%u(0x%X), "
"board_rev=%d\n", lanai->number,
lanai->type==lanai2 ? "2" : "HB", (unsigned int) lanai->serialno,
#ifdef EXTRA_DEBUG
static void which_list(ns_dev * card, struct sk_buff *skb);
#endif
-static void ns_poll(unsigned long arg);
+static void ns_poll(struct timer_list *unused);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);
XPRINTK("nicstar: nicstar_init() returned.\n");
if (!error) {
- init_timer(&ns_timer);
+ timer_setup(&ns_timer, ns_poll, 0);
ns_timer.expires = jiffies + NS_POLL_PERIOD;
- ns_timer.data = 0UL;
- ns_timer.function = ns_poll;
add_timer(&ns_timer);
}
}
#endif /* EXTRA_DEBUG */
-static void ns_poll(unsigned long arg)
+static void ns_poll(struct timer_list *unused)
{
int i;
ns_dev *card;
default:
return -EINVAL;
}
- dev->ops->phy_put(dev, control, reg);
+ dev->ops->phy_put(dev, control, reg);
PRIV(dev)->loop_mode = mode;
return 0;
}
config IMG_ASCII_LCD
tristate "Imagination Technologies ASCII LCD Display"
+ depends on HAS_IOMEM
default y if MIPS_MALTA || MIPS_SEAD3
select SYSCON
help
* Use timer struct to check if the given source is initialized
* by wakeup_source_add.
*/
- return ws->timer.function != (TIMER_FUNC_TYPE)pm_wakeup_timer_fn;
+ return ws->timer.function != pm_wakeup_timer_fn;
}
/*
/*
Initialize the Monitoring Timer.
*/
- init_timer(&Controller->MonitoringTimer);
+ timer_setup(&Controller->MonitoringTimer,
+ DAC960_MonitoringTimerFunction, 0);
Controller->MonitoringTimer.expires =
jiffies + DAC960_MonitoringTimerInterval;
- Controller->MonitoringTimer.data = (unsigned long) Controller;
- Controller->MonitoringTimer.function = DAC960_MonitoringTimerFunction;
add_timer(&Controller->MonitoringTimer);
Controller->ControllerInitialized = true;
return true;
the status of DAC960 Controllers.
*/
-static void DAC960_MonitoringTimerFunction(unsigned long TimerData)
+static void DAC960_MonitoringTimerFunction(struct timer_list *t)
{
- DAC960_Controller_T *Controller = (DAC960_Controller_T *) TimerData;
+ DAC960_Controller_T *Controller = from_timer(Controller, t, MonitoringTimer);
DAC960_Command_T *Command;
unsigned long flags;
static irqreturn_t DAC960_P_InterruptHandler(int, void *);
static void DAC960_V1_QueueMonitoringCommand(DAC960_Command_T *);
static void DAC960_V2_QueueMonitoringCommand(DAC960_Command_T *);
-static void DAC960_MonitoringTimerFunction(unsigned long);
+static void DAC960_MonitoringTimerFunction(struct timer_list *);
static void DAC960_Message(DAC960_MessageLevel_T, unsigned char *,
DAC960_Controller_T *, ...);
static void DAC960_CreateProcEntries(DAC960_Controller_T *);
d->rttavg = RTTAVG_INIT;
d->rttdev = RTTDEV_INIT;
- d->timer.function = (TIMER_FUNC_TYPE)rexmit_timer;
+ d->timer.function = rexmit_timer;
skb = skb_clone(skb, GFP_ATOMIC);
if (skb) {
static void fd_select_side( int side );
static void fd_select_drive( int drive );
static void fd_deselect( void );
-static void fd_motor_off_timer( unsigned long dummy );
-static void check_change( unsigned long dummy );
+static void fd_motor_off_timer(struct timer_list *unused);
+static void check_change(struct timer_list *unused);
static irqreturn_t floppy_irq (int irq, void *dummy);
static void fd_error( void );
static int do_format(int drive, int type, struct atari_format_descr *desc);
static void fd_seek( void );
static void fd_seek_done( int status );
static void fd_rwsec( void );
-static void fd_readtrack_check( unsigned long dummy );
+static void fd_readtrack_check(struct timer_list *unused);
static void fd_rwsec_done( int status );
static void fd_rwsec_done1(int status);
static void fd_writetrack( void );
static void fd_writetrack_done( int status );
-static void fd_times_out( unsigned long dummy );
+static void fd_times_out(struct timer_list *unused);
static void finish_fdc( void );
static void finish_fdc_done( int dummy );
static void setup_req_params( int drive );
* counts the index signals, which arrive only if one drive is selected.
*/
-static void fd_motor_off_timer( unsigned long dummy )
+static void fd_motor_off_timer(struct timer_list *unused)
{
unsigned char status;
* as possible) and keep track of the current state of the write protection.
*/
-static void check_change( unsigned long dummy )
+static void check_change(struct timer_list *unused)
{
static int drive = 0;
}
-static void fd_readtrack_check( unsigned long dummy )
+static void fd_readtrack_check(struct timer_list *unused)
{
unsigned long flags, addr, addr2;
fd_error();
}
-static void fd_times_out( unsigned long dummy )
+static void fd_times_out(struct timer_list *unused)
{
atari_disable_irq( IRQ_MFP_FDC );
if (!FloppyIRQHandler) goto end; /* int occurred after timer was fired, but
{
struct nullb_device *dev = to_nullb_device(item);
- badblocks_exit(&dev->badblocks);
null_free_device_storage(dev, false);
null_free_dev(dev);
}
static void null_free_dev(struct nullb_device *dev)
{
+ if (!dev)
+ return;
+
+ badblocks_exit(&dev->badblocks);
kfree(dev);
}
return 0;
}
-static void creg_cmd_timed_out(unsigned long data)
+static void creg_cmd_timed_out(struct timer_list *t)
{
- struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
+ struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
struct creg_cmd *cmd;
spin_lock(&card->creg_ctrl.lock);
mutex_init(&card->creg_ctrl.reset_lock);
INIT_LIST_HEAD(&card->creg_ctrl.queue);
spin_lock_init(&card->creg_ctrl.lock);
- setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
- (unsigned long) card);
+ timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
return 0;
}
rsxx_complete_dma(ctrl, dma, status);
}
-static void dma_engine_stalled(unsigned long data)
+static void dma_engine_stalled(struct timer_list *t)
{
- struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+ struct rsxx_dma_ctrl *ctrl = from_timer(ctrl, t, activity_timer);
int cnt;
if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
mutex_init(&ctrl->work_lock);
INIT_LIST_HEAD(&ctrl->queue);
- setup_timer(&ctrl->activity_timer, dma_engine_stalled,
- (unsigned long)ctrl);
+ timer_setup(&ctrl->activity_timer, dma_engine_stalled, 0);
ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
if (!ctrl->issue_wq)
blk_mq_start_hw_queues(skdev->queue);
}
-static void skd_timer_tick(ulong arg)
+static void skd_timer_tick(struct timer_list *t)
{
- struct skd_device *skdev = (struct skd_device *)arg;
+ struct skd_device *skdev = from_timer(skdev, t, timer);
unsigned long reqflags;
u32 state;
{
int rc;
- setup_timer(&skdev->timer, skd_timer_tick, (ulong)skdev);
+ timer_setup(&skdev->timer, skd_timer_tick, 0);
rc = mod_timer(&skdev->timer, (jiffies + HZ));
if (rc)
static void vdc_ldc_reset(struct vdc_port *port);
static void vdc_ldc_reset_work(struct work_struct *work);
-static void vdc_ldc_reset_timer(unsigned long _arg);
+static void vdc_ldc_reset_timer(struct timer_list *t);
static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
{
*/
ldc_timeout = mdesc_get_property(hp, vdev->mp, "vdc-timeout", NULL);
port->ldc_timeout = ldc_timeout ? *ldc_timeout : 0;
- setup_timer(&port->ldc_reset_timer, vdc_ldc_reset_timer,
- (unsigned long)port);
+ timer_setup(&port->ldc_reset_timer, vdc_ldc_reset_timer, 0);
INIT_WORK(&port->ldc_reset_work, vdc_ldc_reset_work);
err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
__blk_end_request_all(req, BLK_STS_IOERR);
}
-static void vdc_ldc_reset_timer(unsigned long _arg)
+static void vdc_ldc_reset_timer(struct timer_list *t)
{
- struct vdc_port *port = (struct vdc_port *) _arg;
+ struct vdc_port *port = from_timer(port, t, ldc_reset_timer);
struct vio_driver_state *vio = &port->vio;
unsigned long flags;
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
- fs->timeout.function = (TIMER_FUNC_TYPE)proc;
+ fs->timeout.function = proc;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
}
set_fault_to_battery_status(card);
}
-static void check_all_batteries(unsigned long ptr)
+static void check_all_batteries(struct timer_list *unused)
{
int i;
static void init_battery_timer(void)
{
- init_timer(&battery_timer);
- battery_timer.function = check_all_batteries;
+ timer_setup(&battery_timer, check_all_batteries, 0);
battery_timer.expires = jiffies + (HZ * 60);
add_timer(&battery_timer);
}
spin_unlock_irqrestore(&ace->lock, flags);
}
-static void ace_stall_timer(unsigned long data)
+static void ace_stall_timer(struct timer_list *t)
{
- struct ace_device *ace = (void *)data;
+ struct ace_device *ace = from_timer(ace, t, stall_timer);
unsigned long flags;
dev_warn(ace->dev,
* Initialize the state machine tasklet and stall timer
*/
tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace);
- setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace);
+ timer_setup(&ace->stall_timer, ace_stall_timer, 0);
/*
* Initialize the request queue
#endif /* TRACING */
static DEFINE_MUTEX(dtlk_mutex);
-static void dtlk_timer_tick(unsigned long data);
+static void dtlk_timer_tick(struct timer_list *unused);
static int dtlk_major;
static int dtlk_port_lpc;
return mask;
}
-static void dtlk_timer_tick(unsigned long data)
+static void dtlk_timer_tick(struct timer_list *unused)
{
TRACE_TEXT(" dtlk_timer_tick");
wake_up_interruptible(&dtlk_process_list);
/* Last time scheduled */
static unsigned long long hangcheck_tsc, hangcheck_tsc_margin;
-static void hangcheck_fire(unsigned long);
+static void hangcheck_fire(struct timer_list *);
static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire);
-static void hangcheck_fire(unsigned long data)
+static void hangcheck_fire(struct timer_list *unused)
{
unsigned long long cur_tsc, tsc_diff;
.unlocked_ioctl = bt_bmc_ioctl,
};
-static void poll_timer(unsigned long data)
+static void poll_timer(struct timer_list *t)
{
- struct bt_bmc *bt_bmc = (void *)data;
+ struct bt_bmc *bt_bmc = from_timer(bt_bmc, t, poll_timer);
bt_bmc->poll_timer.expires += msecs_to_jiffies(500);
wake_up(&bt_bmc->queue);
dev_info(dev, "Using IRQ %d\n", bt_bmc->irq);
} else {
dev_info(dev, "No IRQ; using timer\n");
- setup_timer(&bt_bmc->poll_timer, poll_timer,
- (unsigned long)bt_bmc);
+ timer_setup(&bt_bmc->poll_timer, poll_timer, 0);
bt_bmc->poll_timer.expires = jiffies + msecs_to_jiffies(10);
add_timer(&bt_bmc->poll_timer);
}
static atomic_t stop_operation;
-static void ipmi_timeout(unsigned long data)
+static void ipmi_timeout(struct timer_list *unused)
{
ipmi_smi_t intf;
int nt = 0;
#endif /* CONFIG_IPMI_PROC_INTERFACE */
- setup_timer(&ipmi_timer, ipmi_timeout, 0);
+ timer_setup(&ipmi_timer, ipmi_timeout, 0);
mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
-static void smi_timeout(unsigned long data)
+static void smi_timeout(struct timer_list *t)
{
- struct smi_info *smi_info = (struct smi_info *) data;
+ struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
enum si_sm_result smi_result;
unsigned long flags;
unsigned long jiffies_now;
new_smi->intf = intf;
/* Set up the timer that drives the interface. */
- setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
+ timer_setup(&new_smi->si_timer, smi_timeout, 0);
smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
/* Try to claim any interrupts. */
}
}
-static void retry_timeout(unsigned long data)
+static void retry_timeout(struct timer_list *t)
{
- struct ssif_info *ssif_info = (void *) data;
+ struct ssif_info *ssif_info = from_timer(ssif_info, t, retry_timer);
unsigned long oflags, *flags;
bool waiting;
spin_lock_init(&ssif_info->lock);
ssif_info->ssif_state = SSIF_NORMAL;
- setup_timer(&ssif_info->retry_timer, retry_timeout,
- (unsigned long)ssif_info);
+ timer_setup(&ssif_info->retry_timer, retry_timeout, 0);
for (i = 0; i < SSIF_NUM_STATS; i++)
atomic_set(&ssif_info->stats[i], 0);
size_t size = vma->vm_end - vma->vm_start;
phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
+ /* Does it even fit in phys_addr_t? */
+ if (offset >> PAGE_SHIFT != vma->vm_pgoff)
+ return -EINVAL;
+
/* It's illegal to wrap around the end of the physical address space. */
if (offset + (phys_addr_t)size - 1 < offset)
return -EINVAL;
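
The added check catches page offsets whose shifted form does not fit in phys_addr_t: if the shift truncated, shifting back no longer reproduces vm_pgoff. A userspace sketch of the failure it detects, assuming a 32-bit phys_addr_t and 4 KiB pages purely for illustration::

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t phys_addr_t;   /* 32-bit only for this demonstration */
    #define PAGE_SHIFT 12

    int main(void)
    {
            /* bit 20 set; the shift would set bit 32, one past 32 bits */
            uint64_t pgoff = 0x100000ULL;
            phys_addr_t offset = (phys_addr_t)pgoff << PAGE_SHIFT;

            /* the round trip no longer matches, so the mmap is rejected */
            printf("%d\n", (uint64_t)(offset >> PAGE_SHIFT) != pgoff);  /* 1 */
            return 0;
    }
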
#define __NWBUTTON_C /* Tell the header file who we are */
#include "nwbutton.h"
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
static int button_press_count; /* The count of button presses */
/* Times for the end of a sequence */
* any matching registered function callbacks, initiate reboot, etc.).
*/
-static void button_sequence_finished (unsigned long parameters)
+static void button_sequence_finished(struct timer_list *unused)
{
if (IS_ENABLED(CONFIG_NWBUTTON_REBOOT) &&
button_press_count == reboot_count)
/* Function prototypes: */
-static void button_sequence_finished (unsigned long parameters);
+static void button_sequence_finished(struct timer_list *unused);
static irqreturn_t button_handler (int irq, void *dev_id);
int button_init (void);
int button_add_callback (void (*callback) (void), int count);
static DECLARE_WAIT_QUEUE_HEAD(rtc_wait);
#ifdef RTC_IRQ
-static void rtc_dropped_irq(unsigned long data);
+static void rtc_dropped_irq(struct timer_list *unused);
static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq);
#endif
* for something that requires a steady > 1KHz signal anyways.)
*/
-static void rtc_dropped_irq(unsigned long data)
+static void rtc_dropped_irq(struct timer_list *unused)
{
unsigned long freq;
#include "tpm.h"
#include "tpm-dev.h"
-static void user_reader_timeout(unsigned long ptr)
+static void user_reader_timeout(struct timer_list *t)
{
- struct file_priv *priv = (struct file_priv *)ptr;
+ struct file_priv *priv = from_timer(priv, t, user_read_timer);
pr_warn("TPM user space timeout is deprecated (pid=%d)\n",
task_tgid_nr(current));
priv->chip = chip;
atomic_set(&priv->data_pending, 0);
mutex_init(&priv->buffer_mutex);
- setup_timer(&priv->user_read_timer, user_reader_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->user_read_timer, user_reader_timeout, 0);
INIT_WORK(&priv->work, timeout_work);
file->private_data = priv;
return ret;
}
-void timer_of_exit(struct timer_of *to)
+/**
+ * timer_of_cleanup - release timer_of resources
+ * @to: timer_of structure
+ *
+ * Release the resources that have been used in timer_of_init().
+ * This function should be called in the init error path.
+ */
+void __init timer_of_cleanup(struct timer_of *to)
{
if (to->flags & TIMER_OF_IRQ)
timer_irq_exit(&to->of_irq);
extern int __init timer_of_init(struct device_node *np,
struct timer_of *to);
-extern void timer_of_exit(struct timer_of *to);
+extern void __init timer_of_cleanup(struct timer_of *to);
#endif
config LOONGSON2_CPUFREQ
tristate "Loongson2 CPUFreq Driver"
+ depends on LEMOTE_MACH2F
help
This option adds a CPUFreq driver for loongson processors which
support software configurable cpu frequency.
config LOONGSON1_CPUFREQ
tristate "Loongson1 CPUFreq Driver"
+ depends on LOONGSON1_LS1B
help
This option adds a CPUFreq driver for loongson1 processors which
support software configurable cpu frequency.
return 0;
}
device_initcall(mtk_cpufreq_driver_init);
+
+MODULE_DESCRIPTION("MediaTek CPUFreq driver");
+MODULE_AUTHOR("Pi-Cheng Chen <pi-cheng.chen@linaro.org>");
+MODULE_LICENSE("GPL v2");
return dev_dax_huge_fault(vmf, PE_SIZE_PTE);
}
+static int dev_dax_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ struct file *filp = vma->vm_file;
+ struct dev_dax *dev_dax = filp->private_data;
+ struct dax_region *dax_region = dev_dax->region;
+
+ if (!IS_ALIGNED(addr, dax_region->align))
+ return -EINVAL;
+ return 0;
+}
+
static const struct vm_operations_struct dax_vm_ops = {
.fault = dev_dax_fault,
.huge_fault = dev_dax_huge_fault,
+ .split = dev_dax_split,
};
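
The new .split handler lets device-dax refuse VMA splits (from munmap()/mremap() of a sub-range) at addresses that are not aligned to the region's fault granularity, e.g. a 2 MiB PMD-mapped region. IS_ALIGNED() is the usual power-of-two mask test; a sketch with illustrative values::

    /* For power-of-two a, IS_ALIGNED(x, a) reduces to a mask test: */
    #define IS_ALIGNED_DEMO(x, a)  (((x) & ((a) - 1)) == 0)

    /* IS_ALIGNED_DEMO(0x40200000UL, 0x200000UL) -> 1: on a 2 MiB boundary */
    /* IS_ALIGNED_DEMO(0x40201000UL, 0x200000UL) -> 0: the split is denied */
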
static int dax_mmap(struct file *filp, struct vm_area_struct *vma)
return err;
}
-static void dummy_callback(unsigned long ignored) {}
+static void dummy_callback(struct timer_list *unused) {}
static int suspend_cpu(int index, bool broadcast)
{
pr_info("CPU %d entering suspend cycles, states 1 through %d\n",
cpu, drv->state_count - 1);
- setup_timer_on_stack(&wakeup_timer, dummy_callback, 0);
+ timer_setup_on_stack(&wakeup_timer, dummy_callback, 0);
for (i = 0; i < NUM_SUSPEND_CYCLE; ++i) {
int index;
/*
struct amdgpu_queue_mgr *mgr);
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr,
- int hw_ip, int instance, int ring,
+ u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring);
/*
/* sdma */
struct amdgpu_sdma sdma;
- union {
- struct {
- /* uvd */
- struct amdgpu_uvd uvd;
+ /* uvd */
+ struct amdgpu_uvd uvd;
- /* vce */
- struct amdgpu_vce vce;
- };
+ /* vce */
+ struct amdgpu_vce vce;
- /* vcn */
- struct amdgpu_vcn vcn;
- };
+ /* vcn */
+ struct amdgpu_vcn vcn;
/* firmwares */
struct amdgpu_firmware firmware;
{
struct amdgpu_device *adev = get_amdgpu_device(kgd);
struct cik_sdma_rlc_registers *m;
+ unsigned long end_jiffies;
uint32_t sdma_base_addr;
+ uint32_t data;
m = get_sdma_mqd(mqd);
sdma_base_addr = get_sdma_base_addr(m);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
- m->sdma_rlc_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
- m->sdma_rlc_rb_base);
+ end_jiffies = msecs_to_jiffies(2000) + jiffies;
+ while (true) {
+ data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
+ if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
+ break;
+ if (time_after(jiffies, end_jiffies))
+ return -ETIME;
+ usleep_range(500, 1000);
+ }
+ if (m->sdma_engine_id) {
+ data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data);
+ } else {
+ data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL);
+ data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL,
+ RESUME_CTX, 0);
+ WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data);
+ }
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
+ m->sdma_rlc_doorbell);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
+ m->sdma_rlc_virtual_addr);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
m->sdma_rlc_rb_base_hi);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
m->sdma_rlc_rb_rptr_addr_lo);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
m->sdma_rlc_rb_rptr_addr_hi);
-
- WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
- m->sdma_rlc_doorbell);
-
WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
m->sdma_rlc_rb_cntl);
}
WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
- WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);
+ WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
+ RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) |
+ SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);
return 0;
}
return false;
}
- tmp = bios[0x18] | (bios[0x19] << 8);
- if (bios[tmp + 0x14] != 0x0) {
- DRM_INFO("Not an x86 BIOS ROM\n");
- return false;
- }
-
bios_header_start = bios[0x48] | (bios[0x49] << 8);
if (!bios_header_start) {
DRM_INFO("Can't locate bios header\n");
if (candidate->robj == validated)
break;
+ /* We can't move pinned BOs here */
+ if (bo->pin_count)
+ continue;
+
other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
/* Check if this BO is in one of the domains we need space for */
memset(wait, 0, sizeof(*wait));
wait->out.status = (r > 0);
wait->out.first_signaled = first;
- /* set return value 0 to indicate success */
- r = array[first]->error;
+
+ if (first < fence_count && array[first])
+ r = array[first]->error;
+ else
+ r = 0;
err_free_fence_array:
for (i = 0; i < fence_count; i++)
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
pm_pg_lock = (*pos >> 23) & 1;
if (*pos & (1ULL << 62)) {
- se_bank = (*pos >> 24) & 0x3FF;
- sh_bank = (*pos >> 34) & 0x3FF;
- instance_bank = (*pos >> 44) & 0x3FF;
+ se_bank = (*pos & GENMASK_ULL(33, 24)) >> 24;
+ sh_bank = (*pos & GENMASK_ULL(43, 34)) >> 34;
+ instance_bank = (*pos & GENMASK_ULL(53, 44)) >> 44;
if (se_bank == 0x3FF)
se_bank = 0xFFFFFFFF;
return -EINVAL;
/* decode offset */
- offset = (*pos & 0x7F);
- se = ((*pos >> 7) & 0xFF);
- sh = ((*pos >> 15) & 0xFF);
- cu = ((*pos >> 23) & 0xFF);
- wave = ((*pos >> 31) & 0xFF);
- simd = ((*pos >> 37) & 0xFF);
+ offset = (*pos & GENMASK_ULL(6, 0));
+ se = (*pos & GENMASK_ULL(14, 7)) >> 7;
+ sh = (*pos & GENMASK_ULL(22, 15)) >> 15;
+ cu = (*pos & GENMASK_ULL(30, 23)) >> 23;
+ wave = (*pos & GENMASK_ULL(36, 31)) >> 31;
+ simd = (*pos & GENMASK_ULL(44, 37)) >> 37;
/* switch to the specific se/sh/cu */
mutex_lock(&adev->grbm_idx_mutex);
return -EINVAL;
/* decode offset */
- offset = (*pos & 0xFFF); /* in dwords */
- se = ((*pos >> 12) & 0xFF);
- sh = ((*pos >> 20) & 0xFF);
- cu = ((*pos >> 28) & 0xFF);
- wave = ((*pos >> 36) & 0xFF);
- simd = ((*pos >> 44) & 0xFF);
- thread = ((*pos >> 52) & 0xFF);
- bank = ((*pos >> 60) & 1);
+ offset = *pos & GENMASK_ULL(11, 0);
+ se = (*pos & GENMASK_ULL(19, 12)) >> 12;
+ sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
+ cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
+ wave = (*pos & GENMASK_ULL(43, 36)) >> 36;
+ simd = (*pos & GENMASK_ULL(51, 44)) >> 44;
+ thread = (*pos & GENMASK_ULL(59, 52)) >> 52;
+ bank = (*pos & GENMASK_ULL(61, 60)) >> 60;
data = kmalloc_array(1024, sizeof(*data), GFP_KERNEL);
if (!data)
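
These decode rewrites replace shift-and-mask pairs with GENMASK_ULL(h, l), which builds a mask with bits h..l set, making each field's width explicit at the extraction site. The two forms are equivalent; a sketch of one field (the helper name is illustrative)::

    #include <linux/types.h>
    #include <linux/bits.h>

    /* GENMASK_ULL(33, 24) == 0x00000003ff000000ULL: bits 33..24 set */
    static inline u32 decode_se_bank(u64 pos)
    {
            /* same value as (pos >> 24) & 0x3FF */
            return (pos & GENMASK_ULL(33, 24)) >> 24;
    }
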
{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
/* Raven */
- {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU|AMD_EXP_HW_SUPPORT},
+ {0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
{0, 0, 0}
};
*
* Checks for fence activity.
*/
-static void amdgpu_fence_fallback(unsigned long arg)
+static void amdgpu_fence_fallback(struct timer_list *t)
{
- struct amdgpu_ring *ring = (void *)arg;
+ struct amdgpu_ring *ring = from_timer(ring, t,
+ fence_drv.fallback_timer);
amdgpu_fence_process(ring);
}
atomic_set(&ring->fence_drv.last_seq, 0);
ring->fence_drv.initialized = false;
- setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
- (unsigned long)ring);
+ timer_setup(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback, 0);
ring->fence_drv.num_fences_mask = num_hw_submission * 2 - 1;
spin_lock_init(&ring->fence_drv.lock);
flags, NULL, resv, 0, &bo);
if (r) {
if (r != -ERESTARTSYS) {
+ if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+ flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ goto retry;
+ }
+
if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
goto retry;
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm,
bo->tbo.ttm->pages);
if (r)
- goto unlock_mmap_sem;
+ goto release_object;
r = amdgpu_bo_reserve(bo, true);
if (r)
free_pages:
release_pages(bo->tbo.ttm->pages, bo->tbo.ttm->num_pages);
-unlock_mmap_sem:
- up_read(¤t->mm->mmap_sem);
-
release_object:
drm_gem_object_put_unlocked(gobj);
if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
dev_err(&dev->pdev->dev,
- "va_address 0x%lX is in reserved area 0x%X\n",
- (unsigned long)args->va_address,
- AMDGPU_VA_RESERVED_SIZE);
+ "va_address 0x%LX is in reserved area 0x%LX\n",
+ args->va_address, AMDGPU_VA_RESERVED_SIZE);
return -EINVAL;
}
{
struct amdgpu_gtt_mgr *mgr = man->priv;
- spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
struct amdgpu_device *adev = dev_get_drvdata(dev);
umode_t effective_mode = attr->mode;
+ /* no skipping for powerplay */
+ if (adev->powerplay.cgs_device)
+ return effective_mode;
+
/* Skip limit attributes if DPM is not enabled */
if (!adev->pm.dpm_enabled &&
(attr == &sensor_dev_attr_temp1_crit.dev_attr.attr ||
int flags)
{
struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+ struct dma_buf *buf;
if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
return ERR_PTR(-EPERM);
- return drm_gem_prime_export(dev, gobj, flags);
+ buf = drm_gem_prime_export(dev, gobj, flags);
+ if (!IS_ERR(buf))
+ buf->file->f_mapping = dev->anon_inode->i_mapping;
+ return buf;
}
static int amdgpu_identity_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int ring,
+ u32 ring,
struct amdgpu_ring **out_ring)
{
switch (mapper->hw_ip) {
static int amdgpu_lru_map(struct amdgpu_device *adev,
struct amdgpu_queue_mapper *mapper,
- int user_ring, bool lru_pipe_order,
+ u32 user_ring, bool lru_pipe_order,
struct amdgpu_ring **out_ring)
{
int r, i, j;
*/
int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
struct amdgpu_queue_mgr *mgr,
- int hw_ip, int instance, int ring,
+ u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring)
{
int r, ip_num_rings;
#define AMDGPU_MMHUB 1
/* hardcode that limit for now */
-#define AMDGPU_VA_RESERVED_SIZE (8 << 20)
+#define AMDGPU_VA_RESERVED_SIZE (8ULL << 20)
+
/* max vmids dedicated for process */
#define AMDGPU_VM_MAX_RESERVED_VMID 1
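
The ULL suffix also keeps the printk change above honest: (8 << 20) is a plain int, and a conversion such as 0x%LX consumes a 64-bit argument, so the constant must really be 64 bits wide. A minimal userspace analogue::

    #include <stdio.h>

    #define RESERVED_INT (8 << 20)      /* int: 4 bytes on common ABIs */
    #define RESERVED_ULL (8ULL << 20)   /* unsigned long long: 8 bytes */

    int main(void)
    {
            /* only the ULL form matches a 64-bit ("%llx"-style) vararg */
            printf("%zu %zu\n", sizeof(RESERVED_INT), sizeof(RESERVED_ULL));
            return 0;   /* prints: 4 8 */
    }
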
struct amdgpu_vram_mgr *mgr = man->priv;
spin_lock(&mgr->lock);
- if (!drm_mm_clean(&mgr->mm)) {
- spin_unlock(&mgr->lock);
- return -EBUSY;
- }
-
drm_mm_takedown(&mgr->mm);
spin_unlock(&mgr->lock);
kfree(mgr);
{mmPA_SC_RASTER_CONFIG_1, true},
};
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
- u32 se_num, u32 sh_num,
- u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+ bool indexed, u32 se_num,
+ u32 sh_num, u32 reg_offset)
{
- uint32_t val;
+ if (indexed) {
+ uint32_t val;
+ unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+ unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+ switch (reg_offset) {
+ case mmCC_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+ case mmGC_USER_RB_BACKEND_DISABLE:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+ case mmPA_SC_RASTER_CONFIG:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+ case mmPA_SC_RASTER_CONFIG_1:
+ return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+ }
- mutex_lock(&adev->grbm_idx_mutex);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+ mutex_lock(&adev->grbm_idx_mutex);
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
- val = RREG32(reg_offset);
+ val = RREG32(reg_offset);
- if (se_num != 0xffffffff || sh_num != 0xffffffff)
- amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
- mutex_unlock(&adev->grbm_idx_mutex);
- return val;
+ if (se_num != 0xffffffff || sh_num != 0xffffffff)
+ amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+ mutex_unlock(&adev->grbm_idx_mutex);
+ return val;
+ } else {
+ unsigned idx;
+
+ switch (reg_offset) {
+ case mmGB_ADDR_CONFIG:
+ return adev->gfx.config.gb_addr_config;
+ case mmMC_ARB_RAMCFG:
+ return adev->gfx.config.mc_arb_ramcfg;
+ case mmGB_TILE_MODE0:
+ case mmGB_TILE_MODE1:
+ case mmGB_TILE_MODE2:
+ case mmGB_TILE_MODE3:
+ case mmGB_TILE_MODE4:
+ case mmGB_TILE_MODE5:
+ case mmGB_TILE_MODE6:
+ case mmGB_TILE_MODE7:
+ case mmGB_TILE_MODE8:
+ case mmGB_TILE_MODE9:
+ case mmGB_TILE_MODE10:
+ case mmGB_TILE_MODE11:
+ case mmGB_TILE_MODE12:
+ case mmGB_TILE_MODE13:
+ case mmGB_TILE_MODE14:
+ case mmGB_TILE_MODE15:
+ case mmGB_TILE_MODE16:
+ case mmGB_TILE_MODE17:
+ case mmGB_TILE_MODE18:
+ case mmGB_TILE_MODE19:
+ case mmGB_TILE_MODE20:
+ case mmGB_TILE_MODE21:
+ case mmGB_TILE_MODE22:
+ case mmGB_TILE_MODE23:
+ case mmGB_TILE_MODE24:
+ case mmGB_TILE_MODE25:
+ case mmGB_TILE_MODE26:
+ case mmGB_TILE_MODE27:
+ case mmGB_TILE_MODE28:
+ case mmGB_TILE_MODE29:
+ case mmGB_TILE_MODE30:
+ case mmGB_TILE_MODE31:
+ idx = (reg_offset - mmGB_TILE_MODE0);
+ return adev->gfx.config.tile_mode_array[idx];
+ case mmGB_MACROTILE_MODE0:
+ case mmGB_MACROTILE_MODE1:
+ case mmGB_MACROTILE_MODE2:
+ case mmGB_MACROTILE_MODE3:
+ case mmGB_MACROTILE_MODE4:
+ case mmGB_MACROTILE_MODE5:
+ case mmGB_MACROTILE_MODE6:
+ case mmGB_MACROTILE_MODE7:
+ case mmGB_MACROTILE_MODE8:
+ case mmGB_MACROTILE_MODE9:
+ case mmGB_MACROTILE_MODE10:
+ case mmGB_MACROTILE_MODE11:
+ case mmGB_MACROTILE_MODE12:
+ case mmGB_MACROTILE_MODE13:
+ case mmGB_MACROTILE_MODE14:
+ case mmGB_MACROTILE_MODE15:
+ idx = (reg_offset - mmGB_MACROTILE_MODE0);
+ return adev->gfx.config.macrotile_mode_array[idx];
+ default:
+ return RREG32(reg_offset);
+ }
+ }
}
static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
*value = 0;
for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+ bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
if (reg_offset != cik_allowed_read_registers[i].reg_offset)
continue;
- *value = cik_allowed_read_registers[i].grbm_indexed ?
- cik_read_indexed_register(adev, se_num,
- sh_num, reg_offset) :
- RREG32(reg_offset);
+ *value = cik_get_register_value(adev, indexed, se_num, sh_num,
+ reg_offset);
return 0;
}
return -EINVAL;
adev->gfx.config.backend_enable_mask,
num_rb_pipes);
}
+
+ /* cache the values for userspace */
+ for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
+ for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
+ gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
+ adev->gfx.config.rb_config[i][j].rb_backend_disable =
+ RREG32(mmCC_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
+ RREG32(mmGC_USER_RB_BACKEND_DISABLE);
+ adev->gfx.config.rb_config[i][j].raster_config =
+ RREG32(mmPA_SC_RASTER_CONFIG);
+ adev->gfx.config.rb_config[i][j].raster_config_1 =
+ RREG32(mmPA_SC_RASTER_CONFIG_1);
+ }
+ }
+ gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
mutex_unlock(&adev->grbm_idx_mutex);
}
gfx_v7_0_cp_compute_fini(adev);
gfx_v7_0_rlc_fini(adev);
gfx_v7_0_mec_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->gfx.rlc.cp_table_size) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v7_0_free_microcode(adev);
return 0;
gfx_v8_0_mec_fini(adev);
gfx_v8_0_rlc_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if ((adev->asic_type == CHIP_CARRIZO) ||
+ (adev->asic_type == CHIP_STONEY)) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v8_0_free_microcode(adev);
return 0;
SOC15_REG_OFFSET(GC, 0, mmTD_CNTL), 0x01bd9f33, 0x00000800
};
+static const u32 golden_settings_gc_9_x_common[] =
+{
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_INDEX), 0xffffffff, 0x00000000,
+ SOC15_REG_OFFSET(GC, 0, mmGRBM_CAM_DATA), 0xffffffff, 0x2544c382
+};
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
default:
break;
}
+
+ amdgpu_program_register_sequence(adev, golden_settings_gc_9_x_common,
+ (const u32)ARRAY_SIZE(golden_settings_gc_9_x_common));
}
static void gfx_v9_0_scratch_init(struct amdgpu_device *adev)
start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
+static void gfx_v9_0_read_wave_vgprs(struct amdgpu_device *adev, uint32_t simd,
+ uint32_t wave, uint32_t thread,
+ uint32_t start, uint32_t size,
+ uint32_t *dst)
+{
+ wave_read_regs(
+ adev, simd, wave, thread,
+ start + SQIND_WAVE_VGPRS_OFFSET, size, dst);
+}
static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.get_gpu_clock_counter = &gfx_v9_0_get_gpu_clock_counter,
.select_se_sh = &gfx_v9_0_select_se_sh,
.read_wave_data = &gfx_v9_0_read_wave_data,
.read_wave_sgprs = &gfx_v9_0_read_wave_sgprs,
+ .read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
};
static void gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
gfx_v9_0_mec_fini(adev);
gfx_v9_0_ngg_fini(adev);
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
+ &adev->gfx.rlc.clear_state_gpu_addr,
+ (void **)&adev->gfx.rlc.cs_ptr);
+ if (adev->asic_type == CHIP_RAVEN) {
+ amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
+ &adev->gfx.rlc.cp_table_gpu_addr,
+ (void **)&adev->gfx.rlc.cp_table_ptr);
+ }
gfx_v9_0_free_microcode(adev);
return 0;
static int gmc_v9_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 3, 3 };
+ /*
+ * The current engine allocation on gfx9 is:
+ * Engine 0, 1: idle
+ * Engine 2, 3: firmware
+ * Engine 4~13: amdgpu rings (may change as the ring count changes)
+ * Engine 14~15: idle
+ * Engine 16: KFD TLB invalidation
+ * Engine 17: GART flushes
+ */
+ unsigned vm_inv_eng[AMDGPU_MAX_VMHUBS] = { 4, 4 };
unsigned i;
for(i = 0; i < adev->num_rings; ++i) {
ring->funcs->vmhub);
}
- /* Engine 17 is used for GART flushes */
+ /* Engine 16 is used for KFD and 17 for GART flushes */
for(i = 0; i < AMDGPU_MAX_VMHUBS; ++i)
- BUG_ON(vm_inv_eng[i] > 17);
+ BUG_ON(vm_inv_eng[i] > 16);
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
}
static void vcn_v1_0_set_irq_funcs(struct amdgpu_device *adev)
{
- adev->uvd.irq.num_types = adev->vcn.num_enc_rings + 1;
+ adev->vcn.irq.num_types = adev->vcn.num_enc_rings + 1;
adev->vcn.irq.funcs = &vcn_v1_0_irq_funcs;
}
#include <linux/sched.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
+#include <linux/printk.h>
#include "kfd_priv.h"
#define KFD_DRIVER_AUTHOR "AMD Inc. and others"
kfd_process_destroy_wq();
kfd_topology_shutdown();
kfd_chardev_exit();
- dev_info(kfd_device, "Removed module\n");
+ pr_info("amdkfd: Removed module\n");
}
module_init(kfd_module_init);
struct cik_sdma_rlc_registers *m;
m = get_sdma_mqd(mqd);
- m->sdma_rlc_rb_cntl = ffs(q->queue_size / sizeof(unsigned int)) <<
- SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
+ m->sdma_rlc_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
+ << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;
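
The RB_SIZE fix hinges on ffs() being 1-based: for a power-of-two ring size, ffs(n) returns log2(n) + 1, so the old code programmed a ring twice the requested size. A worked check with an illustrative queue size (this assumes RB_SIZE encodes log2 of the size in dwords, as the fix implies)::

    #include <strings.h>   /* POSIX ffs() */
    #include <assert.h>

    int main(void)
    {
            unsigned int queue_size = 1024;         /* bytes */
            unsigned int dwords = queue_size / 4;   /* 256 */

            assert(ffs(dwords) == 9);        /* 1-based index of bit 8 */
            assert(ffs(dwords) - 1 == 8);    /* log2(256), what RB_SIZE wants */
            return 0;
    }
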
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
+ if (dev->dqm->queue_count >=
+ CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+ pr_err("Over-subscription is not allowed for SDMA.\n");
+ retval = -EPERM;
+ goto err_create_queue;
+ }
+
+ retval = create_cp_queue(pqm, dev, &q, properties, f, *qid);
+ if (retval != 0)
+ goto err_create_queue;
+ pqn->q = q;
+ pqn->kq = NULL;
+ retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd,
+ &q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
+ print_queue(q);
+ break;
+
case KFD_QUEUE_TYPE_COMPUTE:
/* check if there is over subscription */
if ((sched_policy == KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
aconnector = to_amdgpu_dm_connector(connector);
- if (aconnector->dc_link->type == dc_connection_mst_branch) {
+ if (aconnector->dc_link->type == dc_connection_mst_branch &&
+ aconnector->mst_mgr.aux) {
DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
aconnector, aconnector->base.base.id);
mutex_lock(&aconnector->hpd_lock);
dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+
+ if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+ aconnector->fake_enable = false;
+
aconnector->dc_sink = NULL;
amdgpu_dm_update_connector_after_detect(aconnector);
mutex_unlock(&aconnector->hpd_lock);
ret = drm_atomic_helper_resume(ddev, adev->dm.cached_state);
- drm_atomic_state_put(adev->dm.cached_state);
adev->dm.cached_state = NULL;
amdgpu_dm_irq_resume_late(adev);
.link = aconnector->dc_link,
.sink_signal = SIGNAL_TYPE_VIRTUAL
};
- struct edid *edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+ struct edid *edid;
if (!aconnector->base.edid_blob_ptr ||
!aconnector->base.edid_blob_ptr->data) {
return;
}
+ edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
aconnector->edid = edid;
aconnector->dc_em_sink = dc_link_add_remote_sink(
update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream);
+ if (!dm_new_crtc_state->stream)
+ continue;
+
status = dc_stream_get_status(dm_new_crtc_state->stream);
WARN_ON(!status);
WARN_ON(!status->plane_count);
- if (!dm_new_crtc_state->stream)
- continue;
-
/*TODO How it works with MPO ?*/
if (!dc_commit_planes_to_stream(
dm->dc,
drm_atomic_helper_commit_hw_done(state);
if (wait_for_vblank)
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ drm_atomic_helper_wait_for_flip_done(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
}
return;
disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
- acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!disconnected_acrtc)
+ return;
- if (!disconnected_acrtc || !acrtc_state->stream)
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
return;
/*
}
}
- if (dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
+ if (enable && dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) &&
dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) {
new_crtc_state->mode_changed = false;
}
} else {
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
+ if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
+ !new_crtc_state->color_mgmt_changed)
continue;
if (!new_crtc_state->enable)
if (signal == signal_type_info_tbl[i].type)
break;
+ if (i == NUM_ELEMENTS(signal_type_info_tbl))
+ goto fail;
+
dm_logger_append(&entry, "[%s][ConnIdx:%d] ",
signal_type_info_tbl[i].name,
link->link_index);
dm_logger_append(&entry, "^\n");
dm_helpers_dc_conn_log(ctx, &entry, event);
+
+fail:
dm_logger_close(&entry);
va_end(args);
struct graphics_object_id *dest_object_id)
{
uint32_t number;
- uint16_t *id;
+ uint16_t *id = NULL;
ATOM_OBJECT *object;
struct bios_parser *bp = BP_FROM_DCB(dcb);
number = get_dest_obj_list(bp, object, &id);
- if (number <= index)
+ if (number <= index || !id)
return BP_RESULT_BADINPUT;
*dest_object_id = object_id_from_bios_object_id(id[index]);
goto failed_alloc;
}
+ link->link_index = dc->link_count;
+ dc->links[dc->link_count] = link;
+ dc->link_count++;
+
link->ctx = dc->ctx;
link->dc = dc;
link->connector_signal = SIGNAL_TYPE_VIRTUAL;
link->link_id.enum_id = ENUM_ID_1;
link->link_enc = kzalloc(sizeof(*link->link_enc), GFP_KERNEL);
+ if (!link->link_enc) {
+ BREAK_TO_DEBUGGER();
+ goto failed_alloc;
+ }
+
+ link->link_status.dpcd_caps = &link->dpcd_caps;
+
enc_init.ctx = dc->ctx;
enc_init.channel = CHANNEL_ID_UNKNOWN;
enc_init.hpd_source = HPD_SOURCEID_UNKNOWN;
enc_init.encoder.id = ENCODER_ID_INTERNAL_VIRTUAL;
enc_init.encoder.enum_id = ENUM_ID_1;
virtual_link_encoder_construct(link->link_enc, &enc_init);
-
- link->link_index = dc->link_count;
- dc->links[dc->link_count] = link;
- dc->link_count++;
}
return true;
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
detect_dp_sink_caps(link);
- /* DP active dongles */
- if (is_dp_active_dongle(link)) {
- link->type = dc_connection_active_dongle;
- if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
- /*
- * active dongle unplug processing for short irq
- */
- link_disconnect_sink(link);
- return;
- }
-
- if (link->dpcd_caps.dongle_type !=
- DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
- *converter_disable_audio = true;
- }
- }
if (is_mst_supported(link)) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
}
}
+
+ if (link->type != dc_connection_mst_branch &&
+ is_dp_active_dongle(link)) {
+ /* DP active dongles */
+ link->type = dc_connection_active_dongle;
+ if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
+ /*
+ * active dongle unplug processing for short irq
+ */
+ link_disconnect_sink(link);
+ return;
+ }
+
+ if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ *converter_disable_audio = true;
+ }
} else {
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
link->link_enc->funcs->disable_output(link->link_enc, signal, link);
}
+bool dp_active_dongle_validate_timing(
+ const struct dc_crtc_timing *timing,
+ const struct dc_dongle_caps *dongle_caps)
+{
+ unsigned int required_pix_clk = timing->pix_clk_khz;
+
+ if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER ||
+ dongle_caps->extendedCapValid == false)
+ return true;
+
+ /* Check Pixel Encoding */
+ switch (timing->pixel_encoding) {
+ case PIXEL_ENCODING_RGB:
+ case PIXEL_ENCODING_YCBCR444:
+ break;
+ case PIXEL_ENCODING_YCBCR422:
+ if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through)
+ return false;
+ break;
+ case PIXEL_ENCODING_YCBCR420:
+ if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through)
+ return false;
+ break;
+ default:
+ /* Invalid Pixel Encoding */
+ return false;
+ }
+
+
+ /* Check Color Depth and Pixel Clock */
+ if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ required_pix_clk /= 2;
+
+ switch (timing->display_color_depth) {
+ case COLOR_DEPTH_666:
+ case COLOR_DEPTH_888:
+ /* 888 and 666 should always be supported */
+ break;
+ case COLOR_DEPTH_101010:
+ if (dongle_caps->dp_hdmi_max_bpc < 10)
+ return false;
+ required_pix_clk = required_pix_clk * 10 / 8;
+ break;
+ case COLOR_DEPTH_121212:
+ if (dongle_caps->dp_hdmi_max_bpc < 12)
+ return false;
+ required_pix_clk = required_pix_clk * 12 / 8;
+ break;
+
+ case COLOR_DEPTH_141414:
+ case COLOR_DEPTH_161616:
+ default:
+ /* These color depths are currently not supported */
+ return false;
+ }
+
+ if (required_pix_clk > dongle_caps->dp_hdmi_max_pixel_clk)
+ return false;
+
+ return true;
+}
+
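
The color-depth branches scale the required pixel clock by bpc/8 before comparing it against the dongle's maximum pixel clock. Worked numbers, with a hypothetical 165 MHz dongle cap that is not taken from the patch::

    #include <stdio.h>

    int main(void)
    {
            unsigned int pix_clk_khz = 148500;              /* 1080p60 */
            unsigned int at_10bpc = pix_clk_khz * 10 / 8;   /* 185625 kHz */

            /* a dongle with dp_hdmi_max_pixel_clk = 165000 rejects the mode */
            printf("%u kHz, over cap: %d\n", at_10bpc, at_10bpc > 165000);
            return 0;
    }
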
enum dc_status dc_link_validate_mode_timing(
const struct dc_stream_state *stream,
struct dc_link *link,
const struct dc_crtc_timing *timing)
{
uint32_t max_pix_clk = stream->sink->dongle_max_pix_clk;
+ struct dc_dongle_caps *dongle_caps = &link->link_status.dpcd_caps->dongle_caps;
/* A hack to avoid failing any modes for EDID override feature on
* topology change such as lower quality cable for DP or different dongle
if (link->remote_sinks[0])
return DC_OK;
+ /* Passive Dongle */
if (0 != max_pix_clk && timing->pix_clk_khz > max_pix_clk)
- return DC_EXCEED_DONGLE_MAX_CLK;
+ return DC_EXCEED_DONGLE_CAP;
+
+ /* Active Dongle */
+ if (!dp_active_dongle_validate_timing(timing, dongle_caps))
+ return DC_EXCEED_DONGLE_CAP;
switch (stream->signal) {
case SIGNAL_TYPE_EDP:
struct dc_link *link,
union hpd_irq_data *hpd_irq_dpcd_data)
{
- uint8_t irq_reg_rx_power_state;
+ uint8_t irq_reg_rx_power_state = 0;
enum dc_status dpcd_result = DC_ERROR_UNEXPECTED;
union lane_status lane_status;
uint32_t lane;
if (link->cur_link_settings.lane_count == 0)
return return_code;
- /*1. Check that we can handle interrupt: Not in FS DOS,
- * Not in "Display Timeout" state, Link is trained.
- */
- dpcd_result = core_link_read_dpcd(link,
- DP_SET_POWER,
- &irq_reg_rx_power_state,
- sizeof(irq_reg_rx_power_state));
+ /*1. Check that Link Status changed, before re-training.*/
- if (dpcd_result != DC_OK) {
- irq_reg_rx_power_state = DP_SET_POWER_D0;
- dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: DPCD read failed to obtain power state.\n",
- __func__);
+ /*parse lane status*/
+ for (lane = 0; lane < link->cur_link_settings.lane_count; lane++) {
+ /* check status of lanes 0,1
+ * changed DpcdAddress_Lane01Status (0x202)
+ */
+ lane_status.raw = get_nibble_at_index(
+ &hpd_irq_dpcd_data->bytes.lane01_status.raw,
+ lane);
+
+ if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
+ !lane_status.bits.CR_DONE_0 ||
+ !lane_status.bits.SYMBOL_LOCKED_0) {
+ /* if channel equalization, clock recovery,
+ * or symbol lock is lost, consider the link
+ * dropped: the DP sink status has changed
+ */
+ sink_status_changed = true;
+ break;
+ }
}
- if (irq_reg_rx_power_state == DP_SET_POWER_D0) {
-
- /*2. Check that Link Status changed, before re-training.*/
-
- /*parse lane status*/
- for (lane = 0;
- lane < link->cur_link_settings.lane_count;
- lane++) {
+ /* Check interlane align.*/
+ if (sink_status_changed ||
+ !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.INTERLANE_ALIGN_DONE) {
- /* check status of lanes 0,1
- * changed DpcdAddress_Lane01Status (0x202)*/
- lane_status.raw = get_nibble_at_index(
- &hpd_irq_dpcd_data->bytes.lane01_status.raw,
- lane);
-
- if (!lane_status.bits.CHANNEL_EQ_DONE_0 ||
- !lane_status.bits.CR_DONE_0 ||
- !lane_status.bits.SYMBOL_LOCKED_0) {
- /* if one of the channel equalization, clock
- * recovery or symbol lock is dropped
- * consider it as (link has been
- * dropped) dp sink status has changed*/
- sink_status_changed = true;
- break;
- }
+ dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
+ "%s: Link Status changed.\n", __func__);
- }
+ return_code = true;
- /* Check interlane align.*/
- if (sink_status_changed ||
- !hpd_irq_dpcd_data->bytes.lane_status_updated.bits.
- INTERLANE_ALIGN_DONE) {
+ /*2. Check that we can handle interrupt: Not in FS DOS,
+ * Not in "Display Timeout" state, Link is trained.
+ */
+ dpcd_result = core_link_read_dpcd(link,
+ DP_SET_POWER,
+ &irq_reg_rx_power_state,
+ sizeof(irq_reg_rx_power_state));
+ if (dpcd_result != DC_OK) {
dm_logger_write(link->ctx->logger, LOG_HW_HPD_IRQ,
- "%s: Link Status changed.\n",
+ "%s: DPCD read failed to obtain power state.\n",
__func__);
-
- return_code = true;
+ } else {
+ if (irq_reg_rx_power_state != DP_SET_POWER_D0)
+ return_code = false;
}
}
(dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER);
}
+static int translate_dpcd_max_bpc(enum dpcd_downstream_port_max_bpc bpc)
+{
+ switch (bpc) {
+ case DOWN_STREAM_MAX_8BPC:
+ return 8;
+ case DOWN_STREAM_MAX_10BPC:
+ return 10;
+ case DOWN_STREAM_MAX_12BPC:
+ return 12;
+ case DOWN_STREAM_MAX_16BPC:
+ return 16;
+ default:
+ break;
+ }
+
+ return -1;
+}
+
static void get_active_converter_info(
uint8_t data, struct dc_link *link)
{
hdmi_caps.bits.YCrCr420_CONVERSION;
link->dpcd_caps.dongle_caps.dp_hdmi_max_bpc =
- hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT;
+ translate_dpcd_max_bpc(
+ hdmi_color_caps.bits.MAX_BITS_PER_COLOR_COMPONENT);
link->dpcd_caps.dongle_caps.extendedCapValid = true;
}
right_view = (plane_state->rotation == ROTATION_ANGLE_270) != sec_split;
if (right_view) {
- data->viewport.width /= 2;
- data->viewport_c.width /= 2;
- data->viewport.x += data->viewport.width;
- data->viewport_c.x += data->viewport_c.width;
+ data->viewport.x += data->viewport.width / 2;
+ data->viewport_c.x += data->viewport_c.width / 2;
/* Ceil offset pipe */
- data->viewport.width += data->viewport.width % 2;
- data->viewport_c.width += data->viewport_c.width % 2;
+ data->viewport.width = (data->viewport.width + 1) / 2;
+ data->viewport_c.width = (data->viewport_c.width + 1) / 2;
} else {
data->viewport.width /= 2;
data->viewport_c.width /= 2;
if (pipe_ctx->top_pipe && pipe_ctx->top_pipe->plane_state ==
pipe_ctx->plane_state) {
if (stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height;
+ pipe_ctx->plane_res.scl_data.recout.y += pipe_ctx->plane_res.scl_data.recout.height / 2;
/* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height += pipe_ctx->plane_res.scl_data.recout.height % 2;
+ pipe_ctx->plane_res.scl_data.recout.height = (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
} else {
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
- pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width;
- pipe_ctx->plane_res.scl_data.recout.width += pipe_ctx->plane_res.scl_data.recout.width % 2;
+ pipe_ctx->plane_res.scl_data.recout.x += pipe_ctx->plane_res.scl_data.recout.width / 2;
+ pipe_ctx->plane_res.scl_data.recout.width = (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
}
} else if (pipe_ctx->bottom_pipe &&
pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state) {
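
Both hunks above fix the same arithmetic: for an odd width split across two pipes, the offset is the floor of the half and the second pipe's width the ceiling; the old code's width += width % 2 adjusted an already-halved (hence even) value and never added the extra pixel. A quick check::

    #include <stdio.h>

    int main(void)
    {
            int width = 1921;              /* odd width split across two pipes */
            int left  = width / 2;         /* floor: 960 */
            int right = (width + 1) / 2;   /* ceil:  961 */

            /* prints: 960 + 961 = 1921, no column lost */
            printf("%d + %d = %d\n", left, right, left + right);
            return 0;
    }
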
pipe_ctx->plane_res.scl_data.h_active = timing->h_addressable + timing->h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable + timing->v_border_top + timing->v_border_bottom;
+
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
if (pipe_ctx->plane_res.dpp != NULL)
res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
-
if (!res) {
/* Try 24 bpp linebuffer */
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_24BPP;
- res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
- pipe_ctx->plane_res.xfm, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+ if (pipe_ctx->plane_res.xfm != NULL)
+ res = pipe_ctx->plane_res.xfm->funcs->transform_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.xfm,
+ &pipe_ctx->plane_res.scl_data,
+ &plane_state->scaling_quality);
- res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
- pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality);
+ if (pipe_ctx->plane_res.dpp != NULL)
+ res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps(
+ pipe_ctx->plane_res.dpp,
+ &pipe_ctx->plane_res.scl_data,
+ &plane_state->scaling_quality);
}
if (res)
head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
- if (!head_pipe)
+ if (!head_pipe) {
ASSERT(0);
+ return NULL;
+ }
if (!head_pipe->plane_state)
return head_pipe;
static struct audio *find_first_free_audio(
struct resource_context *res_ctx,
- const struct resource_pool *pool)
+ const struct resource_pool *pool,
+ enum engine_id id)
{
int i;
for (i = 0; i < pool->audio_count; i++) {
if ((res_ctx->is_audio_acquired[i] == false) && (res_ctx->is_stream_enc_acquired[i] == true)) {
+ /* we have enough audio endpoints, find the matching instance */
+ if (id != i)
+ continue;
+
return pool->audios[i];
}
}
dc_is_audio_capable_signal(pipe_ctx->stream->signal) &&
stream->audio_info.mode_count) {
pipe_ctx->stream_res.audio = find_first_free_audio(
- &context->res_ctx, pool);
+ &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id);
/*
* Audio assigned in order first come first get.
enum dc_status result = DC_ERROR_UNEXPECTED;
int i, j;
+ if (!new_ctx)
+ return DC_ERROR_UNEXPECTED;
+
if (dc->res_pool->funcs->validate_global) {
result = dc->res_pool->funcs->validate_global(dc, new_ctx);
if (result != DC_OK)
return result;
}
- for (i = 0; new_ctx && i < new_ctx->stream_count; i++) {
+ for (i = 0; i < new_ctx->stream_count; i++) {
struct dc_stream_state *stream = new_ctx->streams[i];
for (j = 0; j < dc->res_pool->pipe_count; j++) {
struct input_pixel_processor *ipp = pipe_ctx->plane_res.ipp;
struct mem_input *mi = pipe_ctx->plane_res.mi;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- struct transform *xfm = pipe_ctx->plane_res.xfm;
struct dpp *dpp = pipe_ctx->plane_res.dpp;
struct dc_cursor_position pos_cpy = *position;
struct dc_cursor_mi_param param = {
if (mi != NULL && mi->funcs->set_cursor_position != NULL)
mi->funcs->set_cursor_position(mi, &pos_cpy, ¶m);
- if (hubp != NULL && hubp->funcs->set_cursor_position != NULL)
- hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
+ if (!hubp)
+ continue;
- if (xfm != NULL && xfm->funcs->set_cursor_position != NULL)
- xfm->funcs->set_cursor_position(xfm, &pos_cpy, ¶m, hubp->curs_attr.width);
+ if (hubp->funcs->set_cursor_position != NULL)
+ hubp->funcs->set_cursor_position(hubp, &pos_cpy, ¶m);
if (dpp != NULL && dpp->funcs->set_cursor_position != NULL)
dpp->funcs->set_cursor_position(dpp, &pos_cpy, ¶m, hubp->curs_attr.width);
uint32_t value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
set_reg_field_value(value, 1,
- AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- CLOCK_GATING_DISABLE);
- set_reg_field_value(value, 1,
- AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
- AUDIO_ENABLED);
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ CLOCK_GATING_DISABLE);
+ set_reg_field_value(value, 1,
+ AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL,
+ AUDIO_ENABLED);
AZ_REG_WRITE(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL, value);
value = AZ_REG_READ(AZALIA_F0_CODEC_PIN_CONTROL_HOT_PLUG_CONTROL);
*/
uint32_t max_retries = 50;
+ /* we need to turn on the clock before programming the AFMT block */
+ REG_UPDATE(AFMT_CNTL, AFMT_AUDIO_CLOCK_EN, 1);
+
if (REG(AFMT_VBI_PACKET_CONTROL1)) {
if (packet_index >= 8)
ASSERT(0);
struct dc_link *link = stream->sink->link;
struct dc *dc = pipe_ctx->stream->ctx->dc;
+ if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ if (dc_is_dp_signal(pipe_ctx->stream->signal))
+ pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
+ pipe_ctx->stream_res.stream_enc);
+
+ pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
+ pipe_ctx->stream_res.stream_enc, true);
if (pipe_ctx->stream_res.audio) {
pipe_ctx->stream_res.audio->funcs->az_disable(pipe_ctx->stream_res.audio);
*/
}
- if (dc_is_hdmi_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_hdmi_info_packets(
- pipe_ctx->stream_res.stream_enc);
-
- if (dc_is_dp_signal(pipe_ctx->stream->signal))
- pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets(
- pipe_ctx->stream_res.stream_enc);
-
- pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
- pipe_ctx->stream_res.stream_enc, true);
-
-
/* blank at encoder level */
if (dc_is_dp_signal(pipe_ctx->stream->signal)) {
if (pipe_ctx->stream->sink->link->connector_signal == SIGNAL_TYPE_EDP)
if (pipe_ctx->stream->sink->link->psr_enabled)
return DC_ERROR_UNEXPECTED;
+ /* Nothing to compress */
+ if (!pipe_ctx->plane_state)
+ return DC_ERROR_UNEXPECTED;
+
/* Only for non-linear tiling */
if (pipe_ctx->plane_state->tiling_info.gfx8.array_mode == DC_ARRAY_LINEAR_GENERAL)
return DC_ERROR_UNEXPECTED;
pipe_need_reprogram(pipe_ctx_old, pipe_ctx)) {
struct clock_source *old_clk = pipe_ctx_old->clock_source;
- /* disable already, no need to disable again */
- if (pipe_ctx->stream && !pipe_ctx->stream->dpms_off)
+ /* Disable if the new stream is null. Otherwise, if the
+ * stream is disabled already, there is no need to disable it again.
+ */
+ if (!pipe_ctx->stream || !pipe_ctx->stream->dpms_off)
core_link_disable_stream(pipe_ctx_old, FREE_ACQUIRED_RESOURCE);
pipe_ctx_old->stream_res.tg->funcs->set_blank(pipe_ctx_old->stream_res.tg, true);
struct dce110_opp *dce110_oppv = kzalloc(sizeof(*dce110_oppv),
GFP_KERNEL);
- if ((dce110_tgv == NULL) ||
- (dce110_xfmv == NULL) ||
- (dce110_miv == NULL) ||
- (dce110_oppv == NULL))
- return false;
+ if (!dce110_tgv || !dce110_xfmv || !dce110_miv || !dce110_oppv) {
+ kfree(dce110_tgv);
+ kfree(dce110_xfmv);
+ kfree(dce110_miv);
+ kfree(dce110_oppv);
+ return false;
+ }
dce110_opp_v_construct(dce110_oppv, ctx);
enum signal_type signal)
{
uint32_t h_blank;
- uint32_t h_back_porch;
- uint32_t hsync_offset = timing->h_border_right +
- timing->h_front_porch;
- uint32_t h_sync_start = timing->h_addressable + hsync_offset;
+ uint32_t h_back_porch, hsync_offset, h_sync_start;
struct dce110_timing_generator *tg110 = DCE110TG_FROM_TG(tg);
if (!timing)
return false;
+ hsync_offset = timing->h_border_right + timing->h_front_porch;
+ h_sync_start = timing->h_addressable + hsync_offset;
+
/* Currently we don't support 3D, so block all 3D timings */
if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE)
return false;
struct pipe_ctx *head_pipe = resource_get_head_pipe_for_stream(res_ctx, stream);
struct pipe_ctx *idle_pipe = find_idle_secondary_pipe(res_ctx, pool);
- if (!head_pipe)
+ if (!head_pipe) {
ASSERT(0);
+ return NULL;
+ }
if (!idle_pipe)
- return false;
+ return NULL;
idle_pipe->stream = head_pipe->stream;
idle_pipe->stream_res.tg = head_pipe->stream_res.tg;
timing->timing_3d_format != TIMING_3D_FORMAT_INBAND_FA)
return false;
- if (timing->timing_3d_format != TIMING_3D_FORMAT_NONE &&
- tg->ctx->dc->debug.disable_stereo_support)
- return false;
/* Temporarily blocking interlacing mode until it's supported */
if (timing->flags.INTERLACE == 1)
return false;
DC_FAIL_DETACH_SURFACES = 8,
DC_FAIL_SURFACE_VALIDATE = 9,
DC_NO_DP_LINK_BANDWIDTH = 10,
- DC_EXCEED_DONGLE_MAX_CLK = 11,
+ DC_EXCEED_DONGLE_CAP = 11,
DC_SURFACE_PIXEL_FORMAT_UNSUPPORTED = 12,
DC_FAIL_BANDWIDTH_VALIDATE = 13, /* BW and Watermark validation */
DC_FAIL_SCALING = 14,
struct transform *xfm_base,
const struct dc_cursor_attributes *attr);
- void (*set_cursor_position)(
- struct transform *xfm_base,
- const struct dc_cursor_position *pos,
- const struct dc_cursor_mi_param *param,
- uint32_t width
- );
-
};
const uint16_t *get_filter_2tap_16p(void);
if (vddci_id_buf[i] == virtual_voltage_id) {
for (j = 0; j < profile->ucLeakageBinNum; j++) {
if (efuse_voltage_id <= leakage_bin[j]) {
- *vddci = vddci_buf[j * profile->ucElbVDDC_Num + i];
+ *vddci = vddci_buf[j * profile->ucElbVDDCI_Num + i];
break;
}
}
const ATOM_Tonga_POWERPLAYTABLE *powerplay_table)
{
hwmgr->platform_descriptor.overdriveLimit.engineClock =
- le16_to_cpu(powerplay_table->ulMaxODEngineClock);
+ le32_to_cpu(powerplay_table->ulMaxODEngineClock);
hwmgr->platform_descriptor.overdriveLimit.memoryClock =
- le16_to_cpu(powerplay_table->ulMaxODMemoryClock);
+ le32_to_cpu(powerplay_table->ulMaxODMemoryClock);
hwmgr->platform_descriptor.minOverdriveVDDC = 0;
hwmgr->platform_descriptor.maxOverdriveVDDC = 0;
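
The overdrive-limit fields are 32-bit little-endian (hence the ul prefix), so reading them with le16_to_cpu() silently kept only the low half. An illustrative decode (the value 600000 is made up)::

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            /* 600000 == 0x000927C0, stored little-endian */
            uint8_t raw[4] = { 0xC0, 0x27, 0x09, 0x00 };

            uint16_t as16 = raw[0] | raw[1] << 8;           /* 0x27C0 = 10176 */
            uint32_t as32 = as16 | (uint32_t)raw[2] << 16 |
                            (uint32_t)raw[3] << 24;         /* 600000 */

            printf("le16: %u, le32: %u\n", as16, as32);
            return 0;
    }
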
"Trying to Unfreeze MCLK DPM when DPM is disabled",
);
PP_ASSERT_WITH_CODE(0 == smum_send_msg_to_smc(hwmgr,
- PPSMC_MSG_SCLKDPM_UnfreezeLevel),
+ PPSMC_MSG_MCLKDPM_UnfreezeLevel),
"Failed to unfreeze MCLK DPM during UnFreezeSclkMclkDPM Function!",
return -EINVAL);
}
uint32_t config_telemetry = 0;
struct pp_atomfwctrl_voltage_table vol_table;
struct cgs_system_info sys_info = {0};
+ uint32_t reg;
data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
if (data == NULL)
advanceFanControlParameters.usFanPWMMinLimit *
hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
+ reg = soc15_get_register_offset(DF_HWID, 0,
+ mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
+ mmDF_CS_AON0_DramBaseAddress0);
+ data->mem_channels = (cgs_read_register(hwmgr->device, reg) &
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
+ DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
+ PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
+ "Mem Channel Index Exceeded maximum!",
+ return -EINVAL);
+
return result;
}
struct vega10_single_dpm_table *dpm_table =
&(data->dpm_table.mem_table);
int result = 0;
- uint32_t i, j, reg, mem_channels;
+ uint32_t i, j;
for (i = 0; i < dpm_table->count; i++) {
result = vega10_populate_single_memory_level(hwmgr,
i++;
}
- reg = soc15_get_register_offset(DF_HWID, 0,
- mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
- mmDF_CS_AON0_DramBaseAddress0);
- mem_channels = (cgs_read_register(hwmgr->device, reg) &
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
- DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
- PP_ASSERT_WITH_CODE(mem_channels < ARRAY_SIZE(channel_number),
- "Mem Channel Index Exceeded maximum!",
- return -1);
-
- pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
+ pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
pp_table->MemoryChannelWidth =
- cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
- channel_number[mem_channels]);
+ (uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
+ channel_number[data->mem_channels]);
pp_table->LowestUclkReservedForUlv =
(uint8_t)(data->lowest_uclk_reserved_for_ulv);
uint32_t config_telemetry;
uint32_t smu_version;
uint32_t acg_loop_state;
+ uint32_t mem_channels;
};
#define VEGA10_DPM2_NEAR_TDP_DEC 10
formats, ARRAY_SIZE(formats),
NULL,
DRM_PLANE_TYPE_PRIMARY, NULL);
- if (ret) {
+ if (ret)
return ERR_PTR(ret);
- }
drm_plane_helper_add(plane, &hdlcd_plane_helper_funcs);
hdlcd->plane = plane;
#include <linux/spinlock.h>
#include <linux/clk.h>
#include <linux/component.h>
+#include <linux/console.h>
#include <linux/list.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
err_free:
drm_mode_config_cleanup(drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
pm_runtime_disable(drm->dev);
of_reserved_mem_device_release(drm->dev);
drm_mode_config_cleanup(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
}
return 0;
drm_kms_helper_poll_disable(drm);
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
hdlcd->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(hdlcd->state)) {
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
drm_kms_helper_poll_enable(drm);
return PTR_ERR(hdlcd->state);
}
return 0;
drm_atomic_helper_resume(drm, hdlcd->state);
+ drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
drm_kms_helper_poll_enable(drm);
- pm_runtime_set_active(dev);
return 0;
}
/* We rely on firmware to set mclk to a sensible level. */
clk_set_rate(hwdev->pxlclk, crtc->state->adjusted_mode.crtc_clock * 1000);
- hwdev->modeset(hwdev, &vm);
- hwdev->leave_config_mode(hwdev);
+ hwdev->hw->modeset(hwdev, &vm);
+ hwdev->hw->leave_config_mode(hwdev);
drm_crtc_vblank_on(crtc);
}
struct malidp_hw_device *hwdev = malidp->dev;
int err;
+ /* always disable planes on the CRTC that is being turned off */
+ drm_atomic_helper_disable_planes_on_crtc(old_state, false);
+
drm_crtc_vblank_off(crtc);
- hwdev->enter_config_mode(hwdev);
+ hwdev->hw->enter_config_mode(hwdev);
+
clk_disable_unprepare(hwdev->pxlclk);
err = pm_runtime_put(crtc->dev->dev);
mclk_calc:
drm_display_mode_to_videomode(&state->adjusted_mode, &vm);
- ret = hwdev->se_calc_mclk(hwdev, s, &vm);
+ ret = hwdev->hw->se_calc_mclk(hwdev, s, &vm);
if (ret < 0)
return -EINVAL;
return 0;
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.vsync_irq);
+ hwdev->hw->map.de_irq_map.vsync_irq);
return 0;
}
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.vsync_irq);
+ hwdev->hw->map.de_irq_map.vsync_irq);
}
static const struct drm_crtc_funcs malidp_crtc_funcs = {
* directly.
*/
malidp_hw_write(hwdev, gamma_write_mask,
- hwdev->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
+ hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
malidp_hw_write(hwdev, data[i],
- hwdev->map.coeffs_base +
+ hwdev->hw->map.coeffs_base +
MALIDP_COEF_TABLE_DATA);
}
for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
malidp_hw_write(hwdev,
mc->coloradj_coeffs[i],
- hwdev->map.coeffs_base +
+ hwdev->hw->map.coeffs_base +
MALIDP_COLOR_ADJ_COEF + 4 * i);
malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
struct malidp_hw_device *hwdev = malidp->dev;
struct malidp_se_config *s = &cs->scaler_config;
struct malidp_se_config *old_s = &old_cs->scaler_config;
- u32 se_control = hwdev->map.se_base +
- ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ u32 se_control = hwdev->hw->map.se_base +
+ ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
0x10 : 0xC);
u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
return;
}
- hwdev->se_set_scaling_coeffs(hwdev, s, old_s);
+ hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
val = malidp_hw_read(hwdev, se_control);
val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;
int ret;
atomic_set(&malidp->config_valid, 0);
- hwdev->set_config_valid(hwdev);
+ hwdev->hw->set_config_valid(hwdev);
/* don't wait for config_valid flag if we are in config mode */
- if (hwdev->in_config_mode(hwdev))
+ if (hwdev->hw->in_config_mode(hwdev))
return 0;
ret = wait_event_interruptible_timeout(malidp->wq,
struct malidp_hw_device *hwdev = malidp->dev;
/* we can only suspend if the hardware is in config mode */
- WARN_ON(!hwdev->in_config_mode(hwdev));
+ WARN_ON(!hwdev->hw->in_config_mode(hwdev));
hwdev->pm_suspended = true;
clk_disable_unprepare(hwdev->mclk);
if (!hwdev)
return -ENOMEM;
- /*
- * copy the associated data from malidp_drm_of_match to avoid
- * having to keep a reference to the OF node after binding
- */
- memcpy(hwdev, of_device_get_match_data(dev), sizeof(*hwdev));
+ hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
malidp->dev = hwdev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
goto query_hw_fail;
}
- ret = hwdev->query_hw(hwdev);
+ ret = hwdev->hw->query_hw(hwdev);
if (ret) {
DRM_ERROR("Invalid HW configuration\n");
goto query_hw_fail;
}
- version = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_DE_CORE_ID);
+ version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
(version >> 12) & 0xf, (version >> 8) & 0xf);
for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
out_depth = (out_depth << 8) | (output_width[i] & 0xf);
- malidp_hw_write(hwdev, out_depth, hwdev->map.out_depth_base);
+ malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
atomic_set(&malidp->config_valid, 0);
init_waitqueue_head(&malidp->wq);
malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
alloc_fail:
of_reserved_mem_device_release(dev);
malidp_runtime_pm_suspend(dev);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
of_reserved_mem_device_release(dev);
}
malidp_hw_setbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
break;
/*
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP500_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP500_DC_CONFIG_REQ, MALIDP500_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == 0)
break;
usleep_range(100, 1000);
{
u32 status;
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP500_DC_CONFIG_REQ) == MALIDP500_DC_CONFIG_REQ)
return true;
malidp_hw_setbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
break;
/*
malidp_hw_clearbits(hwdev, MALIDP_CFG_VALID, MALIDP550_CONFIG_VALID);
malidp_hw_clearbits(hwdev, MALIDP550_DC_CONFIG_REQ, MALIDP550_DC_CONTROL);
while (count) {
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == 0)
break;
usleep_range(100, 1000);
{
u32 status;
- status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
+ status = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_REG_STATUS);
if ((status & MALIDP550_DC_CONFIG_REQ) == MALIDP550_DC_CONFIG_REQ)
return true;
return 0;
}
-const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES] = {
+const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
[MALIDP_500] = {
.map = {
.coeffs_base = MALIDP500_COEFFS_BASE,
{
u32 base = malidp_get_block_base(hwdev, block);
- if (hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
+ if (hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ)
malidp_hw_write(hwdev, irq, base + MALIDP_REG_CLEARIRQ);
else
malidp_hw_write(hwdev, irq, base + MALIDP_REG_STATUS);
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev;
+ struct malidp_hw *hw;
const struct malidp_irq_map *de;
u32 status, mask, dc_status;
irqreturn_t ret = IRQ_NONE;
hwdev = malidp->dev;
- de = &hwdev->map.de_irq_map;
+ hw = hwdev->hw;
+ de = &hw->map.de_irq_map;
/*
* if we are suspended it is likely that we were invoked because
return IRQ_NONE;
/* first handle the config valid IRQ */
- dc_status = malidp_hw_read(hwdev, hwdev->map.dc_base + MALIDP_REG_STATUS);
- if (dc_status & hwdev->map.dc_irq_map.vsync_irq) {
+ dc_status = malidp_hw_read(hwdev, hw->map.dc_base + MALIDP_REG_STATUS);
+ if (dc_status & hw->map.dc_irq_map.vsync_irq) {
/* we have a page flip event */
atomic_set(&malidp->config_valid, 1);
malidp_hw_clear_irq(hwdev, MALIDP_DC_BLOCK, dc_status);
/* first enable the DC block IRQs */
malidp_hw_enable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->map.dc_irq_map.irq_mask);
+ hwdev->hw->map.dc_irq_map.irq_mask);
/* now enable the DE block IRQs */
malidp_hw_enable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.irq_mask);
+ hwdev->hw->map.de_irq_map.irq_mask);
return 0;
}
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_DE_BLOCK,
- hwdev->map.de_irq_map.irq_mask);
+ hwdev->hw->map.de_irq_map.irq_mask);
malidp_hw_disable_irq(hwdev, MALIDP_DC_BLOCK,
- hwdev->map.dc_irq_map.irq_mask);
+ hwdev->hw->map.dc_irq_map.irq_mask);
}
static irqreturn_t malidp_se_irq(int irq, void *arg)
struct drm_device *drm = arg;
struct malidp_drm *malidp = drm->dev_private;
struct malidp_hw_device *hwdev = malidp->dev;
+ struct malidp_hw *hw = hwdev->hw;
+ const struct malidp_irq_map *se = &hw->map.se_irq_map;
u32 status, mask;
/*
if (hwdev->pm_suspended)
return IRQ_NONE;
- status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
- if (!(status & hwdev->map.se_irq_map.irq_mask))
+ status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
+ if (!(status & se->irq_mask))
return IRQ_NONE;
- mask = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_MASKIRQ);
- status = malidp_hw_read(hwdev, hwdev->map.se_base + MALIDP_REG_STATUS);
+ mask = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_MASKIRQ);
+ status = malidp_hw_read(hwdev, hw->map.se_base + MALIDP_REG_STATUS);
status &= mask;
/* ToDo: status decoding and firing up of VSYNC and page flip events */
}
malidp_hw_enable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->map.se_irq_map.irq_mask);
+ hwdev->hw->map.se_irq_map.irq_mask);
return 0;
}
struct malidp_hw_device *hwdev = malidp->dev;
malidp_hw_disable_irq(hwdev, MALIDP_SE_BLOCK,
- hwdev->map.se_irq_map.irq_mask);
+ hwdev->hw->map.se_irq_map.irq_mask);
}
/* Unlike DP550/650, DP500 has 3 stride registers in its video layer. */
#define MALIDP_DEVICE_LV_HAS_3_STRIDES BIT(0)
-struct malidp_hw_device {
- const struct malidp_hw_regmap map;
- void __iomem *regs;
+struct malidp_hw_device;
- /* APB clock */
- struct clk *pclk;
- /* AXI clock */
- struct clk *aclk;
- /* main clock for display core */
- struct clk *mclk;
- /* pixel clock for display core */
- struct clk *pxlclk;
+/*
+ * Static structure containing hardware specific data and pointers to
+ * functions that behave differently between various versions of the IP.
+ */
+struct malidp_hw {
+ const struct malidp_hw_regmap map;
/*
* Validate the driver instance against the hardware bits
struct videomode *vm);
u8 features;
-
- u8 min_line_size;
- u16 max_line_size;
-
- /* track the device PM state */
- bool pm_suspended;
-
- /* size of memory used for rotating layers, up to two banks available */
- u32 rotation_memory[2];
};
/* Supported variants of the hardware */
MALIDP_MAX_DEVICES
};
-extern const struct malidp_hw_device malidp_device[MALIDP_MAX_DEVICES];
+extern const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES];
+
+/*
+ * Structure used by the driver during runtime operation.
+ */
+struct malidp_hw_device {
+ struct malidp_hw *hw;
+ void __iomem *regs;
+
+ /* APB clock */
+ struct clk *pclk;
+ /* AXI clock */
+ struct clk *aclk;
+ /* main clock for display core */
+ struct clk *mclk;
+ /* pixel clock for display core */
+ struct clk *pxlclk;
+
+ u8 min_line_size;
+ u16 max_line_size;
+
+ /* track the device PM state */
+ bool pm_suspended;
+
+ /* size of memory used for rotating layers, up to two banks available */
+ u32 rotation_memory[2];
+};
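
The restructuring above is the classic const-descriptor/runtime-state split: everything fixed per IP version stays in the shared, read-only struct malidp_hw, while struct malidp_hw_device keeps only per-instance state plus a pointer to its descriptor, so probe no longer has to memcpy() the match data. A minimal sketch of the pattern with hypothetical names:

    #include <linux/of_device.h>

    struct demo_dev;

    struct demo_hw {                  /* per-IP description, const, shared */
            unsigned int max_width;
            int (*query_hw)(struct demo_dev *dev);
    };

    struct demo_dev {                 /* per-instance runtime state */
            const struct demo_hw *hw;
            void __iomem *regs;
    };

    static int demo_probe_hw(struct demo_dev *dev, struct device *parent)
    {
            /* point at the matched description instead of memcpy()ing it */
            dev->hw = of_device_get_match_data(parent);
            if (!dev->hw)
                    return -ENODEV;

            return dev->hw->query_hw(dev);
    }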
static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg)
{
{
switch (block) {
case MALIDP_SE_BLOCK:
- return hwdev->map.se_base;
+ return hwdev->hw->map.se_base;
case MALIDP_DC_BLOCK:
- return hwdev->map.dc_base;
+ return hwdev->hw->map.dc_base;
}
return 0;
static inline bool malidp_hw_pitch_valid(struct malidp_hw_device *hwdev,
unsigned int pitch)
{
- return !(pitch & (hwdev->map.bus_align_bytes - 1));
+ return !(pitch & (hwdev->hw->map.bus_align_bytes - 1));
}
/* U16.16 */
};
u32 val = MALIDP_SE_SET_ENH_LIMIT_LOW(MALIDP_SE_ENH_LOW_LEVEL) |
MALIDP_SE_SET_ENH_LIMIT_HIGH(MALIDP_SE_ENH_HIGH_LEVEL);
- u32 image_enh = hwdev->map.se_base +
- ((hwdev->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
+ u32 image_enh = hwdev->hw->map.se_base +
+ ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
0x10 : 0xC) + MALIDP_SE_IMAGE_ENH;
u32 enh_coeffs = image_enh + MALIDP_SE_ENH_COEFF0;
int i;
struct malidp_plane *mp = to_malidp_plane(plane);
if (mp->base.fb)
- drm_framebuffer_unreference(mp->base.fb);
+ drm_framebuffer_put(mp->base.fb);
drm_plane_helper_disable(plane);
drm_plane_cleanup(plane);
fb = state->fb;
- ms->format = malidp_hw_get_format_id(&mp->hwdev->map, mp->layer->id,
- fb->format->format);
+ ms->format = malidp_hw_get_format_id(&mp->hwdev->hw->map,
+ mp->layer->id,
+ fb->format->format);
if (ms->format == MALIDP_INVALID_FORMAT_ID)
return -EINVAL;
* third plane stride register.
*/
if (ms->n_planes == 3 &&
- !(mp->hwdev->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
+ !(mp->hwdev->hw->features & MALIDP_DEVICE_LV_HAS_3_STRIDES) &&
(state->fb->pitches[1] != state->fb->pitches[2]))
return -EINVAL;
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
- val = mp->hwdev->rotmem_required(mp->hwdev, state->crtc_h,
- state->crtc_w,
- fb->format->format);
+ val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
+ state->crtc_w,
+ fb->format->format);
if (val < 0)
return val;
return;
if (num_planes == 3)
- num_strides = (mp->hwdev->features &
+ num_strides = (mp->hwdev->hw->features &
MALIDP_DEVICE_LV_HAS_3_STRIDES) ? 3 : 2;
for (i = 0; i < num_strides; ++i)
struct drm_plane_state *old_state)
{
struct malidp_plane *mp;
- const struct malidp_hw_regmap *map;
struct malidp_plane_state *ms = to_malidp_plane_state(plane->state);
u32 src_w, src_h, dest_w, dest_h, val;
int i;
mp = to_malidp_plane(plane);
- map = &mp->hwdev->map;
/* convert src values from Q16 fixed point to integer */
src_w = plane->state->src_w >> 16;
int malidp_de_planes_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm->dev_private;
- const struct malidp_hw_regmap *map = &malidp->dev->map;
+ const struct malidp_hw_regmap *map = &malidp->dev->hw->map;
struct malidp_plane *plane = NULL;
enum drm_plane_type plane_type;
unsigned long crtcs = 1 << drm->mode_config.num_crtc;
};
#ifdef CONFIG_DRM_I2C_ADV7511_CEC
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
- unsigned int offset);
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511);
void adv7511_cec_irq_process(struct adv7511 *adv7511, unsigned int irq1);
+#else
+static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
+{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
+
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+ return 0;
+}
#endif
#ifdef CONFIG_DRM_I2C_ADV7533
return 0;
}
-int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511,
- unsigned int offset)
+int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
{
+ unsigned int offset = adv7511->type == ADV7533 ?
+ ADV7533_REG_CEC_OFFSET : 0;
int ret = adv7511_cec_parse_dt(dev, adv7511);
if (ret)
- return ret;
+ goto err_cec_parse_dt;
adv7511->cec_adap = cec_allocate_adapter(&adv7511_cec_adap_ops,
adv7511, dev_name(dev), CEC_CAP_DEFAULTS, ADV7511_MAX_ADDRS);
- if (IS_ERR(adv7511->cec_adap))
- return PTR_ERR(adv7511->cec_adap);
+ if (IS_ERR(adv7511->cec_adap)) {
+ ret = PTR_ERR(adv7511->cec_adap);
+ goto err_cec_alloc;
+ }
regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset, 0);
/* cec soft reset */
((adv7511->cec_clk_freq / 750000) - 1) << 2);
ret = cec_register_adapter(adv7511->cec_adap, dev);
- if (ret) {
- cec_delete_adapter(adv7511->cec_adap);
- adv7511->cec_adap = NULL;
- }
- return ret;
+ if (ret)
+ goto err_cec_register;
+ return 0;
+
+err_cec_register:
+ cec_delete_adapter(adv7511->cec_adap);
+ adv7511->cec_adap = NULL;
+err_cec_alloc:
+ dev_info(dev, "Initializing CEC failed with error %d, disabling CEC\n",
+ ret);
+err_cec_parse_dt:
+ regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
+ ADV7511_CEC_CTRL_POWER_DOWN);
+ return ret == -EPROBE_DEFER ? ret : 0;
}
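
The reworked error paths give CEC a "degrade, don't fail" policy: on any error other than -EPROBE_DEFER the CEC block is powered down and 0 is returned so HDMI output still probes, while probe deferral is propagated so the driver can retry once its dependency shows up. A sketch of the policy with a hypothetical setup helper:

    #include <linux/device.h>

    /* demo_feature_setup() is a hypothetical stand-in for the CEC init. */
    static int demo_optional_feature_init(struct device *dev)
    {
            int ret = demo_feature_setup(dev);

            if (!ret)
                    return 0;

            if (ret == -EPROBE_DEFER)
                    return ret;     /* dependency not ready: retry probe */

            dev_info(dev, "feature unavailable (%d), continuing without it\n",
                     ret);
            return 0;               /* non-fatal: keep the device working */
    }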
struct device *dev = &i2c->dev;
unsigned int main_i2c_addr = i2c->addr << 1;
unsigned int edid_i2c_addr = main_i2c_addr + 4;
- unsigned int offset;
unsigned int val;
int ret;
if (adv7511->type == ADV7511)
adv7511_set_link_config(adv7511, &link_config);
+ ret = adv7511_cec_init(dev, adv7511);
+ if (ret)
+ goto err_unregister_cec;
+
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
drm_bridge_add(&adv7511->bridge);
adv7511_audio_init(dev, adv7511);
-
- offset = adv7511->type == ADV7533 ? ADV7533_REG_CEC_OFFSET : 0;
-
-#ifdef CONFIG_DRM_I2C_ADV7511_CEC
- ret = adv7511_cec_init(dev, adv7511, offset);
- if (ret)
- goto err_unregister_cec;
-#else
- regmap_write(adv7511->regmap, ADV7511_REG_CEC_CTRL + offset,
- ADV7511_CEC_CTRL_POWER_DOWN);
-#endif
-
return 0;
err_unregister_cec:
#include <linux/of_graph.h>
+struct lvds_encoder {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+};
+
+static int lvds_encoder_attach(struct drm_bridge *bridge)
+{
+ struct lvds_encoder *lvds_encoder = container_of(bridge,
+ struct lvds_encoder,
+ bridge);
+
+ return drm_bridge_attach(bridge->encoder, lvds_encoder->panel_bridge,
+ bridge);
+}
+
+static struct drm_bridge_funcs funcs = {
+ .attach = lvds_encoder_attach,
+};
+
static int lvds_encoder_probe(struct platform_device *pdev)
{
struct device_node *port;
struct device_node *endpoint;
struct device_node *panel_node;
struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct lvds_encoder *lvds_encoder;
+
+ lvds_encoder = devm_kzalloc(&pdev->dev, sizeof(*lvds_encoder),
+ GFP_KERNEL);
+ if (!lvds_encoder)
+ return -ENOMEM;
/* Locate the panel DT node. */
port = of_graph_get_port_by_id(pdev->dev.of_node, 1);
return -EPROBE_DEFER;
}
- bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
- if (IS_ERR(bridge))
- return PTR_ERR(bridge);
+ lvds_encoder->panel_bridge =
+ devm_drm_panel_bridge_add(&pdev->dev,
+ panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(lvds_encoder->panel_bridge))
+ return PTR_ERR(lvds_encoder->panel_bridge);
+
+ /* The panel_bridge bridge is attached to the panel's of_node,
+ * but we need a bridge attached to our of_node for our user
+ * to look up.
+ */
+ lvds_encoder->bridge.of_node = pdev->dev.of_node;
+ lvds_encoder->bridge.funcs = &funcs;
+ drm_bridge_add(&lvds_encoder->bridge);
- platform_set_drvdata(pdev, bridge);
+ platform_set_drvdata(pdev, lvds_encoder);
return 0;
}
static int lvds_encoder_remove(struct platform_device *pdev)
{
- struct drm_bridge *bridge = platform_get_drvdata(pdev);
+ struct lvds_encoder *lvds_encoder = platform_get_drvdata(pdev);
- drm_bridge_remove(bridge);
+ drm_bridge_remove(&lvds_encoder->bridge);
return 0;
}
struct device *dev;
struct clk *isfr_clk;
struct clk *iahb_clk;
+ struct clk *cec_clk;
struct dw_hdmi_i2c *i2c;
struct hdmi_data_info hdmi_data;
goto err_isfr;
}
+ hdmi->cec_clk = devm_clk_get(hdmi->dev, "cec");
+ if (PTR_ERR(hdmi->cec_clk) == -ENOENT) {
+ hdmi->cec_clk = NULL;
+ } else if (IS_ERR(hdmi->cec_clk)) {
+ ret = PTR_ERR(hdmi->cec_clk);
+ if (ret != -EPROBE_DEFER)
+ dev_err(hdmi->dev, "Cannot get HDMI cec clock: %d\n",
+ ret);
+
+ hdmi->cec_clk = NULL;
+ goto err_iahb;
+ } else {
+ ret = clk_prepare_enable(hdmi->cec_clk);
+ if (ret) {
+ dev_err(hdmi->dev, "Cannot enable HDMI cec clock: %d\n",
+ ret);
+ goto err_iahb;
+ }
+ }
+
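The "cec" clock is treated as optional here: -ENOENT just means the device tree does not wire one up, while any other error, including probe deferral, aborts the probe. Later kernels gained devm_clk_get_optional(), which folds the -ENOENT special case into a NULL return; a sketch of the equivalent logic using it:

    #include <linux/clk.h>
    #include <linux/device.h>

    static int demo_get_cec_clk(struct device *dev, struct clk **out)
    {
            struct clk *clk = devm_clk_get_optional(dev, "cec");

            if (IS_ERR(clk))
                    return PTR_ERR(clk); /* real error or probe deferral */

            *out = clk;                  /* may be NULL: clock not wired */
            return clk ? clk_prepare_enable(clk) : 0;
    }
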
/* Product and revision IDs */
hdmi->version = (hdmi_readb(hdmi, HDMI_DESIGN_ID) << 8)
| (hdmi_readb(hdmi, HDMI_REVISION_ID) << 0);
cec_notifier_put(hdmi->cec_notifier);
clk_disable_unprepare(hdmi->iahb_clk);
+ if (hdmi->cec_clk)
+ clk_disable_unprepare(hdmi->cec_clk);
err_isfr:
clk_disable_unprepare(hdmi->isfr_clk);
err_res:
clk_disable_unprepare(hdmi->iahb_clk);
clk_disable_unprepare(hdmi->isfr_clk);
+ if (hdmi->cec_clk)
+ clk_disable_unprepare(hdmi->cec_clk);
if (hdmi->i2c)
i2c_del_adapter(&hdmi->i2c->adap);
#define DP0_ACTIVEVAL 0x0650
#define DP0_SYNCVAL 0x0654
#define DP0_MISC 0x0658
-#define TU_SIZE_RECOMMENDED (0x3f << 16) /* LSCLK cycles per TU */
+#define TU_SIZE_RECOMMENDED (63) /* LSCLK cycles per TU */
#define BPC_6 (0 << 5)
#define BPC_8 (1 << 5)
tmp = (tmp << 8) | buf[i];
i++;
if (((i % 4) == 0) || (i == size)) {
- tc_write(DP0_AUXWDATA(i >> 2), tmp);
+ tc_write(DP0_AUXWDATA((i - 1) >> 2), tmp);
tmp = 0;
}
}
ret = drm_dp_link_probe(&tc->aux, &tc->link.base);
if (ret < 0)
goto err_dpcd_read;
- if ((tc->link.base.rate != 162000) && (tc->link.base.rate != 270000))
- goto err_dpcd_inval;
+ if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) {
+ dev_dbg(tc->dev, "Falling back to 2.7 Gbps rate\n");
+ tc->link.base.rate = 270000;
+ }
+
+ if (tc->link.base.num_lanes > 2) {
+ dev_dbg(tc->dev, "Falling back to 2 lanes\n");
+ tc->link.base.num_lanes = 2;
+ }
ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, tmp);
if (ret < 0)
err_dpcd_read:
dev_err(tc->dev, "failed to read DPCD: %d\n", ret);
return ret;
-err_dpcd_inval:
- dev_err(tc->dev, "invalid DPCD\n");
- return -EINVAL;
}
static int tc_set_video_mode(struct tc_data *tc, struct drm_display_mode *mode)
int lower_margin = mode->vsync_start - mode->vdisplay;
int vsync_len = mode->vsync_end - mode->vsync_start;
+ /*
+ * Recommended maximum number of symbols transferred in a transfer unit:
+ * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
+ * (output active video bandwidth in bytes))
+ * Must be less than tu_size.
+ */
+ max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
+
dev_dbg(tc->dev, "set mode %dx%d\n",
mode->hdisplay, mode->vdisplay);
dev_dbg(tc->dev, "H margin %d,%d sync %d\n",
dev_dbg(tc->dev, "total: %dx%d\n", mode->htotal, mode->vtotal);
- /* LCD Ctl Frame Size */
- tc_write(VPCTRL0, (0x40 << 20) /* VSDELAY */ |
+ /*
+ * LCD Ctl Frame Size
+ * the datasheet is not clear about vsdelay in the case of DPI;
+ * assume we do not need any delay when DPI is the source of the
+ * sync signals
+ */
+ tc_write(VPCTRL0, (0 << 20) /* VSDELAY */ |
OPXLFMT_RGB888 | FRMSYNC_DISABLED | MSF_DISABLED);
- tc_write(HTIM01, (left_margin << 16) | /* H back porch */
- (hsync_len << 0)); /* Hsync */
- tc_write(HTIM02, (right_margin << 16) | /* H front porch */
- (mode->hdisplay << 0)); /* width */
+ tc_write(HTIM01, (ALIGN(left_margin, 2) << 16) | /* H back porch */
+ (ALIGN(hsync_len, 2) << 0)); /* Hsync */
+ tc_write(HTIM02, (ALIGN(right_margin, 2) << 16) | /* H front porch */
+ (ALIGN(mode->hdisplay, 2) << 0)); /* width */
tc_write(VTIM01, (upper_margin << 16) | /* V back porch */
(vsync_len << 0)); /* Vsync */
tc_write(VTIM02, (lower_margin << 16) | /* V front porch */
/* DP Main Stream Attributes */
vid_sync_dly = hsync_len + left_margin + mode->hdisplay;
tc_write(DP0_VIDSYNCDELAY,
- (0x003e << 16) | /* thresh_dly */
+ (max_tu_symbol << 16) | /* thresh_dly */
(vid_sync_dly << 0));
tc_write(DP0_TOTALVAL, (mode->vtotal << 16) | (mode->htotal));
tc_write(DPIPXLFMT, VS_POL_ACTIVE_LOW | HS_POL_ACTIVE_LOW |
DE_POL_ACTIVE_HIGH | SUB_CFG_TYPE_CONFIG1 | DPI_BPP_RGB888);
- /*
- * Recommended maximum number of symbols transferred in a transfer unit:
- * DIV_ROUND_UP((input active video bandwidth in bytes) * tu_size,
- * (output active video bandwidth in bytes))
- * Must be less than tu_size.
- */
- max_tu_symbol = TU_SIZE_RECOMMENDED - 1;
- tc_write(DP0_MISC, (max_tu_symbol << 23) | TU_SIZE_RECOMMENDED | BPC_8);
+ tc_write(DP0_MISC, (max_tu_symbol << 23) | (TU_SIZE_RECOMMENDED << 16) |
+ BPC_8);
return 0;
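
After the change, TU_SIZE_RECOMMENDED holds the raw TU size (63 LSCLK cycles) and the shift into DP0_MISC happens at the point of use: max_tu_symbol = 62 lands in bits 30:23 and the TU size in bits 21:16, alongside the 8-bpc flag. A runnable check of the packed value:

    #include <stdint.h>
    #include <stdio.h>

    #define TU_SIZE_RECOMMENDED 63   /* LSCLK cycles per TU */
    #define BPC_8 (1u << 5)

    int main(void)
    {
            uint32_t max_tu_symbol = TU_SIZE_RECOMMENDED - 1; /* 62 */
            uint32_t dp0_misc = (max_tu_symbol << 23) |
                                (TU_SIZE_RECOMMENDED << 16) | BPC_8;

            printf("DP0_MISC = 0x%08x\n", dp0_misc); /* 0x1f3f0020 */
            return 0;
    }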
err:
unsigned int rate;
u32 dp_phy_ctrl;
int timeout;
- bool aligned;
- bool ready;
u32 value;
int ret;
u8 tmp[8];
ret = drm_dp_dpcd_read_link_status(aux, tmp + 2);
if (ret < 0)
goto err_dpcd_read;
- ready = (tmp[2] == ((DP_CHANNEL_EQ_BITS << 4) | /* Lane1 */
- DP_CHANNEL_EQ_BITS)); /* Lane0 */
- aligned = tmp[4] & DP_INTERLANE_ALIGN_DONE;
- } while ((--timeout) && !(ready && aligned));
+ } while ((--timeout) &&
+ !(drm_dp_channel_eq_ok(tmp + 2, tc->link.base.num_lanes)));
if (timeout == 0) {
/* Read DPCD 0x200-0x201 */
ret = drm_dp_dpcd_read(aux, DP_SINK_COUNT, tmp, 2);
if (ret < 0)
goto err_dpcd_read;
+ dev_err(dev, "channel(s) EQ not ok\n");
dev_info(dev, "0x0200 SINK_COUNT: 0x%02x\n", tmp[0]);
dev_info(dev, "0x0201 DEVICE_SERVICE_IRQ_VECTOR: 0x%02x\n",
tmp[1]);
dev_info(dev, "0x0206 ADJUST_REQUEST_LANE0_1: 0x%02x\n",
tmp[6]);
- if (!ready)
- dev_err(dev, "Lane0/1 not ready\n");
- if (!aligned)
- dev_err(dev, "Lane0/1 not aligned\n");
return -EAGAIN;
}
static int tc_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
- /* Accept any mode */
+ /* DPI interface clock limitation: up to 154 MHz */
+ if (mode->clock > 154000)
+ return MODE_CLOCK_HIGH;
+
return MODE_OK;
}
return;
for_each_oldnew_crtc_in_state(old_state, crtc, old_crtc_state, new_crtc_state, i) {
- if (!new_crtc_state->active || !new_crtc_state->planes_changed)
+ if (!new_crtc_state->active)
continue;
ret = drm_crtc_vblank_get(crtc);
config->link_status_property,
0);
+ drm_object_attach_property(&connector->base,
+ config->non_desktop_property,
+ 0);
+
if (drm_core_check_feature(dev, DRIVER_ATOMIC)) {
drm_object_attach_property(&connector->base, config->prop_crtc_id, 0);
}
* value of link-status is "GOOD". If something fails during or after modeset,
* the kernel driver may set this to "BAD" and issue a hotplug uevent. Drivers
* should update this value using drm_mode_connector_set_link_status_property().
+ * non_desktop:
+ * Indicates the output should be ignored for purposes of displaying a
+ * standard desktop environment or console. This is most likely because
+ * the output device is not rectilinear.
*
* Connectors also have one standardized atomic property:
*
return -ENOMEM;
dev->mode_config.link_status_property = prop;
+ prop = drm_property_create_bool(dev, DRM_MODE_PROP_IMMUTABLE, "non-desktop");
+ if (!prop)
+ return -ENOMEM;
+ dev->mode_config.non_desktop_property = prop;
+
return 0;
}
if (edid)
size = EDID_LENGTH * (1 + edid->extensions);
+ drm_object_property_set_value(&connector->base,
+ dev->mode_config.non_desktop_property,
+ connector->display_info.non_desktop);
+
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
size,
#define EDID_QUIRK_FORCE_6BPC (1 << 10)
/* Force 10bpc */
#define EDID_QUIRK_FORCE_10BPC (1 << 11)
+/* Non desktop display (i.e. HMD) */
+#define EDID_QUIRK_NON_DESKTOP (1 << 12)
struct detailed_mode_closure {
struct drm_connector *connector;
/* Rotel RSX-1058 forwards sink's EDID but only does HDMI 1.1*/
{ "ETR", 13896, EDID_QUIRK_FORCE_8BPC },
+
+ /* HTC Vive VR Headset */
+ { "HVR", 0xaa01, EDID_QUIRK_NON_DESKTOP },
};
/*
}
static void drm_add_display_info(struct drm_connector *connector,
- struct edid *edid)
+ struct edid *edid, u32 quirks)
{
struct drm_display_info *info = &connector->display_info;
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+
if (edid->revision < 3)
return;
* To avoid multiple parsing of same block, lets parse that map
* from sink info, before parsing CEA modes.
*/
- drm_add_display_info(connector, edid);
+ drm_add_display_info(connector, edid, quirks);
/*
* EDID spec says modes should be preferred in this order:
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range,
- bool rgb_quant_range_selectable)
+ bool rgb_quant_range_selectable,
+ bool is_hdmi2_sink)
{
/*
* CEA-861:
* YQ-field to match the RGB Quantization Range being transmitted
* (e.g., when Limited Range RGB, set YQ=0 or when Full Range RGB,
* set YQ=1) and the Sink shall ignore the YQ-field."
+ *
+ * Unfortunately, certain sinks (e.g. VIZ Model 67/E261VA) get confused
+ * by a non-zero YQ when receiving RGB. There doesn't seem to be any
+ * good way to tell which version of CEA-861 the sink supports, so
+ * we limit non-zero YQ to HDMI 2.0 sinks only, as HDMI 2.0 is based
+ * on CEA-861-F.
*/
- if (rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
+ if (!is_hdmi2_sink ||
+ rgb_quant_range == HDMI_QUANTIZATION_RANGE_LIMITED)
frame->ycc_quantization_range =
HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
else
if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
DRM_INFO("Cannot find any crtc or sizes\n");
+
+ /* First time: disable all CRTCs. */
+ if (!fb_helper->deferred_setup && !READ_ONCE(fb_helper->dev->master))
+ restore_fbdev_mode(fb_helper);
return -EAGAIN;
}
{
bool enable;
+ if (connector->display_info.non_desktop)
+ return false;
+
if (strict)
enable = connector->status == connector_status_connected;
else
connector = fb_helper->connector_info[i]->connector;
enabled[i] = drm_connector_enabled(connector, true);
DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
- enabled[i] ? "yes" : "no");
+ connector->display_info.non_desktop ? "non desktop" : enabled[i] ? "yes" : "no");
+
any_enabled |= enabled[i];
}
e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
e->event.base.length = sizeof(e->event);
e->event.vbl.user_data = page_flip->user_data;
+ e->event.vbl.crtc_id = crtc->base.id;
ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
if (ret) {
kfree(e);
spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
}
-static void vblank_disable_fn(unsigned long arg)
+static void vblank_disable_fn(struct timer_list *t)
{
- struct drm_vblank_crtc *vblank = (void *)arg;
+ struct drm_vblank_crtc *vblank = from_timer(vblank, t, disable_timer);
struct drm_device *dev = vblank->dev;
unsigned int pipe = vblank->pipe;
unsigned long irqflags;
vblank->dev = dev;
vblank->pipe = i;
init_waitqueue_head(&vblank->queue);
- setup_timer(&vblank->disable_timer, vblank_disable_fn,
- (unsigned long)vblank);
+ timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
seqlock_init(&vblank->seqlock);
}
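
This hunk is one instance of the tree-wide timer API conversion that recurs throughout this section: timer_setup() replaces setup_timer()'s unsigned-long cookie, and the callback now receives the struct timer_list pointer and recovers its container with from_timer(). A minimal sketch of the converted pattern, with a hypothetical container:

    #include <linux/timer.h>

    struct demo {
            struct timer_list timer;
            int count;
    };

    static void demo_timer_fn(struct timer_list *t)
    {
            struct demo *d = from_timer(d, t, timer);

            d->count++;
            mod_timer(&d->timer, jiffies + HZ); /* re-arm one second out */
    }

    static void demo_init(struct demo *d)
    {
            timer_setup(&d->timer, demo_timer_fn, 0);
            mod_timer(&d->timer, jiffies + HZ);
    }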
if (drm_vblank_offdelay == 0)
return;
else if (drm_vblank_offdelay < 0)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
else if (!dev->vblank_disable_immediate)
mod_timer(&vblank->disable_timer,
jiffies + ((drm_vblank_offdelay * HZ)/1000));
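
drm_vblank_offdelay is in milliseconds, so the expression above converts it to a jiffies deadline by hand; msecs_to_jiffies() computes the same deadline up to rounding (the open-coded division truncates, the helper rounds up). A one-line equivalent for reference:

    #include <linux/jiffies.h>

    static unsigned long demo_offdelay_expires(int offdelay_ms)
    {
            return jiffies + msecs_to_jiffies(offdelay_ms);
    }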
spin_unlock_irqrestore(&dev->event_lock, irqflags);
if (disable_irq)
- vblank_disable_fn((unsigned long)vblank);
+ vblank_disable_fn(&vblank->disable_timer);
return true;
}
.atomic_flush = exynos_crtc_handle_event,
};
-static void vidi_fake_vblank_timer(unsigned long arg)
+static void vidi_fake_vblank_timer(struct timer_list *t)
{
- struct vidi_context *ctx = (void *)arg;
+ struct vidi_context *ctx = from_timer(ctx, t, timer);
if (drm_crtc_handle_vblank(&ctx->crtc->base))
mod_timer(&ctx->timer,
ctx->pdev = pdev;
- setup_timer(&ctx->timer, vidi_fake_vblank_timer, (unsigned long)ctx);
+ timer_setup(&ctx->timer, vidi_fake_vblank_timer, 0);
mutex_init(&ctx->lock);
return PTR_ERR(fsl_dev->state);
}
- clk_disable_unprepare(fsl_dev->pix_clk);
clk_disable_unprepare(fsl_dev->clk);
return 0;
if (fsl_dev->tcon)
fsl_tcon_bypass_enable(fsl_dev->tcon);
fsl_dcu_drm_init_planes(fsl_dev->drm);
+ enable_irq(fsl_dev->irq);
drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state);
console_lock();
console_unlock();
drm_kms_helper_poll_enable(fsl_dev->drm);
- enable_irq(fsl_dev->irq);
return 0;
}
{
struct drm_encoder *encoder = &fsl_dev->encoder;
struct drm_connector *connector = &fsl_dev->connector.base;
- struct drm_mode_config *mode_config = &fsl_dev->drm->mode_config;
int ret;
fsl_dev->connector.encoder = encoder;
if (ret < 0)
goto err_sysfs;
- drm_object_property_set_value(&connector->base,
- mode_config->dpms_property,
- DRM_MODE_DPMS_OFF);
-
ret = drm_panel_attach(panel, connector);
if (ret) {
dev_err(fsl_dev->dev, "failed to attach panel\n");
* we have seen a HPD inactive->active transition. This code implements
* that delay.
*/
-static void tda998x_edid_delay_done(unsigned long data)
+static void tda998x_edid_delay_done(struct timer_list *t)
{
- struct tda998x_priv *priv = (struct tda998x_priv *)data;
+ struct tda998x_priv *priv = from_timer(priv, t, edid_delay_timer);
priv->edid_delay_active = false;
wake_up(&priv->edid_delay_waitq);
mutex_init(&priv->mutex); /* protect the page access */
init_waitqueue_head(&priv->edid_delay_waitq);
- setup_timer(&priv->edid_delay_timer, tda998x_edid_delay_done,
- (unsigned long)priv);
+ timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
/* wake up the device: */
struct intel_shadow_bb_entry *entry_obj;
struct intel_vgpu *vgpu = s->vgpu;
unsigned long gma = 0;
- uint32_t bb_size;
+ int bb_size;
void *dst = NULL;
int ret = 0;
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
if (WARN_ON(resolution >= GVT_EDID_NUM))
port->type = type;
emulate_monitor_status_change(vgpu);
+ vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
return 0;
}
goto err_unpin_mm;
}
+ ret = intel_gvt_generate_request(workload);
+ if (ret) {
+ gvt_vgpu_err("fail to generate request\n");
+ goto err_unpin_mm;
+ }
+
ret = prepare_shadow_batch_buffer(workload);
if (ret) {
gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
#define GTT_HAW 46
-#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
-#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
-#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
+#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
+#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
+#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)
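
The mask fix is an off-by-one in field width: with GTT_HAW = 46, the 1G entry's address field spans bits 45:30, i.e. 46 - 30 = 16 bits, so the old "+ 1" widths made each mask one bit too wide and let it reach into bit 46, beyond the host address width. A runnable check of the corrected masks:

    #include <stdio.h>

    #define GTT_HAW 46
    #define ADDR_1G_MASK (((1UL << (GTT_HAW - 30)) - 1) << 30)
    #define ADDR_2M_MASK (((1UL << (GTT_HAW - 21)) - 1) << 21)
    #define ADDR_4K_MASK (((1UL << (GTT_HAW - 12)) - 1) << 12)

    int main(void)
    {
            /* each mask must stay below bit GTT_HAW */
            printf("1G: 0x%016lx\n", ADDR_1G_MASK); /* bits 45:30 */
            printf("2M: 0x%016lx\n", ADDR_2M_MASK); /* bits 45:21 */
            printf("4K: 0x%016lx\n", ADDR_4K_MASK); /* bits 45:12 */
            return 0;
    }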
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
return intel_vgpu_default_mmio_write(vgpu, offset, &v, bytes);
}
-static int skl_misc_ctl_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
-{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 v = *(u32 *)p_data;
-
- if (!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv))
- return intel_vgpu_default_mmio_write(vgpu,
- offset, p_data, bytes);
-
- switch (offset) {
- case 0x4ddc:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 31);
- break;
- case 0x42080:
- /* bypass WaCompressedResourceDisplayNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 15);
- break;
- case 0xe194:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 8);
- break;
- case 0x7014:
- /* bypass WaCompressedResourceSamplerPbeMediaNewHashMode */
- vgpu_vreg(vgpu, offset) = v & ~(1 << 13);
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-
static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
NULL, NULL);
- MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS,
+ NULL, NULL);
MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL);
MMIO_D(0x6e570, D_BDW_PLUS);
MMIO_D(0x65f10, D_BDW_PLUS);
- MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL,
- skl_misc_ctl_write);
+ MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
MMIO_DFH(GEN9_GAMT_ECO_REG_RW_IA, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
- MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, skl_misc_ctl_write);
- MMIO_DH(0x42080, D_SKL_PLUS, NULL, skl_misc_ctl_write);
+ MMIO_DH(0x4ddc, D_SKL_PLUS, NULL, NULL);
+ MMIO_DH(0x42080, D_SKL_PLUS, NULL, NULL);
MMIO_D(0x45504, D_SKL_PLUS);
MMIO_D(0x45520, D_SKL_PLUS);
MMIO_D(0x46000, D_SKL_PLUS);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
enum intel_engine_id ring_id = req->engine->id;
struct intel_vgpu_workload *workload;
+ unsigned long flags;
if (!is_gvt_request(req)) {
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
NULL, ring_id);
scheduler->engine_owner[ring_id] = NULL;
}
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
return NOTIFY_OK;
}
switch (action) {
case INTEL_CONTEXT_SCHEDULE_IN:
- spin_lock_bh(&scheduler->mmio_context_lock);
+ spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
ring_id, workload->vgpu->id);
- spin_unlock_bh(&scheduler->mmio_context_lock);
+ spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
struct intel_engine_cs *engine = dev_priv->engine[ring_id];
- struct drm_i915_gem_request *rq;
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_ring *ring;
int ret;
ret = populate_shadow_context(workload);
if (ret)
goto err_unpin;
+ workload->shadowed = true;
+ return 0;
+
+err_unpin:
+ engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+ release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+ return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+ int ring_id = workload->ring_id;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+ struct drm_i915_gem_request *rq;
+ struct intel_vgpu *vgpu = workload->vgpu;
+ struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+ int ret;
rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
if (IS_ERR(rq)) {
ret = copy_workload_to_ring_buffer(workload);
if (ret)
goto err_unpin;
- workload->shadowed = true;
return 0;
err_unpin:
engine->context_unpin(engine, shadow_ctx);
-err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
return ret;
}
void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
#endif
intel_guc_resume(dev_priv);
intel_modeset_init_hw(dev);
+ intel_init_clock_gating(dev_priv);
spin_lock_irq(&dev_priv->irq_lock);
if (dev_priv->display.hpd_irq_setup)
ret = vlv_resume_prepare(dev_priv, true);
}
+ intel_uncore_runtime_resume(dev_priv);
+
/*
* No point of rolling back things in case of an error, as the best
* we can do is to hope that things will still work (and disable RPM).
spin_lock_init(&mn->lock);
mn->mn.ops = &i915_gem_userptr_notifier;
mn->objects = RB_ROOT_CACHED;
- mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+ mn->wq = alloc_workqueue("i915-userptr-release",
+ WQ_UNBOUND | WQ_MEM_RECLAIM,
+ 0);
if (mn->wq == NULL) {
kfree(mn);
return ERR_PTR(-ENOMEM);
dev_priv->mm.userptr_wq =
alloc_workqueue("i915-userptr-acquire",
- WQ_HIGHPRI | WQ_MEM_RECLAIM,
+ WQ_HIGHPRI | WQ_UNBOUND,
0);
if (!dev_priv->mm.userptr_wq)
return -ENOMEM;
if (has_transparent_hugepage()) {
struct super_block *sb = gemfs->mnt_sb;
- char options[] = "huge=within_size";
+ /* FIXME: Disabled until we get W/A for read BW issue. */
+ char options[] = "huge=never";
int flags = 0;
int err;
GEM_BUG_ON(RB_EMPTY_NODE(&wait->node));
rb_erase(&wait->node, &b->waiters);
+ RB_CLEAR_NODE(&wait->node);
out:
GEM_BUG_ON(b->irq_wait == wait);
int intel_backlight_device_register(struct intel_connector *connector);
void intel_backlight_device_unregister(struct intel_connector *connector);
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
-static int intel_backlight_device_register(struct intel_connector *connector)
+static inline int intel_backlight_device_register(struct intel_connector *connector)
{
return 0;
}
/* Due to peculiar init order wrt to hpd handling this is separate. */
if (drm_fb_helper_initial_config(&ifbdev->helper,
- ifbdev->preferred_bpp)) {
+ ifbdev->preferred_bpp))
intel_fbdev_unregister(to_i915(ifbdev->helper.dev));
- intel_fbdev_fini(to_i915(ifbdev->helper.dev));
- }
}
void intel_fbdev_initial_config_async(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
- if (ifbdev)
+ if (!ifbdev)
+ return;
+
+ intel_fbdev_sync(ifbdev);
+ if (ifbdev->vma)
drm_fb_helper_hotplug_event(&ifbdev->helper);
}
crtc_state->limited_color_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- intel_hdmi->rgb_quant_range_selectable);
+ intel_hdmi->rgb_quant_range_selectable,
+ is_hdmi2_sink);
/* TODO: handle pixel repetition for YCBCR420 outputs */
intel_write_infoframe(encoder, crtc_state, &frame);
gmbus_is_index_read(struct i2c_msg *msgs, int i, int num)
{
return (i + 1 < num &&
- !(msgs[i].flags & I2C_M_RD) && msgs[i].len <= 2 &&
+ msgs[i].addr == msgs[i + 1].addr &&
+ !(msgs[i].flags & I2C_M_RD) &&
+ (msgs[i].len == 1 || msgs[i].len == 2) &&
(msgs[i + 1].flags & I2C_M_RD));
}
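
The tightened predicate now also requires both messages to target the same slave address and restricts the write to exactly one or two bytes, which is what the GMBUS index-read hardware cycle can encode. A hypothetical transaction that still qualifies:

    #include <linux/i2c.h>

    static int demo_indexed_read(struct i2c_adapter *adap, u8 addr, u8 index,
                                 u8 *buf, u16 len)
    {
            struct i2c_msg msgs[] = {
                    /* 1-byte write selecting the register index ... */
                    { .addr = addr, .flags = 0, .len = 1, .buf = &index },
                    /* ... followed by a read at the same address */
                    { .addr = addr, .flags = I2C_M_RD, .len = len, .buf = buf },
            };

            return i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
    }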
i915_check_and_clear_faults(dev_priv);
}
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv)
+{
+ iosf_mbi_register_pmic_bus_access_notifier(
+ &dev_priv->uncore.pmic_bus_access_nb);
+}
+
void intel_uncore_sanitize(struct drm_i915_private *dev_priv)
{
i915_modparams.enable_rc6 =
* bus, which will be busy after this notification, leading to:
* "render: timed out waiting for forcewake ack request."
* errors.
+ *
+ * The notifier is unregistered during intel_runtime_suspend(),
+ * so it's ok to access the HW here without holding a RPM
+ * wake reference -> disable wakeref asserts for the time of
+ * the access.
*/
+ disable_rpm_wakeref_asserts(dev_priv);
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+ enable_rpm_wakeref_asserts(dev_priv);
break;
case MBI_PMIC_BUS_ACCESS_END:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
void intel_uncore_fini(struct drm_i915_private *dev_priv);
void intel_uncore_suspend(struct drm_i915_private *dev_priv);
void intel_uncore_resume_early(struct drm_i915_private *dev_priv);
+void intel_uncore_runtime_resume(struct drm_i915_private *dev_priv);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
i915_sw_fence_fini(fence);
}
-static void timed_fence_wake(unsigned long data)
+static void timed_fence_wake(struct timer_list *t)
{
- struct timed_fence *tf = (struct timed_fence *)data;
+ struct timed_fence *tf = from_timer(tf, t, timer);
i915_sw_fence_commit(&tf->fence);
}
{
onstack_fence_init(&tf->fence);
- setup_timer_on_stack(&tf->timer, timed_fence_wake, (unsigned long)tf);
+ timer_setup_on_stack(&tf->timer, timed_fence_wake, 0);
if (time_after(expires, jiffies))
mod_timer(&tf->timer, expires);
plane_disabling = true;
}
- if (plane_disabling) {
- drm_atomic_helper_wait_for_vblanks(dev, state);
+ /*
+ * The flip done wait is only strictly required by imx-drm if a deferred
+ * plane disable is in-flight. As the core requires blocking commits
+ * to wait for the flip, it is done here unconditionally. This keeps
+ * the work item around a bit longer than required for the majority
+ * of non-blocking commits, but we accept that for the sake of
+ * simplicity.
+ */
+ drm_atomic_helper_wait_for_flip_done(dev, state);
+ if (plane_disabling) {
for_each_old_plane_in_state(state, plane, old_plane_state, i)
ipu_plane_disable_deferred(plane);
if (crtc->state) {
if (crtc->state->mode_blob)
- drm_property_unreference_blob(crtc->state->mode_blob);
+ drm_property_blob_put(crtc->state->mode_blob);
state = to_imx_crtc_state(crtc->state);
memset(state, 0, sizeof(*state));
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
&imx_pd_connector_funcs,
- DRM_MODE_CONNECTOR_VGA);
+ DRM_MODE_CONNECTOR_DPI);
}
if (imxpd->panel)
return NULL;
}
-static void a5xx_preempt_timer(unsigned long data)
+static void a5xx_preempt_timer(struct timer_list *t)
{
- struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+ struct a5xx_gpu *a5xx_gpu = from_timer(a5xx_gpu, t, preempt_timer);
struct msm_gpu *gpu = &a5xx_gpu->base.base;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
}
}
- setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
- (unsigned long) a5xx_gpu);
+ timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}
round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}
-static void hangcheck_handler(unsigned long data)
+static void hangcheck_handler(struct timer_list *t)
{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
+ struct msm_gpu *gpu = from_timer(gpu, t, hangcheck_timer);
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
INIT_WORK(&gpu->recover_work, recover_worker);
- setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
- (unsigned long)gpu);
+ timer_setup(&gpu->hangcheck_timer, hangcheck_handler, 0);
spin_lock_init(&gpu->perf_lock);
config DRM_OMAP_PANEL_DPI
tristate "Generic DPI panel"
+ depends on BACKLIGHT_CLASS_DEVICE
help
Driver for generic DPI panels.
}
static const struct soc_device_attribute dpi_soc_devices[] = {
- { .family = "OMAP3[456]*" },
- { .family = "[AD]M37*" },
+ { .machine = "OMAP3[456]*" },
+ { .machine = "[AD]M37*" },
{ /* sentinel */ }
};
}
#ifdef DSI_CATCH_MISSING_TE
-static void dsi_te_timeout(unsigned long arg)
+static void dsi_te_timeout(struct timer_list *unused)
{
DSSERR("TE not received for 250ms!\n");
}
dsi_framedone_timeout_work_callback);
#ifdef DSI_CATCH_MISSING_TE
- init_timer(&dsi->te_timer);
- dsi->te_timer.function = dsi_te_timeout;
- dsi->te_timer.data = 0;
+ timer_setup(&dsi->te_timer, dsi_te_timeout, 0);
#endif
dsi_mem = platform_get_resource_byname(dsidev, IORESOURCE_MEM, "proto");
{
const u32 caps = CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS |
CEC_CAP_PASSTHROUGH | CEC_CAP_RC;
- unsigned int ret;
+ int ret;
core->adap = cec_allocate_adapter(&hdmi_cec_adap_ops, core,
"omap4", caps, CEC_MAX_LOG_ADDRS);
bool audio_use_mclk;
};
-static const struct hdmi4_features hdmi4_es1_features = {
+static const struct hdmi4_features hdmi4430_es1_features = {
.cts_swmode = false,
.audio_use_mclk = false,
};
-static const struct hdmi4_features hdmi4_es2_features = {
+static const struct hdmi4_features hdmi4430_es2_features = {
.cts_swmode = true,
.audio_use_mclk = false,
};
-static const struct hdmi4_features hdmi4_es3_features = {
+static const struct hdmi4_features hdmi4_features = {
.cts_swmode = true,
.audio_use_mclk = true,
};
static const struct soc_device_attribute hdmi4_soc_devices[] = {
- { .family = "OMAP4", .revision = "ES1.?", .data = &hdmi4_es1_features },
- { .family = "OMAP4", .revision = "ES2.?", .data = &hdmi4_es2_features },
- { .family = "OMAP4", .data = &hdmi4_es3_features },
+ {
+ .machine = "OMAP4430",
+ .revision = "ES1.?",
+ .data = &hdmi4430_es1_features,
+ },
+ {
+ .machine = "OMAP4430",
+ .revision = "ES2.?",
+ .data = &hdmi4430_es2_features,
+ },
+ {
+ .family = "OMAP4",
+ .data = &hdmi4_features,
+ },
{ /* sentinel */ }
};
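
The table now matches the ES1/ES2 quirks on .machine globs specific to OMAP4430, keeping a plain .family entry as the OMAP4 fallback; soc_device_match() walks the table in order and returns the first matching entry, whose .data picks the feature set. A sketch of the lookup, assuming the hdmi4_features definitions above:

    #include <linux/sys_soc.h>

    static const struct hdmi4_features *demo_lookup_features(void)
    {
            const struct soc_device_attribute *match;

            /* first matching entry wins, so quirks precede the fallback */
            match = soc_device_match(hdmi4_soc_devices);
            return match ? match->data : NULL;
    }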
match = of_match_node(dmm_of_match, dev->dev.of_node);
if (!match) {
dev_err(&dev->dev, "failed to find matching device node\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto fail;
}
omap_dmm->plat_data = match->data;
WREG32(VM_INVALIDATE_REQUEST, 0x1);
}
-static void cik_pcie_init_compute_vmid(struct radeon_device *rdev)
-{
- int i;
- uint32_t sh_mem_bases, sh_mem_config;
-
- sh_mem_bases = 0x6000 | 0x6000 << 16;
- sh_mem_config = ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED);
- sh_mem_config |= DEFAULT_MTYPE(MTYPE_NONCACHED);
-
- mutex_lock(&rdev->srbm_mutex);
- for (i = 8; i < 16; i++) {
- cik_srbm_select(rdev, 0, 0, 0, i);
- /* CP and shaders */
- WREG32(SH_MEM_CONFIG, sh_mem_config);
- WREG32(SH_MEM_APE1_BASE, 1);
- WREG32(SH_MEM_APE1_LIMIT, 0);
- WREG32(SH_MEM_BASES, sh_mem_bases);
- }
- cik_srbm_select(rdev, 0, 0, 0, 0);
- mutex_unlock(&rdev->srbm_mutex);
-}
-
/**
* cik_pcie_gart_enable - gart enable
*
cik_srbm_select(rdev, 0, 0, 0, 0);
mutex_unlock(&rdev->srbm_mutex);
- cik_pcie_init_compute_vmid(rdev);
-
cik_pcie_gart_tlb_flush(rdev);
DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
(unsigned)(rdev->mc.gtt_size >> 20),
}
info->par = rfbdev;
- info->skip_vt_switch = true;
ret = radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj);
if (ret) {
goto err_pllref;
}
- pm_runtime_enable(dev);
-
dsi->dsi_host.ops = &dw_mipi_dsi_host_ops;
dsi->dsi_host.dev = dev;
ret = mipi_dsi_host_register(&dsi->dsi_host);
}
dev_set_drvdata(dev, dsi);
+ pm_runtime_enable(dev);
return 0;
err_mipi_dsi_host:
spin_unlock_irqrestore(&psr->lock, flags);
}
-static void psr_flush_handler(unsigned long data)
+static void psr_flush_handler(struct timer_list *t)
{
- struct psr_drv *psr = (struct psr_drv *)data;
+ struct psr_drv *psr = from_timer(psr, t, flush_timer);
unsigned long flags;
/* If the state has changed since we initiated the flush, do nothing */
if (!psr)
return -ENOMEM;
- setup_timer(&psr->flush_timer, psr_flush_handler, (unsigned long)psr);
+ timer_setup(&psr->flush_timer, psr_flush_handler, 0);
spin_lock_init(&psr->lock);
psr->active = true;
struct reset_control *rst;
struct clk *clk_parent;
- struct clk *clk_brick;
struct clk *clk_safe;
- struct clk *clk_src;
+ struct clk *clk_out;
+ struct clk *clk_pad;
struct clk *clk_dp;
struct clk *clk;
clk_disable_unprepare(sor->clk);
- err = clk_set_parent(sor->clk, parent);
+ err = clk_set_parent(sor->clk_out, parent);
if (err < 0)
return err;
return 0;
}
-struct tegra_clk_sor_brick {
+struct tegra_clk_sor_pad {
struct clk_hw hw;
struct tegra_sor *sor;
};
-static inline struct tegra_clk_sor_brick *to_brick(struct clk_hw *hw)
+static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw)
{
- return container_of(hw, struct tegra_clk_sor_brick, hw);
+ return container_of(hw, struct tegra_clk_sor_pad, hw);
}
-static const char * const tegra_clk_sor_brick_parents[] = {
+static const char * const tegra_clk_sor_pad_parents[] = {
"pll_d2_out0", "pll_dp"
};
-static int tegra_clk_sor_brick_set_parent(struct clk_hw *hw, u8 index)
+static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u32 value;
value = tegra_sor_readl(sor, SOR_CLK_CNTRL);
return 0;
}
-static u8 tegra_clk_sor_brick_get_parent(struct clk_hw *hw)
+static u8 tegra_clk_sor_pad_get_parent(struct clk_hw *hw)
{
- struct tegra_clk_sor_brick *brick = to_brick(hw);
- struct tegra_sor *sor = brick->sor;
+ struct tegra_clk_sor_pad *pad = to_pad(hw);
+ struct tegra_sor *sor = pad->sor;
u8 parent = U8_MAX;
u32 value;
return parent;
}
-static const struct clk_ops tegra_clk_sor_brick_ops = {
- .set_parent = tegra_clk_sor_brick_set_parent,
- .get_parent = tegra_clk_sor_brick_get_parent,
+static const struct clk_ops tegra_clk_sor_pad_ops = {
+ .set_parent = tegra_clk_sor_pad_set_parent,
+ .get_parent = tegra_clk_sor_pad_get_parent,
};
-static struct clk *tegra_clk_sor_brick_register(struct tegra_sor *sor,
- const char *name)
+static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor,
+ const char *name)
{
- struct tegra_clk_sor_brick *brick;
+ struct tegra_clk_sor_pad *pad;
struct clk_init_data init;
struct clk *clk;
- brick = devm_kzalloc(sor->dev, sizeof(*brick), GFP_KERNEL);
- if (!brick)
+ pad = devm_kzalloc(sor->dev, sizeof(*pad), GFP_KERNEL);
+ if (!pad)
return ERR_PTR(-ENOMEM);
- brick->sor = sor;
+ pad->sor = sor;
init.name = name;
init.flags = 0;
- init.parent_names = tegra_clk_sor_brick_parents;
- init.num_parents = ARRAY_SIZE(tegra_clk_sor_brick_parents);
- init.ops = &tegra_clk_sor_brick_ops;
+ init.parent_names = tegra_clk_sor_pad_parents;
+ init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents);
+ init.ops = &tegra_clk_sor_pad_ops;
- brick->hw.init = &init;
+ pad->hw.init = &init;
- clk = devm_clk_register(sor->dev, &brick->hw);
+ clk = devm_clk_register(sor->dev, &pad->hw);
return clk;
}
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return err;
+ }
value = tegra_sor_readl(sor, SOR_DP_PADCTL0);
value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 |
/* switch to safe parent clock */
err = tegra_sor_set_parent_clock(sor, sor->clk_safe);
- if (err < 0)
+ if (err < 0) {
dev_err(sor->dev, "failed to set safe parent clock: %d\n", err);
+ return;
+ }
div = clk_get_rate(sor->clk) / 1000000 * 4;
tegra_sor_writel(sor, value, SOR_XBAR_CTRL);
/* switch to parent clock */
- err = clk_set_parent(sor->clk_src, sor->clk_parent);
- if (err < 0)
- dev_err(sor->dev, "failed to set source clock: %d\n", err);
-
- err = tegra_sor_set_parent_clock(sor, sor->clk_src);
- if (err < 0)
+ err = clk_set_parent(sor->clk, sor->clk_parent);
+ if (err < 0) {
dev_err(sor->dev, "failed to set parent clock: %d\n", err);
+ return;
+ }
+
+ err = tegra_sor_set_parent_clock(sor, sor->clk_pad);
+ if (err < 0) {
+ dev_err(sor->dev, "failed to set pad clock: %d\n", err);
+ return;
+ }
value = SOR_INPUT_CONTROL_HDMI_SRC_SELECT(dc->pipe);
}
if (sor->soc->supports_hdmi || sor->soc->supports_dp) {
- sor->clk_src = devm_clk_get(&pdev->dev, "source");
- if (IS_ERR(sor->clk_src)) {
- err = PTR_ERR(sor->clk_src);
- dev_err(sor->dev, "failed to get source clock: %d\n",
- err);
+ struct device_node *np = pdev->dev.of_node;
+ const char *name;
+
+ /*
+ * For backwards compatibility with Tegra210 device trees,
+ * fall back to the old clock name "source" if the new "out"
+ * clock is not available.
+ */
+ if (of_property_match_string(np, "clock-names", "out") < 0)
+ name = "source";
+ else
+ name = "out";
+
+ sor->clk_out = devm_clk_get(&pdev->dev, name);
+ if (IS_ERR(sor->clk_out)) {
+ err = PTR_ERR(sor->clk_out);
+ dev_err(sor->dev, "failed to get %s clock: %d\n",
+ name, err);
goto remove;
}
}
goto remove;
}
+ /*
+ * Starting with Tegra186, the BPMP provides an implementation for
+ * the pad output clock, so we have to look it up from device tree.
+ */
+ sor->clk_pad = devm_clk_get(&pdev->dev, "pad");
+ if (IS_ERR(sor->clk_pad)) {
+ if (sor->clk_pad != ERR_PTR(-ENOENT)) {
+ err = PTR_ERR(sor->clk_pad);
+ goto remove;
+ }
+
+ /*
+ * If the pad output clock is not available, then we assume
+ * we're on Tegra210 or earlier and have to provide our own
+ * implementation.
+ */
+ sor->clk_pad = NULL;
+ }
+
+ /*
+ * The bootloader may have set up the SOR such that its module clock
+ * is sourced by one of the display PLLs. However, that doesn't work
+ * without properly having set up other bits of the SOR.
+ */
+ err = clk_set_parent(sor->clk_out, sor->clk_safe);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to use safe clock: %d\n", err);
+ goto remove;
+ }
+
platform_set_drvdata(pdev, sor);
pm_runtime_enable(&pdev->dev);
- pm_runtime_get_sync(&pdev->dev);
- sor->clk_brick = tegra_clk_sor_brick_register(sor, "sor1_brick");
- pm_runtime_put(&pdev->dev);
+ /*
+ * On Tegra210 and earlier, provide our own implementation for the
+ * pad output clock.
+ */
+ if (!sor->clk_pad) {
+ err = pm_runtime_get_sync(&pdev->dev);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to get runtime PM: %d\n",
+ err);
+ goto remove;
+ }
+
+ sor->clk_pad = tegra_clk_sor_pad_register(sor,
+ "sor1_pad_clkout");
+ pm_runtime_put(&pdev->dev);
+ }
- if (IS_ERR(sor->clk_brick)) {
- err = PTR_ERR(sor->clk_brick);
- dev_err(&pdev->dev, "failed to register SOR clock: %d\n", err);
+ if (IS_ERR(sor->clk_pad)) {
+ err = PTR_ERR(sor->clk_pad);
+ dev_err(&pdev->dev, "failed to register SOR pad clock: %d\n",
+ err);
goto remove;
}
controller, for example AM33xx in beagle-bone, DA8xx, or
OMAP-L1xx. This driver replaces the FB_DA8XX fbdev driver.
-config DRM_TILCDC_SLAVE_COMPAT
- bool "Support device tree blobs using TI LCDC Slave binding"
- depends on DRM_TILCDC
- default y
- select OF_RESOLVE
- select OF_OVERLAY
- help
- Choose this option if you need a kernel that is compatible
- with device tree blobs using the obsolete "ti,tilcdc,slave"
- binding. If you find "ti,tilcdc,slave"-string from your DTB,
- you probably need this. Otherwise you do not.
ccflags-y += -Werror
endif
-obj-$(CONFIG_DRM_TILCDC_SLAVE_COMPAT) += tilcdc_slave_compat.o \
- tilcdc_slave_compat.dtb.o
-
tilcdc-y := \
tilcdc_plane.o \
tilcdc_crtc.o \
+++ /dev/null
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- */
-
-/*
- * To support the old "ti,tilcdc,slave" binding the binding has to be
- * transformed to the new external encoder binding.
- */
-
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-#include <linux/of_fdt.h>
-#include <linux/slab.h>
-#include <linux/list.h>
-
-#include "tilcdc_slave_compat.h"
-
-struct kfree_table {
- int total;
- int num;
- void **table;
-};
-
-static int __init kfree_table_init(struct kfree_table *kft)
-{
- kft->total = 32;
- kft->num = 0;
- kft->table = kmalloc(kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table)
- return -ENOMEM;
-
- return 0;
-}
-
-static int __init kfree_table_add(struct kfree_table *kft, void *p)
-{
- if (kft->num == kft->total) {
- void **old = kft->table;
-
- kft->total *= 2;
- kft->table = krealloc(old, kft->total * sizeof(*kft->table),
- GFP_KERNEL);
- if (!kft->table) {
- kft->table = old;
- kfree(p);
- return -ENOMEM;
- }
- }
- kft->table[kft->num++] = p;
- return 0;
-}
-
-static void __init kfree_table_free(struct kfree_table *kft)
-{
- int i;
-
- for (i = 0; i < kft->num; i++)
- kfree(kft->table[i]);
-
- kfree(kft->table);
-}
-
-static
-struct property * __init tilcdc_prop_dup(const struct property *prop,
- struct kfree_table *kft)
-{
- struct property *nprop;
-
- nprop = kzalloc(sizeof(*nprop), GFP_KERNEL);
- if (!nprop || kfree_table_add(kft, nprop))
- return NULL;
-
- nprop->name = kstrdup(prop->name, GFP_KERNEL);
- if (!nprop->name || kfree_table_add(kft, nprop->name))
- return NULL;
-
- nprop->value = kmemdup(prop->value, prop->length, GFP_KERNEL);
- if (!nprop->value || kfree_table_add(kft, nprop->value))
- return NULL;
-
- nprop->length = prop->length;
-
- return nprop;
-}
-
-static void __init tilcdc_copy_props(struct device_node *from,
- struct device_node *to,
- const char * const props[],
- struct kfree_table *kft)
-{
- struct property *prop;
- int i;
-
- for (i = 0; props[i]; i++) {
- prop = of_find_property(from, props[i], NULL);
- if (!prop)
- continue;
-
- prop = tilcdc_prop_dup(prop, kft);
- if (!prop)
- continue;
-
- prop->next = to->properties;
- to->properties = prop;
- }
-}
-
-static int __init tilcdc_prop_str_update(struct property *prop,
- const char *str,
- struct kfree_table *kft)
-{
- prop->value = kstrdup(str, GFP_KERNEL);
- if (kfree_table_add(kft, prop->value) || !prop->value)
- return -ENOMEM;
- prop->length = strlen(str)+1;
- return 0;
-}
-
-static void __init tilcdc_node_disable(struct device_node *node)
-{
- struct property *prop;
-
- prop = kzalloc(sizeof(*prop), GFP_KERNEL);
- if (!prop)
- return;
-
- prop->name = "status";
- prop->value = "disabled";
- prop->length = strlen((char *)prop->value)+1;
-
- of_update_property(node, prop);
-}
-
-static struct device_node * __init tilcdc_get_overlay(struct kfree_table *kft)
-{
- const int size = __dtb_tilcdc_slave_compat_end -
- __dtb_tilcdc_slave_compat_begin;
- static void *overlay_data;
- struct device_node *overlay;
-
- if (!size) {
- pr_warn("%s: No overlay data\n", __func__);
- return NULL;
- }
-
- overlay_data = kmemdup(__dtb_tilcdc_slave_compat_begin,
- size, GFP_KERNEL);
- if (!overlay_data || kfree_table_add(kft, overlay_data))
- return NULL;
-
- of_fdt_unflatten_tree(overlay_data, NULL, &overlay);
- if (!overlay) {
- pr_warn("%s: Unfattening overlay tree failed\n", __func__);
- return NULL;
- }
-
- return overlay;
-}
-
-static const struct of_device_id tilcdc_slave_of_match[] __initconst = {
- { .compatible = "ti,tilcdc,slave", },
- {},
-};
-
-static const struct of_device_id tilcdc_of_match[] __initconst = {
- { .compatible = "ti,am33xx-tilcdc", },
- {},
-};
-
-static const struct of_device_id tilcdc_tda998x_of_match[] __initconst = {
- { .compatible = "nxp,tda998x", },
- {},
-};
-
-static const char * const tilcdc_slave_props[] __initconst = {
- "pinctrl-names",
- "pinctrl-0",
- "pinctrl-1",
- NULL
-};
-
-static void __init tilcdc_convert_slave_node(void)
-{
- struct device_node *slave = NULL, *lcdc = NULL;
- struct device_node *i2c = NULL, *fragment = NULL;
- struct device_node *overlay, *encoder;
- struct property *prop;
- /* For all memory needed for the overlay tree. This memory can
- be freed after the overlay has been applied. */
- struct kfree_table kft;
- int ovcs_id, ret;
-
- if (kfree_table_init(&kft))
- return;
-
- lcdc = of_find_matching_node(NULL, tilcdc_of_match);
- slave = of_find_matching_node(NULL, tilcdc_slave_of_match);
-
- if (!slave || !of_device_is_available(lcdc))
- goto out;
-
- i2c = of_parse_phandle(slave, "i2c", 0);
- if (!i2c) {
- pr_err("%s: Can't find i2c node trough phandle\n", __func__);
- goto out;
- }
-
- overlay = tilcdc_get_overlay(&kft);
- if (!overlay)
- goto out;
-
- encoder = of_find_matching_node(overlay, tilcdc_tda998x_of_match);
- if (!encoder) {
- pr_err("%s: Failed to find tda998x node\n", __func__);
- goto out;
- }
-
- tilcdc_copy_props(slave, encoder, tilcdc_slave_props, &kft);
-
- for_each_child_of_node(overlay, fragment) {
- prop = of_find_property(fragment, "target-path", NULL);
- if (!prop)
- continue;
- if (!strncmp("i2c", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, i2c->full_name, &kft))
- goto out;
- if (!strncmp("lcdc", (char *)prop->value, prop->length))
- if (tilcdc_prop_str_update(prop, lcdc->full_name, &kft))
- goto out;
- }
-
- tilcdc_node_disable(slave);
-
- ovcs_id = 0;
- ret = of_overlay_apply(overlay, &ovcs_id);
- if (ret)
- pr_err("%s: Applying overlay changeset failed: %d\n",
- __func__, ret);
- else
- pr_info("%s: ti,tilcdc,slave node successfully converted\n",
- __func__);
-out:
- kfree_table_free(&kft);
- of_node_put(i2c);
- of_node_put(slave);
- of_node_put(lcdc);
- of_node_put(fragment);
-}
-
-static int __init tilcdc_slave_compat_init(void)
-{
- tilcdc_convert_slave_node();
- return 0;
-}
-
-subsys_initcall(tilcdc_slave_compat_init);
+++ /dev/null
-/*
- * DTS overlay for converting ti,tilcdc,slave binding to new binding.
- *
- * Copyright (C) 2015 Texas Instruments Inc.
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- */
-
-/*
- * target-path property values are simple tags that are replaced with
- * correct values in tildcdc_slave_compat.c. Some properties are also
- * copied over from the ti,tilcdc,slave node.
- */
-
-/dts-v1/;
-/ {
- fragment@0 {
- target-path = "i2c";
- __overlay__ {
- #address-cells = <1>;
- #size-cells = <0>;
- tda19988 {
- compatible = "nxp,tda998x";
- reg = <0x70>;
- status = "okay";
-
- port {
- hdmi_0: endpoint@0 {
- remote-endpoint = <&lcd_0>;
- };
- };
- };
- };
- };
-
- fragment@1 {
- target-path = "lcdc";
- __overlay__ {
- port {
- lcd_0: endpoint@0 {
- remote-endpoint = <&hdmi_0>;
- };
- };
- };
- };
-
- __local_fixups__ {
- fragment@0 {
- __overlay__ {
- tda19988 {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
- fragment@1 {
- __overlay__ {
- port {
- endpoint@0 {
- remote-endpoint = <0>;
- };
- };
- };
- };
- };
-};
+++ /dev/null
-/*
- * Copyright (C) 2015 Texas Instruments
- * Author: Jyri Sarha <jsarha@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 as published by
- * the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program. If not, see <http://www.gnu.org/licenses/>.
- */
-/* This header declares the symbols defined in tilcdc_slave_compat.dts */
-
-#ifndef __TILCDC_SLAVE_COMPAT_H__
-#define __TILCDC_SLAVE_COMPAT_H__
-
-extern uint8_t __dtb_tilcdc_slave_compat_begin[];
-extern uint8_t __dtb_tilcdc_slave_compat_end[];
-
-#endif /* __TILCDC_SLAVE_COMPAT_H__ */
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- if (p++ != pages[i + j])
- break;
+ if (!(flags & TTM_PAGE_FLAG_DMA32)) {
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ if (p++ != pages[i + j])
+ break;
- if (j == HPAGE_PMD_NR)
- order = HPAGE_PMD_ORDER;
+ if (j == HPAGE_PMD_NR)
+ order = HPAGE_PMD_ORDER;
+ }
#endif
if (page_count(pages[i]) != 1)
i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- while (npages >= HPAGE_PMD_NR) {
- gfp_t huge_flags = gfp_flags;
+ if (!(gfp_flags & GFP_DMA32)) {
+ while (npages >= HPAGE_PMD_NR) {
+ gfp_t huge_flags = gfp_flags;
- huge_flags |= GFP_TRANSHUGE;
- huge_flags &= ~__GFP_MOVABLE;
- huge_flags &= ~__GFP_COMP;
- p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
- if (!p)
- break;
+ huge_flags |= GFP_TRANSHUGE;
+ huge_flags &= ~__GFP_MOVABLE;
+ huge_flags &= ~__GFP_COMP;
+ p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
+ if (!p)
+ break;
- for (j = 0; j < HPAGE_PMD_NR; ++j)
- pages[i++] = p++;
+ for (j = 0; j < HPAGE_PMD_NR; ++j)
+ pages[i++] = p++;
- npages -= HPAGE_PMD_NR;
+ npages -= HPAGE_PMD_NR;
+ }
}
#endif
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
{
unsigned i, j;
ttm_pool_unpopulate(&tt->ttm);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
-#endif
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
mutex_unlock(&bo->madv_lock);
}
-static void vc4_bo_cache_time_timer(unsigned long data)
+static void vc4_bo_cache_time_timer(struct timer_list *t)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_dev *vc4 = from_timer(vc4, t, bo_cache.time_timer);
schedule_work(&vc4->bo_cache.time_work);
}
INIT_LIST_HEAD(&vc4->bo_cache.time_list);
INIT_WORK(&vc4->bo_cache.time_work, vc4_bo_cache_time_work);
- setup_timer(&vc4->bo_cache.time_timer,
- vc4_bo_cache_time_timer,
- (unsigned long)dev);
+ timer_setup(&vc4->bo_cache.time_timer, vc4_bo_cache_time_timer, 0);
return 0;
}
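These vc4 hunks show the conversion pattern applied throughout this series: the callback now receives the struct timer_list itself and recovers its container with from_timer(), a container_of() wrapper keyed on the member name (nested member paths such as hw.timer, seen later in hfcpci, work too). A minimal self-contained sketch around a hypothetical my_dev structure:

	#include <linux/timer.h>

	struct my_dev {
		struct timer_list timer;	/* must be embedded in the parent */
		int pending;
	};

	static void my_dev_timeout(struct timer_list *t)
	{
		/* from_timer() is container_of() keyed on the member name */
		struct my_dev *mydev = from_timer(mydev, t, timer);

		mydev->pending = 0;
	}

	static void my_dev_start(struct my_dev *mydev)
	{
		/* the third argument takes timer flags, e.g. TIMER_DEFERRABLE */
		timer_setup(&mydev->timer, my_dev_timeout, 0);
		mod_timer(&mydev->timer, jiffies + HZ);
	}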
}
static void
-vc4_hangcheck_elapsed(unsigned long data)
+vc4_hangcheck_elapsed(struct timer_list *t)
{
- struct drm_device *dev = (struct drm_device *)data;
- struct vc4_dev *vc4 = to_vc4_dev(dev);
+ struct vc4_dev *vc4 = from_timer(vc4, t, hangcheck.timer);
+ struct drm_device *dev = vc4->dev;
uint32_t ct0ca, ct1ca;
unsigned long irqflags;
struct vc4_exec_info *bin_exec, *render_exec;
spin_lock_init(&vc4->job_lock);
INIT_WORK(&vc4->hangcheck.reset_work, vc4_reset_work);
- setup_timer(&vc4->hangcheck.timer,
- vc4_hangcheck_elapsed,
- (unsigned long)dev);
+ timer_setup(&vc4->hangcheck.timer, vc4_hangcheck_elapsed, 0);
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
vc4_encoder->limited_rgb_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL,
- vc4_encoder->rgb_range_selectable);
+ vc4_encoder->rgb_range_selectable,
+ false);
vc4_hdmi_write_infoframe(encoder, &frame);
}
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
+ /* Undo the effects of a previous vc4_irq_uninstall. */
+ enable_irq(dev->irq);
+
/* Enable both the render done and out of memory interrupts. */
V3D_WRITE(V3D_INTENA, V3D_DRIVER_IRQS);
/* Clear any pending interrupts we might have left. */
V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS);
+ /* Finish any interrupt handler still in flight. */
+ disable_irq(dev->irq);
+
cancel_work_sync(&vc4->overflow_mem_work);
}
.timeline_value_str = vgem_fence_timeline_value_str,
};
-static void vgem_fence_timeout(unsigned long data)
+static void vgem_fence_timeout(struct timer_list *t)
{
- struct vgem_fence *fence = (struct vgem_fence *)data;
+ struct vgem_fence *fence = from_timer(fence, t, timer);
dma_fence_signal(&fence->base);
}
dma_fence_init(&fence->base, &vgem_fence_ops, &fence->lock,
dma_fence_context_alloc(1), 1);
- setup_timer(&fence->timer, vgem_fence_timeout, (unsigned long)fence);
+ timer_setup(&fence->timer, vgem_fence_timeout, 0);
/* We force the fence to expire within 10s to prevent driver hangs */
mod_timer(&fence->timer, jiffies + VGEM_FENCE_TIMEOUT);
static void
-via_dmablit_timer(unsigned long data)
+via_dmablit_timer(struct timer_list *t)
{
- drm_via_blitq_t *blitq = (drm_via_blitq_t *) data;
+ drm_via_blitq_t *blitq = from_timer(blitq, t, poll_timer);
struct drm_device *dev = blitq->dev;
int engine = (int)
(blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);
init_waitqueue_head(blitq->blit_queue + j);
init_waitqueue_head(&blitq->busy_queue);
INIT_WORK(&blitq->wq, via_dmablit_workqueue);
- setup_timer(&blitq->poll_timer, via_dmablit_timer,
- (unsigned long)blitq);
+ timer_setup(&blitq->poll_timer, via_dmablit_timer, 0);
}
}
void ipu_dc_enable_channel(struct ipu_dc *dc)
{
- int di;
u32 reg;
- di = dc->di;
-
reg = readl(dc->base + DC_WR_CH_CONF);
reg |= DC_WR_CH_CONF_PROG_TYPE_NORMAL;
writel(reg, dc->base + DC_WR_CH_CONF);
dev_err(&appleir->input_dev->dev, "possible flat battery?\n");
}
-static void key_up_tick(unsigned long data)
+static void key_up_tick(struct timer_list *t)
{
- struct appleir *appleir = (struct appleir *)data;
+ struct appleir *appleir = from_timer(appleir, t, key_up_timer);
struct hid_device *hid = appleir->hid;
unsigned long flags;
hid->quirks |= HID_QUIRK_HIDINPUT_FORCE;
spin_lock_init(&appleir->lock);
- setup_timer(&appleir->key_up_timer,
- key_up_tick, (unsigned long) appleir);
+ timer_setup(&appleir->key_up_timer, key_up_tick, 0);
hid_set_drvdata(hid, appleir);
return;
}
-static void pcmidi_sustained_note_release(unsigned long data)
+static void pcmidi_sustained_note_release(struct timer_list *t)
{
- struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data;
+ struct pcmidi_sustain *pms = from_timer(pms, t, timer);
pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity);
pms->in_use = 0;
pms = &pm->sustained_notes[i];
pms->in_use = 0;
pms->pm = pm;
- setup_timer(&pms->timer, pcmidi_sustained_note_release,
- (unsigned long)pms);
+ timer_setup(&pms->timer, pcmidi_sustained_note_release, 0);
}
}
spin_unlock_irqrestore(&wdata->state.lock, flags);
}
-static void wiimote_init_timeout(unsigned long arg)
+static void wiimote_init_timeout(struct timer_list *t)
{
- struct wiimote_data *wdata = (void*)arg;
+ struct wiimote_data *wdata = from_timer(wdata, t, timer);
wiimote_schedule(wdata);
}
wdata->state.cmd_battery = 0xff;
INIT_WORK(&wdata->init_worker, wiimote_init_worker);
- setup_timer(&wdata->timer, wiimote_init_timeout, (long)wdata);
+ timer_setup(&wdata->timer, wiimote_init_timeout, 0);
return wdata;
}
data->timeout_cnt = 0;
}
-static void ssp_wdt_timer_func(unsigned long ptr)
+static void ssp_wdt_timer_func(struct timer_list *t)
{
- struct ssp_data *data = (struct ssp_data *)ptr;
+ struct ssp_data *data = from_timer(data, t, wdt_timer);
switch (data->fw_dl_state) {
case SSP_FW_DL_STATE_FAIL:
INIT_WORK(&data->work_wdt, ssp_wdt_work_func);
INIT_DELAYED_WORK(&data->work_refresh, ssp_refresh_task);
- setup_timer(&data->wdt_timer, ssp_wdt_timer_func, (unsigned long)data);
+ timer_setup(&data->wdt_timer, ssp_wdt_timer_func, 0);
ret = request_threaded_irq(data->spi->irq, NULL,
ssp_irq_thread_fn,
sg_list_start = umem->sg_head.sgl;
while (npages) {
- ret = get_user_pages(cur_base,
+ ret = get_user_pages_longterm(cur_base,
min_t(unsigned long, npages,
PAGE_SIZE / sizeof (struct page *)),
gup_flags, page_list, vma_list);
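get_user_pages_longterm() takes the same arguments as get_user_pages() but is meant for pins that outlive the system call, as RDMA memory registrations do; at this point in the tree it refuses to pin pages in DAX-backed VMAs (returning -EOPNOTSUPP), since a long-lived pin there would indefinitely block filesystem operations. A hedged usage sketch with hypothetical locals, assuming the caller already holds mmap_sem as ib_umem_get() does:

	long pinned;

	pinned = get_user_pages_longterm(start, nr_pages, FOLL_WRITE,
					 pages, NULL);
	if (pinned < 0)
		return pinned;	/* -EOPNOTSUPP on DAX, -EFAULT, ... */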
return -ENOMEM;
}
-static void delay_time_func(unsigned long ctx)
+static void delay_time_func(struct timer_list *t)
{
- struct mlx5_ib_dev *dev = (struct mlx5_ib_dev *)ctx;
+ struct mlx5_ib_dev *dev = from_timer(dev, t, delay_timer);
dev->fill_delay = 0;
}
return -ENOMEM;
}
- setup_timer(&dev->delay_timer, delay_time_func, (unsigned long)dev);
+ timer_setup(&dev->delay_timer, delay_time_func, 0);
for (i = 0; i < MAX_MR_CACHE_ENTRIES; i++) {
ent = &cache->ent[i];
INIT_LIST_HEAD(&ent->head);
spin_unlock_irqrestore(&catas_lock, flags);
}
-static void poll_catas(unsigned long dev_ptr)
+static void poll_catas(struct timer_list *t)
{
- struct mthca_dev *dev = (struct mthca_dev *) dev_ptr;
+ struct mthca_dev *dev = from_timer(dev, t, catas_err.timer);
int i;
for (i = 0; i < dev->catas_err.size; ++i)
{
phys_addr_t addr;
- init_timer(&dev->catas_err.timer);
+ timer_setup(&dev->catas_err.timer, poll_catas, 0);
dev->catas_err.map = NULL;
addr = pci_resource_start(dev->pdev, 0) +
return;
}
- dev->catas_err.timer.data = (unsigned long) dev;
- dev->catas_err.timer.function = poll_catas;
dev->catas_err.timer.expires = jiffies + MTHCA_CATAS_POLL_INTERVAL;
INIT_LIST_HEAD(&dev->catas_err.list);
add_timer(&dev->catas_err.timer);
if (!nesvnic->event_timer.function) {
ib_dispatch_event(&event);
nesvnic->last_dispatched_event = event.event;
- nesvnic->event_timer.function = (TIMER_FUNC_TYPE)nes_handle_delayed_event;
+ nesvnic->event_timer.function = nes_handle_delayed_event;
nesvnic->event_timer.expires = jiffies + NES_EVENT_DELAY;
add_timer(&nesvnic->event_timer);
} else {
}
EXPORT_SYMBOL(gameport_stop_polling);
-static void gameport_run_poll_handler(unsigned long d)
+static void gameport_run_poll_handler(struct timer_list *t)
{
- struct gameport *gameport = (struct gameport *)d;
+ struct gameport *gameport = from_timer(gameport, t, poll_timer);
gameport->poll_handler(gameport);
if (gameport->poll_cnt)
INIT_LIST_HEAD(&gameport->node);
spin_lock_init(&gameport->timer_lock);
- setup_timer(&gameport->poll_timer, gameport_run_poll_handler,
- (unsigned long)gameport);
+ timer_setup(&gameport->poll_timer, gameport_run_poll_handler, 0);
}
/*
*/
void input_enable_softrepeat(struct input_dev *dev, int delay, int period)
{
- dev->timer.function = (TIMER_FUNC_TYPE)input_repeat_key;
+ dev->timer.function = input_repeat_key;
dev->rep[REP_DELAY] = delay;
dev->rep[REP_PERIOD] = period;
}
return 0;
}
-static void db9_timer(unsigned long private)
+static void db9_timer(struct timer_list *t)
{
- struct db9 *db9 = (void *) private;
+ struct db9 *db9 = from_timer(db9, t, timer);
struct parport *port = db9->pd->port;
struct input_dev *dev = db9->dev[0];
struct input_dev *dev2 = db9->dev[1];
db9->pd = pd;
db9->mode = mode;
db9->parportno = pp->number;
- setup_timer(&db9->timer, db9_timer, (long)db9);
+ timer_setup(&db9->timer, db9_timer, 0);
for (i = 0; i < (min(db9_mode->n_pads, DB9_MAX_DEVICES)); i++) {
* gc_timer() initiates reads of console pads data.
*/
-static void gc_timer(unsigned long private)
+static void gc_timer(struct timer_list *t)
{
- struct gc *gc = (void *) private;
+ struct gc *gc = from_timer(gc, t, timer);
/*
* N64 pads - must be read first, any read confuses them for 200 us
mutex_init(&gc->mutex);
gc->pd = pd;
gc->parportno = pp->number;
- setup_timer(&gc->timer, gc_timer, (long) gc);
+ timer_setup(&gc->timer, gc_timer, 0);
for (i = 0; i < n_pads && i < GC_MAX_DEVICES; i++) {
if (!pads[i])
* tgfx_timer() reads and analyzes TurboGraFX joystick data.
*/
-static void tgfx_timer(unsigned long private)
+static void tgfx_timer(struct timer_list *t)
{
- struct tgfx *tgfx = (void *) private;
+ struct tgfx *tgfx = from_timer(tgfx, t, timer);
struct input_dev *dev;
int data1, data2, i;
mutex_init(&tgfx->sem);
tgfx->pd = pd;
tgfx->parportno = pp->number;
- setup_timer(&tgfx->timer, tgfx_timer, (long)tgfx);
+ timer_setup(&tgfx->timer, tgfx_timer, 0);
for (i = 0; i < n_devs; i++) {
if (n_buttons[i] < 1)
!(data1 & S3C2410_ADCDAT0_UPDOWN));
}
-static void touch_timer_fire(unsigned long data)
+static void touch_timer_fire(struct timer_list *unused)
{
unsigned long data0;
unsigned long data1;
static void init_iova_rcaches(struct iova_domain *iovad);
static void free_iova_rcaches(struct iova_domain *iovad);
static void fq_destroy_all_entries(struct iova_domain *iovad);
-static void fq_flush_timeout(unsigned long data);
+static void fq_flush_timeout(struct timer_list *t);
void
init_iova_domain(struct iova_domain *iovad, unsigned long granule,
spin_lock_init(&fq->lock);
}
- setup_timer(&iovad->fq_timer, fq_flush_timeout, (unsigned long)iovad);
+ timer_setup(&iovad->fq_timer, fq_flush_timeout, 0);
atomic_set(&iovad->fq_timer_on, 0);
return 0;
}
}
-static void fq_flush_timeout(unsigned long data)
+static void fq_flush_timeout(struct timer_list *t)
{
- struct iova_domain *iovad = (struct iova_domain *)data;
+ struct iova_domain *iovad = from_timer(iovad, t, fq_timer);
int cpu;
atomic_set(&iovad->fq_timer_on, 0);
config ARM_GIC_V3_ITS
bool
+ select GENERIC_MSI_IRQ_DOMAIN
+ default ARM_GIC_V3
+
+config ARM_GIC_V3_ITS_PCI
+ bool
+ depends on ARM_GIC_V3_ITS
depends on PCI
depends on PCI_MSI
+ default ARM_GIC_V3_ITS
config ARM_NVIC
bool
obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o
obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o
obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o
-obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-platform-msi.o irq-gic-v4.o
+obj-$(CONFIG_ARM_GIC_V3_ITS_PCI) += irq-gic-v3-its-pci-msi.o
obj-$(CONFIG_PARTITION_PERCPU) += irq-partition-percpu.o
obj-$(CONFIG_HISILICON_IRQ_MBIGEN) += irq-mbigen.o
obj-$(CONFIG_ARM_NVIC) += irq-nvic.o
int nr_parts;
struct partition_affinity *parts;
- parts_node = of_find_node_by_name(gic_node, "ppi-partitions");
+ parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
if (!parts_node)
return;
nr_parts = of_get_child_count(parts_node);
if (!nr_parts)
- return;
+ goto out_put_node;
parts = kzalloc(sizeof(*parts) * nr_parts, GFP_KERNEL);
if (WARN_ON(!parts))
- return;
+ goto out_put_node;
for_each_child_of_node(parts_node, child_part) {
struct partition_affinity *part;
gic_data.ppi_descs[i] = desc;
}
+
+out_put_node:
+ of_node_put(parts_node);
}
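Two separate bugs are addressed here: of_find_node_by_name() searches the whole tree from the given starting point (and drops the reference on that starting node), while of_get_child_by_name() restricts the lookup to direct children; and the node a lookup returns carries a reference that every exit path must drop. The pattern in miniature, with hypothetical names:

	struct device_node *child;

	child = of_get_child_by_name(parent, "ppi-partitions");
	if (!child)
		return;

	/* ... use child ... */

	of_node_put(child);	/* balance the reference taken by the lookup */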
static void __init gic_of_setup_kvm_info(struct device_node *node)
err = gic_validate_dist_version(acpi_data.dist_base);
if (err) {
- pr_err("No distributor detected at @%p, giving up",
+ pr_err("No distributor detected at @%p, giving up\n",
acpi_data.dist_base);
goto out_dist_unmap;
}
.map = map,
},
};
+ int ret;
/*
* The host will never see that interrupt firing again, so it
*/
irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
- return irq_set_vcpu_affinity(irq, &info);
+ ret = irq_set_vcpu_affinity(irq, &info);
+ if (ret)
+ irq_clear_status_flags(irq, IRQ_DISABLE_UNLAZY);
+
+ return ret;
}
int its_get_vlpi(int irq, struct its_vlpi_map *map)
/* Ioremap the registers */
priv->pdc_base = devm_ioremap(&pdev->dev, res_regs->start,
- res_regs->end - res_regs->start);
+ resource_size(res_regs));
if (!priv->pdc_base)
return -EIO;
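resource_size() exists precisely because struct resource uses an inclusive end address; open-coding end - start maps one byte too little. The helper, as defined in include/linux/ioport.h:

	static inline resource_size_t resource_size(const struct resource *res)
	{
		return res->end - res->start + 1;
	}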
irq_set_handler(data->irq, handle_level_irq);
break;
default:
- pr_err("No such irq type %d", type);
+ pr_err("No such irq type %d\n", type);
return -EINVAL;
}
break;
default:
- pr_err("No such irq type %d", type);
+ pr_err("No such irq type %d\n", type);
return -EINVAL;
}
}
data->base = of_iomap(node, 0);
- if (IS_ERR(data->base)) {
- err = PTR_ERR(data->base);
+ if (!data->base) {
+ err = -ENODEV;
goto out_free;
}
{
struct combiner *combiner;
size_t alloc_sz;
- u32 nregs;
+ int nregs;
int err;
nregs = count_registers(pdev);
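count_registers() can return a negative errno, which an unsigned nregs silently wraps into a huge positive count, so the driver's subsequent negative-error check (not visible in this hunk) could never fire. The signedness trap in miniature, assuming such a check follows:

	int nregs;

	nregs = count_registers(pdev);
	if (nregs < 0)	/* always false when nregs was declared u32 */
		return nregs;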
send_message(card, &cmdcmsg);
}
-static void listentimerfunc(unsigned long x)
+static void listentimerfunc(struct timer_list *t)
{
- capidrv_contr *card = (capidrv_contr *)x;
+ capidrv_contr *card = from_timer(card, t, listentimer);
if (card->state != ST_LISTEN_NONE && card->state != ST_LISTEN_ACTIVE)
printk(KERN_ERR "%s: controller dead ??\n", card->name);
send_listen(card);
return -1;
}
card->owner = THIS_MODULE;
- setup_timer(&card->listentimer, listentimerfunc, (unsigned long)card);
+ timer_setup(&card->listentimer, listentimerfunc, 0);
strcpy(card->name, id);
card->contrnr = contr;
card->nbchan = profp->nbchannel;
/***************************/
/* timer callback function */
/***************************/
-static void deflect_timer_expire(ulong arg)
+static void deflect_timer_expire(struct timer_list *t)
{
unsigned long flags;
- struct call_struc *cs = (struct call_struc *) arg;
+ struct call_struc *cs = from_timer(cs, t, timer);
spin_lock_irqsave(&divert_lock, flags);
del_timer(&cs->timer); /* delete active timer */
/* allocate mem for information struct */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (-ENOMEM); /* no memory */
- setup_timer(&cs->timer, deflect_timer_expire, (ulong)cs);
+ timer_setup(&cs->timer, deflect_timer_expire, 0);
cs->info[0] = '\0';
cs->ics.driver = drvid;
cs->ics.command = ISDN_CMD_PROT_IO; /* protocol specific io */
return (0); /* no external deflection needed */
if (!(cs = kmalloc(sizeof(struct call_struc), GFP_ATOMIC)))
return (0); /* no memory */
- setup_timer(&cs->timer, deflect_timer_expire,
- (ulong)cs);
+ timer_setup(&cs->timer, deflect_timer_expire, 0);
cs->info[0] = '\0';
cs->ics = *ic; /* copy incoming data */
static int um_idi_open(struct inode *inode, struct file *file);
static int um_idi_release(struct inode *inode, struct file *file);
static int remove_entity(void *entity);
-static void diva_um_timer_function(unsigned long data);
+static void diva_um_timer_function(struct timer_list *t);
/*
* proc entry
p_os = (diva_um_idi_os_context_t *) diva_um_id_get_os_context(e);
init_waitqueue_head(&p_os->read_wait);
init_waitqueue_head(&p_os->close_wait);
- setup_timer(&p_os->diva_timer_id, (void *)diva_um_timer_function,
- (unsigned long)p_os);
+ timer_setup(&p_os->diva_timer_id, diva_um_timer_function, 0);
p_os->aborted = 0;
p_os->adapter_nr = adapter_nr;
return (1);
}
static
-void diva_um_timer_function(unsigned long data)
+void diva_um_timer_function(struct timer_list *t)
{
- diva_um_idi_os_context_t *p_os = (diva_um_idi_os_context_t *) data;
+ diva_um_idi_os_context_t *p_os = from_timer(p_os, t, diva_timer_id);
p_os->aborted = 1;
wake_up_interruptible(&p_os->read_wait);
*/
static void
-hfcmulti_dbusy_timer(struct hfc_multi *hc)
+hfcmulti_dbusy_timer(struct timer_list *t)
{
}
if (hc->dnum[pt]) {
mode_hfcmulti(hc, dch->slot, dch->dev.D.protocol,
-1, 0, -1, 0);
- setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
- (long)dch);
+ timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
}
for (i = 1; i <= 31; i++) {
if (!((1 << i) & hc->bmask[pt])) /* skip unused chan */
hc->chan[i].slot_rx = -1;
hc->chan[i].conf = -1;
mode_hfcmulti(hc, i, dch->dev.D.protocol, -1, 0, -1, 0);
- setup_timer(&dch->timer, (void *)hfcmulti_dbusy_timer,
- (long)dch);
+ timer_setup(&dch->timer, hfcmulti_dbusy_timer, 0);
hc->chan[i - 2].slot_tx = -1;
hc->chan[i - 2].slot_rx = -1;
hc->chan[i - 2].conf = -1;
* Timer function called when kernel timer expires
*/
static void
-hfcpci_Timer(struct hfc_pci *hc)
+hfcpci_Timer(struct timer_list *t)
{
+ struct hfc_pci *hc = from_timer(hc, t, hw.timer);
hc->hw.timer.expires = jiffies + 75;
/* WD RESET */
/*
* timer callback for D-chan busy resolution. Currently no function
*/
static void
-hfcpci_dbusy_timer(struct hfc_pci *hc)
+hfcpci_dbusy_timer(struct timer_list *t)
{
}
inithfcpci(struct hfc_pci *hc)
{
printk(KERN_DEBUG "inithfcpci: entered\n");
- setup_timer(&hc->dch.timer, (void *)hfcpci_dbusy_timer,
- (long)&hc->dch);
+ timer_setup(&hc->dch.timer, hfcpci_dbusy_timer, 0);
hc->chanlimit = 2;
mode_hfcpci(&hc->bch[0], 1, -1);
mode_hfcpci(&hc->bch[1], 2, -1);
Write_hfc(hc, HFCPCI_INT_M1, hc->hw.int_m1);
/* At this point the needed PCI config is done */
/* fifos are still not enabled */
- setup_timer(&hc->hw.timer, (void *)hfcpci_Timer, (long)hc);
+ timer_setup(&hc->hw.timer, hfcpci_Timer, 0);
/* default PCM master */
test_and_set_bit(HFC_CFG_MASTER, &hc->cfg);
return 0;
EXPORT_SYMBOL(mISDNisar_irq);
static void
-ftimer_handler(unsigned long data)
+ftimer_handler(struct timer_list *t)
{
- struct isar_ch *ch = (struct isar_ch *)data;
+ struct isar_ch *ch = from_timer(ch, t, ftimer);
pr_debug("%s: ftimer flags %lx\n", ch->is->name, ch->bch.Flags);
test_and_clear_bit(FLG_FTI_RUN, &ch->bch.Flags);
}
if (isar->version != 1)
return -EINVAL;
- setup_timer(&isar->ch[0].ftimer, &ftimer_handler,
- (long)&isar->ch[0]);
+ timer_setup(&isar->ch[0].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[0].bch.Flags);
- setup_timer(&isar->ch[1].ftimer, &ftimer_handler,
- (long)&isar->ch[1]);
+ timer_setup(&isar->ch[1].ftimer, ftimer_handler, 0);
test_and_set_bit(FLG_INITIALIZED, &isar->ch[1].bch.Flags);
return 0;
}
static int isdn_timer_cnt3 = 0;
static void
-isdn_timer_funct(ulong dummy)
+isdn_timer_funct(struct timer_list *unused)
{
int tf = dev->tflags;
if (tf & ISDN_TIMER_FAST) {
printk(KERN_WARNING "isdn: Could not allocate device-struct.\n");
return -EIO;
}
- init_timer(&dev->timer);
- dev->timer.function = isdn_timer_funct;
+ timer_setup(&dev->timer, isdn_timer_funct, 0);
spin_lock_init(&dev->lock);
spin_lock_init(&dev->timerlock);
#ifdef MODULE
/* called via cisco_timer.function */
static void
-isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
+isdn_net_ciscohdlck_slarp_send_keepalive(struct timer_list *t)
{
- isdn_net_local *lp = (isdn_net_local *) data;
+ isdn_net_local *lp = from_timer(lp, t, cisco_timer);
struct sk_buff *skb;
unsigned char *p;
unsigned long last_cisco_myseq = lp->cisco_myseq;
/* send slarp request because interface/seq.no.s reset */
isdn_net_ciscohdlck_slarp_send_request(lp);
- init_timer(&lp->cisco_timer);
- lp->cisco_timer.data = (unsigned long) lp;
- lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
+ timer_setup(&lp->cisco_timer,
+ isdn_net_ciscohdlck_slarp_send_keepalive, 0);
lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
add_timer(&lp->cisco_timer);
}
static void isdn_ppp_ccp_reset_free(struct ippp_struct *is);
static void isdn_ppp_ccp_reset_free_state(struct ippp_struct *is,
unsigned char id);
-static void isdn_ppp_ccp_timer_callback(unsigned long closure);
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t);
static struct ippp_ccp_reset_state *isdn_ppp_ccp_reset_alloc_state(struct ippp_struct *is,
unsigned char id);
static void isdn_ppp_ccp_reset_trans(struct ippp_struct *is,
/* The timer callback function which is called when a ResetReq has timed out,
aka has never been answered by a ResetAck */
-static void isdn_ppp_ccp_timer_callback(unsigned long closure)
+static void isdn_ppp_ccp_timer_callback(struct timer_list *t)
{
struct ippp_ccp_reset_state *rs =
- (struct ippp_ccp_reset_state *)closure;
+ from_timer(rs, t, timer);
if (!rs) {
printk(KERN_ERR "ippp_ccp: timer cb with zero closure.\n");
rs->state = CCPResetIdle;
rs->is = is;
rs->id = id;
- setup_timer(&rs->timer, isdn_ppp_ccp_timer_callback,
- (unsigned long)rs);
+ timer_setup(&rs->timer, isdn_ppp_ccp_timer_callback, 0);
is->reset->rs[id] = rs;
}
return rs;
* into the tty's buffer.
*/
static void
-isdn_tty_modem_do_ncarrier(unsigned long data)
+isdn_tty_modem_do_ncarrier(struct timer_list *t)
{
- modem_info *info = (modem_info *) data;
+ modem_info *info = from_timer(info, t, nc_timer);
isdn_tty_modem_result(RESULT_NO_CARRIER, info);
}
info->isdn_channel = -1;
info->drv_index = -1;
info->xmit_size = ISDN_SERIAL_XMIT_SIZE;
- setup_timer(&info->nc_timer, isdn_tty_modem_do_ncarrier,
- (unsigned long)info);
+ timer_setup(&info->nc_timer, isdn_tty_modem_do_ncarrier, 0);
skb_queue_head_init(&info->xmit_queue);
#ifdef CONFIG_ISDN_AUDIO
skb_queue_head_init(&info->dtmf_queue);
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(1000));
}
-void pblk_write_timer_fn(unsigned long data)
+void pblk_write_timer_fn(struct timer_list *t)
{
- struct pblk *pblk = (struct pblk *)data;
+ struct pblk *pblk = from_timer(pblk, t, wtimer);
/* kick the write thread every tick to flush outstanding data */
pblk_write_kick(pblk);
goto next_gc_group;
}
-static void pblk_gc_timer(unsigned long data)
+static void pblk_gc_timer(struct timer_list *t)
{
- struct pblk *pblk = (struct pblk *)data;
+ struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);
pblk_gc_kick(pblk);
}
goto fail_free_writer_kthread;
}
- setup_timer(&gc->gc_timer, pblk_gc_timer, (unsigned long)pblk);
+ timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));
gc->gc_active = 0;
static int pblk_writer_init(struct pblk *pblk)
{
- setup_timer(&pblk->wtimer, pblk_write_timer_fn, (unsigned long)pblk);
+ timer_setup(&pblk->wtimer, pblk_write_timer_fn, 0);
mod_timer(&pblk->wtimer, jiffies + msecs_to_jiffies(100));
pblk->writer_ts = kthread_create(pblk_write_ts, pblk, "pblk-writer-t");
return rl->rb_max_io;
}
-static void pblk_rl_u_timer(unsigned long data)
+static void pblk_rl_u_timer(struct timer_list *t)
{
- struct pblk_rl *rl = (struct pblk_rl *)data;
+ struct pblk_rl *rl = from_timer(rl, t, u_timer);
/* Release user I/O state. Protect from GC */
smp_store_release(&rl->rb_user_active, 0);
atomic_set(&rl->rb_gc_cnt, 0);
atomic_set(&rl->rb_space, -1);
- setup_timer(&rl->u_timer, pblk_rl_u_timer, (unsigned long)rl);
+ timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);
rl->rb_user_active = 0;
rl->rb_gc_active = 0;
* pblk write thread
*/
int pblk_write_ts(void *data);
-void pblk_write_timer_fn(unsigned long data);
+void pblk_write_timer_fn(struct timer_list *t);
void pblk_write_should_kick(struct pblk *pblk);
/*
/*
* timed GC every interval.
*/
-static void rrpc_gc_timer(unsigned long data)
+static void rrpc_gc_timer(struct timer_list *t)
{
- struct rrpc *rrpc = (struct rrpc *)data;
+ struct rrpc *rrpc = from_timer(rrpc, t, gc_timer);
rrpc_gc_kick(rrpc);
mod_timer(&rrpc->gc_timer, jiffies + msecs_to_jiffies(10));
if (!rrpc->kgc_wq)
return -ENOMEM;
- setup_timer(&rrpc->gc_timer, rrpc_gc_timer, (unsigned long)rrpc);
+ timer_setup(&rrpc->gc_timer, rrpc_gc_timer, 0);
return 0;
}
if (b == -1)
goto err;
- k->ptr[i] = PTR(ca->buckets[b].gen,
+ k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
bucket_to_sector(c, b),
ca->sb.nr_this_dev);
c->shrink.scan_objects = bch_mca_scan;
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
- register_shrinker(&c->shrink);
+
+ if (register_shrinker(&c->shrink))
+ pr_warn("bcache: %s: could not register shrinker\n",
+ __func__);
return 0;
}
return false;
for (i = 0; i < KEY_PTRS(l); i++)
- if (l->ptr[i] + PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
+ if (l->ptr[i] + MAKE_PTR(0, KEY_SIZE(l), 0) != r->ptr[i] ||
PTR_BUCKET_NR(b->c, l, i) != PTR_BUCKET_NR(b->c, r, i))
return false;
* find a sequence of buckets with valid journal entries
*/
for (i = 0; i < ca->sb.njournal_buckets; i++) {
+ /*
+ * We must try the index l with ZERO first, for
+ * correctness: the journal buckets form a circular
+ * buffer which might have wrapped around.
+ */
l = (i * 2654435769U) % ca->sb.njournal_buckets;
if (test_bit(l, bitmap))
continue;
ja->cur_idx = next;
- k->ptr[n++] = PTR(0,
+ k->ptr[n++] = MAKE_PTR(0,
bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca->sb.nr_this_dev);
}
{
struct search *s = container_of(cl, struct search, cl);
struct bio *bio = &s->bio.bio;
- struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
/*
- * If cache device is dirty (dc->has_dirty is non-zero), then
- * recovery a failed read request from cached device may get a
- * stale data back. So read failure recovery is only permitted
- * when cache device is clean.
+ * If the read request hit dirty data (s->read_dirty_data is true),
+ * then recovering a failed read from the cached device may return
+ * stale data. So read failure recovery is only permitted when the
+ * read request hit clean data in the cache device, or when a cache
+ * read race happened.
*/
- if (s->recoverable &&
- (dc && !atomic_read(&dc->has_dirty))) {
+ if (s->recoverable && !s->read_dirty_data) {
/* Retry from the backing device: */
trace_bcache_read_retry(s->orig_bio);
sizeof(struct saa7146_buf),
file, &dev->v4l2_lock);
- vv->vbi_read_timeout.function = (TIMER_FUNC_TYPE)vbi_read_timeout;
+ vv->vbi_read_timeout.function = vbi_read_timeout;
vv->vbi_read_timeout_file = file;
/* initialize the brs */
}
}
-static void viu_vid_timeout(unsigned long data)
+static void viu_vid_timeout(struct timer_list *t)
{
- struct viu_dev *dev = (struct viu_dev *)data;
+ struct viu_dev *dev = from_timer(dev, t, vidq.timeout);
struct viu_buf *buf;
struct viu_dmaqueue *vidq = &dev->vidq;
viu_dev->decoder = v4l2_i2c_new_subdev(&viu_dev->v4l2_dev, ad,
"saa7113", VIU_VIDEO_DECODER_ADDR, NULL);
- setup_timer(&viu_dev->vidq.timeout, viu_vid_timeout,
- (unsigned long)viu_dev);
+ timer_setup(&viu_dev->vidq.timeout, viu_vid_timeout, 0);
viu_dev->std = V4L2_STD_NTSC_M;
viu_dev->first = 1;
}
}
-static void s5p_mfc_watchdog(unsigned long arg)
+static void s5p_mfc_watchdog(struct timer_list *t)
{
- struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
+ struct s5p_mfc_dev *dev = from_timer(dev, t, watchdog_timer);
if (test_bit(0, &dev->hw_lock))
atomic_inc(&dev->watchdog_cnt);
dev->hw_lock = 0;
INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
atomic_set(&dev->watchdog_cnt, 0);
- init_timer(&dev->watchdog_timer);
- dev->watchdog_timer.data = (unsigned long)dev;
- dev->watchdog_timer.function = s5p_mfc_watchdog;
+ timer_setup(&dev->watchdog_timer, s5p_mfc_watchdog, 0);
ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
if (ret)
#define FIFO_LEN 1024
-static void c8sectpfe_timer_interrupt(unsigned long ac8sectpfei)
+static void c8sectpfe_timer_interrupt(struct timer_list *t)
{
- struct c8sectpfei *fei = (struct c8sectpfei *)ac8sectpfei;
+ struct c8sectpfei *fei = from_timer(fei, t, timer);
struct channel_info *channel;
int chan_num;
}
/* Setup timer interrupt */
- setup_timer(&fei->timer, c8sectpfe_timer_interrupt,
- (unsigned long)fei);
+ timer_setup(&fei->timer, c8sectpfe_timer_interrupt, 0);
mutex_init(&fei->lock);
schedule_irq(dev, ctx->transtime);
}
-static void device_isr(unsigned long priv)
+static void device_isr(struct timer_list *t)
{
- struct vim2m_dev *vim2m_dev = (struct vim2m_dev *)priv;
+ struct vim2m_dev *vim2m_dev = from_timer(vim2m_dev, t, timer);
struct vim2m_ctx *curr_ctx;
struct vb2_v4l2_buffer *src_vb, *dst_vb;
unsigned long flags;
v4l2_info(&dev->v4l2_dev,
"Device registered as /dev/video%d\n", vfd->num);
- setup_timer(&dev->timer, device_isr, (long)dev);
+ timer_setup(&dev->timer, device_isr, 0);
platform_set_drvdata(pdev, dev);
dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
static void au0828_restart_dvb_streaming(struct work_struct *work);
-static void au0828_bulk_timeout(unsigned long data)
+static void au0828_bulk_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, bulk_timeout);
dprintk(1, "%s called\n", __func__);
dev->bulk_timeout_running = 0;
return ret;
}
- dev->bulk_timeout.function = au0828_bulk_timeout;
- dev->bulk_timeout.data = (unsigned long) dev;
- init_timer(&dev->bulk_timeout);
+ timer_setup(&dev->bulk_timeout, au0828_bulk_timeout, 0);
return 0;
}
/* This function ensures that video frames continue to be delivered even if
the ITU-656 input isn't receiving any data (thereby preventing applications
such as tvtime from hanging) */
-static void au0828_vid_buffer_timeout(unsigned long data)
+static void au0828_vid_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, vid_timeout);
struct au0828_dmaqueue *dma_q = &dev->vidq;
struct au0828_buffer *buf;
unsigned char *vid_data;
spin_unlock_irqrestore(&dev->slock, flags);
}
-static void au0828_vbi_buffer_timeout(unsigned long data)
+static void au0828_vbi_buffer_timeout(struct timer_list *t)
{
- struct au0828_dev *dev = (struct au0828_dev *) data;
+ struct au0828_dev *dev = from_timer(dev, t, vbi_timeout);
struct au0828_dmaqueue *dma_q = &dev->vbiq;
struct au0828_buffer *buf;
unsigned char *vbi_data;
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vbiq.active);
- setup_timer(&dev->vid_timeout, au0828_vid_buffer_timeout,
- (unsigned long)dev);
- setup_timer(&dev->vbi_timeout, au0828_vbi_buffer_timeout,
- (unsigned long)dev);
+ timer_setup(&dev->vid_timeout, au0828_vid_buffer_timeout, 0);
+ timer_setup(&dev->vbi_timeout, au0828_vbi_buffer_timeout, 0);
dev->width = NTSC_STD_W;
dev->height = NTSC_STD_H;
dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n",
data, size, dma->nr_pages);
- err = get_user_pages(data & PAGE_MASK, dma->nr_pages,
+ err = get_user_pages_longterm(data & PAGE_MASK, dma->nr_pages,
flags, dma->pages, NULL);
if (err != dma->nr_pages) {
dma->nr_pages = (err >= 0) ? err : 0;
- dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages);
+ dprintk(1, "get_user_pages_longterm: err=%d [%d]\n", err,
+ dma->nr_pages);
return err < 0 ? err : -EINVAL;
}
return 0;
return 0;
}
-static void msb_cache_flush_timer(unsigned long data)
+static void msb_cache_flush_timer(struct timer_list *t)
{
- struct msb_data *msb = (struct msb_data *)data;
+ struct msb_data *msb = from_timer(msb, t, cache_flush_timer);
msb->need_flush_cache = true;
queue_work(msb->io_queue, &msb->io_work);
}
static int msb_cache_init(struct msb_data *msb)
{
- setup_timer(&msb->cache_flush_timer, msb_cache_flush_timer,
- (unsigned long)msb);
+ timer_setup(&msb->cache_flush_timer, msb_cache_flush_timer, 0);
if (!msb->cache)
msb->cache = kzalloc(msb->block_size, GFP_KERNEL);
},
};
-static void rtsx_usb_sg_timed_out(unsigned long data)
+static void rtsx_usb_sg_timed_out(struct timer_list *t)
{
- struct rtsx_ucr *ucr = (struct rtsx_ucr *)data;
+ struct rtsx_ucr *ucr = from_timer(ucr, t, sg_timer);
dev_dbg(&ucr->pusb_intf->dev, "%s: sg transfer timed out", __func__);
usb_sg_cancel(&ucr->current_sg);
goto out_init_fail;
/* initialize USB SG transfer timer */
- setup_timer(&ucr->sg_timer, rtsx_usb_sg_timed_out, (unsigned long) ucr);
+ timer_setup(&ucr->sg_timer, rtsx_usb_sg_timed_out, 0);
ret = mfd_add_hotplug_devices(&intf->dev, rtsx_usb_cells,
ARRAY_SIZE(rtsx_usb_cells));
return err;
}
-static void mmc_retune_timer(unsigned long data)
+static void mmc_retune_timer(struct timer_list *t)
{
- struct mmc_host *host = (struct mmc_host *)data;
+ struct mmc_host *host = from_timer(host, t, retune_timer);
mmc_retune_needed(host);
}
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
INIT_DELAYED_WORK(&host->sdio_irq_work, sdio_irq_work);
- setup_timer(&host->retune_timer, mmc_retune_timer, (unsigned long)host);
+ timer_setup(&host->retune_timer, mmc_retune_timer, 0);
/*
* By default, hosts do not support SGIO or large requests.
pr_debug("MTDSB: New superblock for device %d (\"%s\")\n",
mtd->index, mtd->name);
- ret = fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ ret = fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
if (ret < 0) {
deactivate_locked_super(sb);
return ERR_PTR(ret);
}
/* go */
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
return dget(sb->s_root);
/* new mountpoint for an already mounted superblock */
not_an_MTD_device:
#endif /* CONFIG_BLOCK */
- if (!(flags & MS_SILENT))
+ if (!(flags & SB_SILENT))
printk(KERN_NOTICE
"MTD: Attempt to mount non-MTD device \"%s\"\n",
dev_name);
/* flush timer, runs a second after last write */
-static void sm_cache_flush_timer(unsigned long data)
+static void sm_cache_flush_timer(struct timer_list *t)
{
- struct sm_ftl *ftl = (struct sm_ftl *)data;
+ struct sm_ftl *ftl = from_timer(ftl, t, timer);
queue_work(cache_flush_workqueue, &ftl->flush_work);
}
mutex_init(&ftl->mutex);
- setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl);
+ timer_setup(&ftl->timer, sm_cache_flush_timer, 0);
INIT_WORK(&ftl->flush_work, sm_cache_flush_work);
init_completion(&ftl->erase_completion);
return -EINVAL;
bond_opt_initval(&newval,
- nla_get_be64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
+ nla_get_u64(data[IFLA_BOND_AD_ACTOR_SYSTEM]));
err = __bond_opt_set(bond, BOND_OPT_AD_ACTOR_SYSTEM, &newval);
if (err)
return err;
static LIST_HEAD(cfhsi_list);
-static void cfhsi_inactivity_tout(unsigned long arg)
+static void cfhsi_inactivity_tout(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, inactivity_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
schedule_work(&cfhsi->out_of_sync_work);
}
-static void cfhsi_rx_slowpath(unsigned long arg)
+static void cfhsi_rx_slowpath(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, rx_slowpath_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
wake_up_interruptible(&cfhsi->wake_down_wait);
}
-static void cfhsi_aggregation_tout(unsigned long arg)
+static void cfhsi_aggregation_tout(struct timer_list *t)
{
- struct cfhsi *cfhsi = (struct cfhsi *)arg;
+ struct cfhsi *cfhsi = from_timer(cfhsi, t, aggregation_timer);
netdev_dbg(cfhsi->ndev, "%s.\n",
__func__);
init_waitqueue_head(&cfhsi->flush_fifo_wait);
/* Setup the inactivity timer. */
- setup_timer(&cfhsi->inactivity_timer, cfhsi_inactivity_tout,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->inactivity_timer, cfhsi_inactivity_tout, 0);
/* Setup the slowpath RX timer. */
- setup_timer(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->rx_slowpath_timer, cfhsi_rx_slowpath, 0);
/* Setup the aggregation timer. */
- setup_timer(&cfhsi->aggregation_timer, cfhsi_aggregation_tout,
- (unsigned long)cfhsi);
+ timer_setup(&cfhsi->aggregation_timer, cfhsi_aggregation_tout, 0);
/* Activate HSI interface. */
res = cfhsi->ops->cfhsi_up(cfhsi->ops);
static unsigned int network_tr_ctrl_shadow = 0;
+/* Timers */
+static void e100_check_speed(struct timer_list *unused);
+static void e100_clear_network_leds(struct timer_list *unused);
+static void e100_check_duplex(struct timer_list *unused);
+static DEFINE_TIMER(speed_timer, e100_check_speed);
+static DEFINE_TIMER(clear_led_timer, e100_clear_network_leds);
+static DEFINE_TIMER(duplex_timer, e100_check_duplex);
+static struct net_device *timer_dev;
+
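With static timers, DEFINE_TIMER() now takes just the name and the callback; there is no per-timer data word any more, which is why the driver grows a file-scope timer_dev pointer to stand in for the old timer.data. The post-conversion shape of the macro, sketched for a single hypothetical global device:

	static void my_poll(struct timer_list *unused);
	static DEFINE_TIMER(my_timer, my_poll);
	static struct net_device *my_dev;	/* replaces timer.data */

	static void my_poll(struct timer_list *unused)
	{
		struct net_device *dev = my_dev;

		/* ... poll dev ... */
		mod_timer(&my_timer, jiffies + HZ);
	}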
/* Network speed indication. */
-static DEFINE_TIMER(speed_timer, NULL);
-static DEFINE_TIMER(clear_led_timer, NULL);
static int current_speed; /* Speed read from transceiver */
static int current_speed_selection; /* Speed selected by user */
static unsigned long led_next_time;
static int rx_queue_len;
/* Duplex */
-static DEFINE_TIMER(duplex_timer, NULL);
static int full_duplex;
static enum duplex current_duplex;
static void update_tx_stats(struct net_device_stats *);
static int e100_probe_transceiver(struct net_device* dev);
-static void e100_check_speed(unsigned long priv);
static void e100_set_speed(struct net_device* dev, unsigned long speed);
-static void e100_check_duplex(unsigned long priv);
static void e100_set_duplex(struct net_device* dev, enum duplex);
static void e100_negotiate(struct net_device* dev);
static unsigned char e100_receive_mdio_bit(void);
static void e100_reset_transceiver(struct net_device* net);
-static void e100_clear_network_leds(unsigned long dummy);
static void e100_set_network_leds(int active);
static const struct ethtool_ops e100_ethtool_ops;
current_speed = 10;
current_speed_selection = 0; /* Auto */
speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL;
- speed_timer.data = (unsigned long)dev;
- speed_timer.function = e100_check_speed;
-
- clear_led_timer.function = e100_clear_network_leds;
- clear_led_timer.data = (unsigned long)dev;
full_duplex = 0;
current_duplex = autoneg;
duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL;
- duplex_timer.data = (unsigned long)dev;
- duplex_timer.function = e100_check_duplex;
+
+ timer_dev = dev;
/* Initialize mii interface */
np->mii_if.phy_id_mask = 0x1f;
}
#endif
static void
-e100_check_speed(unsigned long priv)
+e100_check_speed(struct timer_list *unused)
{
- struct net_device* dev = (struct net_device*)priv;
+ struct net_device* dev = timer_dev;
struct net_local *np = netdev_priv(dev);
static int led_initiated = 0;
unsigned long data;
}
static void
-e100_check_duplex(unsigned long priv)
+e100_check_duplex(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *)priv;
+ struct net_device *dev = timer_dev;
struct net_local *np = netdev_priv(dev);
int old_duplex;
}
static void
-e100_clear_network_leds(unsigned long dummy)
+e100_clear_network_leds(struct timer_list *unused)
{
- struct net_device *dev = (struct net_device *)dummy;
+ struct net_device *dev = timer_dev;
struct net_local *np = netdev_priv(dev);
spin_lock(&np->led_lock);
reg = reg_readl(priv, REG_SPHY_CNTRL);
if (enable) {
reg |= PHY_RESET;
- reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | CK25_DIS);
+ reg &= ~(EXT_PWR_DOWN | IDDQ_BIAS | IDDQ_GLOBAL_PWR | CK25_DIS);
reg_writel(priv, reg, REG_SPHY_CNTRL);
udelay(21);
reg = reg_readl(priv, REG_SPHY_CNTRL);
mutex_unlock(&chip->reg_lock);
}
-static void mv88e6xxx_phy_ppu_reenable_timer(unsigned long _ps)
+static void mv88e6xxx_phy_ppu_reenable_timer(struct timer_list *t)
{
- struct mv88e6xxx_chip *chip = (void *)_ps;
+ struct mv88e6xxx_chip *chip = from_timer(chip, t, ppu_timer);
schedule_work(&chip->ppu_work);
}
{
mutex_init(&chip->ppu_mutex);
INIT_WORK(&chip->ppu_work, mv88e6xxx_phy_ppu_reenable_work);
- setup_timer(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer,
- (unsigned long)chip);
+ timer_setup(&chip->ppu_timer, mv88e6xxx_phy_ppu_reenable_timer, 0);
}
static void mv88e6xxx_phy_ppu_state_destroy(struct mv88e6xxx_chip *chip)
static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
-static void eql_timer(unsigned long param)
+static void eql_timer(struct timer_list *t)
{
- equalizer_t *eql = (equalizer_t *) param;
+ equalizer_t *eql = from_timer(eql, t, timer);
struct list_head *this, *tmp, *head;
spin_lock(&eql->queue.lock);
{
equalizer_t *eql = netdev_priv(dev);
- setup_timer(&eql->timer, eql_timer, (unsigned long)eql);
+ timer_setup(&eql->timer, eql_timer, 0);
eql->timer.expires = jiffies + EQL_DEFAULT_RESCHED_IVAL;
spin_lock_init(&eql->queue.lock);
return;
}
-static void tx_reclaim_skb_timeout(unsigned long lp)
+static void tx_reclaim_skb_timeout(struct timer_list *t)
{
- tx_reclaim_skb((struct bfin_mac_local *)lp);
+ struct bfin_mac_local *lp = from_timer(lp, t, tx_reclaim_timer);
+
+ tx_reclaim_skb(lp);
}
static int bfin_mac_hard_start_xmit(struct sk_buff *skb,
ndev->netdev_ops = &bfin_mac_netdev_ops;
ndev->ethtool_ops = &bfin_mac_ethtool_ops;
- setup_timer(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout,
- (unsigned long)lp);
+ timer_setup(&lp->tx_reclaim_timer, tx_reclaim_skb_timeout, 0);
lp->flags = 0;
netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
* The routine called when the error timer expires, to track the number of
* recurring errors.
*/
-static void et131x_error_timer_handler(unsigned long data)
+static void et131x_error_timer_handler(struct timer_list *t)
{
- struct et131x_adapter *adapter = (struct et131x_adapter *)data;
+ struct et131x_adapter *adapter = from_timer(adapter, t, error_timer);
struct phy_device *phydev = adapter->netdev->phydev;
if (et1310_in_phy_coma(adapter)) {
int result;
/* Start the timer to track NIC errors */
- setup_timer(&adapter->error_timer, et131x_error_timer_handler,
- (unsigned long)adapter);
+ timer_setup(&adapter->error_timer, et131x_error_timer_handler, 0);
adapter->error_timer.expires = jiffies +
msecs_to_jiffies(TX_ERROR_PERIOD);
add_timer(&adapter->error_timer);
(netdev->features & GENMASK_ULL(63, 32)) >> 32;
}
-static void ena_timer_service(unsigned long data)
+static void ena_timer_service(struct timer_list *t)
{
- struct ena_adapter *adapter = (struct ena_adapter *)data;
+ struct ena_adapter *adapter = from_timer(adapter, t, timer_service);
u8 *debug_area = adapter->ena_dev->host_attr.debug_area_virt_addr;
struct ena_admin_host_info *host_info =
adapter->ena_dev->host_attr.host_info;
ena_update_hints(adapter, &get_feat_ctx.hw_hints);
- setup_timer(&adapter->timer_service, ena_timer_service,
- (unsigned long)adapter);
+ timer_setup(&adapter->timer_service, ena_timer_service, 0);
mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
dev_info(&pdev->dev, "%s found at mem %lx, mac addr %pM Queues %d\n",
return 0;
}
-static void aq_nic_service_timer_cb(unsigned long param)
+static void aq_nic_service_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_nic_s *self = from_timer(self, t, service_timer);
struct net_device *ndev = aq_nic_get_ndev(self);
int err = 0;
unsigned int i = 0U;
jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
}
-static void aq_nic_polling_timer_cb(unsigned long param)
+static void aq_nic_polling_timer_cb(struct timer_list *t)
{
- struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_nic_s *self = from_timer(self, t, polling_timer);
struct aq_vec_s *aq_vec = NULL;
unsigned int i = 0U;
err = aq_nic_update_interrupt_moderation_settings(self);
if (err)
goto err_exit;
- setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
- (unsigned long)self);
+ timer_setup(&self->service_timer, aq_nic_service_timer_cb, 0);
mod_timer(&self->service_timer, jiffies +
AQ_CFG_SERVICE_TIMER_INTERVAL);
if (self->aq_nic_cfg.is_polling) {
- setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
- (unsigned long)self);
+ timer_setup(&self->polling_timer, aq_nic_polling_timer_cb, 0);
mod_timer(&self->polling_timer, jiffies +
AQ_CFG_POLLING_TIMER_INTERVAL);
} else {
* atl1c_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1c_phy_config(unsigned long data)
+static void atl1c_phy_config(struct timer_list *t)
{
- struct atl1c_adapter *adapter = (struct atl1c_adapter *) data;
+ struct atl1c_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1c_hw *hw = &adapter->hw;
unsigned long flags;
adapter->mii.phy_id_mask = 0x1f;
adapter->mii.reg_num_mask = MDIO_CTRL_REG_MASK;
netif_napi_add(netdev, &adapter->napi, atl1c_clean, 64);
- setup_timer(&adapter->phy_config_timer, atl1c_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1c_phy_config, 0);
/* setup the private structure */
err = atl1c_sw_init(adapter);
if (err) {
* atl1e_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1e_phy_config(unsigned long data)
+static void atl1e_phy_config(struct timer_list *t)
{
- struct atl1e_adapter *adapter = (struct atl1e_adapter *) data;
+ struct atl1e_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1e_hw *hw = &adapter->hw;
unsigned long flags;
netif_napi_add(netdev, &adapter->napi, atl1e_clean, 64);
- setup_timer(&adapter->phy_config_timer, atl1e_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1e_phy_config, 0);
/* get user settings */
atl1e_check_options(adapter);
* atl1_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl1_phy_config(unsigned long data)
+static void atl1_phy_config(struct timer_list *t)
{
- struct atl1_adapter *adapter = (struct atl1_adapter *)data;
+ struct atl1_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl1_hw *hw = &adapter->hw;
unsigned long flags;
/* assume we have no link for now */
netif_carrier_off(netdev);
- setup_timer(&adapter->phy_config_timer, atl1_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl1_phy_config, 0);
adapter->phy_timer_pending = false;
INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);
* atl2_watchdog - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl2_watchdog(unsigned long data)
+static void atl2_watchdog(struct timer_list *t)
{
- struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_adapter *adapter = from_timer(adapter, t, watchdog_timer);
if (!test_bit(__ATL2_DOWN, &adapter->flags)) {
u32 drop_rxd, drop_rxs;
* atl2_phy_config - Timer Call-back
* @data: pointer to netdev cast into an unsigned long
*/
-static void atl2_phy_config(unsigned long data)
+static void atl2_phy_config(struct timer_list *t)
{
- struct atl2_adapter *adapter = (struct atl2_adapter *) data;
+ struct atl2_adapter *adapter = from_timer(adapter, t,
+ phy_config_timer);
struct atl2_hw *hw = &adapter->hw;
unsigned long flags;
atl2_check_options(adapter);
- setup_timer(&adapter->watchdog_timer, atl2_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, atl2_watchdog, 0);
- setup_timer(&adapter->phy_config_timer, atl2_phy_config,
- (unsigned long)adapter);
+ timer_setup(&adapter->phy_config_timer, atl2_phy_config, 0);
INIT_WORK(&adapter->reset_task, atl2_reset_task);
INIT_WORK(&adapter->link_chg_task, atl2_link_chg_task);
}
}
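
Every hunk in this run is the same mechanical conversion to the new timer API:
the callback now receives the struct timer_list itself, recovers its container
with from_timer() (a container_of() wrapper), and timer_setup() replaces the
setup_timer() call plus its (unsigned long) cast. A minimal sketch of the
pattern, with illustrative names not taken from any of the drivers above::

    #include <linux/timer.h>

    struct my_adapter {
            struct timer_list phy_timer;    /* timer embedded in the private struct */
            int phy_retries;
    };

    /* New-style callback: receives the timer, not an opaque unsigned long */
    static void my_phy_timer_cb(struct timer_list *t)
    {
            /* from_timer(var, timer, member) is container_of() underneath */
            struct my_adapter *adapter = from_timer(adapter, t, phy_timer);

            adapter->phy_retries++;
            mod_timer(&adapter->phy_timer, jiffies + HZ);   /* re-arm */
    }

    static void my_adapter_init(struct my_adapter *adapter)
    {
            /* was: setup_timer(&adapter->phy_timer, cb, (unsigned long)adapter); */
            timer_setup(&adapter->phy_timer, my_phy_timer_cb, 0);
            mod_timer(&adapter->phy_timer, jiffies + HZ);
    }
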
-static void b44_timer(unsigned long __opaque)
+static void b44_timer(struct timer_list *t)
{
- struct b44 *bp = (struct b44 *) __opaque;
+ struct b44 *bp = from_timer(bp, t, timer);
spin_lock_irq(&bp->lock);
goto out;
}
- setup_timer(&bp->timer, b44_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, b44_timer, 0);
bp->timer.expires = jiffies + HZ;
add_timer(&bp->timer);
}
static void
-bnx2_timer(unsigned long data)
+bnx2_timer(struct timer_list *t)
{
- struct bnx2 *bp = (struct bnx2 *) data;
+ struct bnx2 *bp = from_timer(bp, t, timer);
if (!netif_running(bp->dev))
return;
bnx2_set_default_link(bp);
bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
- setup_timer(&bp->timer, bnx2_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnx2_timer, 0);
bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
#ifdef BCM_CNIC
bp->fw_drv_pulse_wr_seq);
}
-static void bnx2x_timer(unsigned long data)
+static void bnx2x_timer(struct timer_list *t)
{
- struct bnx2x *bp = (struct bnx2x *) data;
+ struct bnx2x *bp = from_timer(bp, t, timer);
if (!netif_running(bp->dev))
return;
bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
- setup_timer(&bp->timer, bnx2x_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnx2x_timer, 0);
bp->timer.expires = jiffies + bp->current_interval;
if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
}
#endif
-static void bnxt_timer(unsigned long data)
+static void bnxt_timer(struct timer_list *t)
{
- struct bnxt *bp = (struct bnxt *)data;
+ struct bnxt *bp = from_timer(bp, t, timer);
struct net_device *dev = bp->dev;
if (!netif_running(dev))
bnxt_init_dflt_coal(bp);
- setup_timer(&bp->timer, bnxt_timer, (unsigned long)bp);
+ timer_setup(&bp->timer, bnxt_timer, 0);
bp->current_interval = BNXT_TIMER_INTERVAL;
clear_bit(BNXT_STATE_OPEN, &bp->state);
/* Read A2 portion of the EEPROM */
if (length) {
start -= ETH_MODULE_SFF_8436_LEN;
- bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1, start,
- length, data);
+ rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
+ start, length, data);
}
return rc;
}
}
}
-static void tg3_timer(unsigned long __opaque)
+static void tg3_timer(struct timer_list *t)
{
- struct tg3 *tp = (struct tg3 *) __opaque;
+ struct tg3 *tp = from_timer(tp, t, timer);
spin_lock(&tp->lock);
tp->asf_multiplier = (HZ / tp->timer_offset) *
TG3_FW_UPDATE_FREQ_SEC;
- setup_timer(&tp->timer, tg3_timer, (unsigned long)tp);
+ timer_setup(&tp->timer, tg3_timer, 0);
}
static void tg3_timer_start(struct tg3 *tp)
/* Offload checksum calculation to HW */
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- hdr->csum_l3 = 1; /* Enable IP csum calculation */
hdr->l3_offset = skb_network_offset(skb);
hdr->l4_offset = skb_transport_offset(skb);
}
#ifdef CONFIG_RFS_ACCEL
-void enic_flow_may_expire(unsigned long data)
+void enic_flow_may_expire(struct timer_list *t)
{
- struct enic *enic = (struct enic *)data;
+ struct enic *enic = from_timer(enic, t, rfs_h.rfs_may_expire);
bool res;
int j;
#ifdef CONFIG_RFS_ACCEL
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
u16 rxq_index, u32 flow_id);
-void enic_flow_may_expire(unsigned long data);
+void enic_flow_may_expire(struct timer_list *t);
static inline void enic_rfs_timer_start(struct enic *enic)
{
- setup_timer(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire,
- (unsigned long)enic);
+ timer_setup(&enic->rfs_h.rfs_may_expire, enic_flow_may_expire, 0);
mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}
return work_done;
}
-static void enic_notify_timer(unsigned long data)
+static void enic_notify_timer(struct timer_list *t)
{
- struct enic *enic = (struct enic *)data;
+ struct enic *enic = from_timer(enic, t, notify_timer);
enic_notify_check(enic);
/* Setup notification timer, HW reset task, and wq locks
*/
- setup_timer(&enic->notify_timer, enic_notify_timer,
- (unsigned long)enic);
+ timer_setup(&enic->notify_timer, enic_notify_timer, 0);
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
rar_num = E1000_RAR_ENTRIES;
- /* Zero out the other 15 receive addresses. */
- e_dbg("Clearing RAR[1-15]\n");
+ /* Zero out the following 14 receive addresses. RAR[15] is for
+ * manageability
+ */
+ e_dbg("Clearing RAR[1-14]\n");
for (i = 1; i < rar_num; i++) {
E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
E1000_WRITE_FLUSH();
#define NVM_SIZE_MULTIPLIER 4096 /*multiplier for NVMS field */
#define E1000_FLASH_BASE_ADDR 0xE000 /*offset of NVM access regs */
#define E1000_CTRL_EXT_NVMVS 0x3 /*NVM valid sector */
-#define E1000_TARC0_CB_MULTIQ_3_REQ (1 << 28 | 1 << 29)
+#define E1000_TARC0_CB_MULTIQ_3_REQ 0x30000000
+#define E1000_TARC0_CB_MULTIQ_2_REQ 0x20000000
#define PCIE_ICH8_SNOOP_ALL PCIE_NO_SNOOP_ALL
#define E1000_ICH_RAR_ENTRIES 7
ew32(IOSFPC, reg_val);
reg_val = er32(TARC(0));
- /* SPT and KBL Si errata workaround to avoid Tx hang */
- reg_val &= ~BIT(28);
- reg_val |= BIT(29);
+ /* SPT and KBL Si errata workaround to avoid Tx hang.
+ * Dropping the number of outstanding requests from
+ * 3 to 2 in order to avoid a buffer overrun.
+ */
+ reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
+ reg_val |= E1000_TARC0_CB_MULTIQ_2_REQ;
ew32(TARC(0), reg_val);
}
}
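
The new masks are bit-for-bit equivalent to the removed open-coded sequence;
the named constants simply make the errata workaround self-documenting::

    /* 0x30000000 == BIT(29) | BIT(28), 0x20000000 == BIT(29), so:
     *
     *   old: reg_val &= ~BIT(28);   reg_val |= BIT(29);
     *   new: reg_val &= ~E1000_TARC0_CB_MULTIQ_3_REQ;
     *        reg_val |=  E1000_TARC0_CB_MULTIQ_2_REQ;
     *
     * Both leave bit 28 clear and bit 29 set, i.e. 2 outstanding
     * requests instead of 3.
     */
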
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->flags & FM10K_TXD_FLAG_DONE))
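
The same one-line substitution repeats below in the i40e, i40evf, igb, igbvf,
ixgbe and ixgbevf Tx-clean loops. The descriptor-ring reads that follow the
next_to_watch check are not all data-dependent on that pointer load, and
read_barrier_depends() orders only data-dependent loads (and is a no-op on
everything but Alpha), so a real read barrier is the correct primitive here.
A simplified sketch of the consumer side, with illustrative names::

    eop_desc = READ_ONCE(tx_buffer->next_to_watch);
    if (!eop_desc)
            break;          /* nothing pending at this slot */

    /* order *all* later descriptor-ring reads after the check above;
     * read_barrier_depends() would only order dependent loads */
    smp_rmb();

    if (!(eop_desc->wb.status & cpu_to_le32(TXD_STAT_DD)))
            break;          /* hardware has not finished this packet */
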
#define I40E_AQ_LEN 256
#define I40E_AQ_WORK_LIMIT 66 /* max number of VFs + a little */
#define I40E_MAX_USER_PRIORITY 8
-#define I40E_MAX_QUEUES_PER_CH 64
#define I40E_DEFAULT_TRAFFIC_CLASS BIT(0)
#define I40E_DEFAULT_MSG_ENABLE 4
#define I40E_QUEUE_WAIT_RETRY_LIMIT 10
hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
}
+ /* Newer versions of firmware require the NVM lock to be held while reading */
+ if (hw->aq.api_maj_ver > 1 ||
+ (hw->aq.api_maj_ver == 1 &&
+ hw->aq.api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
/* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
if (hw->aq.api_maj_ver > 1 ||
(hw->aq.api_maj_ver == 1 &&
hw->pf_id = (u8)(func_rid & 0x7);
if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
status = i40e_init_nvm(hw);
return status;
* we don't need to do the PF Reset
*/
if (!cnt) {
+ u32 reg2 = 0;
if (hw->revision_id == 0)
cnt = I40E_PF_RESET_WAIT_COUNT_A0;
else
reg = rd32(hw, I40E_PFGEN_CTRL);
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
+ reg2 = rd32(hw, I40E_GLGEN_RSTAT);
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ hw_dbg(hw, "Core reset upcoming. Skipping PF reset request.\n");
+ hw_dbg(hw, "I40E_GLGEN_RSTAT = 0x%x\n", reg2);
+ return I40E_ERR_NOT_READY;
+ }
usleep_range(1000, 2000);
}
if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
return aq_ret;
}
+/**
+ * i40e_set_promiscuous - set promiscuous mode
+ * @pf: board private structure
+ * @promisc: promisc on or off
+ *
+ * There are different ways of setting promiscuous mode on a PF depending on
+ * what state/environment we're in. This identifies and sets it appropriately.
+ * Returns 0 on success.
+ **/
+static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc)
+{
+ struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
+ struct i40e_hw *hw = &pf->hw;
+ i40e_status aq_ret;
+
+ if (vsi->type == I40E_VSI_MAIN &&
+ pf->lan_veb != I40E_NO_VEB &&
+ !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
+ /* set defport ON for Main VSI instead of true promisc
+ * this way we will get all unicast/multicast and VLAN
+ * promisc behavior but will not get VF or VMDq traffic
+ * replicated on the Main VSI.
+ */
+ if (promisc)
+ aq_ret = i40e_aq_set_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ else
+ aq_ret = i40e_aq_clear_default_vsi(hw,
+ vsi->seid,
+ NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "Set default VSI failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ } else {
+ aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL,
+ true);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set unicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
+ hw,
+ vsi->seid,
+ promisc, NULL);
+ if (aq_ret) {
+ dev_info(&pf->pdev->dev,
+ "set multicast promisc failed, err %s, aq_err %s\n",
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ }
+ }
+
+ if (!aq_ret)
+ pf->cur_promisc = promisc;
+
+ return aq_ret;
+}
+
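
Factoring the logic out serves two call sites, both visible further down: the
filter-sync path applies the state the stack just requested, and the
post-reset rebuild path replays the saved pf->cur_promisc so promiscuous mode
now survives a PF reset. In outline::

    /* i40e_sync_vsi_filters(): apply the newly computed state */
    aq_ret = i40e_set_promiscuous(pf, cur_promisc);

    /* i40e_rebuild(): restore whatever was in effect before the reset */
    ret = i40e_set_promiscuous(pf, pf->cur_promisc);
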
/**
* i40e_sync_vsi_filters - Update the VSI filter list to the HW
* @vsi: ptr to the VSI
cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
test_bit(__I40E_VSI_OVERFLOW_PROMISC,
vsi->state));
- if ((vsi->type == I40E_VSI_MAIN) &&
- (pf->lan_veb != I40E_NO_VEB) &&
- !(pf->flags & I40E_FLAG_MFP_ENABLED)) {
- /* set defport ON for Main VSI instead of true promisc
- * this way we will get all unicast/multicast and VLAN
- * promisc behavior but will not get VF or VMDq traffic
- * replicated on the Main VSI.
- */
- if (pf->cur_promisc != cur_promisc) {
- pf->cur_promisc = cur_promisc;
- if (cur_promisc)
- aq_ret =
- i40e_aq_set_default_vsi(hw,
- vsi->seid,
- NULL);
- else
- aq_ret =
- i40e_aq_clear_default_vsi(hw,
- vsi->seid,
- NULL);
- if (aq_ret) {
- retval = i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "Set default VSI failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- } else {
- aq_ret = i40e_aq_set_vsi_unicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL,
- true);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set unicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- aq_ret = i40e_aq_set_vsi_multicast_promiscuous(
- hw,
- vsi->seid,
- cur_promisc, NULL);
- if (aq_ret) {
- retval =
- i40e_aq_rc_to_posix(aq_ret,
- hw->aq.asq_last_status);
- dev_info(&pf->pdev->dev,
- "set multicast promisc failed on %s, err %s, aq_err %s\n",
- vsi_name,
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
- }
- }
- aq_ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
- vsi->seid,
- cur_promisc, NULL);
+ aq_ret = i40e_set_promiscuous(pf, cur_promisc);
if (aq_ret) {
retval = i40e_aq_rc_to_posix(aq_ret,
- pf->hw.aq.asq_last_status);
+ hw->aq.asq_last_status);
dev_info(&pf->pdev->dev,
- "set brdcast promisc failed, err %s, aq_err %s\n",
- i40e_stat_str(hw, aq_ret),
- i40e_aq_str(hw,
- hw->aq.asq_last_status));
+ "Setting promiscuous %s failed on %s, err %s aq_err %s\n",
+ cur_promisc ? "on" : "off",
+ vsi_name,
+ i40e_stat_str(hw, aq_ret),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
}
}
out:
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if the descriptor isn't done, no work yet to do */
if (!(eop_desc->cmd_type_offset_bsz &
return -EINVAL;
*reconfig_rss = false;
-
- if (num_queues > I40E_MAX_QUEUES_PER_CH) {
- dev_err(&pf->pdev->dev,
- "Failed to create VMDq VSI. User requested num_queues (%d) > I40E_MAX_QUEUES_PER_VSI (%u)\n",
- num_queues, I40E_MAX_QUEUES_PER_CH);
- return -EINVAL;
- }
-
if (vsi->current_rss_size) {
if (num_queues > vsi->current_rss_size) {
dev_dbg(&pf->pdev->dev,
dev_err(&pf->pdev->dev,
"Failed to add cloud filter, err %s\n",
i40e_stat_str(&pf->hw, err));
- err = i40e_aq_rc_to_posix(err, pf->hw.aq.asq_last_status);
goto err;
}
if (!lock_acquired)
rtnl_unlock();
+ /* Restore promiscuous settings */
+ ret = i40e_set_promiscuous(pf, pf->cur_promisc);
+ if (ret)
+ dev_warn(&pf->pdev->dev,
+ "Failed to restore promiscuous setting: %s, err %s aq_err %s\n",
+ pf->cur_promisc ? "on" : "off",
+ i40e_stat_str(&pf->hw, ret),
+ i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
+
i40e_reset_all_vfs(pf, true);
/* tell the firmware that we're starting */
i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data)
{
- i40e_status ret_code;
+ i40e_status ret_code = 0;
- ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
if (ret_code)
return ret_code;
ret_code = __i40e_read_nvm_word(hw, offset, data);
- i40e_release_nvm(hw);
+ if (hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK)
+ i40e_release_nvm(hw);
return ret_code;
}
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* we have caught up to head, no work left to do */
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
+#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
u64 flags;
/* Used in set switch config AQ command */
}
return i40e_vc_send_msg_to_vf(vf, VIRTCHNL_OP_REQUEST_QUEUES, 0,
- (u8 *)vfres, sizeof(vfres));
+ (u8 *)vfres, sizeof(*vfres));
}
/**
struct i40e_mac_filter *f;
f = i40e_find_mac(vsi, al->list[i].addr);
- if (!f)
+ if (!f) {
f = i40e_add_mac_filter(vsi, al->list[i].addr);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add MAC filter %pM for VF %d\n",
- al->list[i].addr, vf->vf_id);
- ret = I40E_ERR_PARAM;
- spin_unlock_bh(&vsi->mac_filter_hash_lock);
- goto error_param;
- } else {
- vf->num_mac++;
+ if (!f) {
+ dev_err(&pf->pdev->dev,
+ "Unable to add MAC filter %pM for VF %d\n",
+ al->list[i].addr, vf->vf_id);
+ ret = I40E_ERR_PARAM;
+ spin_unlock_bh(&vsi->mac_filter_hash_lock);
+ goto error_param;
+ } else {
+ vf->num_mac++;
+ }
}
}
spin_unlock_bh(&vsi->mac_filter_hash_lock);
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
/* if the descriptor isn't done, no work yet to do */
.setup_qvlist = i40evf_client_setup_qvlist,
};
+/**
+ * i40evf_client_get_params - retrieve relevant client parameters
+ * @vsi: VSI with parameters
+ * @params: client param struct
+ **/
+static
+void i40evf_client_get_params(struct i40e_vsi *vsi, struct i40e_params *params)
+{
+ int i;
+
+ memset(params, 0, sizeof(struct i40e_params));
+ params->mtu = vsi->netdev->mtu;
+ params->link_up = vsi->back->link_up;
+
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ params->qos.prio_qos[i].tc = 0;
+ params->qos.prio_qos[i].qs_handle = vsi->qs_handle;
+ }
+}
+
/**
* i40evf_notify_client_message - call the client message receive callback
* @vsi: the VSI associated with this client
return;
cinst = vsi->back->cinst;
- memset(¶ms, 0, sizeof(params));
- params.mtu = vsi->netdev->mtu;
- params.link_up = vsi->back->link_up;
- params.qos.prio_qos[0].qs_handle = vsi->qs_handle;
if (!cinst || !cinst->client || !cinst->client->ops ||
!cinst->client->ops->l2_param_change) {
"Cannot locate client instance l2_param_change function\n");
return;
}
+ i40evf_client_get_params(vsi, ¶ms);
+ cinst->lan_info.params = params;
cinst->client->ops->l2_param_change(&cinst->lan_info, cinst->client,
¶ms);
}
i40evf_client_add_instance(struct i40evf_adapter *adapter)
{
struct i40e_client_instance *cinst = NULL;
- struct netdev_hw_addr *mac = NULL;
struct i40e_vsi *vsi = &adapter->vsi;
- int i;
+ struct netdev_hw_addr *mac = NULL;
+ struct i40e_params params;
if (!vf_registered_client)
goto out;
cinst->lan_info.version.major = I40EVF_CLIENT_VERSION_MAJOR;
cinst->lan_info.version.minor = I40EVF_CLIENT_VERSION_MINOR;
cinst->lan_info.version.build = I40EVF_CLIENT_VERSION_BUILD;
+ i40evf_client_get_params(vsi, &params);
+ cinst->lan_info.params = params;
set_bit(__I40E_CLIENT_INSTANCE_NONE, &cinst->state);
cinst->lan_info.msix_count = adapter->num_iwarp_msix;
cinst->lan_info.msix_entries =
&adapter->msix_entries[adapter->iwarp_base_vector];
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- cinst->lan_info.params.qos.prio_qos[i].tc = 0;
- cinst->lan_info.params.qos.prio_qos[i].qs_handle =
- vsi->qs_handle;
- }
-
mac = list_first_entry(&cinst->lan_info.netdev->dev_addrs.list,
struct netdev_hw_addr, list);
if (mac)
adapter->flags &= ~I40EVF_FLAG_SERVICE_CLIENT_REQUESTED;
goto out;
}
+ if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
+ i40evf_notify_client_l2_params(&adapter->vsi);
+ adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
+ goto out;
+ }
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_CLOSE) {
i40evf_notify_client_close(&adapter->vsi, false);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_CLOSE;
if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_OPEN) {
i40evf_notify_client_open(&adapter->vsi);
adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_OPEN;
- goto out;
- }
- if (adapter->flags & I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
- i40evf_notify_client_l2_params(&adapter->vsi);
- adapter->flags &= ~I40EVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
}
out:
clear_bit(__I40EVF_IN_CLIENT_TASK, &adapter->crit_section);
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
break;
/* prevent any other reads prior to eop_desc */
- read_barrier_depends();
+ smp_rmb();
/* if DD is not set pending work has not been completed */
if (!(eop_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
spin_unlock_bh(&mp->mib_counters_lock);
}
-static void mib_counters_timer_wrapper(unsigned long _mp)
+static void mib_counters_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = (void *)_mp;
+ struct mv643xx_eth_private *mp = from_timer(mp, t, mib_counters_timer);
mib_counters_update(mp);
mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
}
return work_done;
}
-static inline void oom_timer_wrapper(unsigned long data)
+static inline void oom_timer_wrapper(struct timer_list *t)
{
- struct mv643xx_eth_private *mp = (void *)data;
+ struct mv643xx_eth_private *mp = from_timer(mp, t, rx_oom);
napi_schedule(&mp->napi);
}
mib_counters_clear(mp);
- setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
- (unsigned long)mp);
+ timer_setup(&mp->mib_counters_timer, mib_counters_timer_wrapper, 0);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
- setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
+ timer_setup(&mp->rx_oom, oom_timer_wrapper, 0);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
val &= ~MVPP22_CTRL4_EXT_PIN_GMII_SEL;
writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val |= MVPP2_GMAC_DISABLE_PADDING;
- val &= ~MVPP2_GMAC_FLOW_CTRL_MASK;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
} else if (phy_interface_mode_is_rgmii(port->phy_interface)) {
val = readl(port->base + MVPP22_GMAC_CTRL_4_REG);
val |= MVPP22_CTRL4_EXT_PIN_GMII_SEL |
MVPP22_CTRL4_QSGMII_BYPASS_ACTIVE;
val &= ~MVPP22_CTRL4_DP_CLK_SEL;
writel(val, port->base + MVPP22_GMAC_CTRL_4_REG);
-
- val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
- val &= ~MVPP2_GMAC_DISABLE_PADDING;
- writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
}
/* The port is connected to a copper PHY */
sizeof(*txq_pcpu->buffs),
GFP_KERNEL);
if (!txq_pcpu->buffs)
- goto cleanup;
+ return -ENOMEM;
txq_pcpu->count = 0;
txq_pcpu->reserved_num = 0;
&txq_pcpu->tso_headers_dma,
GFP_KERNEL);
if (!txq_pcpu->tso_headers)
- goto cleanup;
+ return -ENOMEM;
}
return 0;
-cleanup:
- for_each_present_cpu(cpu) {
- txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
- kfree(txq_pcpu->buffs);
-
- dma_free_coherent(port->dev->dev.parent,
- txq_pcpu->size * TSO_HEADER_SIZE,
- txq_pcpu->tso_headers,
- txq_pcpu->tso_headers_dma);
- }
-
- dma_free_coherent(port->dev->dev.parent,
- txq->size * MVPP2_DESC_ALIGNED_SIZE,
- txq->descs, txq->descs_dma);
-
- return -ENOMEM;
}
/* Free allocated TXQ resources */
else if (!IS_ALIGNED(ring->tx_pending, 32))
new_tx_pending = ALIGN(ring->tx_pending, 32);
+ /* The Tx ring size cannot be smaller than the minimum number of
+ * descriptors needed for TSO.
+ */
+ if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
+ new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
+
if (ring->rx_pending != new_rx_pending) {
netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
ring->rx_pending, new_rx_pending);
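
ALIGN() rounds its first argument up to the next multiple of the second, so
the clamp enforces the TSO minimum while preserving the 32-descriptor
granularity applied just above. Illustratively (the actual value of
MVPP2_MAX_SKB_DESCS is not shown in this hunk)::

    /* ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1) for power-of-two a;
     * a hypothetical minimum of 58 descriptors would clamp to 64 */
    if (new_tx_pending < MVPP2_MAX_SKB_DESCS)
            new_tx_pending = ALIGN(MVPP2_MAX_SKB_DESCS, 32);
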
for_each_available_child_of_node(dn, port_node) {
err = mvpp2_port_probe(pdev, port_node, priv, i);
if (err < 0)
- goto err_mg_clk;
+ goto err_port_probe;
i++;
}
priv->stats_queue = create_singlethread_workqueue(priv->queue_name);
if (!priv->stats_queue) {
err = -ENOMEM;
- goto err_mg_clk;
+ goto err_port_probe;
}
platform_set_drvdata(pdev, priv);
return 0;
+err_port_probe:
+ i = 0;
+ for_each_available_child_of_node(dn, port_node) {
+ if (priv->port_list[i])
+ mvpp2_port_remove(priv->port_list[i]);
+ i++;
+ }
err_mg_clk:
clk_disable_unprepare(priv->axi_clk);
if (priv->hw_version == MVPP22)
}
}
-static inline void rxq_refill_timer_wrapper(unsigned long data)
+static inline void rxq_refill_timer_wrapper(struct timer_list *t)
{
- struct pxa168_eth_private *pep = (void *)data;
+ struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
napi_schedule(&pep->napi);
}
netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
memset(&pep->timeout, 0, sizeof(struct timer_list));
- setup_timer(&pep->timeout, rxq_refill_timer_wrapper,
- (unsigned long)pep);
+ timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
pep->smi_bus = mdiobus_alloc();
if (!pep->smi_bus) {
* get an interrupt when carrier is detected, need to poll for
* link coming up.
*/
-static void xm_link_timer(unsigned long arg)
+static void xm_link_timer(struct timer_list *t)
{
- struct skge_port *skge = (struct skge_port *) arg;
+ struct skge_port *skge = from_timer(skge, t, link_timer);
struct net_device *dev = skge->netdev;
struct skge_hw *hw = skge->hw;
int port = skge->port;
/* Only used for Genesis XMAC */
if (is_genesis(hw))
- setup_timer(&skge->link_timer, xm_link_timer, (unsigned long) skge);
+ timer_setup(&skge->link_timer, xm_link_timer, 0);
else {
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_RXCSUM;
}
}
-static void sky2_watchdog(unsigned long arg)
+static void sky2_watchdog(struct timer_list *t)
{
- struct sky2_hw *hw = (struct sky2_hw *) arg;
+ struct sky2_hw *hw = from_timer(hw, t, watchdog_timer);
/* Check for lost IRQ once a second */
if (sky2_read32(hw, B0_ISRC)) {
sky2_show_addr(dev1);
}
- setup_timer(&hw->watchdog_timer, sky2_watchdog, (unsigned long) hw);
+ timer_setup(&hw->watchdog_timer, sky2_watchdog, 0);
INIT_WORK(&hw->restart_work, sky2_restart);
pci_set_drvdata(pdev, hw);
mlxsw_sp_ipip_entry_ol_down_event(mlxsw_sp, ipip_entry);
}
-static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_rif *rif);
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *old_rif,
+ struct mlxsw_sp_rif *new_rif);
static int
mlxsw_sp_ipip_entry_ol_lb_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_ipip_entry *ipip_entry,
return PTR_ERR(new_lb_rif);
ipip_entry->ol_lb = new_lb_rif;
- if (keep_encap) {
- list_splice_init(&old_lb_rif->common.nexthop_list,
- &new_lb_rif->common.nexthop_list);
- mlxsw_sp_nexthop_rif_update(mlxsw_sp, &new_lb_rif->common);
- }
+ if (keep_encap)
+ mlxsw_sp_nexthop_rif_migrate(mlxsw_sp, &old_lb_rif->common,
+ &new_lb_rif->common);
mlxsw_sp_rif_destroy(&old_lb_rif->common);
return 0;
}
+static void mlxsw_sp_nexthop_rif_update(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *rif);
+
/**
* Update the offload related to an IPIP entry. This always updates decap, and
* in addition to that it also:
{
struct mlxsw_sp_ipip_entry *ipip_entry =
mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
+ enum mlxsw_sp_l3proto ul_proto;
+ union mlxsw_sp_l3addr saddr;
+ u32 ul_tb_id;
if (!ipip_entry)
return 0;
+
+ /* For flat configuration cases, moving overlay to a different VRF might
+ * cause a local address conflict, and the conflicting tunnels need to be
+ * demoted.
+ */
+ ul_tb_id = mlxsw_sp_ipip_dev_ul_tb_id(ol_dev);
+ ul_proto = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt]->ul_proto;
+ saddr = mlxsw_sp_ipip_netdev_saddr(ul_proto, ol_dev);
+ if (mlxsw_sp_ipip_demote_tunnel_by_saddr(mlxsw_sp, ul_proto,
+ saddr, ul_tb_id,
+ ipip_entry)) {
+ mlxsw_sp_ipip_entry_demote_tunnel(mlxsw_sp, ipip_entry);
+ return 0;
+ }
+
return __mlxsw_sp_ipip_entry_update_tunnel(mlxsw_sp, ipip_entry,
true, false, false, extack);
}
return ul_dev ? (ul_dev->flags & IFF_UP) : true;
}
-static int mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop *nh,
- struct net_device *ol_dev)
+static void mlxsw_sp_nexthop_ipip_init(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_nexthop *nh,
+ struct mlxsw_sp_ipip_entry *ipip_entry)
{
bool removing;
if (!nh->nh_grp->gateway || nh->ipip_entry)
- return 0;
-
- nh->ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, ol_dev);
- if (!nh->ipip_entry)
- return -ENOENT;
+ return;
- removing = !mlxsw_sp_ipip_netdev_ul_up(ol_dev);
+ nh->ipip_entry = ipip_entry;
+ removing = !mlxsw_sp_ipip_netdev_ul_up(ipip_entry->ol_dev);
__mlxsw_sp_nexthop_neigh_update(nh, removing);
- return 0;
+ mlxsw_sp_nexthop_rif_init(nh, &ipip_entry->ol_lb->common);
}
static void mlxsw_sp_nexthop_ipip_fini(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_nexthop *nh,
struct fib_nh *fib_nh)
{
- struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
struct net_device *dev = fib_nh->nh_dev;
- enum mlxsw_sp_ipip_type ipipt;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
struct mlxsw_sp_rif *rif;
int err;
- if (mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, fib_nh, &ipipt) &&
- router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV4)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
- if (err)
- return err;
- mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
- return 0;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+ if (ipip_entry) {
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ if (ipip_ops->can_offload(mlxsw_sp, dev,
+ MLXSW_SP_L3_PROTO_IPV4)) {
+ nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+ mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+ return 0;
+ }
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
}
}
+static void mlxsw_sp_nexthop_rif_migrate(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_rif *old_rif,
+ struct mlxsw_sp_rif *new_rif)
+{
+ struct mlxsw_sp_nexthop *nh;
+
+ list_splice_init(&old_rif->nexthop_list, &new_rif->nexthop_list);
+ list_for_each_entry(nh, &new_rif->nexthop_list, rif_list_node)
+ nh->rif = new_rif;
+ mlxsw_sp_nexthop_rif_update(mlxsw_sp, new_rif);
+}
+
static void mlxsw_sp_nexthop_rif_gone_sync(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_rif *rif)
{
case RTN_LOCAL:
ipip_entry = mlxsw_sp_ipip_entry_find_by_decap(mlxsw_sp, dev,
MLXSW_SP_L3_PROTO_IPV4, dip);
- if (ipip_entry) {
+ if (ipip_entry && ipip_entry->ol_dev->flags & IFF_UP) {
fib_entry->type = MLXSW_SP_FIB_ENTRY_TYPE_IPIP_DECAP;
return mlxsw_sp_fib_entry_decap_init(mlxsw_sp,
fib_entry,
struct mlxsw_sp_nexthop *nh,
const struct rt6_info *rt)
{
- struct mlxsw_sp_router *router = mlxsw_sp->router;
+ const struct mlxsw_sp_ipip_ops *ipip_ops;
+ struct mlxsw_sp_ipip_entry *ipip_entry;
struct net_device *dev = rt->dst.dev;
- enum mlxsw_sp_ipip_type ipipt;
struct mlxsw_sp_rif *rif;
int err;
- if (mlxsw_sp_nexthop6_ipip_type(mlxsw_sp, rt, &ipipt) &&
- router->ipip_ops_arr[ipipt]->can_offload(mlxsw_sp, dev,
- MLXSW_SP_L3_PROTO_IPV6)) {
- nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
- err = mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, dev);
- if (err)
- return err;
- mlxsw_sp_nexthop_rif_init(nh, &nh->ipip_entry->ol_lb->common);
- return 0;
+ ipip_entry = mlxsw_sp_ipip_entry_find_by_ol_dev(mlxsw_sp, dev);
+ if (ipip_entry) {
+ ipip_ops = mlxsw_sp->router->ipip_ops_arr[ipip_entry->ipipt];
+ if (ipip_ops->can_offload(mlxsw_sp, dev,
+ MLXSW_SP_L3_PROTO_IPV6)) {
+ nh->type = MLXSW_SP_NEXTHOP_TYPE_IPIP;
+ mlxsw_sp_nexthop_ipip_init(mlxsw_sp, nh, ipip_entry);
+ return 0;
+ }
}
nh->type = MLXSW_SP_NEXTHOP_TYPE_ETH;
* cannot detect a NIC with a parity error in a timely fashion if the
* NIC is lightly loaded.
*/
-static void myri10ge_watchdog_timer(unsigned long arg)
+static void myri10ge_watchdog_timer(struct timer_list *t)
{
struct myri10ge_priv *mgp;
struct myri10ge_slice_state *ss;
u32 rx_pause_cnt;
u16 cmd;
- mgp = (struct myri10ge_priv *)arg;
+ mgp = from_timer(mgp, t, watchdog_timer);
rx_pause_cnt = ntohl(mgp->ss[0].fw_stats->dropped_pause);
busy_slice_cnt = 0;
pci_save_state(pdev);
/* Setup the watchdog timer */
- setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
- (unsigned long)mgp);
+ timer_setup(&mgp->watchdog_timer, myri10ge_watchdog_timer, 0);
netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
{
int err;
- if (prog && !prog->aux->offload)
- return -EINVAL;
+ if (prog) {
+ struct bpf_dev_offload *offload = prog->aux->offload;
+
+ if (!offload)
+ return -EINVAL;
+ if (offload->netdev != nn->dp.netdev)
+ return -EINVAL;
+ }
if (prog && old_prog) {
u8 cap;
tx_skb->dma_len,
DMA_TO_DEVICE);
else
- pci_unmap_page(np->pci_dev, tx_skb->dma,
+ dma_unmap_page(&np->pci_dev->dev, tx_skb->dma,
tx_skb->dma_len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
tx_skb->dma = 0;
}
}
* pch_gbe_watchdog - Watchdog process
- * @data: Board private structure
+ * @t: timer list containing the board private structure
*/
-static void pch_gbe_watchdog(unsigned long data)
+static void pch_gbe_watchdog(struct timer_list *t)
{
- struct pch_gbe_adapter *adapter = (struct pch_gbe_adapter *)data;
+ struct pch_gbe_adapter *adapter = from_timer(adapter, t,
+ watchdog_timer);
struct net_device *netdev = adapter->netdev;
struct pch_gbe_hw *hw = &adapter->hw;
dev_err(&pdev->dev, "Invalid MAC address, "
"interface disabled.\n");
}
- setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
- (unsigned long)adapter);
+ timer_setup(&adapter->watchdog_timer, pch_gbe_watchdog, 0);
INIT_WORK(&adapter->reset_task, pch_gbe_reset_task);
#define TX_CLEAN_INTERVAL HZ
-static void pasemi_mac_tx_timer(unsigned long data)
+static void pasemi_mac_tx_timer(struct timer_list *t)
{
- struct pasemi_mac_txring *txring = (struct pasemi_mac_txring *)data;
+ struct pasemi_mac_txring *txring = from_timer(txring, t, clean_timer);
struct pasemi_mac *mac = txring->mac;
pasemi_mac_clean_tx(txring);
if (dev->phydev)
phy_start(dev->phydev);
- setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer,
- (unsigned long)mac->tx);
+ timer_setup(&mac->tx->clean_timer, pasemi_mac_tx_timer, 0);
mod_timer(&mac->tx->clean_timer, jiffies + HZ);
return 0;
qdev->pci_slot = (u8) PCI_SLOT(qdev->pdev->devfn);
}
-static void ql3xxx_timer(unsigned long ptr)
+static void ql3xxx_timer(struct timer_list *t)
{
- struct ql3_adapter *qdev = (struct ql3_adapter *)ptr;
+ struct ql3_adapter *qdev = from_timer(qdev, t, adapter_timer);
queue_delayed_work(qdev->workqueue, &qdev->link_state_work, 0);
}
INIT_DELAYED_WORK(&qdev->tx_timeout_work, ql_tx_timeout_work);
INIT_DELAYED_WORK(&qdev->link_state_work, ql_link_state_machine_work);
- setup_timer(&qdev->adapter_timer, ql3xxx_timer, (unsigned long)qdev);
+ timer_setup(&qdev->adapter_timer, ql3xxx_timer, 0);
qdev->adapter_timer.expires = jiffies + HZ * 2; /* two second delay */
if (!cards_found) {
return ret;
}
-static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
- struct rtl8169_private *tp = netdev_priv(dev);
- int ret;
-
- del_timer_sync(&tp->timer);
-
- rtl_lock_work(tp);
- ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->advertising);
- rtl_unlock_work(tp);
-
- return ret;
-}
-
static netdev_features_t rtl8169_fix_features(struct net_device *dev,
netdev_features_t features)
{
return rc;
}
+static int rtl8169_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+ int rc;
+ u32 advertising;
+
+ if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising))
+ return -EINVAL;
+
+ del_timer_sync(&tp->timer);
+
+ rtl_lock_work(tp);
+ rc = rtl8169_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
+ cmd->base.duplex, advertising);
+ rtl_unlock_work(tp);
+
+ return rc;
+}
+
static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
void *p)
{
.get_link = ethtool_op_get_link,
.get_coalesce = rtl_get_coalesce,
.set_coalesce = rtl_set_coalesce,
- .set_settings = rtl8169_set_settings,
.get_msglevel = rtl8169_get_msglevel,
.set_msglevel = rtl8169_set_msglevel,
.get_regs = rtl8169_get_regs,
.get_ts_info = ethtool_op_get_ts_info,
.nway_reset = rtl8169_nway_reset,
.get_link_ksettings = rtl8169_get_link_ksettings,
+ .set_link_ksettings = rtl8169_set_link_ksettings,
};
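
This is the standard ethtool migration from set_settings to
set_link_ksettings: link modes now live in wide bitmaps, and a driver whose
internals still use the legacy u32 advertising mask must convert explicitly,
rejecting anything a u32 cannot express. A generic sketch of the shape
(foo_set_speed stands in for the driver's internal helper)::

    static int foo_set_link_ksettings(struct net_device *dev,
                                      const struct ethtool_link_ksettings *cmd)
    {
            u32 advertising;

            /* returns false if the bitmap holds modes beyond the legacy u32 */
            if (!ethtool_convert_link_mode_to_legacy_u32(&advertising,
                                                         cmd->link_modes.advertising))
                    return -EINVAL;

            return foo_set_speed(dev, cmd->base.autoneg, cmd->base.speed,
                                 cmd->base.duplex, advertising);
    }
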
static void rtl8169_get_mac_version(struct rtl8169_private *tp,
return err;
}
-static void ofdpa_fdb_cleanup(unsigned long data)
+static void ofdpa_fdb_cleanup(struct timer_list *t)
{
- struct ofdpa *ofdpa = (struct ofdpa *)data;
+ struct ofdpa *ofdpa = from_timer(ofdpa, t, fdb_cleanup_timer);
struct ofdpa_port *ofdpa_port;
struct ofdpa_fdb_tbl_entry *entry;
struct hlist_node *tmp;
hash_init(ofdpa->neigh_tbl);
spin_lock_init(&ofdpa->neigh_tbl_lock);
- setup_timer(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup,
- (unsigned long) ofdpa);
+ timer_setup(&ofdpa->fdb_cleanup_timer, ofdpa_fdb_cleanup, 0);
mod_timer(&ofdpa->fdb_cleanup_timer, jiffies);
ofdpa->ageing_time = BR_DEFAULT_AGEING_TIME;
/*
* dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
*
- * Copyright (C) Alexandre Torgue 2015
- * Author: Alexandre Torgue <alexandre.torgue@gmail.com>
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
* License terms: GNU General Public License (GPL), version 2
*
*/
* if there is no data transfer and if we are not in LPI state,
* then MAC Transmitter can be moved to LPI state.
*/
-static void stmmac_eee_ctrl_timer(unsigned long arg)
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)arg;
+ struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
stmmac_enable_eee_mode(priv);
mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
spin_lock_irqsave(&priv->lock, flags);
if (!priv->eee_active) {
priv->eee_active = 1;
- setup_timer(&priv->eee_ctrl_timer,
- stmmac_eee_ctrl_timer,
- (unsigned long)priv);
+ timer_setup(&priv->eee_ctrl_timer,
+ stmmac_eee_ctrl_timer, 0);
mod_timer(&priv->eee_ctrl_timer,
STMMAC_LPI_T(eee_timer));
* Description:
* This is the timer handler to directly invoke the stmmac_tx_clean.
*/
-static void stmmac_tx_timer(unsigned long data)
+static void stmmac_tx_timer(struct timer_list *t)
{
- struct stmmac_priv *priv = (struct stmmac_priv *)data;
+ struct stmmac_priv *priv = from_timer(priv, t, txtimer);
u32 tx_queues_count = priv->plat->tx_queues_to_use;
u32 queue;
{
priv->tx_coal_frames = STMMAC_TX_FRAMES;
priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
- setup_timer(&priv->txtimer, stmmac_tx_timer, (unsigned long)priv);
+ timer_setup(&priv->txtimer, stmmac_tx_timer, 0);
priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
add_timer(&priv->txtimer);
}
return IRQ_HANDLED;
}
-static void xlgmac_tx_timer(unsigned long data)
+static void xlgmac_tx_timer(struct timer_list *t)
{
- struct xlgmac_channel *channel = (struct xlgmac_channel *)data;
+ struct xlgmac_channel *channel = from_timer(channel, t, tx_timer);
struct xlgmac_pdata *pdata = channel->pdata;
struct napi_struct *napi;
if (!channel->tx_ring)
break;
- setup_timer(&channel->tx_timer, xlgmac_tx_timer,
- (unsigned long)channel);
+ timer_setup(&channel->tx_timer, xlgmac_tx_timer, 0);
}
}
}
EXPORT_SYMBOL_GPL(cpsw_ale_control_get);
-static void cpsw_ale_timer(unsigned long arg)
+static void cpsw_ale_timer(struct timer_list *t)
{
- struct cpsw_ale *ale = (struct cpsw_ale *)arg;
+ struct cpsw_ale *ale = from_timer(ale, t, timer);
cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
- setup_timer(&ale->timer, cpsw_ale_timer, (unsigned long)ale);
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
if (ale->ageout) {
ale->timer.expires = jiffies + ale->ageout;
add_timer(&ale->timer);
return -EOPNOTSUPP;
}
-static void netcp_ethss_timer(unsigned long arg)
+static void netcp_ethss_timer(struct timer_list *t)
{
- struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
+ struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
struct gbe_intf *gbe_intf;
struct gbe_slave *slave;
}
spin_unlock_bh(&gbe_dev->hw_stats_lock);
- setup_timer(&gbe_dev->timer, netcp_ethss_timer,
- (unsigned long)gbe_dev);
+ timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
add_timer(&gbe_dev->timer);
*inst_priv = gbe_dev;
spin_unlock_irqrestore(&priv->lock, flags);
return;
}
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
if (!in_irq())
spin_unlock_irqrestore(&priv->lock, flags);
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
tlan_dio_write8(dev->base_addr,
TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
if (priv->timer.function == NULL) {
- priv->timer.function = (TIMER_FUNC_TYPE)tlan_timer;
+ priv->timer.function = tlan_timer;
priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
priv->timer_set_at = jiffies;
priv->timer_type = TLAN_TIMER_ACTIVITY;
* packets, including updating the queue tail pointer.
*/
static void
-spider_net_cleanup_tx_ring(struct spider_net_card *card)
+spider_net_cleanup_tx_ring(struct timer_list *t)
{
+ struct spider_net_card *card = from_timer(card, t, tx_timer);
if ((spider_net_release_tx_chain(card, 0) != 0) &&
(card->netdev->flags & IFF_UP)) {
spider_net_kick_tx_dma(card);
spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
- spider_net_cleanup_tx_ring(card);
+ spider_net_cleanup_tx_ring(&card->tx_timer);
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
- * @data: used for pointer to card structure
+ * @t: timer embedded in the card structure
*
*/
-static void spider_net_link_phy(unsigned long data)
+static void spider_net_link_phy(struct timer_list *t)
{
- struct spider_net_card *card = (struct spider_net_card *)data;
+ struct spider_net_card *card = from_timer(card, t, aneg_timer);
struct mii_phy *phy = &card->phy;
/* if link didn't come up after SPIDER_NET_ANEG_TIMEOUT tries, setup phy again */
pci_set_drvdata(card->pdev, netdev);
- setup_timer(&card->tx_timer,
- (void(*)(unsigned long))spider_net_cleanup_tx_ring,
- (unsigned long)card);
+ timer_setup(&card->tx_timer, spider_net_cleanup_tx_ring, 0);
netdev->irq = card->pdev->irq;
card->aneg_count = 0;
- setup_timer(&card->aneg_timer, spider_net_link_phy,
- (unsigned long)card);
+ timer_setup(&card->aneg_timer, spider_net_link_phy, 0);
netif_napi_add(netdev, &card->napi,
spider_net_poll, SPIDER_NET_NAPI_WEIGHT);
else
name = "Rhine III";
- netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
- name, (long)ioaddr, dev->dev_addr, rp->irq);
+ netdev_info(dev, "VIA %s at %p, %pM, IRQ %d\n",
+ name, ioaddr, dev->dev_addr, rp->irq);
dev_set_drvdata(hwdev, dev);
config XILINX_LL_TEMAC
tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
depends on (PPC || MICROBLAZE)
+ depends on !64BIT || BROKEN
select PHYLIB
---help---
This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_TX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX]))
info->key.tun_flags &= ~TUNNEL_CSUM;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
if (data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]) {
+#if IS_ENABLED(CONFIG_IPV6)
if (changelink) {
attrtype = IFLA_GENEVE_UDP_ZERO_CSUM6_RX;
goto change_notsup;
}
if (nla_get_u8(data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX]))
*use_udp6_rx_checksums = false;
+#else
+ NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_RX],
+ "IPv6 support not enabled in the kernel");
+ return -EPFNOSUPPORT;
+#endif
}
return 0;
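
Both checksum attributes get the same guard: on kernels built without IPv6 the
attribute is now rejected at parse time, with an extended-ACK message tied to
the offending attribute, instead of being silently accepted. The shape of the
guard (note IS_ENABLED() is true for both =y and =m)::

    #if IS_ENABLED(CONFIG_IPV6)
            /* ... normal zero-checksum handling ... */
    #else
            NL_SET_ERR_MSG_ATTR(extack, data[IFLA_GENEVE_UDP_ZERO_CSUM6_TX],
                                "IPv6 support not enabled in the kernel");
            return -EPFNOSUPPORT;
    #endif
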
goto nla_put_failure;
if (metadata && nla_put_flag(skb, IFLA_GENEVE_COLLECT_METADATA))
- goto nla_put_failure;
+ goto nla_put_failure;
+#if IS_ENABLED(CONFIG_IPV6)
if (nla_put_u8(skb, IFLA_GENEVE_UDP_ZERO_CSUM6_RX,
!geneve->use_udp6_rx_checksums))
goto nla_put_failure;
+#endif
return 0;
} else
if (when != TIMER_OFF)
{
- scc->tx_t.function = (TIMER_FUNC_TYPE)handler;
+ scc->tx_t.function = handler;
scc->tx_t.expires = jiffies + (when*HZ)/100;
add_timer(&scc->tx_t);
}
if (scc->kiss.maxdefer != 0 && scc->kiss.maxdefer != TIMER_OFF)
{
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_busy;
+ scc->tx_wdog.function = t_busy;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxdefer;
add_timer(&scc->tx_wdog);
}
if (scc->kiss.maxkeyup != 0 && scc->kiss.maxkeyup != TIMER_OFF)
{
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)t_maxkeyup;
+ scc->tx_wdog.function = t_maxkeyup;
scc->tx_wdog.expires = jiffies + HZ*scc->kiss.maxkeyup;
add_timer(&scc->tx_wdog);
}
del_timer(&scc->tx_wdog);
- scc->tx_wdog.function = (TIMER_FUNC_TYPE)scc_stop_calibrate;
+ scc->tx_wdog.function = scc_stop_calibrate;
scc->tx_wdog.expires = jiffies + HZ*duration;
add_timer(&scc->tx_wdog);
return false;
}
-static void *ipvlan_get_L3_hdr(struct sk_buff *skb, int *type)
+static void *ipvlan_get_L3_hdr(struct ipvl_port *port, struct sk_buff *skb, int *type)
{
void *lyr3h = NULL;
case htons(ETH_P_ARP): {
struct arphdr *arph;
- if (unlikely(!pskb_may_pull(skb, sizeof(*arph))))
+ if (unlikely(!pskb_may_pull(skb, arp_hdr_len(port->dev))))
return NULL;
arph = arp_hdr(skb);
/* Only Neighbour Solicitation pkts need different treatment */
if (ipv6_addr_any(&ip6h->saddr) &&
ip6h->nexthdr == NEXTHDR_ICMP) {
+ struct icmp6hdr *icmph;
+
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+
+ if (icmph->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION) {
+ /* Need to access the ipv6 address in body */
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph)
+ + sizeof(struct in6_addr))))
+ return NULL;
+
+ ip6h = ipv6_hdr(skb);
+ icmph = (struct icmp6hdr *)(ip6h + 1);
+ }
+
*type = IPVL_ICMPV6;
- lyr3h = ip6h + 1;
+ lyr3h = icmph;
}
break;
}
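
The repeated re-derivation of ip6h and icmph is deliberate: pskb_may_pull()
may reallocate the skb head when linearizing the requested bytes, which
invalidates every pointer previously computed into the packet data. Hence the
pattern after each pull::

    if (unlikely(!pskb_may_pull(skb, sizeof(*ip6h) + sizeof(*icmph))))
            return NULL;

    /* the head may have moved: recompute both header pointers */
    ip6h = ipv6_hdr(skb);
    icmph = (struct icmp6hdr *)(ip6h + 1);
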
struct ipvl_addr *addr;
int addr_type;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (!lyr3h)
goto out;
if (!ipvlan_is_vepa(ipvlan->port) &&
ether_addr_equal(eth->h_dest, eth->h_source)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(ipvlan->port, skb, &addr_type);
if (lyr3h) {
addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
if (addr) {
int addr_type;
if (ether_addr_equal(eth->h_source, skb->dev->dev_addr)) {
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return true;
struct sk_buff *skb = *pskb;
rx_handler_result_t ret = RX_HANDLER_PASS;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
} else {
struct ipvl_addr *addr;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
return ret;
if (!port || port->mode != IPVLAN_MODE_L3S)
goto out;
- lyr3h = ipvlan_get_L3_hdr(skb, &addr_type);
+ lyr3h = ipvlan_get_L3_hdr(port, skb, &addr_type);
if (!lyr3h)
goto out;
};
MODULE_DEVICE_TABLE(mdio, cortina_tbl);
+
+MODULE_DESCRIPTION("Cortina EDC CDR 10G Ethernet PHY driver");
+MODULE_AUTHOR("NXP");
+MODULE_LICENSE("GPL");
* link takes priority and the other port is completely locked out.
*/
#include <linux/phy.h>
+#include <linux/marvell_phy.h>
enum {
MV_PCS_BASE_T = 0x0000,
static struct phy_driver mv3310_drivers[] = {
{
.phy_id = 0x002b09aa,
- .phy_id_mask = 0xffffffff,
+ .phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "mv88x3310",
.features = SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Full |
module_phy_driver(mv3310_drivers);
static struct mdio_device_id __maybe_unused mv3310_tbl[] = {
- { 0x002b09aa, 0xffffffff },
+ { 0x002b09aa, MARVELL_PHY_ID_MASK },
{ },
};
MODULE_DEVICE_TABLE(mdio, mv3310_tbl);
static void slip_unesc6(struct slip *sl, unsigned char c);
#endif
#ifdef CONFIG_SLIP_SMART
-static void sl_keepalive(unsigned long sls);
-static void sl_outfill(unsigned long sls);
+static void sl_keepalive(struct timer_list *t);
+static void sl_outfill(struct timer_list *t);
static int sl_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#endif
sl->mode = SL_MODE_DEFAULT;
#ifdef CONFIG_SLIP_SMART
/* initialize timer_list struct */
- setup_timer(&sl->keepalive_timer, sl_keepalive, (unsigned long)sl);
- setup_timer(&sl->outfill_timer, sl_outfill, (unsigned long)sl);
+ timer_setup(&sl->keepalive_timer, sl_keepalive, 0);
+ timer_setup(&sl->outfill_timer, sl_outfill, 0);
#endif
slip_devs[i] = dev;
return sl;
* added by Stanislav Voronyi. All changes before marked VSV
*/
-static void sl_outfill(unsigned long sls)
+static void sl_outfill(struct timer_list *t)
{
- struct slip *sl = (struct slip *)sls;
+ struct slip *sl = from_timer(sl, t, outfill_timer);
spin_lock(&sl->lock);
spin_unlock(&sl->lock);
}
-static void sl_keepalive(unsigned long sls)
+static void sl_keepalive(struct timer_list *t)
{
- struct slip *sl = (struct slip *)sls;
+ struct slip *sl = from_timer(sl, t, keepalive_timer);
spin_lock(&sl->lock);
case TUNSETOFFLOAD:
/* let the user check for future flags */
if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
- TUN_F_TSO_ECN))
+ TUN_F_TSO_ECN | TUN_F_UFO))
return -EINVAL;
rtnl_lock();
if (ring->ring->is_tx) {
dir = DMA_TO_DEVICE;
order = 0;
- size = tbnet_frame_size(tf);
+ size = TBNET_FRAME_SIZE;
} else {
dir = DMA_FROM_DEVICE;
order = TBNET_RX_PAGE_ORDER;
static struct tbnet_frame *tbnet_get_tx_buffer(struct tbnet *net)
{
struct tbnet_ring *ring = &net->tx_ring;
+ struct device *dma_dev = tb_ring_dma_device(ring->ring);
struct tbnet_frame *tf;
unsigned int index;
tf = &ring->frames[index];
tf->frame.size = 0;
- tf->frame.buffer_phy = 0;
+
+ dma_sync_single_for_cpu(dma_dev, tf->frame.buffer_phy,
+ tbnet_frame_size(tf), DMA_TO_DEVICE);
return tf;
}
bool canceled)
{
struct tbnet_frame *tf = container_of(frame, typeof(*tf), frame);
- struct device *dma_dev = tb_ring_dma_device(ring);
struct tbnet *net = netdev_priv(tf->dev);
- dma_unmap_page(dma_dev, tf->frame.buffer_phy, tbnet_frame_size(tf),
- DMA_TO_DEVICE);
- tf->frame.buffer_phy = 0;
-
/* Return buffer to the ring */
net->tx_ring.prod++;
static int tbnet_alloc_tx_buffers(struct tbnet *net)
{
struct tbnet_ring *ring = &net->tx_ring;
+ struct device *dma_dev = tb_ring_dma_device(ring->ring);
unsigned int i;
for (i = 0; i < TBNET_RING_SIZE; i++) {
struct tbnet_frame *tf = &ring->frames[i];
+ dma_addr_t dma_addr;
tf->page = alloc_page(GFP_KERNEL);
if (!tf->page) {
return -ENOMEM;
}
+ dma_addr = dma_map_page(dma_dev, tf->page, 0, TBNET_FRAME_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dma_dev, dma_addr)) {
+ __free_page(tf->page);
+ tf->page = NULL;
+ tbnet_free_buffers(ring);
+ return -ENOMEM;
+ }
+
tf->dev = net->dev;
+ tf->frame.buffer_phy = dma_addr;
tf->frame.callback = tbnet_tx_callback;
tf->frame.sof = TBIP_PDF_FRAME_START;
tf->frame.eof = TBIP_PDF_FRAME_END;
return 0;
}
-static bool tbnet_xmit_map(struct device *dma_dev, struct tbnet_frame *tf)
-{
- dma_addr_t dma_addr;
-
- dma_addr = dma_map_page(dma_dev, tf->page, 0, tbnet_frame_size(tf),
- DMA_TO_DEVICE);
- if (dma_mapping_error(dma_dev, dma_addr))
- return false;
-
- tf->frame.buffer_phy = dma_addr;
- return true;
-}
-
static bool tbnet_xmit_csum_and_map(struct tbnet *net, struct sk_buff *skb,
struct tbnet_frame **frames, u32 frame_count)
{
if (skb->ip_summed != CHECKSUM_PARTIAL) {
/* No need to calculate checksum so we just update the
- * total frame count and map the frames for DMA.
+ * total frame count and sync the frames for DMA.
*/
for (i = 0; i < frame_count; i++) {
hdr = page_address(frames[i]->page);
hdr->frame_count = cpu_to_le32(frame_count);
- if (!tbnet_xmit_map(dma_dev, frames[i]))
- goto err_unmap;
+ dma_sync_single_for_device(dma_dev,
+ frames[i]->frame.buffer_phy,
+ tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
}
return true;
*tucso = csum_fold(wsum);
/* Checksum is finally calculated and we don't touch the memory
- * anymore, so DMA map the frames now.
+ * anymore, so DMA sync the frames now.
*/
for (i = 0; i < frame_count; i++) {
- if (!tbnet_xmit_map(dma_dev, frames[i]))
- goto err_unmap;
+ dma_sync_single_for_device(dma_dev, frames[i]->frame.buffer_phy,
+ tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
}
return true;
-
-err_unmap:
- while (i--)
- dma_unmap_page(dma_dev, frames[i]->frame.buffer_phy,
- tbnet_frame_size(frames[i]), DMA_TO_DEVICE);
-
- return false;
}
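
The Thunderbolt networking Tx path moves from map-per-transmit to
map-once-at-allocation: each buffer is mapped DMA_TO_DEVICE when the ring is
built, ownership then alternates via the sync calls as frames are reused, and
the mapping is torn down only when the buffers are freed. The streaming-DMA
lifecycle, as a generic sketch (dma_dev, page and BUF_SIZE are placeholders)::

    /* once, at ring allocation */
    dma_addr = dma_map_page(dma_dev, page, 0, BUF_SIZE, DMA_TO_DEVICE);
    if (dma_mapping_error(dma_dev, dma_addr))
            return -ENOMEM;

    /* per frame: hand the buffer to the CPU, fill it, hand it back */
    dma_sync_single_for_cpu(dma_dev, dma_addr, BUF_SIZE, DMA_TO_DEVICE);
    /* ... write frame header and payload into the page ... */
    dma_sync_single_for_device(dma_dev, dma_addr, BUF_SIZE, DMA_TO_DEVICE);

    /* once, at teardown */
    dma_unmap_page(dma_dev, dma_addr, BUF_SIZE, DMA_TO_DEVICE);
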
static void *tbnet_kmap_frag(struct sk_buff *skb, unsigned int frag_num,
spin_unlock_bh(&tun->lock);
}
-static void tun_flow_cleanup(unsigned long data)
+static void tun_flow_cleanup(struct timer_list *t)
{
- struct tun_struct *tun = (struct tun_struct *)data;
+ struct tun_struct *tun = from_timer(tun, t, flow_gc_timer);
unsigned long delay = tun->ageing_time;
unsigned long next_timer = jiffies + delay;
unsigned long count = 0;
INIT_HLIST_HEAD(&tun->flows[i]);
tun->ageing_time = TUN_FLOW_EXPIRE;
- setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
+ timer_setup(&tun->flow_gc_timer, tun_flow_cleanup, 0);
+ mod_timer(&tun->flow_gc_timer,
+ round_jiffies_up(jiffies + tun->ageing_time));
}
static void tun_flow_uninit(struct tun_struct *tun)
features |= NETIF_F_TSO6;
arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
}
+
+ arg &= ~TUN_F_UFO;
}
/* This gives the user a way to test for new features in future by
static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
const unsigned char *addr, union vxlan_addr ip,
- __be16 port, __be32 src_vni, u32 vni, u32 ifindex,
- u16 vid)
+ __be16 port, __be32 src_vni, __be32 vni,
+ u32 ifindex, u16 vid)
{
struct vxlan_fdb *f;
struct vxlan_rdst *rd = NULL;
return NET_RX_DROP;
}
-static void ppp_timer(unsigned long arg)
+static void ppp_timer(struct timer_list *t)
{
- struct proto *proto = (struct proto *)arg;
+ struct proto *proto = from_timer(proto, t, timer);
struct ppp *ppp = get_ppp(proto->dev);
unsigned long flags;
for (i = 0; i < IDX_COUNT; i++) {
struct proto *proto = &ppp->protos[i];
proto->dev = dev;
- setup_timer(&proto->timer, ppp_timer, (unsigned long)proto);
+ timer_setup(&proto->timer, ppp_timer, 0);
proto->state = CLOSED;
}
ppp->protos[IDX_LCP].pid = PID_LCP;
break;
}
- data = kmalloc(xc.len, GFP_KERNEL);
- if (!data) {
- ret = -ENOMEM;
+ data = memdup_user(xc.data, xc.len);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
break;
}
-
- if(copy_from_user(data, xc.data, xc.len))
- {
- kfree(data);
- ret = -ENOMEM;
- break;
- }
printk("%s: Starting load of data Len: %d at 0x%p == 0x%p\n", dev->name, xc.len, xc.data, data);
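
memdup_user() is the idiomatic replacement for an open-coded kmalloc() plus
copy_from_user(), and it also corrects the error code: the removed code
returned -ENOMEM when the user copy faulted, while memdup_user() distinguishes
the two failures::

    data = memdup_user(xc.data, xc.len);
    if (IS_ERR(data)) {
            ret = PTR_ERR(data);    /* -EFAULT on a bad user pointer,
                                     * -ENOMEM on allocation failure */
            break;
    }
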
#define MICHAEL_MIC_LEN 8
-static int ath10k_htt_rx_crypto_tail_len(struct ath10k *ar,
- enum htt_rx_mpdu_encrypt_type type)
+static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
- return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
- return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
- return IEEE80211_TKIP_ICV_LEN;
+ return 0;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
return 0;
}
+static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
+ enum htt_rx_mpdu_encrypt_type type)
+{
+ switch (type) {
+ case HTT_RX_MPDU_ENCRYPT_NONE:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
+ case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
+ return 0;
+ case HTT_RX_MPDU_ENCRYPT_WEP40:
+ case HTT_RX_MPDU_ENCRYPT_WEP104:
+ return IEEE80211_WEP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
+ case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
+ return IEEE80211_TKIP_ICV_LEN;
+ case HTT_RX_MPDU_ENCRYPT_WEP128:
+ case HTT_RX_MPDU_ENCRYPT_WAPI:
+ break;
+ }
+
+ ath10k_warn(ar, "unsupported encryption type %d\n", type);
+ return 0;
+}
+
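
Splitting the old single "crypto tail length" into separate MIC and ICV
helpers matters because the two trailers are stripped independently: CCMP/GCMP
carry a MIC but no ICV, WEP and TKIP carry an ICV (TKIP's Michael MIC is
handled separately as MMIC), and the firmware signals each removal with its
own RX_FLAG_*_STRIPPED flag. The trimming below therefore reduces to, in
sketch form::

    /* both trailers sit after the payload; trim order does not matter */
    if (status->flag & RX_FLAG_MIC_STRIPPED)
            skb_trim(msdu, msdu->len -
                     ath10k_htt_rx_crypto_mic_len(ar, enctype));
    if (status->flag & RX_FLAG_ICV_STRIPPED)
            skb_trim(msdu, msdu->len -
                     ath10k_htt_rx_crypto_icv_len(ar, enctype));
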
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
+
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
- if ((status->flag & RX_FLAG_MIC_STRIPPED) &&
- enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
- skb_trim(msdu, msdu->len - 8);
+ if (status->flag & RX_FLAG_MIC_STRIPPED)
+ skb_trim(msdu, msdu->len -
+ ath10k_htt_rx_crypto_mic_len(ar, enctype));
/* ICV */
- if (status->flag & RX_FLAG_ICV_STRIPPED &&
- enctype != HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
+ if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
- ath10k_htt_rx_crypto_tail_len(ar, enctype));
+ ath10k_htt_rx_crypto_icv_len(ar, enctype));
}
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
- skb_trim(msdu, msdu->len - 8);
+ skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
if (!avp->assoc)
return false;
- skb = ieee80211_nullfunc_get(sc->hw, vif);
+ skb = ieee80211_nullfunc_get(sc->hw, vif, false);
if (!skb)
return false;
}
/* External RF module */
- iris_node = of_find_node_by_name(mmio_node, "iris");
+ iris_node = of_get_child_by_name(mmio_node, "iris");
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
/* LED trigger */
static int tx_activity;
-static void at76_ledtrig_tx_timerfunc(unsigned long data);
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused);
static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc);
DEFINE_LED_TRIGGER(ledtrig_tx);
-static void at76_ledtrig_tx_timerfunc(unsigned long data)
+static void at76_ledtrig_tx_timerfunc(struct timer_list *unused)
{
static int tx_lastactivity;
/**
* brcmf_btcoex_timerfunc() - BT coex timer callback
*/
-static void brcmf_btcoex_timerfunc(ulong data)
+static void brcmf_btcoex_timerfunc(struct timer_list *t)
{
- struct brcmf_btcoex_info *bt_local = (struct brcmf_btcoex_info *)data;
+ struct brcmf_btcoex_info *bt_local = from_timer(bt_local, t, timer);
brcmf_dbg(TRACE, "enter\n");
bt_local->timer_on = false;
/* Set up timer for BT */
btci->timer_on = false;
btci->timeout = BRCMF_BTCOEX_OPPR_WIN_TIME;
- setup_timer(&btci->timer, brcmf_btcoex_timerfunc, (ulong)btci);
+ timer_setup(&btci->timer, brcmf_btcoex_timerfunc, 0);
btci->cfg = cfg;
btci->saved_regs_part1 = false;
btci->saved_regs_part2 = false;
brcmf_notify_escan_complete(cfg, cfg->escan_info.ifp, true, true);
}
-static void brcmf_escan_timeout(unsigned long data)
+static void brcmf_escan_timeout(struct timer_list *t)
{
struct brcmf_cfg80211_info *cfg =
- (struct brcmf_cfg80211_info *)data;
+ from_timer(cfg, t, escan_timeout);
if (cfg->int_escan_map || cfg->scan_request) {
brcmf_err("timer expired\n");
brcmf_cfg80211_escan_handler);
cfg->escan_info.escan_state = WL_ESCAN_STATE_IDLE;
/* Init scan_timeout timer */
- setup_timer(&cfg->escan_timeout, brcmf_escan_timeout,
- (unsigned long)cfg);
+ timer_setup(&cfg->escan_timeout, brcmf_escan_timeout, 0);
INIT_WORK(&cfg->escan_timeout_work,
brcmf_cfg80211_escan_timeout_worker);
}
}
static void
-brcmf_sdio_watchdog(unsigned long data)
+brcmf_sdio_watchdog(struct timer_list *t)
{
- struct brcmf_sdio *bus = (struct brcmf_sdio *)data;
+ struct brcmf_sdio *bus = from_timer(bus, t, timer);
if (bus->watchdog_tsk) {
complete(&bus->watchdog_wait);
init_waitqueue_head(&bus->dcmd_resp_wait);
/* Set up the watchdog timer */
- setup_timer(&bus->timer, brcmf_sdio_watchdog,
- (unsigned long)bus);
+ timer_setup(&bus->timer, brcmf_sdio_watchdog, 0);
/* Initialize watchdog thread */
init_completion(&bus->watchdog_wait);
bus->watchdog_tsk = kthread_run(brcmf_sdio_watchdog_thread,
#define IWL9000_SMEM_OFFSET 0x400000
#define IWL9000_SMEM_LEN 0x68000
-#define IWL9000_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000A_FW_PRE "iwlwifi-9000-pu-a0-jf-a0-"
+#define IWL9000B_FW_PRE "iwlwifi-9000-pu-b0-jf-b0-"
#define IWL9000RFB_FW_PRE "iwlwifi-9000-pu-a0-jf-b0-"
#define IWL9260A_FW_PRE "iwlwifi-9260-th-a0-jf-a0-"
#define IWL9260B_FW_PRE "iwlwifi-9260-th-b0-jf-b0-"
-#define IWL9000_MODULE_FIRMWARE(api) \
- IWL9000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL9000A_MODULE_FIRMWARE(api) \
+ IWL9000A_FW_PRE __stringify(api) ".ucode"
+#define IWL9000B_MODULE_FIRMWARE(api) \
+ IWL9000B_FW_PRE __stringify(api) ".ucode"
#define IWL9000RFB_MODULE_FIRMWARE(api) \
- IWL9000RFB_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9000RFB_FW_PRE __stringify(api) ".ucode"
#define IWL9260A_MODULE_FIRMWARE(api) \
- IWL9260A_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260A_FW_PRE __stringify(api) ".ucode"
#define IWL9260B_MODULE_FIRMWARE(api) \
- IWL9260B_FW_PRE "-" __stringify(api) ".ucode"
+ IWL9260B_FW_PRE __stringify(api) ".ucode"
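The FW_PRE prefixes above already end in a dash, so the old macros composed firmware names with a doubled dash. Illustratively, for API level 34:

::

    /* old: IWL9260A_FW_PRE "-" __stringify(34) ".ucode"
     *      -> "iwlwifi-9260-th-a0-jf-a0--34.ucode"   (double dash)
     * new: IWL9260A_FW_PRE __stringify(34) ".ucode"
     *      -> "iwlwifi-9260-th-a0-jf-a0-34.ucode"
     */

The A000 module-firmware macros a little further down get the same fix.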
#define NVM_HW_SECTION_NUM_FAMILY_9000 10
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+};
+
+const struct iwl_cfg iwl9460_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9460",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9461_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9461",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+
+const struct iwl_cfg iwl9462_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9462",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
- .integrated = true,
};
-MODULE_FIRMWARE(IWL9000_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+const struct iwl_cfg iwl9560_2ac_cfg_soc = {
+ .name = "Intel(R) Dual Band Wireless AC 9560",
+ .fw_name_pre = IWL9000A_FW_PRE,
+ .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE,
+ .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE,
+ IWL_DEVICE_9000,
+ .ht_params = &iwl9000_ht_params,
+ .nvm_ver = IWL9000_NVM_VERSION,
+ .nvm_calib_ver = IWL9000_TX_POWER_VERSION,
+ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
+ .integrated = true,
+ .soc_latency = 5000,
+};
+MODULE_FIRMWARE(IWL9000A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
+MODULE_FIRMWARE(IWL9000B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9000RFB_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260A_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL9260B_MODULE_FIRMWARE(IWL9000_UCODE_API_MAX));
#define IWL_A000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_A000_HR_MODULE_FIRMWARE(api) \
- IWL_A000_HR_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_MODULE_FIRMWARE(api) \
- IWL_A000_JF_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_F0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_JF_B0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_A000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
- IWL_A000_HR_A0_FW_PRE "-" __stringify(api) ".ucode"
+ IWL_A000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_A000 10
* was received. We need to ensure we receive the statistics in order
* to update the temperature used for calibrating the TXPOWER.
*/
-static void iwl_bg_statistics_periodic(unsigned long data)
+static void iwl_bg_statistics_periodic(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t, statistics_periodic);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
* this function is to perform continuous uCode event logging operation
* if enabled
*/
-static void iwl_bg_ucode_trace(unsigned long data)
+static void iwl_bg_ucode_trace(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t, ucode_trace);
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
return;
if (priv->lib->bt_params)
iwlagn_bt_setup_deferred_work(priv);
- setup_timer(&priv->statistics_periodic, iwl_bg_statistics_periodic,
- (unsigned long)priv);
+ timer_setup(&priv->statistics_periodic, iwl_bg_statistics_periodic, 0);
- setup_timer(&priv->ucode_trace, iwl_bg_ucode_trace,
- (unsigned long)priv);
+ timer_setup(&priv->ucode_trace, iwl_bg_ucode_trace, 0);
}
void iwl_cancel_deferred_work(struct iwl_priv *priv)
* without doing anything, driver should continue the 5 seconds timer
* to wake up uCode for temperature check until temperature drop below CT
*/
-static void iwl_tt_check_exit_ct_kill(unsigned long data)
+static void iwl_tt_check_exit_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t,
+ thermal_throttle.ct_kill_exit_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
unsigned long flags;
}
}
-static void iwl_tt_ready_for_ct_kill(unsigned long data)
+static void iwl_tt_ready_for_ct_kill(struct timer_list *t)
{
- struct iwl_priv *priv = (struct iwl_priv *)data;
+ struct iwl_priv *priv = from_timer(priv, t,
+ thermal_throttle.ct_kill_waiting_tm);
struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
if (test_bit(STATUS_EXIT_PENDING, &priv->status))
memset(tt, 0, sizeof(struct iwl_tt_mgmt));
tt->state = IWL_TI_0;
- setup_timer(&priv->thermal_throttle.ct_kill_exit_tm,
- iwl_tt_check_exit_ct_kill, (unsigned long)priv);
- setup_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
- iwl_tt_ready_for_ct_kill, (unsigned long)priv);
+ timer_setup(&priv->thermal_throttle.ct_kill_exit_tm,
+ iwl_tt_check_exit_ct_kill, 0);
+ timer_setup(&priv->thermal_throttle.ct_kill_waiting_tm,
+ iwl_tt_ready_for_ct_kill, 0);
/* setup deferred ct kill work */
INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
#define SCAN_TWO_LMACS 2
+#define SCAN_LB_LMAC_IDX 0
+#define SCAN_HB_LMAC_IDX 1
struct iwl_scan_config {
__le32 flags;
IWL_UMAC_SCAN_GEN_FLAGS_MATCH = BIT(9),
IWL_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = BIT(10),
IWL_UMAC_SCAN_GEN_FLAGS_LMAC2_FRAGMENTED = BIT(11),
+ IWL_UMAC_SCAN_GEN_FLAGS_ADAPTIVE_DWELL = BIT(13),
};
/**
* @uid: scan id, &enum iwl_umac_scan_uid_offsets
* @ooc_priority: out of channel priority - &enum iwl_scan_priority
* @general_flags: &enum iwl_umac_scan_general_flags
- * @reserved2: for future use and alignment
* @scan_start_mac_id: report the scan start TSF time according to this mac TSF
* @extended_dwell: dwell time for channels 1, 6 and 11
* @active_dwell: dwell time for active scan
* @passive_dwell: dwell time for passive scan
* @fragmented_dwell: dwell time for fragmented passive scan
+ * @adwell_default_n_aps: for adaptive dwell the default number of APs
+ * per channel
+ * @adwell_default_n_aps_social: for adaptive dwell the default
+ * number of APs per social (1,6,11) channel
+ * @adwell_max_budget: for adaptive dwell the maximal budget of TU to be added
+ * to total scan time
* @max_out_time: max out of serving channel time, per LMAC - for CDB there
* are 2 LMACs
* @suspend_time: max suspend time, per LMAC - for CDB there are 2 LMACs
* @channel_flags: &enum iwl_scan_channel_flags
* @n_channels: num of channels in scan request
* @reserved: for future use and alignment
+ * @reserved2: for future use and alignment
+ * @reserved3: for future use and alignment
* @data: &struct iwl_scan_channel_cfg_umac and
* &struct iwl_scan_req_umac_tail
*/
__le32 flags;
__le32 uid;
__le32 ooc_priority;
- /* SCAN_GENERAL_PARAMS_API_S_VER_4 */
__le16 general_flags;
- u8 reserved2;
+ u8 reserved;
u8 scan_start_mac_id;
- u8 extended_dwell;
- u8 active_dwell;
- u8 passive_dwell;
- u8 fragmented_dwell;
union {
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time;
__le32 suspend_time;
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v1; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
struct {
+ u8 extended_dwell;
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
__le32 max_out_time[SCAN_TWO_LMACS];
__le32 suspend_time[SCAN_TWO_LMACS];
__le32 scan_priority;
- /* SCAN_CHANNEL_PARAMS_API_S_VER_4 */
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
u8 channel_flags;
u8 n_channels;
- __le16 reserved;
+ __le16 reserved2;
u8 data[];
} v6; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_6 */
+ struct {
+ u8 active_dwell;
+ u8 passive_dwell;
+ u8 fragmented_dwell;
+ u8 adwell_default_n_aps;
+ u8 adwell_default_n_aps_social;
+ u8 reserved3;
+ __le16 adwell_max_budget;
+ __le32 max_out_time[SCAN_TWO_LMACS];
+ __le32 suspend_time[SCAN_TWO_LMACS];
+ __le32 scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ u8 channel_flags;
+ u8 n_channels;
+ __le16 reserved2;
+ u8 data[];
+ } v7; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_7 */
};
} __packed;
-#define IWL_SCAN_REQ_UMAC_SIZE sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V7 sizeof(struct iwl_scan_req_umac)
+#define IWL_SCAN_REQ_UMAC_SIZE_V6 (sizeof(struct iwl_scan_req_umac) - \
+ 2 * sizeof(u8) - sizeof(__le16))
#define IWL_SCAN_REQ_UMAC_SIZE_V1 (sizeof(struct iwl_scan_req_umac) - \
- 2 * sizeof(__le32))
+ 2 * sizeof(__le32) - 2 * sizeof(u8) - \
+ sizeof(__le16))
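The size macros follow directly from the union layout: v7 is the largest member, so IWL_SCAN_REQ_UMAC_SIZE_V7 is the full structure size. Relative to v7, v6 trades the three adaptive-dwell u8 fields plus the __le16 budget for its single extended_dwell byte, a net difference of two u8 and one __le16; v1 additionally keeps only one LMAC's worth of max_out_time/suspend_time, hence the further 2 * sizeof(__le32). Since the structure is __packed, the deltas are exact and could be asserted from any init path, e.g.:

::

    /* sanity checks for the layout deltas described above */
    BUILD_BUG_ON(IWL_SCAN_REQ_UMAC_SIZE_V7 - IWL_SCAN_REQ_UMAC_SIZE_V6 != 4);
    BUILD_BUG_ON(IWL_SCAN_REQ_UMAC_SIZE_V6 - IWL_SCAN_REQ_UMAC_SIZE_V1 != 8);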
/**
* struct iwl_umac_scan_abort
IWL_UCODE_TLV_API_STA_TYPE = (__force iwl_ucode_tlv_api_t)30,
IWL_UCODE_TLV_API_NAN2_VER2 = (__force iwl_ucode_tlv_api_t)31,
/* API Set 1 */
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL = (__force iwl_ucode_tlv_api_t)32,
IWL_UCODE_TLV_API_NEW_BEACON_TEMPLATE = (__force iwl_ucode_tlv_api_t)34,
IWL_UCODE_TLV_API_NEW_RX_STATS = (__force iwl_ucode_tlv_api_t)35,
IWL_UCODE_TLV_API_COEX_ATS_EXTERNAL = (__force iwl_ucode_tlv_api_t)37,
u32 dccm2_len;
u32 smem_offset;
u32 smem_len;
+ u32 soc_latency;
u16 nvm_ver;
u16 nvm_calib_ver;
u16 rx_with_siso_diversity:1,
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
+extern const struct iwl_cfg iwl9460_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9461_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9462_2ac_cfg_soc;
+extern const struct iwl_cfg iwl9560_2ac_cfg_soc;
extern const struct iwl_cfg iwla000_2ac_cfg_hr;
extern const struct iwl_cfg iwla000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwla000_2ac_cfg_jf;
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
+static inline bool iwl_mvm_is_adaptive_dwell_supported(struct iwl_mvm *mvm)
+{
+ return fw_has_api(&mvm->fw->ucode_capa,
+ IWL_UCODE_TLV_API_ADAPTIVE_DWELL);
+}
+
static inline bool iwl_mvm_enter_d0i3_on_suspend(struct iwl_mvm *mvm)
{
/* For now we only use this mode to differentiate between
u32 measurement_dwell;
};
+static inline void *iwl_mvm_get_scan_req_umac_data(struct iwl_mvm *mvm)
+{
+ struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ return (void *)&cmd->v7.data;
+
+ if (iwl_mvm_has_new_tx_api(mvm))
+ return (void *)&cmd->v6.data;
+
+ return (void *)&cmd->v1.data;
+}
+
static u8 iwl_mvm_scan_rx_ant(struct iwl_mvm *mvm)
{
if (mvm->scan_rx_ant != ANT_NONE)
{
struct iwl_mvm_scan_timing_params *timing = &scan_timing[params->type];
+ if (iwl_mvm_is_regular_scan(params))
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ else
+ cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
+
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ if (params->measurement_dwell) {
+ cmd->v7.active_dwell = params->measurement_dwell;
+ cmd->v7.passive_dwell = params->measurement_dwell;
+ } else {
+ cmd->v7.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v7.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ }
+ cmd->v7.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+
+ cmd->v7.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
+ cmd->v7.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ if (iwl_mvm_is_cdb_supported(mvm)) {
+ cmd->v7.max_out_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v7.suspend_time[SCAN_HB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
+ }
+
+ return;
+ }
+
if (params->measurement_dwell) {
- cmd->active_dwell = params->measurement_dwell;
- cmd->passive_dwell = params->measurement_dwell;
- cmd->extended_dwell = params->measurement_dwell;
+ cmd->v1.active_dwell = params->measurement_dwell;
+ cmd->v1.passive_dwell = params->measurement_dwell;
+ cmd->v1.extended_dwell = params->measurement_dwell;
} else {
- cmd->active_dwell = IWL_SCAN_DWELL_ACTIVE;
- cmd->passive_dwell = IWL_SCAN_DWELL_PASSIVE;
- cmd->extended_dwell = IWL_SCAN_DWELL_EXTENDED;
+ cmd->v1.active_dwell = IWL_SCAN_DWELL_ACTIVE;
+ cmd->v1.passive_dwell = IWL_SCAN_DWELL_PASSIVE;
+ cmd->v1.extended_dwell = IWL_SCAN_DWELL_EXTENDED;
}
- cmd->fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
+ cmd->v1.fragmented_dwell = IWL_SCAN_DWELL_FRAGMENTED;
if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.scan_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- cmd->v6.max_out_time[0] = cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[0] = cpu_to_le32(timing->suspend_time);
+ cmd->v6.max_out_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->max_out_time);
+ cmd->v6.suspend_time[SCAN_LB_LMAC_IDX] =
+ cpu_to_le32(timing->suspend_time);
if (iwl_mvm_is_cdb_supported(mvm)) {
- cmd->v6.max_out_time[1] =
+ cmd->v6.max_out_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->max_out_time);
- cmd->v6.suspend_time[1] =
+ cmd->v6.suspend_time[SCAN_HB_LMAC_IDX] =
cpu_to_le32(timing->suspend_time);
}
} else {
cmd->v1.scan_priority =
cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
}
-
- if (iwl_mvm_is_regular_scan(params))
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_6);
- else
- cmd->ooc_priority = cpu_to_le32(IWL_SCAN_PRIORITY_EXT_2);
}
static void
int type)
{
struct iwl_scan_req_umac *cmd = mvm->scan_cmd;
- void *cmd_data = iwl_mvm_has_new_tx_api(mvm) ?
- (void *)&cmd->v6.data : (void *)&cmd->v1.data;
+ void *cmd_data = iwl_mvm_get_scan_req_umac_data(mvm);
struct iwl_scan_req_umac_tail *sec_part = cmd_data +
sizeof(struct iwl_scan_channel_cfg_umac) *
mvm->fw->ucode_capa.n_scan_channels;
IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
IWL_SCAN_CHANNEL_FLAG_CACHE_ADD;
- if (iwl_mvm_has_new_tx_api(mvm)) {
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm)) {
+ cmd->v7.channel_flags = channel_flags;
+ cmd->v7.n_channels = params->n_channels;
+ } else if (iwl_mvm_has_new_tx_api(mvm)) {
cmd->v6.channel_flags = channel_flags;
cmd->v6.n_channels = params->n_channels;
} else {
{
int base_size = IWL_SCAN_REQ_UMAC_SIZE_V1;
- if (iwl_mvm_has_new_tx_api(mvm))
- base_size = IWL_SCAN_REQ_UMAC_SIZE;
+ if (iwl_mvm_is_adaptive_dwell_supported(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V7;
+ else if (iwl_mvm_has_new_tx_api(mvm))
+ base_size = IWL_SCAN_REQ_UMAC_SIZE_V6;
if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
return base_size +
{IWL_PCI_DEVICE(0x2526, 0x0238, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x023C, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0260, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x0264, iwl9461_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x02A0, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x02A4, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1210, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x1410, iwl9270_2ac_cfg)},
- {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x1420, iwl9460_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x2526, 0x1610, iwl9270_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4010, iwl9260_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x4030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x40A4, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0xA014, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2526, 0x42A4, iwl9462_2ac_cfg_soc)},
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0014, iwl9160_2ac_cfg)},
{IWL_PCI_DEVICE(0x271B, 0x0210, iwl9160_2ac_cfg)},
- {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
- {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x271B, 0x0214, iwl9260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0034, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0038, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x003C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0060, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0230, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0234, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0238, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x023C, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x2720, 0x4030, iwl9560_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x2720, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x30DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x31DC, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x34F0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0000, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0210, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0310, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0410, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0510, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0610, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0710, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x0A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2010, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x2A10, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0x9DF0, 0x40A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0038, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x003C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0064, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x00A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0230, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0234, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0238, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x023C, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0260, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x0264, iwl9461_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A0, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x02A4, iwl9462_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x1030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4030, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x4034, iwl9560_2ac_cfg_soc)},
+ {IWL_PCI_DEVICE(0xA370, 0x40A4, iwl9462_2ac_cfg_soc)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg_hr_cdb)},
memset(ptr, 0, sizeof(*ptr));
}
-static void iwl_pcie_txq_stuck_timer(unsigned long data)
+static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
- struct iwl_txq *txq = (void *)data;
+ struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
if (WARN_ON(txq->entries || txq->tfds))
return -EINVAL;
- setup_timer(&txq->stuck_timer, iwl_pcie_txq_stuck_timer,
- (unsigned long)txq);
+ timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
txq->trans_pcie = trans_pcie;
txq->n_window = slots_num;
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
-static void ap_handle_timer(unsigned long data)
+static void ap_handle_timer(struct timer_list *t)
{
- struct sta_info *sta = (struct sta_info *) data;
+ struct sta_info *sta = from_timer(sta, t, timer);
local_info_t *local;
struct ap_data *ap;
unsigned long next_time = 0;
}
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
- init_timer(&sta->timer);
+ timer_setup(&sta->timer, ap_handle_timer, 0);
sta->timer.expires = jiffies + ap->max_inactivity;
- sta->timer.data = (unsigned long) sta;
- sta->timer.function = ap_handle_timer;
if (!ap->local->hostapd)
add_timer(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
}
-static void hostap_passive_scan(unsigned long data)
+static void hostap_passive_scan(struct timer_list *t)
{
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_timer(local, t, passive_scan_timer);
struct net_device *dev = local->dev;
u16 chan;
* used to monitor that local->last_tick_timer is being updated. If not,
* interrupt busy-loop is assumed and driver tries to recover by masking out
* some events. */
-static void hostap_tick_timer(unsigned long data)
+static void hostap_tick_timer(struct timer_list *t)
{
static unsigned long last_inquire = 0;
- local_info_t *local = (local_info_t *) data;
+ local_info_t *local = from_timer(local, t, tick_timer);
local->last_tick_timer = jiffies;
/* Inquire CommTallies every 10 seconds to keep the statistics updated
lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock);
- init_timer(&local->passive_scan_timer);
- local->passive_scan_timer.data = (unsigned long) local;
- local->passive_scan_timer.function = hostap_passive_scan;
-
- init_timer(&local->tick_timer);
- local->tick_timer.data = (unsigned long) local;
- local->tick_timer.function = hostap_tick_timer;
+ timer_setup(&local->passive_scan_timer, hostap_passive_scan, 0);
+ timer_setup(&local->tick_timer, hostap_tick_timer, 0);
local->tick_timer.expires = jiffies + 2 * HZ;
add_timer(&local->tick_timer);
mod_timer(timer, expire);
}
-static void ezusb_request_timerfn(u_long _ctx)
+static void ezusb_request_timerfn(struct timer_list *t)
{
- struct request_context *ctx = (void *) _ctx;
+ struct request_context *ctx = from_timer(ctx, t, timer);
ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK;
if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) {
refcount_set(&ctx->refcount, 1);
init_completion(&ctx->done);
- setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx);
+ timer_setup(&ctx->timer, ezusb_request_timerfn, 0);
return ctx;
}
return -EFAULT;
}
- mac->scan_timeout.function = (TIMER_FUNC_TYPE)qtnf_scan_timeout;
+ mac->scan_timeout.function = qtnf_scan_timeout;
mod_timer(&mac->scan_timeout,
jiffies + QTNF_SCAN_TIMEOUT_SEC * HZ);
mac->iflist[i].vifid = i;
qtnf_sta_list_init(&mac->iflist[i].sta_list);
mutex_init(&mac->mac_lock);
- setup_timer(&mac->scan_timeout, NULL, 0);
+ timer_setup(&mac->scan_timeout, NULL, 0);
}
qtnf_mac_init_primary_intf(mac);
local->card_status = CARD_DL_PARAM;
/* Start kernel timer to wait for dl startup to complete. */
local->timer.expires = jiffies + HZ / 2;
- local->timer.function = (TIMER_FUNC_TYPE)verify_dl_startup;
+ local->timer.function = verify_dl_startup;
add_timer(&local->timer);
dev_dbg(&link->dev,
"ray_cs dl_startup_params started timer for verify_dl_startup\n");
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" start failed\n",
memtmp);
- local->timer.function = (TIMER_FUNC_TYPE)start_net;
+ local->timer.function = start_net;
} else {
dev_dbg(&link->dev,
"ray_cs interrupt network \"%s\" join failed\n",
memtmp);
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
}
add_timer(&local->timer);
}
del_timer(&local->timer);
if (build_auth_frame(local, local->bss_id, OPEN_AUTH_REQUEST)) {
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
} else {
- local->timer.function = (TIMER_FUNC_TYPE)authenticate_timeout;
+ local->timer.function = authenticate_timeout;
}
local->timer.expires = jiffies + HZ * 2;
add_timer(&local->timer);
del_timer(&local->timer);
local->timer.expires = jiffies + HZ * 2;
- local->timer.function = (TIMER_FUNC_TYPE)join_net;
+ local->timer.function = join_net;
add_timer(&local->timer);
local->card_status = CARD_ASSOC_FAILED;
return;
priv->bss_loss_state++;
- skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
WARN_ON(!skb);
if (skb)
cw1200_tx(priv->hw, NULL, skb);
.rate = 0xFF,
};
- frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif);
+ frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif, false);
if (!frame.skb)
return -ENOMEM;
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
- skb = ieee80211_nullfunc_get(wl->hw, wl->vif);
+ skb = ieee80211_nullfunc_get(wl->hw, wl->vif, false);
if (!skb)
goto out;
size = skb->len;
ptr = NULL;
} else {
skb = ieee80211_nullfunc_get(wl->hw,
- wl12xx_wlvif_to_vif(wlvif));
+ wl12xx_wlvif_to_vif(wlvif),
+ false);
if (!skb)
goto out;
size = skb->len;
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
- skb = ieee80211_nullfunc_get(wl->hw, vif);
+ skb = ieee80211_nullfunc_get(wl->hw, vif, false);
if (!skb)
goto out;
mutex_unlock(&wl->mutex);
}
-static void wl1271_rx_streaming_timer(unsigned long data)
+static void wl1271_rx_streaming_timer(struct timer_list *t)
{
- struct wl12xx_vif *wlvif = (struct wl12xx_vif *)data;
+ struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
struct wl1271 *wl = wlvif->wl;
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
- setup_timer(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer,
- (unsigned long) wlvif);
+ timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
return 0;
}
/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
+static DECLARE_WAIT_QUEUE_HEAD(module_unload_q);
+
struct netfront_stats {
u64 packets;
u64 bytes;
}
-static void rx_refill_timeout(unsigned long data)
+static void rx_refill_timeout(struct timer_list *t)
{
- struct netfront_queue *queue = (struct netfront_queue *)data;
+ struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer);
napi_schedule(&queue->napi);
}
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
- (unsigned long)queue);
+ timer_setup(&queue->rx_refill_timer, rx_refill_timeout, 0);
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
queue->info->netdev->name, queue->id);
break;
case XenbusStateClosed:
+ wake_up_all(&module_unload_q);
if (dev->state == XenbusStateClosed)
break;
/* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
+ wake_up_all(&module_unload_q);
xenbus_frontend_closed(dev);
break;
}
dev_dbg(&dev->dev, "%s\n", dev->nodename);
+ if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) {
+ xenbus_switch_state(dev, XenbusStateClosing);
+ wait_event(module_unload_q,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosing);
+
+ xenbus_switch_state(dev, XenbusStateClosed);
+ wait_event(module_unload_q,
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateClosed ||
+ xenbus_read_driver_state(dev->otherend) ==
+ XenbusStateUnknown);
+ }
+
xennet_disconnect_backend(info);
unregister_netdev(info->netdev);
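The removal path now handshakes with the backend instead of tearing down immediately: the otherend state-change handler wakes module_unload_q on Closing/Closed, and remove() walks the backend through both states before disconnecting, presumably so the backend has dropped its references to the shared rings before they are freed. The waker/waiter pairing in isolation:

::

    /* waker: otherend state-change handler */
    wake_up_all(&module_unload_q);

    /* waiter: frontend remove() path */
    xenbus_switch_state(dev, XenbusStateClosing);
    wait_event(module_unload_q,
               xenbus_read_driver_state(dev->otherend) ==
               XenbusStateClosing);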
nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error);
}
-static void fw_dnld_timeout(unsigned long arg)
+static void fw_dnld_timeout(struct timer_list *t)
{
- struct nfcmrvl_private *priv = (struct nfcmrvl_private *) arg;
+ struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer);
nfc_err(priv->dev, "FW loading timeout");
priv->fw_dnld.state = STATE_RESET;
}
/* Configure a timer for timeout */
- setup_timer(&priv->fw_dnld.timer, fw_dnld_timeout,
- (unsigned long) priv);
+ timer_setup(&priv->fw_dnld.timer, fw_dnld_timeout, 0);
mod_timer(&priv->fw_dnld.timer,
jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT));
return 0;
}
-static void pn533_listen_mode_timer(unsigned long data)
+static void pn533_listen_mode_timer(struct timer_list *t)
{
- struct pn533 *dev = (struct pn533 *)data;
+ struct pn533 *dev = from_timer(dev, t, listen_timer);
dev_dbg(dev->dev, "Listen mode timeout\n");
if (priv->wq == NULL)
goto error;
- init_timer(&priv->listen_timer);
- priv->listen_timer.data = (unsigned long) priv;
- priv->listen_timer.function = pn533_listen_mode_timer;
+ timer_setup(&priv->listen_timer, pn533_listen_mode_timer, 0);
skb_queue_head_init(&priv->resp_q);
skb_queue_head_init(&priv->fragment_skb);
}
EXPORT_SYMBOL(ndlc_recv);
-static void ndlc_t1_timeout(unsigned long data)
+static void ndlc_t1_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+ struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer);
pr_debug("\n");
schedule_work(&ndlc->sm_work);
}
-static void ndlc_t2_timeout(unsigned long data)
+static void ndlc_t2_timeout(struct timer_list *t)
{
- struct llt_ndlc *ndlc = (struct llt_ndlc *)data;
+ struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer);
pr_debug("\n");
*ndlc_id = ndlc;
/* initialize timers */
- init_timer(&ndlc->t1_timer);
- ndlc->t1_timer.data = (unsigned long)ndlc;
- ndlc->t1_timer.function = ndlc_t1_timeout;
-
- init_timer(&ndlc->t2_timer);
- ndlc->t2_timer.data = (unsigned long)ndlc;
- ndlc->t2_timer.function = ndlc_t2_timeout;
+ timer_setup(&ndlc->t1_timer, ndlc_t1_timeout, 0);
+ timer_setup(&ndlc->t2_timer, ndlc_t2_timeout, 0);
skb_queue_head_init(&ndlc->rcv_q);
skb_queue_head_init(&ndlc->send_q);
}
EXPORT_SYMBOL(st_nci_se_io);
-static void st_nci_se_wt_timeout(unsigned long data)
+static void st_nci_se_wt_timeout(struct timer_list *t)
{
/*
* No answer from the secure element
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st_nci_info *info = (struct st_nci_info *) data;
+ struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer);
pr_debug("\n");
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st_nci_se_activation_timeout(unsigned long data)
+static void st_nci_se_activation_timeout(struct timer_list *t)
{
- struct st_nci_info *info = (struct st_nci_info *) data;
+ struct st_nci_info *info = from_timer(info, t,
+ se_info.se_active_timer);
pr_debug("\n");
init_completion(&info->se_info.req_completion);
/* initialize timers */
- init_timer(&info->se_info.bwi_timer);
- info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st_nci_se_wt_timeout;
+ timer_setup(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 0);
info->se_info.bwi_active = false;
- init_timer(&info->se_info.se_active_timer);
- info->se_info.se_active_timer.data = (unsigned long)info;
- info->se_info.se_active_timer.function =
- st_nci_se_activation_timeout;
+ timer_setup(&info->se_info.se_active_timer,
+ st_nci_se_activation_timeout, 0);
info->se_info.se_active = false;
info->se_info.xch_error = false;
}
EXPORT_SYMBOL(st21nfca_hci_se_io);
-static void st21nfca_se_wt_timeout(unsigned long data)
+static void st21nfca_se_wt_timeout(struct timer_list *t)
{
/*
* No answer from the secure element
*/
/* hardware reset managed through VCC_UICC_OUT power supply */
u8 param = 0x01;
- struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+ struct st21nfca_hci_info *info = from_timer(info, t,
+ se_info.bwi_timer);
pr_debug("\n");
info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME);
}
-static void st21nfca_se_activation_timeout(unsigned long data)
+static void st21nfca_se_activation_timeout(struct timer_list *t)
{
- struct st21nfca_hci_info *info = (struct st21nfca_hci_info *) data;
+ struct st21nfca_hci_info *info = from_timer(info, t,
+ se_info.se_active_timer);
pr_debug("\n");
init_completion(&info->se_info.req_completion);
/* initialize timers */
- init_timer(&info->se_info.bwi_timer);
- info->se_info.bwi_timer.data = (unsigned long)info;
- info->se_info.bwi_timer.function = st21nfca_se_wt_timeout;
+ timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0);
info->se_info.bwi_active = false;
- init_timer(&info->se_info.se_active_timer);
- info->se_info.se_active_timer.data = (unsigned long)info;
- info->se_info.se_active_timer.function = st21nfca_se_activation_timeout;
+ timer_setup(&info->se_info.se_active_timer,
+ st21nfca_se_activation_timeout, 0);
info->se_info.se_active = false;
info->se_info.count_pipes = 0;
static struct dentry *pp_debugfs_dir;
-static void pp_ping(unsigned long ctx)
+static void pp_ping(struct timer_list *t)
{
- struct pp_ctx *pp = (void *)ctx;
+ struct pp_ctx *pp = from_timer(pp, t, db_timer);
unsigned long irqflags;
u64 db_bits, db_mask;
u32 spad_rd, spad_wr;
if (ntb_link_is_up(pp->ntb, NULL, NULL) == 1) {
dev_dbg(&pp->ntb->dev, "link is up\n");
- pp_ping((unsigned long)pp);
+ pp_ping(&pp->db_timer);
} else {
dev_dbg(&pp->ntb->dev, "link is down\n");
del_timer(&pp->db_timer);
pp->db_bits = 0;
atomic_set(&pp->count, 0);
spin_lock_init(&pp->db_lock);
- setup_timer(&pp->db_timer, pp_ping, (unsigned long)pp);
+ timer_setup(&pp->db_timer, pp_ping, 0);
pp->db_delay = msecs_to_jiffies(delay_ms);
rc = ntb_set_ctx(ntb, pp, &pp_ops);
int srcu_idx, ret;
u8 data[16] = { 0, };
+ ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
+ if (unlikely(!ns))
+ return -EWOULDBLOCK;
+
put_unaligned_le64(key, &data[0]);
put_unaligned_le64(sa_key, &data[8]);
memset(&c, 0, sizeof(c));
c.common.opcode = op;
- c.common.nsid = cpu_to_le32(head->ns_id);
+ c.common.nsid = cpu_to_le32(ns->head->ns_id);
c.common.cdw10[0] = cpu_to_le32(cdw10);
- ns = nvme_get_ns_from_disk(bdev->bd_disk, &head, &srcu_idx);
- if (unlikely(!ns))
- ret = -EWOULDBLOCK;
- else
- ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
+ ret = nvme_submit_sync_cmd(ns->queue, &c, data, 16);
nvme_put_ns_from_disk(head, srcu_idx);
return ret;
}
static void nvme_ns_remove(struct nvme_ns *ns)
{
- struct nvme_ns_head *head = ns->head;
-
if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
return;
mutex_lock(&ns->ctrl->subsys->lock);
nvme_mpath_clear_current_path(ns);
- if (head)
- list_del_rcu(&ns->siblings);
+ list_del_rcu(&ns->siblings);
mutex_unlock(&ns->ctrl->subsys->lock);
mutex_lock(&ns->ctrl->namespaces_mutex);
list_del_init(&ns->list);
mutex_unlock(&ns->ctrl->namespaces_mutex);
- synchronize_srcu(&head->srcu);
+ synchronize_srcu(&ns->head->srcu);
nvme_put_ns(ns);
}
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
+static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
+ struct request *rq)
+{
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
+
+ /*
+ * We cannot accept any other command until the connect command has
+ * completed, so only allow connect to pass.
+ */
+ if (!blk_rq_is_passthrough(rq) ||
+ cmd->common.opcode != nvme_fabrics_command ||
+ cmd->fabrics.fctype != nvme_fabrics_type_connect) {
+ /*
+ * Reconnecting state means transport disruption, which can take
+ * a long time and even might fail permanently, fail fast to
+ * give upper layers a chance to failover.
+ * Deleting state means that the ctrl will never accept commands
+ * again, fail it permanently.
+ */
+ if (ctrl->state == NVME_CTRL_RECONNECTING ||
+ ctrl->state == NVME_CTRL_DELETING) {
+ nvme_req(rq)->status = NVME_SC_ABORT_REQ;
+ return BLK_STS_IOERR;
+ }
+ return BLK_STS_RESOURCE; /* try again later */
+ }
+
+ return BLK_STS_OK;
+}
+
#endif /* _NVME_FABRICS_H */
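Each fabrics transport pairs this helper with its own queue-liveness flag, as the FC, RDMA and loop hunks below show: the fast path is a single test_bit(), and the connect-only filtering runs just while the queue is coming up. The common wrapper shape (queue type and flag name illustrative):

::

    static inline blk_status_t xxx_is_ready(struct xxx_queue *queue,
                                            struct request *rq)
    {
        /* fast path: queue is live, accept any command */
        if (likely(test_bit(XXX_Q_LIVE, &queue->flags)))
            return BLK_STS_OK;
        /* queue still connecting: only fabrics connect may pass */
        return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
    }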
enum nvme_fc_queue_flags {
- NVME_FC_Q_CONNECTED = (1 << 0),
+ NVME_FC_Q_CONNECTED = 0,
+ NVME_FC_Q_LIVE,
};
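The switch from mask-style values to plain bit indices matters because queue->flags is manipulated with set_bit()/test_bit()-style helpers, which take bit numbers rather than masks; with the old (1 << 0) definition, CONNECTED actually named bit 1, and the newly added Q_LIVE would have collided with it:

::

    set_bit(NVME_FC_Q_CONNECTED, &queue->flags);  /* now sets bit 0 */
    set_bit(NVME_FC_Q_LIVE, &queue->flags);       /* sets bit 1 */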
#define NVMEFC_QUEUE_DELAY 3 /* ms units */
if (!test_and_clear_bit(NVME_FC_Q_CONNECTED, &queue->flags))
return;
+ clear_bit(NVME_FC_Q_LIVE, &queue->flags);
/*
* Current implementation never disconnects a single queue.
* It always terminates a whole association. So there is never
*/
queue->connection_id = 0;
- clear_bit(NVME_FC_Q_CONNECTED, &queue->flags);
}
static void
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
break;
+
+ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[i].flags);
}
return ret;
return BLK_STS_RESOURCE;
}
+static inline blk_status_t nvme_fc_is_ready(struct nvme_fc_queue *queue,
+ struct request *rq)
+{
+ if (unlikely(!test_bit(NVME_FC_Q_LIVE, &queue->flags)))
+ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+ return BLK_STS_OK;
+}
+
static blk_status_t
nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
u32 data_len;
blk_status_t ret;
+ ret = nvme_fc_is_ready(queue, rq);
+ if (unlikely(ret))
+ return ret;
+
ret = nvme_setup_cmd(ns, rq, sqe);
if (ret)
return ret;
if (ret)
goto out_disconnect_admin_queue;
+ set_bit(NVME_FC_Q_LIVE, &ctrl->queues[0].flags);
+
/*
* Check controller capabilities
*
bio->bi_opf |= REQ_NVME_MPATH;
ret = direct_make_request(bio);
} else if (!list_empty_careful(&head->list)) {
- dev_warn_ratelimited(dev, "no path available - requeing I/O\n");
+ dev_warn_ratelimited(dev, "no path available - requeuing I/O\n");
spin_lock_irq(&head->requeue_lock);
bio_list_add(&head->requeue_list, bio);
* NVME_QUIRK_DELAY_BEFORE_CHK_RDY quirk enabled. The value (in ms) was
* found empirically.
*/
-#define NVME_QUIRK_DELAY_AMOUNT 2000
+#define NVME_QUIRK_DELAY_AMOUNT 2300
enum nvme_ctrl_state {
NVME_CTRL_NEW,
dev->nr_host_mem_descs * sizeof(*dev->host_mem_descs),
dev->host_mem_descs, dev->host_mem_descs_dma);
dev->host_mem_descs = NULL;
+ dev->nr_host_mem_descs = 0;
}
static int __nvme_alloc_host_mem(struct nvme_dev *dev, u64 preferred,
if (!bufs)
goto out_free_descs;
- for (size = 0; size < preferred; size += len) {
+ for (size = 0; size < preferred && i < max_entries; size += len) {
dma_addr_t dma_addr;
len = min_t(u64, chunk_size, preferred - size);
return -ENODEV;
}
-static unsigned long check_dell_samsung_bug(struct pci_dev *pdev)
+static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
{
if (pdev->vendor == 0x144d && pdev->device == 0xa802) {
/*
(dmi_match(DMI_PRODUCT_NAME, "XPS 15 9550") ||
dmi_match(DMI_PRODUCT_NAME, "Precision 5510")))
return NVME_QUIRK_NO_DEEPEST_PS;
+ } else if (pdev->vendor == 0x144d && pdev->device == 0xa804) {
+ /*
+ * Samsung SSD 960 EVO drops off the PCIe bus after system
+ * suspend on a Ryzen board, ASUS PRIME B350M-A.
+ */
+ if (dmi_match(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC.") &&
+ dmi_match(DMI_BOARD_NAME, "PRIME B350M-A"))
+ return NVME_QUIRK_NO_APST;
}
return 0;
if (result)
goto unmap;
- quirks |= check_dell_samsung_bug(pdev);
+ quirks |= check_vendor_combination_bug(pdev);
result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
quirks);
.driver_data = NVME_QUIRK_IDENTIFY_CNS, },
{ PCI_DEVICE(0x1c58, 0x0003), /* HGST adapter */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
+ { PCI_DEVICE(0x1c58, 0x0023), /* WDC SN200 adapter */
+ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */
.driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, },
{ PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <rdma/mr_pool.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/atomic.h>
struct nvme_request req;
struct ib_mr *mr;
struct nvme_rdma_qe sqe;
+ union nvme_result result;
+ __le16 status;
+ refcount_t ref;
struct ib_sge sge[1 + NVME_RDMA_MAX_INLINE_SEGMENTS];
u32 num_sge;
int nents;
enum nvme_rdma_queue_flags {
NVME_RDMA_Q_ALLOCATED = 0,
NVME_RDMA_Q_LIVE = 1,
+ NVME_RDMA_Q_TR_READY = 2,
};
struct nvme_rdma_queue {
struct nvme_rdma_qe *rsp_ring;
- atomic_t sig_count;
int queue_size;
size_t cmnd_capsule_len;
struct nvme_rdma_ctrl *ctrl;
return ret;
}
-static int nvme_rdma_reinit_request(void *data, struct request *rq)
-{
- struct nvme_rdma_ctrl *ctrl = data;
- struct nvme_rdma_device *dev = ctrl->device;
- struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- int ret = 0;
-
- if (WARN_ON_ONCE(!req->mr))
- return 0;
-
- ib_dereg_mr(req->mr);
-
- req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
- ctrl->max_fr_pages);
- if (IS_ERR(req->mr)) {
- ret = PTR_ERR(req->mr);
- req->mr = NULL;
- goto out;
- }
-
- req->mr->need_inval = false;
-
-out:
- return ret;
-}
-
static void nvme_rdma_exit_request(struct blk_mq_tag_set *set,
struct request *rq, unsigned int hctx_idx)
{
struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx];
struct nvme_rdma_device *dev = queue->device;
- if (req->mr)
- ib_dereg_mr(req->mr);
-
nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
DMA_TO_DEVICE);
}
if (ret)
return ret;
- req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG,
- ctrl->max_fr_pages);
- if (IS_ERR(req->mr)) {
- ret = PTR_ERR(req->mr);
- goto out_free_qe;
- }
-
req->queue = queue;
return 0;
-
-out_free_qe:
- nvme_rdma_free_qe(dev->dev, &req->sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
- return -ENOMEM;
}
static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
static void nvme_rdma_destroy_queue_ib(struct nvme_rdma_queue *queue)
{
- struct nvme_rdma_device *dev = queue->device;
- struct ib_device *ibdev = dev->dev;
+ struct nvme_rdma_device *dev;
+ struct ib_device *ibdev;
- rdma_destroy_qp(queue->cm_id);
+ if (!test_and_clear_bit(NVME_RDMA_Q_TR_READY, &queue->flags))
+ return;
+
+ dev = queue->device;
+ ibdev = dev->dev;
+
+ ib_mr_pool_destroy(queue->qp, &queue->qp->rdma_mrs);
+
+ /*
+	 * The cm_id object might already have been destroyed during the RDMA
+	 * connection establishment error flow (to avoid receiving further cma
+	 * events), so the QP must be destroyed here with ib_destroy_qp()
+	 * rather than through the rdma_cm API.
+ */
+ ib_destroy_qp(queue->qp);
ib_free_cq(queue->ib_cq);
nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
nvme_rdma_dev_put(dev);
}
+static int nvme_rdma_get_max_fr_pages(struct ib_device *ibdev)
+{
+ return min_t(u32, NVME_RDMA_MAX_SEGMENTS,
+ ibdev->attrs.max_fast_reg_page_list_len);
+}
+
static int nvme_rdma_create_queue_ib(struct nvme_rdma_queue *queue)
{
struct ib_device *ibdev;
goto out_destroy_qp;
}
+ ret = ib_mr_pool_init(queue->qp, &queue->qp->rdma_mrs,
+ queue->queue_size,
+ IB_MR_TYPE_MEM_REG,
+ nvme_rdma_get_max_fr_pages(ibdev));
+ if (ret) {
+ dev_err(queue->ctrl->ctrl.device,
+ "failed to initialize MR pool sized %d for QID %d\n",
+ queue->queue_size, idx);
+ goto out_destroy_ring;
+ }
+
+ set_bit(NVME_RDMA_Q_TR_READY, &queue->flags);
+
return 0;
+out_destroy_ring:
+ nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,
+ sizeof(struct nvme_completion), DMA_FROM_DEVICE);
out_destroy_qp:
rdma_destroy_qp(queue->cm_id);
out_destroy_ib_cq:
queue->cmnd_capsule_len = sizeof(struct nvme_command);
queue->queue_size = queue_size;
- atomic_set(&queue->sig_count, 0);
queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue,
RDMA_PS_TCP, IB_QPT_RC);
out_destroy_cm_id:
rdma_destroy_id(queue->cm_id);
+ nvme_rdma_destroy_queue_ib(queue);
return ret;
}
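Registrations now come from a per-QP MR pool sized to the queue depth, replacing the per-request ib_alloc_mr() at init time and the reinit_request() re-allocation after resets (both deleted above). The borrow/return flow, reduced to its essentials (ib_mr_pool_* is the real rdma/mr_pool.h API; the surrounding names are illustrative):

::

    #include <rdma/mr_pool.h>

    /* queue setup: pre-populate the pool */
    ret = ib_mr_pool_init(qp, &qp->rdma_mrs, queue_size,
                          IB_MR_TYPE_MEM_REG, max_fr_pages);

    /* per I/O: borrow an MR for the data mapping ... */
    req->mr = ib_mr_pool_get(qp, &qp->rdma_mrs);
    if (!req->mr)
        return -EAGAIN;         /* pool exhausted, retry later */

    /* ... and return it when the request is unmapped */
    ib_mr_pool_put(qp, &qp->rdma_mrs, req->mr);
    req->mr = NULL;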
ctrl->device = ctrl->queues[0].device;
- ctrl->max_fr_pages = min_t(u32, NVME_RDMA_MAX_SEGMENTS,
- ctrl->device->dev->attrs.max_fast_reg_page_list_len);
+ ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
error = PTR_ERR(ctrl->ctrl.admin_q);
goto out_free_tagset;
}
- } else {
- error = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
- if (error)
- goto out_free_queue;
}
error = nvme_rdma_start_queue(ctrl, 0);
goto out_free_tag_set;
}
} else {
- ret = nvme_reinit_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
- if (ret)
- goto out_free_io_queues;
-
blk_mq_update_nr_hw_queues(&ctrl->tag_set,
ctrl->ctrl.queue_count - 1);
}
static void nvme_rdma_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc)
{
- if (unlikely(wc->status != IB_WC_SUCCESS))
+ struct nvme_rdma_request *req =
+ container_of(wc->wr_cqe, struct nvme_rdma_request, reg_cqe);
+ struct request *rq = blk_mq_rq_from_pdu(req);
+
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
nvme_rdma_wr_error(cq, wc, "LOCAL_INV");
+ return;
+ }
+
+ if (refcount_dec_and_test(&req->ref))
+ nvme_end_request(rq, req->status, req->result);
}
static int nvme_rdma_inv_rkey(struct nvme_rdma_queue *queue,
.opcode = IB_WR_LOCAL_INV,
.next = NULL,
.num_sge = 0,
- .send_flags = 0,
+ .send_flags = IB_SEND_SIGNALED,
.ex.invalidate_rkey = req->mr->rkey,
};
struct request *rq)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
- struct nvme_rdma_ctrl *ctrl = queue->ctrl;
struct nvme_rdma_device *dev = queue->device;
struct ib_device *ibdev = dev->dev;
- int res;
if (!blk_rq_bytes(rq))
return;
- if (req->mr->need_inval && test_bit(NVME_RDMA_Q_LIVE, &req->queue->flags)) {
- res = nvme_rdma_inv_rkey(queue, req);
- if (unlikely(res < 0)) {
- dev_err(ctrl->ctrl.device,
- "Queueing INV WR for rkey %#x failed (%d)\n",
- req->mr->rkey, res);
- nvme_rdma_error_recovery(queue->ctrl);
- }
+ if (req->mr) {
+ ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+ req->mr = NULL;
}
ib_dma_unmap_sg(ibdev, req->sg_table.sgl,
struct nvme_keyed_sgl_desc *sg = &c->common.dptr.ksgl;
int nr;
+ req->mr = ib_mr_pool_get(queue->qp, &queue->qp->rdma_mrs);
+ if (WARN_ON_ONCE(!req->mr))
+ return -EAGAIN;
+
/*
* Align the MR to a 4K page size to match the ctrl page size and
* the block virtual boundary.
*/
nr = ib_map_mr_sg(req->mr, req->sg_table.sgl, count, NULL, SZ_4K);
if (unlikely(nr < count)) {
+ ib_mr_pool_put(queue->qp, &queue->qp->rdma_mrs, req->mr);
+ req->mr = NULL;
if (nr < 0)
return nr;
return -EINVAL;
IB_ACCESS_REMOTE_READ |
IB_ACCESS_REMOTE_WRITE;
- req->mr->need_inval = true;
-
sg->addr = cpu_to_le64(req->mr->iova);
put_unaligned_le24(req->mr->length, sg->length);
put_unaligned_le32(req->mr->rkey, sg->key);
req->num_sge = 1;
req->inline_data = false;
- req->mr->need_inval = false;
+ refcount_set(&req->ref, 2); /* send and recv completions */
c->common.flags |= NVME_CMD_SGL_METABUF;
static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc)
{
- if (unlikely(wc->status != IB_WC_SUCCESS))
- nvme_rdma_wr_error(cq, wc, "SEND");
-}
+ struct nvme_rdma_qe *qe =
+ container_of(wc->wr_cqe, struct nvme_rdma_qe, cqe);
+ struct nvme_rdma_request *req =
+ container_of(qe, struct nvme_rdma_request, sqe);
+ struct request *rq = blk_mq_rq_from_pdu(req);
-/*
- * We want to signal completion at least every queue depth/2. This returns the
- * largest power of two that is not above half of (queue size + 1) to optimize
- * (avoid divisions).
- */
-static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue)
-{
- int limit = 1 << ilog2((queue->queue_size + 1) / 2);
+ if (unlikely(wc->status != IB_WC_SUCCESS)) {
+ nvme_rdma_wr_error(cq, wc, "SEND");
+ return;
+ }
- return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0;
+ if (refcount_dec_and_test(&req->ref))
+ nvme_end_request(rq, req->status, req->result);
}
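Request completion is now gated on a reference count initialised to 2: one reference belongs to the send completion, the other to the receive path (or, when a local invalidation is needed, to its completion). Whichever side finishes last ends the request, which is also why every send is posted IB_SEND_SIGNALED below and the old sig_count batching is deleted. The pattern in isolation:

::

    refcount_set(&req->ref, 2);     /* send + recv completions */

    /* in each completion handler: the last one out ends the request */
    if (refcount_dec_and_test(&req->ref))
        nvme_end_request(rq, req->status, req->result);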
static int nvme_rdma_post_send(struct nvme_rdma_queue *queue,
struct nvme_rdma_qe *qe, struct ib_sge *sge, u32 num_sge,
- struct ib_send_wr *first, bool flush)
+ struct ib_send_wr *first)
{
struct ib_send_wr wr, *bad_wr;
int ret;
sge->length = sizeof(struct nvme_command),
sge->lkey = queue->device->pd->local_dma_lkey;
- qe->cqe.done = nvme_rdma_send_done;
-
wr.next = NULL;
wr.wr_cqe = &qe->cqe;
wr.sg_list = sge;
wr.num_sge = num_sge;
wr.opcode = IB_WR_SEND;
- wr.send_flags = 0;
-
- /*
- * Unsignalled send completions are another giant desaster in the
- * IB Verbs spec: If we don't regularly post signalled sends
- * the send queue will fill up and only a QP reset will rescue us.
- * Would have been way to obvious to handle this in hardware or
- * at least the RDMA stack..
- *
- * Always signal the flushes. The magic request used for the flush
- * sequencer is not allocated in our driver's tagset and it's
- * triggered to be freed by blk_cleanup_queue(). So we need to
- * always mark it as signaled to ensure that the "wr_cqe", which is
- * embedded in request's payload, is not freed when __ib_process_cq()
- * calls wr_cqe->done().
- */
- if (nvme_rdma_queue_sig_limit(queue) || flush)
- wr.send_flags |= IB_SEND_SIGNALED;
+ wr.send_flags = IB_SEND_SIGNALED;
if (first)
first->next = ≀
return queue->ctrl->tag_set.tags[queue_idx - 1];
}
+static void nvme_rdma_async_done(struct ib_cq *cq, struct ib_wc *wc)
+{
+ if (unlikely(wc->status != IB_WC_SUCCESS))
+ nvme_rdma_wr_error(cq, wc, "ASYNC");
+}
+
static void nvme_rdma_submit_async_event(struct nvme_ctrl *arg)
{
struct nvme_rdma_ctrl *ctrl = to_rdma_ctrl(arg);
cmd->common.flags |= NVME_CMD_SGL_METABUF;
nvme_rdma_set_sg_null(cmd);
+ sqe->cqe.done = nvme_rdma_async_done;
+
ib_dma_sync_single_for_device(dev, sqe->dma, sizeof(*cmd),
DMA_TO_DEVICE);
- ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL, false);
+ ret = nvme_rdma_post_send(queue, sqe, &sge, 1, NULL);
WARN_ON_ONCE(ret);
}
}
req = blk_mq_rq_to_pdu(rq);
- if (rq->tag == tag)
- ret = 1;
+ req->status = cqe->status;
+ req->result = cqe->result;
- if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
- wc->ex.invalidate_rkey == req->mr->rkey)
- req->mr->need_inval = false;
+ if (wc->wc_flags & IB_WC_WITH_INVALIDATE) {
+ if (unlikely(wc->ex.invalidate_rkey != req->mr->rkey)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Bogus remote invalidation for rkey %#x\n",
+ req->mr->rkey);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ } else if (req->mr) {
+ ret = nvme_rdma_inv_rkey(queue, req);
+ if (unlikely(ret < 0)) {
+ dev_err(queue->ctrl->ctrl.device,
+ "Queueing INV WR for rkey %#x failed (%d)\n",
+ req->mr->rkey, ret);
+ nvme_rdma_error_recovery(queue->ctrl);
+ }
+ /* the local invalidation completion will end the request */
+ return 0;
+ }
+
+ if (refcount_dec_and_test(&req->ref)) {
+ if (rq->tag == tag)
+ ret = 1;
+ nvme_end_request(rq, req->status, req->result);
+ }
- nvme_end_request(rq, cqe->status, cqe->result);
return ret;
}
* We cannot accept any other command until the Connect command has completed.
*/
static inline blk_status_t
-nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
-{
- if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
- struct nvme_command *cmd = nvme_req(rq)->cmd;
-
- if (!blk_rq_is_passthrough(rq) ||
- cmd->common.opcode != nvme_fabrics_command ||
- cmd->fabrics.fctype != nvme_fabrics_type_connect) {
- /*
- * reconnecting state means transport disruption, which
- * can take a long time and even might fail permanently,
- * fail fast to give upper layers a chance to failover.
- * deleting state means that the ctrl will never accept
- * commands again, fail it permanently.
- */
- if (queue->ctrl->ctrl.state == NVME_CTRL_RECONNECTING ||
- queue->ctrl->ctrl.state == NVME_CTRL_DELETING) {
- nvme_req(rq)->status = NVME_SC_ABORT_REQ;
- return BLK_STS_IOERR;
- }
- return BLK_STS_RESOURCE; /* try again later */
- }
- }
-
- return 0;
+nvme_rdma_is_ready(struct nvme_rdma_queue *queue, struct request *rq)
+{
+ if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags)))
+ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+ return BLK_STS_OK;
}
static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_qe *sqe = &req->sqe;
struct nvme_command *c = sqe->data;
- bool flush = false;
struct ib_device *dev;
blk_status_t ret;
int err;
WARN_ON_ONCE(rq->tag < 0);
- ret = nvme_rdma_queue_is_ready(queue, rq);
+ ret = nvme_rdma_is_ready(queue, rq);
if (unlikely(ret))
return ret;
goto err;
}
+ sqe->cqe.done = nvme_rdma_send_done;
+
ib_dma_sync_single_for_device(dev, sqe->dma,
sizeof(struct nvme_command), DMA_TO_DEVICE);
- if (req_op(rq) == REQ_OP_FLUSH)
- flush = true;
err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
- req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
+ req->mr ? &req->reg_wr.wr : NULL);
if (unlikely(err)) {
nvme_rdma_unmap_data(queue, rq);
goto err;
.submit_async_event = nvme_rdma_submit_async_event,
.delete_ctrl = nvme_rdma_delete_ctrl,
.get_address = nvmf_get_address,
- .reinit_request = nvme_rdma_reinit_request,
};
static inline bool
tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
+ /* release the queue lookup reference on the completed IO */
+ nvmet_fc_tgt_q_put(queue);
+
spin_lock_irqsave(&queue->qlock, flags);
deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
struct nvmet_fc_defer_fcp_req, req_list);
if (!deferfcp) {
list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
spin_unlock_irqrestore(&queue->qlock, flags);
-
- /* Release reference taken at queue lookup and fod allocation */
- nvmet_fc_tgt_q_put(queue);
return;
}
tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
deferfcp->fcp_req);
+ /* release the queue lookup reference */
+ nvmet_fc_tgt_q_put(queue);
+
kfree(deferfcp);
spin_lock_irqsave(&queue->qlock, flags);
return container_of(ctrl, struct nvme_loop_ctrl, ctrl);
}
+enum nvme_loop_queue_flags {
+ NVME_LOOP_Q_LIVE = 0,
+};
+
struct nvme_loop_queue {
struct nvmet_cq nvme_cq;
struct nvmet_sq nvme_sq;
struct nvme_loop_ctrl *ctrl;
+ unsigned long flags;
};
static struct nvmet_port *nvmet_loop_port;
return BLK_EH_HANDLED;
}
+static inline blk_status_t nvme_loop_is_ready(struct nvme_loop_queue *queue,
+ struct request *rq)
+{
+ if (unlikely(!test_bit(NVME_LOOP_Q_LIVE, &queue->flags)))
+ return nvmf_check_init_req(&queue->ctrl->ctrl, rq);
+ return BLK_STS_OK;
+}
+
static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
blk_status_t ret;
+ ret = nvme_loop_is_ready(queue, req);
+ if (unlikely(ret))
+ return ret;
+
ret = nvme_setup_cmd(ns, req, &iod->cmd);
if (ret)
return ret;
static void nvme_loop_destroy_admin_queue(struct nvme_loop_ctrl *ctrl)
{
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
nvmet_sq_destroy(&ctrl->queues[0].nvme_sq);
blk_cleanup_queue(ctrl->ctrl.admin_q);
blk_mq_free_tag_set(&ctrl->admin_tag_set);
{
int i;
- for (i = 1; i < ctrl->ctrl.queue_count; i++)
+ for (i = 1; i < ctrl->ctrl.queue_count; i++) {
+ clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
nvmet_sq_destroy(&ctrl->queues[i].nvme_sq);
+ }
}
static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl)
ret = nvmf_connect_io_queue(&ctrl->ctrl, i);
if (ret)
return ret;
+ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags);
}
return 0;
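
Taken together, the NVME_LOOP_Q_LIVE hunks implement a simple lifecycle gate: the bit is set only after a queue's Connect succeeds, cleared before the submission queue is destroyed, and tested on every incoming request. A generic, self-contained sketch of that gate:

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum { Q_LIVE = 0 };

    struct my_queue {
            unsigned long flags;
    };

    static void my_queue_connected(struct my_queue *q)
    {
            set_bit(Q_LIVE, &q->flags);     /* after Connect succeeds */
    }

    static void my_queue_teardown(struct my_queue *q)
    {
            clear_bit(Q_LIVE, &q->flags);   /* stop new I/O first */
    }

    static bool my_queue_accepts_io(struct my_queue *q)
    {
            return test_bit(Q_LIVE, &q->flags);
    }
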
if (error)
goto out_cleanup_queue;
+ set_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags);
+
error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap);
if (error) {
dev_err(ctrl->ctrl.device,
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
-config HT_IRQ
- bool "Interrupts on hypertransport devices"
- default y
- depends on PCI && X86_LOCAL_APIC
- help
- This allows native hypertransport devices to use interrupts.
-
- If unsure say Y.
-
config PCI_ATS
bool
# Build the PCI MSI interrupt support
obj-$(CONFIG_PCI_MSI) += msi.o
-# Build the Hypertransport interrupt support
-obj-$(CONFIG_HT_IRQ) += htirq.o
-
obj-$(CONFIG_PCI_ATS) += ats.o
obj-$(CONFIG_PCI_IOV) += iov.o
+++ /dev/null
-// SPDX-License-Identifier: GPL-2.0
-/*
- * File: htirq.c
- * Purpose: Hypertransport Interrupt Capability
- *
- * Copyright (C) 2006 Linux Networx
- * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
- */
-
-#include <linux/irq.h>
-#include <linux/pci.h>
-#include <linux/spinlock.h>
-#include <linux/export.h>
-#include <linux/slab.h>
-#include <linux/htirq.h>
-
-/* Global ht irq lock.
- *
- * This is needed to serialize access to the data port in hypertransport
- * irq capability.
- *
- * With multiple simultaneous hypertransport irq devices it might pay
- * to make this more fine grained. But start with simple, stupid, and correct.
- */
-static DEFINE_SPINLOCK(ht_irq_lock);
-
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
- struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
- unsigned long flags;
-
- spin_lock_irqsave(&ht_irq_lock, flags);
- if (cfg->msg.address_lo != msg->address_lo) {
- pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
- pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
- }
- if (cfg->msg.address_hi != msg->address_hi) {
- pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
- pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
- }
- if (cfg->update)
- cfg->update(cfg->dev, irq, msg);
- spin_unlock_irqrestore(&ht_irq_lock, flags);
- cfg->msg = *msg;
-}
-
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
-{
- struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
-
- *msg = cfg->msg;
-}
-
-void mask_ht_irq(struct irq_data *data)
-{
- struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
- struct ht_irq_msg msg = cfg->msg;
-
- msg.address_lo |= 1;
- write_ht_irq_msg(data->irq, &msg);
-}
-
-void unmask_ht_irq(struct irq_data *data)
-{
- struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
- struct ht_irq_msg msg = cfg->msg;
-
- msg.address_lo &= ~1;
- write_ht_irq_msg(data->irq, &msg);
-}
-
-/**
- * __ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- * @update: Function to be called when changing the htirq message
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
-{
- int max_irq, pos, irq;
- unsigned long flags;
- u32 data;
-
- pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
- if (!pos)
- return -EINVAL;
-
- /* Verify the idx I want to use is in range */
- spin_lock_irqsave(&ht_irq_lock, flags);
- pci_write_config_byte(dev, pos + 2, 1);
- pci_read_config_dword(dev, pos + 4, &data);
- spin_unlock_irqrestore(&ht_irq_lock, flags);
-
- max_irq = (data >> 16) & 0xff;
- if (idx > max_irq)
- return -EINVAL;
-
- irq = arch_setup_ht_irq(idx, pos, dev, update);
- if (irq > 0)
- dev_dbg(&dev->dev, "irq %d for HT\n", irq);
-
- return irq;
-}
-EXPORT_SYMBOL(__ht_create_irq);
-
-/**
- * ht_create_irq - create an irq and attach it to a device.
- * @dev: The hypertransport device to find the irq capability on.
- * @idx: Which of the possible irqs to attach to.
- *
- * ht_create_irq needs to be called for all hypertransport devices
- * that generate irqs.
- *
- * The irq number of the new irq or a negative error value is returned.
- */
-int ht_create_irq(struct pci_dev *dev, int idx)
-{
- return __ht_create_irq(dev, idx, NULL);
-}
-EXPORT_SYMBOL(ht_create_irq);
-
-/**
- * ht_destroy_irq - destroy an irq created with ht_create_irq
- * @irq: irq to be destroyed
- *
- * This reverses ht_create_irq removing the specified irq from
- * existence. The irq should be free before this happens.
- */
-void ht_destroy_irq(unsigned int irq)
-{
- arch_teardown_ht_irq(irq);
-}
-EXPORT_SYMBOL(ht_destroy_irq);
goto fail_platform_device2;
buffer = kzalloc(sizeof(struct calling_interface_buffer), GFP_KERNEL);
- if (!buffer)
+ if (!buffer) {
+ ret = -ENOMEM;
goto fail_buffer;
+ }
ret = dell_setup_rfkill();
static int dell_smbios_wmi_probe(struct wmi_device *wdev)
{
+ struct wmi_driver *wdriver =
+ container_of(wdev->dev.driver, struct wmi_driver, driver);
struct wmi_smbios_priv *priv;
+ u32 hotfix;
int count;
int ret;
if (!dell_wmi_get_size(&priv->req_buf_size))
return -EPROBE_DEFER;
+ /* some SMBIOS calls fail unless BIOS contains hotfix */
+ if (!dell_wmi_get_hotfix(&hotfix))
+ return -EPROBE_DEFER;
+ if (!hotfix) {
+ dev_warn(&wdev->dev,
+ "WMI SMBIOS userspace interface not supported(%u), try upgrading to a newer BIOS\n",
+ hotfix);
+ wdriver->filter_callback = NULL;
+ }
+
/* add in the length object we will use internally with ioctl */
priv->req_buf_size += sizeof(u64);
ret = set_required_buffer_size(wdev, priv->req_buf_size);
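
The probe above leans on -EPROBE_DEFER: until the descriptor driver has bound and published the hotfix value, the SMBIOS driver asks the driver core to retry probing later instead of failing outright. A minimal sketch of the pattern; my_get_dependency() is a hypothetical stand-in for helpers like dell_wmi_get_hotfix():

    #include <linux/device.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* stub standing in for a provider that may not be bound yet */
    static bool my_get_dependency(u32 *val)
    {
            return false;
    }

    static int my_probe(struct device *dev)
    {
            u32 val;

            if (!my_get_dependency(&val))
                    return -EPROBE_DEFER;   /* driver core retries later */

            dev_info(dev, "dependency ready: %u\n", val);
            return 0;
    }
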
struct list_head list;
u32 interface_version;
u32 size;
+ u32 hotfix;
};
static int descriptor_valid = -EPROBE_DEFER;
static LIST_HEAD(wmi_list);
}
EXPORT_SYMBOL_GPL(dell_wmi_get_size);
+bool dell_wmi_get_hotfix(u32 *hotfix)
+{
+ struct descriptor_priv *priv;
+ bool ret = false;
+
+ mutex_lock(&list_mutex);
+ priv = list_first_entry_or_null(&wmi_list,
+ struct descriptor_priv,
+ list);
+ if (priv) {
+ *hotfix = priv->hotfix;
+ ret = true;
+ }
+ mutex_unlock(&list_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dell_wmi_get_hotfix);
+
/*
* Descriptor buffer is 128 byte long and contains:
*
 * Object Signature        4    4    " WMI"
 * WMI Interface Version   8    4    <version>
 * WMI buffer length      12    4    <length>
+ * WMI hotfix number      16    4    <hotfix>
*/
static int dell_wmi_descriptor_probe(struct wmi_device *wdev)
{
priv->interface_version = buffer[2];
priv->size = buffer[3];
+ priv->hotfix = buffer[4];
ret = 0;
dev_set_drvdata(&wdev->dev, priv);
mutex_lock(&list_mutex);
list_add_tail(&priv->list, &wmi_list);
mutex_unlock(&list_mutex);
- dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu and buffer size %lu\n",
+ dev_dbg(&wdev->dev, "Detected Dell WMI interface version %lu, buffer size %lu, hotfix %lu\n",
(unsigned long) priv->interface_version,
- (unsigned long) priv->size);
+ (unsigned long) priv->size,
+ (unsigned long) priv->hotfix);
out:
kfree(obj);
bool dell_wmi_get_interface_version(u32 *version);
bool dell_wmi_get_size(u32 *size);
+bool dell_wmi_get_hotfix(u32 *hotfix);
#endif
};
/* release buttons after a short delay if pressed */
-static void do_sony_laptop_release_key(unsigned long unused)
+static void do_sony_laptop_release_key(struct timer_list *unused)
{
struct sony_laptop_keypress kp;
unsigned long flags;
goto err_dec_users;
}
- setup_timer(&sony_laptop_input.release_key_timer,
+ timer_setup(&sony_laptop_input.release_key_timer,
do_sony_laptop_release_key, 0);
/* input keys */
* The kernel timer
*/
-static void pps_ktimer_event(unsigned long ptr)
+static void pps_ktimer_event(struct timer_list *unused)
{
struct pps_event_time ts;
return -ENOMEM;
}
- setup_timer(&ktimer, pps_ktimer_event, 0);
+ timer_setup(&ktimer, pps_ktimer_event, 0);
mod_timer(&ktimer, jiffies + HZ);
dev_info(pps->dev, "ktimer PPS source registered\n");
if (num)
rtc_handle_legacy_irq(rtc, num, RTC_UF);
}
-static void rtc_uie_timer(unsigned long data)
+static void rtc_uie_timer(struct timer_list *t)
{
- struct rtc_device *rtc = (struct rtc_device *)data;
+ struct rtc_device *rtc = from_timer(rtc, t, uie_timer);
unsigned long flags;
spin_lock_irqsave(&rtc->irq_lock, flags);
#ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL
INIT_WORK(&rtc->uie_task, rtc_uie_task);
- setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc);
+ timer_setup(&rtc->uie_timer, rtc_uie_timer, 0);
#endif
cdev_init(&rtc->char_dev, &rtc_dev_fops);
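
The timer conversions throughout this digest all follow one template: the callback now takes the struct timer_list pointer itself, and from_timer() (a container_of() wrapper) recovers the owning object, replacing the old unsigned-long data cookie. A self-contained sketch of the template with illustrative names:

    #include <linux/timer.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    struct my_dev {
            int id;
            struct timer_list timer;        /* embedded; no .data cookie */
    };

    static void my_timeout(struct timer_list *t)
    {
            /* container_of() back from the timer to its owner */
            struct my_dev *dev = from_timer(dev, t, timer);

            pr_debug("timeout on dev %d\n", dev->id);
    }

    static void my_dev_init(struct my_dev *dev)
    {
            timer_setup(&dev->timer, my_timeout, 0);
            mod_timer(&dev->timer, jiffies + HZ);
    }
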
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
static void do_reload_device(struct work_struct *);
static void do_requeue_requests(struct work_struct *);
static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
-static void dasd_device_timeout(unsigned long);
-static void dasd_block_timeout(unsigned long);
+static void dasd_device_timeout(struct timer_list *);
+static void dasd_block_timeout(struct timer_list *);
static void __dasd_process_erp(struct dasd_device *, struct dasd_ccw_req *);
static void dasd_profile_init(struct dasd_profile *, struct dentry *);
static void dasd_profile_exit(struct dasd_profile *);
(void (*)(unsigned long)) dasd_device_tasklet,
(unsigned long) device);
INIT_LIST_HEAD(&device->ccw_queue);
- init_timer(&device->timer);
- device->timer.function = dasd_device_timeout;
- device->timer.data = (unsigned long) device;
+ timer_setup(&device->timer, dasd_device_timeout, 0);
INIT_WORK(&device->kick_work, do_kick_device);
INIT_WORK(&device->restore_device, do_restore_device);
INIT_WORK(&device->reload_device, do_reload_device);
(unsigned long) block);
INIT_LIST_HEAD(&block->ccw_queue);
spin_lock_init(&block->queue_lock);
- init_timer(&block->timer);
- block->timer.function = dasd_block_timeout;
- block->timer.data = (unsigned long) block;
+ timer_setup(&block->timer, dasd_block_timeout, 0);
spin_lock_init(&block->profile.lock);
return block;
* The head of the ccw queue will have status DASD_CQR_IN_IO for 1),
* DASD_CQR_QUEUED for 2) and 3).
*/
-static void dasd_device_timeout(unsigned long ptr)
+static void dasd_device_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_device *device;
- device = (struct dasd_device *) ptr;
+ device = from_timer(device, t, timer);
spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(device, DASD_STOPPED_PENDING);
* is waiting for something that may not come reliably, (e.g. a state
* change interrupt)
*/
-static void dasd_block_timeout(unsigned long ptr)
+static void dasd_block_timeout(struct timer_list *t)
{
unsigned long flags;
struct dasd_block *block;
- block = (struct dasd_block *) ptr;
+ block = from_timer(block, t, timer);
spin_lock_irqsave(get_ccwdev_lock(block->base->cdev), flags);
/* re-activate request queue */
dasd_device_remove_stop_bits(block->base, DASD_STOPPED_PENDING);
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Based on.......: linux/drivers/s390/block/mdisk.c
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Horst Hummel <Horst.Hummel@de.ibm.com>
+// SPDX-License-Identifier: GPL-2.0
/*
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
+// SPDX-License-Identifier: GPL-2.0
/*
* dcssblk.c -- the S/390 block driver for dcss memory
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Block driver for s390 storage class memory.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Xpram.c -- the S/390 expanded memory RAM-disk
*
+// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - fullscreen driver.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* HMC Drive DVD Module
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Character device driver for reading z/VM *MONITOR service records.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Character device driver for writing z/VM *MONITOR service records.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - core functions.
*
__sclp_set_request_timer(unsigned long time, void (*cb)(struct timer_list *))
{
del_timer(&sclp_request_timer);
- sclp_request_timer.function = (TIMER_FUNC_TYPE)cb;
+ sclp_request_timer.function = cb;
sclp_request_timer.expires = jiffies + time;
add_timer(&sclp_request_timer);
}
if (timer_pending(&sclp_request_timer) &&
get_tod_clock_fast() > timeout &&
del_timer(&sclp_request_timer))
- sclp_request_timer.function((TIMER_DATA_TYPE)&sclp_request_timer);
+ sclp_request_timer.function(&sclp_request_timer);
cpu_relax();
}
local_irq_disable();
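
The sclp hunk also shows the knock-on effect at call sites that fire a timer by hand: since callbacks are now typed void (*)(struct timer_list *), the function member can be invoked with the timer's own address and no TIMER_DATA_TYPE cast. A sketch, valid only where the callback is safe to run in the current context (as in the busy-wait loop above):

    #include <linux/timer.h>

    /* cancel a pending timer and run its callback synchronously */
    static void fire_now(struct timer_list *t)
    {
            if (del_timer(t))
                    t->function(t);
    }
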
+// SPDX-License-Identifier: GPL-2.0
/*
* Enable Asynchronous Notification via SCLP.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* tape device discipline for 3480/3490 tapes.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* tape device discipline for 3590 tapes.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2004
*
+// SPDX-License-Identifier: GPL-2.0
/*
* basic function of the tape device driver
*
+// SPDX-License-Identifier: GPL-2.0
/*
* IBM/3270 Driver - tty functions.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* character device driver for reading z/VM system service records
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux driver for System z and s390 unit record devices
* (z/VM virtual punch, reader, printer)
+// SPDX-License-Identifier: GPL-1.0+
/*
* zcore module to export memory content and register sets for creating system
* dumps on SCSI disks (zfcpdump). The "zcore/mem" debugfs file shows the same
*
* Copyright IBM Corp. 2003, 2008
* Author(s): Michael Holzheu
- * License: GPL
*/
#define KMSG_COMPONENT "zdump"
+// SPDX-License-Identifier: GPL-2.0
/*
* bus driver for ccwgroup
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 1999, 2010
* Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
+// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O routines -- channel subsystem call
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for s390 chsc subchannels
*
+// SPDX-License-Identifier: GPL-2.0
/*
* S/390 common I/O routines -- low level i/o calls
*
+// SPDX-License-Identifier: GPL-2.0+
/*
* Linux on zSeries Channel Measurement Facility support
*
* Cornelia Huck <cornelia.huck@de.ibm.com>
*
* original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "cio"
+// SPDX-License-Identifier: GPL-2.0
/*
* driver for channel subsystem
*
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
*/
#define KMSG_COMPONENT "cio"
+// SPDX-License-Identifier: GPL-1.0+
/*
* bus driver for ccw devices
*
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * License: GPL
*/
#define KMSG_COMPONENT "cio"
+// SPDX-License-Identifier: GPL-2.0
/*
* finite state machine for device handling
*
+// SPDX-License-Identifier: GPL-1.0+
/*
* Copyright IBM Corp. 2002, 2009
*
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
* Cornelia Huck (cornelia.huck@de.ibm.com)
- *
- * License: GPL
*/
#include <linux/export.h>
#include <linux/init.h>
+// SPDX-License-Identifier: GPL-2.0
/*
* Driver for s390 eadm subchannels
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Functions for registration of I/O interruption subclasses on s390.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Linux for s390 qdio support, buffer handling, qdio API and module support.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* qdio queue initialization
*
+// SPDX-License-Identifier: GPL-2.0
/*
* Recognize and maintain s390 storage class memory.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* VFIO based Physical Subchannel device driver
*
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2006, 2012
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Holger Dengler <hd@linux.vnet.ibm.com>
*
* Adjunct processor bus.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "ap"
+// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright IBM Corp. 2006, 2012
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
* Holger Dengler <hd@linux.vnet.ibm.com>
*
* Adjunct processor bus header file.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _AP_BUS_H_
+// SPDX-License-Identifier: GPL-2.0
/*
* pkey device driver
*
* Copyright IBM Corp. 2017
* Author(s): Harald Freudenberger
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
*/
#define KMSG_COMPONENT "pkey"
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
weight += atomic_read(&zq->load);
pref_weight += atomic_read(&pref_zq->load);
if (weight == pref_weight)
- return &zq->queue->total_request_count >
- &pref_zq->queue->total_request_count;
+ return zq->queue->total_request_count >
+ pref_zq->queue->total_request_count;
return weight > pref_weight;
}
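
The zcrypt fix above is a classic slip: the stray & compared the addresses of the two counters, which gives a constant answer for any given pair of queues, instead of comparing the counts themselves. Reduced to a self-contained sketch:

    #include <linux/types.h>

    struct my_queue {
            unsigned long total_request_count;
    };

    static bool busier(struct my_queue *a, struct my_queue *b)
    {
            /* buggy form was: &a->total_request_count >
             * &b->total_request_count -- comparing where the counters
             * live, not what they hold */
            return a->total_request_count > b->total_request_count;
    }
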
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_API_H_
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_CCA_KEY_H_
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_CEX2A_H_
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2012
* Author(s): Holger Dengler <hd@linux.vnet.ibm.com>
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_ERROR_H_
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "zcrypt"
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_MSGTYPE50_H_
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "zcrypt"
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_MSGTYPE6_H_
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com)
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef _ZCRYPT_PCIXCC_H_
+// SPDX-License-Identifier: GPL-2.0+
/*
* zcrypt 2.1.0
*
* Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com>
* Ralph Wuerthner <rwuerthn@de.ibm.com>
* MSGTYPE restruct: Holger Dengler <hd@linux.vnet.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/module.h>
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2001, 2009
* Author(s):
+// SPDX-License-Identifier: GPL-2.0
/**
* A generic FSM based on fsm used in isdn4linux
*
}
static void
-fsm_expire_timer(fsm_timer *this)
+fsm_expire_timer(struct timer_list *t)
{
+ fsm_timer *this = from_timer(this, t, tl);
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Timer %p expired\n",
this->fi->name, this);
fsm_settimer(fsm_instance *fi, fsm_timer *this)
{
this->fi = fi;
- this->tl.function = (void *)fsm_expire_timer;
- this->tl.data = (long)this;
#if FSM_TIMER_DEBUG
printk(KERN_DEBUG "fsm(%s): Create timer %p\n", fi->name,
this);
#endif
- init_timer(&this->tl);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
}
void
this->fi->name, this, millisec);
#endif
- setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
#endif
del_timer(&this->tl);
- setup_timer(&this->tl, (void *)fsm_expire_timer, (long)this);
+ timer_setup(&this->tl, fsm_expire_timer, 0);
this->expire_event = event;
this->event_arg = arg;
this->tl.expires = jiffies + (millisec * HZ) / 1000;
+// SPDX-License-Identifier: GPL-2.0+
/*
* Linux for S/390 Lan Channel Station Network Driver
*
* Rewritten by
* Frank Pavlic <fpavlic@de.ibm.com> and
* Martin Schwidefsky <schwidefsky@de.ibm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#define KMSG_COMPONENT "lcs"
+// SPDX-License-Identifier: GPL-2.0+
/*
* IUCV network driver
*
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
#define KMSG_COMPONENT "netiucv"
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright IBM Corp. 2007, 2009
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
+// SPDX-License-Identifier: GPL-2.0+
/*
* IUCV special message driver
*
* Copyright IBM Corp. 2003, 2009
*
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
+// SPDX-License-Identifier: GPL-2.0
/*
* Deliver z/VM CP special messages (SMSG) as uevents.
*
+// SPDX-License-Identifier: GPL-2.0
/*
* zfcp device driver
*
static void zfcp_fsf_start_timer(struct zfcp_fsf_req *fsf_req,
unsigned long timeout)
{
- fsf_req->timer.function = (TIMER_FUNC_TYPE)zfcp_fsf_request_timeout_handler;
+ fsf_req->timer.function = zfcp_fsf_request_timeout_handler;
fsf_req->timer.expires = jiffies + timeout;
add_timer(&fsf_req->timer);
}
static void zfcp_fsf_start_erp_timer(struct zfcp_fsf_req *fsf_req)
{
BUG_ON(!fsf_req->erp_action);
- fsf_req->timer.function = (TIMER_FUNC_TYPE)zfcp_erp_timeout_handler;
+ fsf_req->timer.function = zfcp_erp_timeout_handler;
fsf_req->timer.expires = jiffies + 30 * HZ;
add_timer(&fsf_req->timer);
}
+# SPDX-License-Identifier: GPL-2.0
# Makefile for kvm guest drivers on s390
#
# Copyright IBM Corp. 2008
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License (version 2 only)
-# as published by the Free Software Foundation.
obj-$(CONFIG_S390_GUEST) += virtio_ccw.o
+// SPDX-License-Identifier: GPL-2.0
/*
* ccw based virtio transport
*
* Copyright IBM Corp. 2012, 2014
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License (version 2 only)
- * as published by the Free Software Foundation.
- *
* Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
*/
struct asd_ascb *ascb;
list_for_each_entry(ascb, list, list) {
if (!ascb->uldd_timer) {
- ascb->timer.function = (TIMER_FUNC_TYPE)asd_ascb_timedout;
+ ascb->timer.function = asd_ascb_timedout;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
}
ascb->tasklet_complete = tasklet_complete;
ascb->uldd_timer = 1;
- ascb->timer.function = (TIMER_FUNC_TYPE)timed_out;
+ ascb->timer.function = timed_out;
ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;
add_timer(&ascb->timer);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
-static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_request_device_map(struct timer_list *t);
static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
+ timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
add_timer(&acb->eternal_timer);
if(arcmsr_alloc_sysfs_attr(acb))
goto out_free_sysfs;
atomic_set(&acb->rq_map_token, 16);
atomic_set(&acb->ante_token_value, 16);
acb->fw_flag = FW_NORMAL;
- init_timer(&acb->eternal_timer);
+ timer_setup(&acb->eternal_timer, arcmsr_request_device_map, 0);
acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
- acb->eternal_timer.data = (unsigned long) acb;
- acb->eternal_timer.function = &arcmsr_request_device_map;
add_timer(&acb->eternal_timer);
return 0;
controller_stop:
}
}
-static void arcmsr_request_device_map(unsigned long pacb)
+static void arcmsr_request_device_map(struct timer_list *t)
{
- struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+ struct AdapterControlBlock *acb = from_timer(acb, t, eternal_timer);
switch (acb->adapter_type) {
case ACB_ADAPTER_TYPE_A: {
arcmsr_hbaA_request_device_map(acb);
* Error handler timeout function. Indicate that we timed out,
* and wake up any error handler process so it can continue.
*/
-static void fas216_eh_timer(unsigned long data)
+static void fas216_eh_timer(struct timer_list *t)
{
- FAS216_Info *info = (FAS216_Info *)data;
+ FAS216_Info *info = from_timer(info, t, eh_timer);
fas216_log(info, LOG_ERROR, "error handling timed out\n");
info->rst_dev_status = -1;
info->rst_bus_status = -1;
init_waitqueue_head(&info->eh_wait);
- init_timer(&info->eh_timer);
- info->eh_timer.data = (unsigned long)info;
- info->eh_timer.function = fas216_eh_timer;
+ timer_setup(&info->eh_timer, fas216_eh_timer, 0);
spin_lock_init(&info->host_lock);
if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
return;
/* modify this timer to check TPE */
- phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_tpe_check;
+ phba->hw_check.function = beiscsi_hw_tpe_check;
}
mod_timer(&phba->hw_check,
* Timer function gets modified for TPE detection.
* Always reinit to do health check first.
*/
- phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_health_check;
+ phba->hw_check.function = beiscsi_hw_health_check;
mod_timer(&phba->hw_check,
jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
return 0;
}
void
-bfad_bfa_tmo(unsigned long data)
+bfad_bfa_tmo(struct timer_list *t)
{
- struct bfad_s *bfad = (struct bfad_s *) data;
+ struct bfad_s *bfad = from_timer(bfad, t, hal_tmo);
unsigned long flags;
struct list_head doneq;
void
bfad_init_timer(struct bfad_s *bfad)
{
- init_timer(&bfad->hal_tmo);
- bfad->hal_tmo.function = bfad_bfa_tmo;
- bfad->hal_tmo.data = (unsigned long)bfad;
+ timer_setup(&bfad->hal_tmo, bfad_bfa_tmo, 0);
mod_timer(&bfad->hal_tmo,
jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
void bfad_remove_intr(struct bfad_s *bfad);
void bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
bfa_status_t bfad_hal_mem_alloc(struct bfad_s *bfad);
-void bfad_bfa_tmo(unsigned long data);
+void bfad_bfa_tmo(struct timer_list *t);
void bfad_init_timer(struct bfad_s *bfad);
int bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
void bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
{
struct bnx2fc_rport *tgt = io_req->tgt;
int rc = SUCCESS;
+ unsigned int time_left;
io_req->wait_for_comp = 1;
bnx2fc_initiate_cleanup(io_req);
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
-
+ /*
+ * Can't wait forever on cleanup response lest we let the SCSI error
+ * handler wait forever
+ */
+ time_left = wait_for_completion_timeout(&io_req->tm_done,
+ BNX2FC_FW_TIMEOUT);
io_req->wait_for_comp = 0;
+ if (!time_left)
+ BNX2FC_IO_DBG(io_req, "%s(): Wait for cleanup timed out.\n",
+ __func__);
+
/*
- * release the reference taken in eh_abort to allow the
- * target to re-login after flushing IOs
+	 * Release the reference held by the SCSI command; the cleanup
+	 * completion hits the BNX2FC_CLEANUP case in bnx2fc_process_cq_compl()
+	 * and thus the SCSI command is not returned by bnx2fc_scsi_done().
*/
kref_put(&io_req->refcount, bnx2fc_cmd_release);
spin_lock_bh(&tgt->tgt_lock);
return rc;
}
+
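
These bnx2fc hunks replace open-ended wait_for_completion() calls with bounded waits so a lost firmware response cannot hang the SCSI error handler. A minimal sketch; the two-second bound is an arbitrary stand-in, not the driver's constant:

    #include <linux/completion.h>
    #include <linux/jiffies.h>
    #include <linux/printk.h>

    static void wait_bounded(struct completion *done)
    {
            unsigned long left;

            /* returns 0 on timeout, remaining jiffies otherwise */
            left = wait_for_completion_timeout(done, msecs_to_jiffies(2000));
            if (!left)
                    pr_debug("response timed out, recovering anyway\n");
    }
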
/**
* bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
* SCSI command
struct fc_lport *lport;
struct bnx2fc_rport *tgt;
int rc;
+ unsigned int time_left;
rc = fc_block_scsi_eh(sc_cmd);
if (rc)
if (cancel_delayed_work(&io_req->timeout_work))
kref_put(&io_req->refcount,
bnx2fc_cmd_release); /* drop timer hold */
+ /*
+ * We don't want to hold off the upper layer timer so simply
+ * cleanup the command and return that I/O was successfully
+ * aborted.
+ */
rc = bnx2fc_abts_cleanup(io_req);
	/* This only occurs when a task abort was requested while ABTS
	   is in progress. Setting the IO_CLEANUP flag will skip the
	   RRQ process in the case where the firmware-generated SCSI_CMD
	   completion was a result of the ABTS request rather than the
	   CLEANUP request */
set_bit(BNX2FC_FLAG_IO_CLEANUP, &io_req->req_flags);
- goto out;
+ goto done;
}
/* Cancel the current timer running on this io_req */
}
spin_unlock_bh(&tgt->tgt_lock);
- wait_for_completion(&io_req->tm_done);
+ /* Wait 2 * RA_TOV + 1 to be sure timeout function hasn't fired */
+ time_left = wait_for_completion_timeout(&io_req->tm_done,
+ (2 * rp->r_a_tov + 1) * HZ);
+	if (!time_left)
+		BNX2FC_IO_DBG(io_req, "Timed out in eh_abort waiting for tm_done");
spin_lock_bh(&tgt->tgt_lock);
io_req->wait_for_comp = 0;
/* Let the scsi-ml try to recover this command */
printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
io_req->xid);
+ /*
+ * Cleanup firmware residuals before returning control back
+ * to SCSI ML.
+ */
rc = bnx2fc_abts_cleanup(io_req);
- goto out;
+ goto done;
} else {
/*
* We come here even when there was a race condition
done:
/* release the reference taken in eh_abort */
kref_put(&io_req->refcount, bnx2fc_cmd_release);
-out:
spin_unlock_bh(&tgt->tgt_lock);
return rc;
}
*/
#include "bnx2fc.h"
-static void bnx2fc_upld_timer(unsigned long data);
-static void bnx2fc_ofld_timer(unsigned long data);
+static void bnx2fc_upld_timer(struct timer_list *t);
+static void bnx2fc_ofld_timer(struct timer_list *t);
static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
struct fcoe_port *port,
struct fc_rport_priv *rdata);
struct bnx2fc_rport *tgt);
static void bnx2fc_free_conn_id(struct bnx2fc_hba *hba, u32 conn_id);
-static void bnx2fc_upld_timer(unsigned long data)
+static void bnx2fc_upld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+ struct bnx2fc_rport *tgt = from_timer(tgt, t, upld_timer);
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
/* fake upload completion */
wake_up_interruptible(&tgt->upld_wait);
}
-static void bnx2fc_ofld_timer(unsigned long data)
+static void bnx2fc_ofld_timer(struct timer_list *t)
{
- struct bnx2fc_rport *tgt = (struct bnx2fc_rport *)data;
+ struct bnx2fc_rport *tgt = from_timer(tgt, t, ofld_timer);
BNX2FC_TGT_DBG(tgt, "entered bnx2fc_ofld_timer\n");
/* NOTE: This function should never be called, as
static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
{
- setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
+ timer_setup(&tgt->ofld_timer, bnx2fc_ofld_timer, 0);
mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
wait_event_interruptible(tgt->ofld_wait,
static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
{
- setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
+ timer_setup(&tgt->upld_timer, bnx2fc_upld_timer, 0);
mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
wait_event_interruptible(tgt->upld_wait,
(test_bit(
cxgbi_sock_get(csk);
spin_lock_bh(&csk->lock);
if (rpl->status == CPL_ERR_CONN_EXIST &&
- csk->retry_timer.function != (TIMER_FUNC_TYPE)act_open_retry_timer) {
- csk->retry_timer.function = (TIMER_FUNC_TYPE)act_open_retry_timer;
+ csk->retry_timer.function != act_open_retry_timer) {
+ csk->retry_timer.function = act_open_retry_timer;
mod_timer(&csk->retry_timer, jiffies + HZ / 2);
} else
cxgbi_sock_fail_act_open(csk,
spin_lock_bh(&csk->lock);
if (status == CPL_ERR_CONN_EXIST &&
- csk->retry_timer.function != (TIMER_FUNC_TYPE)csk_act_open_retry_timer) {
- csk->retry_timer.function = (TIMER_FUNC_TYPE)csk_act_open_retry_timer;
+ csk->retry_timer.function != csk_act_open_retry_timer) {
+ csk->retry_timer.function = csk_act_open_retry_timer;
mod_timer(&csk->retry_timer, jiffies + HZ / 2);
} else
cxgbi_sock_fail_act_open(csk,
}
}
-static void esas2r_timer_callback(unsigned long context);
+static void esas2r_timer_callback(struct timer_list *t);
void esas2r_kickoff_timer(struct esas2r_adapter *a)
{
- init_timer(&a->timer);
+ timer_setup(&a->timer, esas2r_timer_callback, 0);
- a->timer.function = esas2r_timer_callback;
- a->timer.data = (unsigned long)a;
a->timer.expires = jiffies +
msecs_to_jiffies(100);
add_timer(&a->timer);
}
-static void esas2r_timer_callback(unsigned long context)
+static void esas2r_timer_callback(struct timer_list *t)
{
- struct esas2r_adapter *a = (struct esas2r_adapter *)context;
+ struct esas2r_adapter *a = from_timer(a, t, timer);
set_bit(AF2_TIMER_TICK, &a->flags2);
#define FCOE_CTLR_MIN_FKA 500 /* min keep alive (mS) */
#define FCOE_CTLR_DEF_FKA FIP_DEF_FKA /* default keep alive (mS) */
-static void fcoe_ctlr_timeout(unsigned long);
+static void fcoe_ctlr_timeout(struct timer_list *);
static void fcoe_ctlr_timer_work(struct work_struct *);
static void fcoe_ctlr_recv_work(struct work_struct *);
static int fcoe_ctlr_flogi_retry(struct fcoe_ctlr *);
mutex_init(&fip->ctlr_mutex);
spin_lock_init(&fip->ctlr_lock);
fip->flogi_oxid = FC_XID_UNKNOWN;
- setup_timer(&fip->timer, fcoe_ctlr_timeout, (unsigned long)fip);
+ timer_setup(&fip->timer, fcoe_ctlr_timeout, 0);
INIT_WORK(&fip->timer_work, fcoe_ctlr_timer_work);
INIT_WORK(&fip->recv_work, fcoe_ctlr_recv_work);
skb_queue_head_init(&fip->fip_recv_list);
* fcoe_ctlr_timeout() - FIP timeout handler
* @arg: The FCoE controller that timed out
*/
-static void fcoe_ctlr_timeout(unsigned long arg)
+static void fcoe_ctlr_timeout(struct timer_list *t)
{
- struct fcoe_ctlr *fip = (struct fcoe_ctlr *)arg;
+ struct fcoe_ctlr *fip = from_timer(fip, t, timer);
schedule_work(&fip->timer_work);
}
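
fcoe_ctlr_timeout() is the usual split between timer and work item: the callback runs in softirq context and does nothing but schedule process-context work that may sleep. A self-contained sketch of the combination with illustrative names:

    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct my_ctlr {
            struct timer_list timer;
            struct work_struct timer_work;
    };

    /* softirq context: defer the real work */
    static void my_timeout(struct timer_list *t)
    {
            struct my_ctlr *ctlr = from_timer(ctlr, t, timer);

            schedule_work(&ctlr->timer_work);
    }

    /* process context: may sleep, take mutexes, etc. */
    static void my_timer_work(struct work_struct *work)
    {
            struct my_ctlr *ctlr = container_of(work, struct my_ctlr,
                                                timer_work);
            (void)ctlr;
    }

    static void my_ctlr_init(struct my_ctlr *ctlr)
    {
            timer_setup(&ctlr->timer, my_timeout, 0);
            INIT_WORK(&ctlr->timer_work, my_timer_work);
    }
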
return err;
}
-static void fnic_notify_timer(unsigned long data)
+static void fnic_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = (struct fnic *)data;
+ struct fnic *fnic = from_timer(fnic, t, notify_timer);
fnic_handle_link_event(fnic);
mod_timer(&fnic->notify_timer,
round_jiffies(jiffies + FNIC_NOTIFY_TIMER_PERIOD));
}
-static void fnic_fip_notify_timer(unsigned long data)
+static void fnic_fip_notify_timer(struct timer_list *t)
{
- struct fnic *fnic = (struct fnic *)data;
+ struct fnic *fnic = from_timer(fnic, t, fip_timer);
fnic_handle_fip_timer(fnic);
}
vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
fnic->set_vlan = fnic_set_vlan;
fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
- setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
- (unsigned long)fnic);
+ timer_setup(&fnic->fip_timer, fnic_fip_notify_timer, 0);
spin_lock_init(&fnic->vlans_lock);
INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
INIT_WORK(&fnic->event_work, fnic_handle_event);
/* Setup notify timer when using MSI interrupts */
if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
- setup_timer(&fnic->notify_timer,
- fnic_notify_timer, (unsigned long)fnic);
+ timer_setup(&fnic->notify_timer, fnic_notify_timer, 0);
/* allocate RQ buffers and post them to RQ*/
for (i = 0; i < fnic->rq_count; i++) {
}
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+ task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
task->dev = device;
task->task_proto = device->tproto;
task->task_done = hisi_sas_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout;
+ task->slow_task->timer.function = hisi_sas_tmf_timedout;
task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110);
add_timer(&task->slow_task->timer);
}
}
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+ hisi_hba->timer.function = link_timeout_disable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900));
}
}
}
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_enable_link;
+ hisi_hba->timer.function = link_timeout_enable_link;
mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100));
}
static void set_link_timer_quirk(struct hisi_hba *hisi_hba)
{
- hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link;
+ hisi_hba->timer.function = link_timeout_disable_link;
hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000);
add_timer(&hisi_hba->timer);
}
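
Several of the SCSI hunks retarget a timer by assigning .function directly before re-arming it; with the new callback type the assignment type-checks on its own and the TIMER_FUNC_TYPE cast goes away. A sketch of the retargeting idiom:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void phase_two(struct timer_list *t)
    {
    }

    /* retarget a timer between runs, as the hunks above do before
     * calling mod_timer(); plain assignment needs no cast now */
    static void advance_to_phase_two(struct timer_list *t)
    {
            t->function = phase_two;
            mod_timer(t, jiffies + HZ);
    }
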
ipr_cmd->done = done;
ipr_cmd->timer.expires = jiffies + timeout;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+ ipr_cmd->timer.function = timeout_func;
add_timer(&ipr_cmd->timer);
ipr_cmd->done = ipr_reset_ioa_job;
ipr_cmd->timer.expires = jiffies + timeout;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_reset_timer_done;
+ ipr_cmd->timer.function = ipr_reset_timer_done;
add_timer(&ipr_cmd->timer);
}
}
ipr_cmd->timer.expires = jiffies + stage_time * HZ;
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+ ipr_cmd->timer.function = ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
}
ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
- ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout;
+ ipr_cmd->timer.function = ipr_oper_timeout;
ipr_cmd->done = ipr_reset_ioa_job;
add_timer(&ipr_cmd->timer);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q);
fsp->seq_ptr = seq;
fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+ fsp->timer.function = fc_fcp_timeout;
if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED)
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
return;
if (fc_fcp_lock_pkt(fsp))
return;
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_lun_reset_send;
+ fsp->timer.function = fc_lun_reset_send;
fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp));
fc_fcp_unlock_pkt(fsp);
}
if (fsp->lp->qfull) {
FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n",
fsp->timer_delay);
- fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout;
+ fsp->timer.function = fc_fcp_timeout;
fc_fcp_timer_set(fsp, fsp->timer_delay);
goto unlock;
}
task->task_done = smp_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)smp_task_timedout;
+ task->slow_task->timer.function = smp_task_timedout;
task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
return;
if (!del_timer(&slow->timer))
return;
- slow->timer.function((TIMER_DATA_TYPE)&slow->timer);
+ slow->timer.function(&slow->timer);
return;
}
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = mvs_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)mvs_tmf_timedout;
+ task->slow_task->timer.function = mvs_tmf_timedout;
task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
tmp | PHYEV_SIG_FIS);
if (phy->timer.function == NULL) {
- phy->timer.function = (TIMER_FUNC_TYPE)mvs_sig_time_out;
+ phy->timer.function = mvs_sig_time_out;
phy->timer.expires = jiffies + 5*HZ;
add_timer(&phy->timer);
}
return IRQ_HANDLED;
}
-static void ncr53c8xx_timeout(unsigned long npref)
+static void ncr53c8xx_timeout(struct timer_list *t)
{
- struct ncb *np = (struct ncb *) npref;
+ struct ncb *np = from_timer(np, t, timer);
unsigned long flags;
struct scsi_cmnd *done_list;
if (!np->scripth0)
goto attach_error;
- init_timer(&np->timer);
- np->timer.data = (unsigned long) np;
- np->timer.function = ncr53c8xx_timeout;
+ timer_setup(&np->timer, ncr53c8xx_timeout, 0);
/* Try to map the controller chip to virtual and physical memory. */
task->task_proto = dev->tproto;
memcpy(&task->ssp_task, parameter, para_len);
task->task_done = pm8001_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ;
add_timer(&task->slow_task->timer);
task->dev = dev;
task->task_proto = dev->tproto;
task->task_done = pm8001_task_done;
- task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout;
+ task->slow_task->timer.function = pm8001_tmf_timedout;
task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ;
add_timer(&task->slow_task->timer);
cmd->time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_bist_done;
+ cmd->timer.function = pmcraid_bist_done;
add_timer(&cmd->timer);
}
/* restart timer if some more time is available to wait */
cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+ cmd->timer.function = pmcraid_reset_alert_done;
add_timer(&cmd->timer);
}
}
*/
cmd->time_left = PMCRAID_RESET_TIMEOUT;
cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done;
+ cmd->timer.function = pmcraid_reset_alert_done;
add_timer(&cmd->timer);
iowrite32(DOORBELL_IOA_RESET_ALERT,
if (timeout_func) {
/* setup timeout handler */
cmd->timer.expires = jiffies + timeout;
- cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func;
+ cmd->timer.function = timeout_func;
add_timer(&cmd->timer);
}
cmd->cmd_done = pmcraid_ioa_reset;
cmd->timer.expires = jiffies +
msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
- cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_timeout_handler;
+ cmd->timer.function = pmcraid_timeout_handler;
if (!timer_pending(&cmd->timer))
add_timer(&cmd->timer);
struct list_head dev_info_list;
char vendor[8];
char model[16];
- unsigned flags;
+ blist_flags_t flags;
unsigned compatible; /* for use with scsi_static_device_list entries */
};
static const char spaces[] = "                "; /* 16 of them */
-static unsigned scsi_default_dev_flags;
+static blist_flags_t scsi_default_dev_flags;
static LIST_HEAD(scsi_dev_info_list);
static char scsi_dev_flags[256];
char *vendor;
char *model;
char *revision; /* revision known to be bad, unused */
- unsigned flags;
+ blist_flags_t flags;
} scsi_static_device_list[] __initdata = {
/*
* The following devices are known not to tolerate a lun != 0 scan
* Returns: 0 OK, -error on failure.
**/
static int scsi_dev_info_list_add(int compatible, char *vendor, char *model,
- char *strflags, int flags)
+ char *strflags, blist_flags_t flags)
{
return scsi_dev_info_list_add_keyed(compatible, vendor, model,
strflags, flags,
* Returns: 0 OK, -error on failure.
**/
int scsi_dev_info_list_add_keyed(int compatible, char *vendor, char *model,
- char *strflags, int flags, int key)
+ char *strflags, blist_flags_t flags, int key)
{
struct scsi_dev_info_list *devinfo;
struct scsi_dev_info_list_table *devinfo_table =
* matching flags value, else return the host or global default
* settings. Called during scan time.
**/
-int scsi_get_device_flags(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model)
+blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model)
{
return scsi_get_device_flags_keyed(sdev, vendor, model,
SCSI_DEVINFO_GLOBAL);
* flags value, else return the host or global default settings.
* Called during scan time.
**/
-int scsi_get_device_flags_keyed(struct scsi_device *sdev,
+blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
const unsigned char *vendor,
const unsigned char *model,
int key)
SCSI_DEVINFO_SPI,
};
-extern int scsi_get_device_flags(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model);
-extern int scsi_get_device_flags_keyed(struct scsi_device *sdev,
- const unsigned char *vendor,
- const unsigned char *model, int key);
+extern blist_flags_t scsi_get_device_flags(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model);
+extern blist_flags_t scsi_get_device_flags_keyed(struct scsi_device *sdev,
+ const unsigned char *vendor,
+ const unsigned char *model,
+ int key);
extern int scsi_dev_info_list_add_keyed(int compatible, char *vendor,
char *model, char *strflags,
- int flags, int key);
+ blist_flags_t flags, int key);
extern int scsi_dev_info_list_del_keyed(char *vendor, char *model, int key);
extern int scsi_dev_info_add_list(int key, const char *name);
extern int scsi_dev_info_remove_list(int key);
* are copied to the scsi_device any flags value is stored in *@bflags.
**/
static int scsi_probe_lun(struct scsi_device *sdev, unsigned char *inq_result,
- int result_len, int *bflags)
+ int result_len, blist_flags_t *bflags)
{
unsigned char scsi_cmd[MAX_COMMAND_SIZE];
int first_inquiry_len, try_inquiry_len, next_inquiry_len;
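
The type changes above give the SCSI device-blacklist flags their own
sparse-checked type rather than a bare int/unsigned, so mixing them with
unrelated integers becomes a build-time warning. The real definitions live
in scsi_devinfo.h and are not part of this excerpt; a sketch of the usual
__bitwise idiom they rely on::

  #include <linux/types.h>

  /* sketch only -- see include/scsi/scsi_devinfo.h for the real flags */
  typedef unsigned int __bitwise blist_flags_t;

  #define BLIST_NOLUN     ((__force blist_flags_t)(1 << 0))
  #define BLIST_FORCELUN  ((__force blist_flags_t)(1 << 1))
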
/*
* Linux entry point of the timer handler
*/
-static void sym53c8xx_timer(unsigned long npref)
+static void sym53c8xx_timer(struct timer_list *t)
{
- struct sym_hcb *np = (struct sym_hcb *)npref;
+ struct sym_hcb *np = from_timer(np, t, s.timer);
unsigned long flags;
spin_lock_irqsave(np->s.host->host_lock, flags);
/*
* Start the timer daemon
*/
- init_timer(&np->s.timer);
- np->s.timer.data = (unsigned long) np;
- np->s.timer.function = sym53c8xx_timer;
+ timer_setup(&np->s.timer, sym53c8xx_timer, 0);
np->s.lasttime=0;
sym_timer (np);
gb_operation_put(operation);
}
-static void gb_operation_timeout(unsigned long arg)
+static void gb_operation_timeout(struct timer_list *t)
{
- struct gb_operation *operation = (void *)arg;
+ struct gb_operation *operation = from_timer(operation, t, timer);
if (gb_operation_result_set(operation, -ETIMEDOUT)) {
/*
goto err_request;
}
- setup_timer(&operation->timer, gb_operation_timeout,
- (unsigned long)operation);
+ timer_setup(&operation->timer, gb_operation_timeout, 0);
}
operation->flags = op_flags;
static inline void irda_start_timer(struct timer_list *ptimer, int timeout,
void (*callback)(struct timer_list *))
{
- ptimer->function = (TIMER_FUNC_TYPE) callback;
+ ptimer->function = callback;
/* Set new value for timer (update or add timer).
* We use mod_timer() because it's more efficient and also
}
static void
-delay_timer_cb(unsigned long arg)
+delay_timer_cb(struct timer_list *t)
{
- struct lnet_delay_rule *rule = (struct lnet_delay_rule *)arg;
+ struct lnet_delay_rule *rule = from_timer(rule, t, dl_timer);
spin_lock_bh(&delay_dd.dd_lock);
if (list_empty(&rule->dl_sched_link) && delay_dd.dd_running) {
wait_event(delay_dd.dd_ctl_waitq, delay_dd.dd_running);
}
- setup_timer(&rule->dl_timer, delay_timer_cb, (unsigned long)rule);
+ timer_setup(&rule->dl_timer, delay_timer_cb, 0);
spin_lock_init(&rule->dl_lock);
INIT_LIST_HEAD(&rule->dl_msg_list);
if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return true;
return false;
}
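
This hunk and the ones that follow swap the userspace mount(2) MS_* flag
constants for the kernel-internal SB_* flags when testing or updating
sb->s_flags; the MS_* values remain the user-facing ABI only. The in-kernel
form in isolation (a minimal sketch; sb_rdonly() is the preferred helper
for the read-only case)::

  #include <linux/fs.h>

  static bool my_sb_nodiratime(struct super_block *sb)
  {
          /* SB_* flags are the in-kernel view of sb->s_flags */
          return (sb->s_flags & SB_NODIRATIME) != 0;
  }
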
if (data->ocd_connect_flags & OBD_CONNECT_ACL) {
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
sbi->ll_flags |= LL_SBI_ACL;
} else {
LCONSOLE_INFO("client wants to enable acl, but mdt not!\n");
- sb->s_flags &= ~MS_POSIXACL;
+ sb->s_flags &= ~SB_POSIXACL;
sbi->ll_flags &= ~LL_SBI_ACL;
}
struct ll_sb_info *sbi;
/* not init sb ?*/
- if (!(sb->s_flags & MS_ACTIVE))
+ if (!(sb->s_flags & SB_ACTIVE))
return;
sbi = ll_s2sbi(sb);
int err;
__u32 read_only;
- if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
- read_only = *flags & MS_RDONLY;
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
+ read_only = *flags & SB_RDONLY;
err = obd_set_info_async(NULL, sbi->ll_md_exp,
sizeof(KEY_READ_ONLY),
KEY_READ_ONLY, sizeof(read_only),
}
if (read_only)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
else
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
if (sbi->ll_flags & LL_SBI_VERBOSE)
LCONSOLE_WARN("Remounted %s %s\n", profilenm,
return -1;
}
-static void ptlrpc_at_timer(unsigned long castmeharder)
+static void ptlrpc_at_timer(struct timer_list *t)
{
struct ptlrpc_service_part *svcpt;
- svcpt = (struct ptlrpc_service_part *)castmeharder;
+ svcpt = from_timer(svcpt, t, scp_at_timer);
svcpt->scp_at_check = 1;
svcpt->scp_at_checktime = cfs_time_current();
if (!array->paa_reqs_count)
goto free_reqs_array;
- setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
- (unsigned long)svcpt);
+ timer_setup(&svcpt->scp_at_timer, ptlrpc_at_timer, 0);
/* At SOW, service time should be quick; 10s seems generous. If client
* timeout is less than this, we'll be sending an early reply.
next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
at_early_margin);
if (next <= 0) {
- ptlrpc_at_timer((unsigned long)svcpt);
+ ptlrpc_at_timer(&svcpt->scp_at_timer);
} else {
mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
CDEBUG(D_INFO, "armed %s at %+ds\n",
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
-static void prp_eof_timeout(unsigned long data)
+static void prp_eof_timeout(struct timer_list *t)
{
- struct prp_priv *priv = (struct prp_priv *)data;
+ struct prp_priv *priv = from_timer(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
struct imx_ic_priv *ic_priv = priv->ic_priv;
priv->ic_priv = ic_priv;
spin_lock_init(&priv->irqlock);
- setup_timer(&priv->eof_timeout_timer, prp_eof_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->eof_timeout_timer, prp_eof_timeout, 0);
priv->vdev = imx_media_capture_device_init(&ic_priv->sd,
PRPENCVF_SRC_PAD);
* EOF timeout timer function. This is an unrecoverable condition
* without a stream restart.
*/
-static void csi_idmac_eof_timeout(unsigned long data)
+static void csi_idmac_eof_timeout(struct timer_list *t)
{
- struct csi_priv *priv = (struct csi_priv *)data;
+ struct csi_priv *priv = from_timer(priv, t, eof_timeout_timer);
struct imx_media_video_dev *vdev = priv->vdev;
v4l2_err(&priv->sd, "EOF timeout\n");
priv->csi_id = pdata->csi;
priv->smfc_id = (priv->csi_id == 0) ? 0 : 2;
- setup_timer(&priv->eof_timeout_timer, csi_idmac_eof_timeout,
- (unsigned long)priv);
+ timer_setup(&priv->eof_timeout_timer, csi_idmac_eof_timeout, 0);
spin_lock_init(&priv->irqlock);
v4l2_subdev_init(&priv->sd, &csi_subdev_ops);
* The handler runs in interrupt context. That's why we need to defer the
* tasks to a work queue.
*/
-static void link_stat_timer_handler(unsigned long data)
+static void link_stat_timer_handler(struct timer_list *t)
{
- struct most_dev *mdev = (struct most_dev *)data;
+ struct most_dev *mdev = from_timer(mdev, t, link_stat_timer);
schedule_work(&mdev->poll_work_obj);
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
num_endpoints = usb_iface_desc->desc.bNumEndpoints;
mutex_init(&mdev->io_mutex);
INIT_WORK(&mdev->poll_work_obj, wq_netinfo);
- setup_timer(&mdev->link_stat_timer, link_stat_timer_handler,
- (unsigned long)mdev);
+ timer_setup(&mdev->link_stat_timer, link_stat_timer_handler, 0);
mdev->usb_device = usb_dev;
mdev->link_stat_timer.expires = jiffies + (2 * HZ);
}
-static void ieee80211_send_beacon_cb(unsigned long _ieee)
+static void ieee80211_send_beacon_cb(struct timer_list *t)
{
struct ieee80211_device *ieee =
- (struct ieee80211_device *) _ieee;
+ from_timer(ieee, t, beacon_timer);
unsigned long flags;
spin_lock_irqsave(&ieee->beacon_lock, flags);
spin_unlock_irqrestore(&ieee->lock, flags);
}
-static void ieee80211_associate_abort_cb(unsigned long dev)
+static void ieee80211_associate_abort_cb(struct timer_list *t)
{
- ieee80211_associate_abort((struct ieee80211_device *) dev);
+ struct ieee80211_device *dev = from_timer(dev, t, associate_timer);
+
+ ieee80211_associate_abort(dev);
}
ieee->enable_rx_imm_BA = true;
ieee->tx_pending.txb = NULL;
- setup_timer(&ieee->associate_timer, ieee80211_associate_abort_cb,
- (unsigned long)ieee);
+ timer_setup(&ieee->associate_timer, ieee80211_associate_abort_cb, 0);
- setup_timer(&ieee->beacon_timer, ieee80211_send_beacon_cb,
- (unsigned long)ieee);
+ timer_setup(&ieee->beacon_timer, ieee80211_send_beacon_cb, 0);
INIT_DELAYED_WORK(&ieee->start_ibss_wq, ieee80211_start_ibss_wq);
precvpriv->rx_drop++;
}
-static void _r8712_reordering_ctrl_timeout_handler (unsigned long data)
+static void _r8712_reordering_ctrl_timeout_handler(struct timer_list *t)
{
struct recv_reorder_ctrl *preorder_ctrl =
- (struct recv_reorder_ctrl *)data;
+ from_timer(preorder_ctrl, t, reordering_ctrl_timer);
r8712_reordering_ctrl_timeout_handler(preorder_ctrl);
}
void r8712_init_recv_timer(struct recv_reorder_ctrl *preorder_ctrl)
{
- setup_timer(&preorder_ctrl->reordering_ctrl_timer,
- _r8712_reordering_ctrl_timeout_handler,
- (unsigned long)preorder_ctrl);
+ timer_setup(&preorder_ctrl->reordering_ctrl_timer,
+ _r8712_reordering_ctrl_timeout_handler, 0);
}
* Prototype of protected function.
*===========================================================================
*/
-static void BlinkTimerCallback(unsigned long data);
+static void BlinkTimerCallback(struct timer_list *t);
static void BlinkWorkItemCallback(struct work_struct *work);
/*===========================================================================
pLed->bLedBlinkInProgress = false;
pLed->BlinkTimes = 0;
pLed->BlinkingLedState = LED_UNKNOWN;
- setup_timer(&pLed->BlinkTimer, BlinkTimerCallback,
- (unsigned long)pLed);
+ timer_setup(&pLed->BlinkTimer, BlinkTimerCallback, 0);
INIT_WORK(&pLed->BlinkWorkItem, BlinkWorkItemCallback);
}
* Callback function of LED BlinkTimer,
* it just schedules to corresponding BlinkWorkItem.
*/
-static void BlinkTimerCallback(unsigned long data)
+static void BlinkTimerCallback(struct timer_list *t)
{
- struct LED_871x *pLed = (struct LED_871x *)data;
+ struct LED_871x *pLed = from_timer(pLed, t, BlinkTimer);
/* This fixed the crash problem on Fedora 12 when trying to do the
* insmod;ifconfig up;rmmod commands.
static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1);
static void read_all_doc(struct vc_data *vc);
-static void cursor_done(u_long data);
+static void cursor_done(struct timer_list *unused);
static DEFINE_TIMER(cursor_timer, cursor_done);
static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag)
return 0;
}
-static void cursor_done(u_long data)
+static void cursor_done(struct timer_list *unused)
{
struct vc_data *vc = vc_cons[cursor_con].d;
unsigned long flags;
}
EXPORT_SYMBOL_GPL(spk_synth_is_alive_restart);
-static void thread_wake_up(u_long data)
+static void thread_wake_up(struct timer_list *unused)
{
wake_up_interruptible_all(&speakup_event);
}
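
Statically allocated timers take the same callback type; the two-argument
DEFINE_TIMER(name, callback) used above wires the handler up at build time,
and a callback that ignores its timer_list argument conventionally names it
"unused". A minimal sketch::

  #include <linux/timer.h>

  static void my_tick(struct timer_list *unused);
  static DEFINE_TIMER(my_timer, my_tick);

  static void my_tick(struct timer_list *unused)
  {
          /* periodic work; re-arm one second out */
          mod_timer(&my_timer, jiffies + HZ);
  }
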
.release = single_release,
};
-static void dev_periodic_work(unsigned long __opaque)
+static void dev_periodic_work(struct timer_list *t)
{
- struct visor_device *dev = (struct visor_device *)__opaque;
+ struct visor_device *dev = from_timer(dev, t, timer);
struct visor_driver *drv = to_visor_driver(dev->device.driver);
drv->channel_interrupt(dev);
dev->device.release = visorbus_release_device;
/* keep a reference just for us (now 2) */
get_device(&dev->device);
- setup_timer(&dev->timer, dev_periodic_work, (unsigned long)dev);
+ timer_setup(&dev->timer, dev_periodic_work, 0);
/*
* bus_id must be a unique name with respect to this bus TYPE (NOT bus
* instance). That's why we need to include the bus number within the
* Main function of the vnic_incoming thread. Periodically check the response
* queue and drain it if needed.
*/
-static void poll_for_irq(unsigned long v)
+static void poll_for_irq(struct timer_list *t)
{
- struct visornic_devdata *devdata = (struct visornic_devdata *)v;
+ struct visornic_devdata *devdata = from_timer(devdata, t,
+ irq_poll_timer);
if (!visorchannel_signalempty(
devdata->dev->visorchannel,
/* Let's start our threads to get responses */
netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
- setup_timer(&devdata->irq_poll_timer, poll_for_irq,
- (unsigned long)devdata);
+ timer_setup(&devdata->irq_poll_timer, poll_for_irq, 0);
/* Note: This time has to start running before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
last_scanned_shadow[i].time_scan = jiffies;
}
-static void remove_network_from_shadow(unsigned long unused)
+static void remove_network_from_shadow(struct timer_list *unused)
{
unsigned long now = jiffies;
int i, j;
}
}
-static void clear_duringIP(unsigned long arg)
+static void clear_duringIP(struct timer_list *unused)
{
wilc_optaining_ip = false;
}
priv = wdev_priv(net->ieee80211_ptr);
if (op_ifcs == 0) {
- setup_timer(&hAgingTimer, remove_network_from_shadow, 0);
- setup_timer(&wilc_during_ip_timer, clear_duringIP, 0);
+ timer_setup(&hAgingTimer, remove_network_from_shadow, 0);
+ timer_setup(&wilc_during_ip_timer, clear_duringIP, 0);
}
op_ifcs++;
CSK_LOGIN_PDU_DONE,
CSK_LOGIN_DONE,
CSK_DDP_ENABLE,
+ CSK_ABORT_RPL_WAIT,
};
struct cxgbit_sock_common {
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
+void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
return cxgbit_l2t_send(csk->com.cdev, skb, csk->l2t);
}
+static void
+__cxgbit_abort_conn(struct cxgbit_sock *csk, struct sk_buff *skb)
+{
+ __kfree_skb(skb);
+
+ if (csk->com.state != CSK_STATE_ESTABLISHED)
+ goto no_abort;
+
+ set_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags);
+ csk->com.state = CSK_STATE_ABORTING;
+
+ cxgbit_send_abort_req(csk);
+
+ return;
+
+no_abort:
+ cxgbit_wake_up(&csk->com.wr_wait, __func__, CPL_ERR_NONE);
+ cxgbit_put_csk(csk);
+}
+
+void cxgbit_abort_conn(struct cxgbit_sock *csk)
+{
+ struct sk_buff *skb = alloc_skb(0, GFP_KERNEL | __GFP_NOFAIL);
+
+ cxgbit_get_csk(csk);
+ cxgbit_init_wr_wait(&csk->com.wr_wait);
+
+ spin_lock_bh(&csk->lock);
+ if (csk->lock_owner) {
+ cxgbit_skcb_rx_backlog_fn(skb) = __cxgbit_abort_conn;
+ __skb_queue_tail(&csk->backlogq, skb);
+ } else {
+ __cxgbit_abort_conn(csk, skb);
+ }
+ spin_unlock_bh(&csk->lock);
+
+ cxgbit_wait_for_reply(csk->com.cdev, &csk->com.wr_wait,
+ csk->tid, 600, __func__);
+}
+
void cxgbit_free_conn(struct iscsi_conn *conn)
{
struct cxgbit_sock *csk = conn->context;
static void cxgbit_abort_rpl_rss(struct cxgbit_sock *csk, struct sk_buff *skb)
{
+ struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
+
pr_debug("%s: csk %p; tid %u; state %d\n",
__func__, csk, csk->tid, csk->com.state);
switch (csk->com.state) {
case CSK_STATE_ABORTING:
csk->com.state = CSK_STATE_DEAD;
+ if (test_bit(CSK_ABORT_RPL_WAIT, &csk->com.flags))
+ cxgbit_wake_up(&csk->com.wr_wait, __func__,
+ rpl->status);
cxgbit_put_csk(csk);
break;
default:
struct cxgbit_device *cdev = csk->com.cdev;
struct cxgbi_ppm *ppm = cdev2ppm(cdev);
+ /* Abort the TCP conn if DDP is not complete to
+ * avoid any possibility of DDP after freeing
+ * the cmd.
+ */
+ if (unlikely(cmd->write_data_done !=
+ cmd->se_cmd.data_length))
+ cxgbit_abort_conn(csk);
+
cxgbi_ppm_ppod_release(ppm, ttinfo->idx);
dma_unmap_sg(&ppm->pdev->dev, ttinfo->sgl,
case CPL_RX_ISCSI_DDP:
case CPL_FW4_ACK:
lro_flush = false;
+ /* fall through */
case CPL_ABORT_RPL_RSS:
case CPL_PASS_ESTABLISH:
case CPL_PEER_CLOSE:
EXPORT_SYMBOL(iscsit_aborted_task);
static void iscsit_do_crypto_hash_buf(struct ahash_request *, const void *,
- u32, u32, u8 *, u8 *);
+ u32, u32, const void *, void *);
static void iscsit_tx_thread_wait_for_tcp(struct iscsi_conn *);
static int
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, hdr,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
if (conn->conn_ops->DataDigest) {
iscsit_do_crypto_hash_buf(conn->conn_tx_hash,
data_buf, data_buf_len,
- padding,
- (u8 *)&cmd->pad_bytes,
- (u8 *)&cmd->data_crc);
+ padding, &cmd->pad_bytes,
+ &cmd->data_crc);
iov[niov].iov_base = &cmd->data_crc;
iov[niov++].iov_len = ISCSI_CRC_LEN;
iscsit_do_crypto_hash_buf(conn->conn_tx_hash, cmd->pdu,
ISCSI_HDR_LEN, 0, NULL,
- (u8 *)header_digest);
+ header_digest);
iov[0].iov_len += ISCSI_CRC_LEN;
tx_size += ISCSI_CRC_LEN;
unsigned char *buf)
{
struct iscsi_conn *conn;
+ const bool do_put = cmd->se_cmd.se_tfo != NULL;
if (!cmd->conn) {
pr_err("cmd->conn is NULL for ITT: 0x%08x\n",
* Perform the kref_put now if se_cmd has already been setup by
* scsit_setup_scsi_cmd()
*/
- if (cmd->se_cmd.se_tfo != NULL) {
+ if (do_put) {
pr_debug("iscsi reject: calling target_put_sess_cmd >>>>>>\n");
target_put_sess_cmd(&cmd->se_cmd);
}
return data_crc;
}
-static void iscsit_do_crypto_hash_buf(
- struct ahash_request *hash,
- const void *buf,
- u32 payload_length,
- u32 padding,
- u8 *pad_bytes,
- u8 *data_crc)
+static void iscsit_do_crypto_hash_buf(struct ahash_request *hash,
+ const void *buf, u32 payload_length, u32 padding,
+ const void *pad_bytes, void *data_crc)
{
struct scatterlist sg[2];
iscsit_mod_dataout_timer(cmd);
if ((be32_to_cpu(hdr->offset) + payload_length) > cmd->se_cmd.data_length) {
- pr_err("DataOut Offset: %u, Length %u greater than"
- " iSCSI Command EDTL %u, protocol error.\n",
- hdr->offset, payload_length, cmd->se_cmd.data_length);
+ pr_err("DataOut Offset: %u, Length %u greater than iSCSI Command EDTL %u, protocol error.\n",
+ be32_to_cpu(hdr->offset), payload_length,
+ cmd->se_cmd.data_length);
return iscsit_reject_cmd(cmd, ISCSI_REASON_BOOKMARK_INVALID, buf);
}
}
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- ping_data, payload_length,
- padding, cmd->pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, ping_data,
+ payload_length, padding,
+ cmd->pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Ping data CRC32C DataDigest"
struct iscsi_tmr_req *tmr_req;
struct iscsi_tm *hdr;
int out_of_order_cmdsn = 0, ret;
- bool sess_ref = false;
u8 function, tcm_function = TMR_UNKNOWN;
hdr = (struct iscsi_tm *) buf;
cmd->data_direction = DMA_NONE;
cmd->tmr_req = kzalloc(sizeof(*cmd->tmr_req), GFP_KERNEL);
- if (!cmd->tmr_req)
+ if (!cmd->tmr_req) {
return iscsit_add_reject_cmd(cmd,
ISCSI_REASON_BOOKMARK_NO_RESOURCES,
buf);
+ }
+
+ transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
+ conn->sess->se_sess, 0, DMA_NONE,
+ TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
+
+ target_get_sess_cmd(&cmd->se_cmd, true);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
* LIO-Target $FABRIC_MOD
*/
if (function != ISCSI_TM_FUNC_TASK_REASSIGN) {
- transport_init_se_cmd(&cmd->se_cmd, &iscsi_ops,
- conn->sess->se_sess, 0, DMA_NONE,
- TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
-
- target_get_sess_cmd(&cmd->se_cmd, true);
- sess_ref = true;
tcm_function = iscsit_convert_tmf(function);
if (tcm_function == TMR_UNKNOWN) {
pr_err("Unknown iSCSI TMR Function:"
if (!(hdr->opcode & ISCSI_OP_IMMEDIATE)) {
int cmdsn_ret = iscsit_sequence_cmd(conn, cmd, buf, hdr->cmdsn);
- if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP)
+ if (cmdsn_ret == CMDSN_HIGHER_THAN_EXP) {
out_of_order_cmdsn = 1;
- else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP)
+ } else if (cmdsn_ret == CMDSN_LOWER_THAN_EXP) {
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
- else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER)
+ } else if (cmdsn_ret == CMDSN_ERROR_CANNOT_RECOVER) {
return -1;
+ }
}
iscsit_ack_from_expstatsn(conn, be32_to_cpu(hdr->exp_statsn));
* For connection recovery, this is also the default action for
* TMR TASK_REASSIGN.
*/
- if (sess_ref) {
- pr_debug("Handle TMR, using sess_ref=true check\n");
- target_put_sess_cmd(&cmd->se_cmd);
- }
-
iscsit_add_cmd_to_response_queue(cmd, conn, cmd->i_state);
+ target_put_sess_cmd(&cmd->se_cmd);
return 0;
}
EXPORT_SYMBOL(iscsit_handle_task_mgt_cmd);
goto reject;
if (conn->conn_ops->DataDigest) {
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- text_in, payload_length,
- padding, (u8 *)&pad_bytes,
- (u8 *)&data_crc);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, text_in,
+ payload_length, padding,
+ &pad_bytes, &data_crc);
if (checksum != data_crc) {
pr_err("Text data CRC32C DataDigest"
return;
}
- iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
- buffer, ISCSI_HDR_LEN,
- 0, NULL, (u8 *)&checksum);
+ iscsit_do_crypto_hash_buf(conn->conn_rx_hash, buffer,
+ ISCSI_HDR_LEN, 0, NULL,
+ &checksum);
if (digest != checksum) {
pr_err("HeaderDigest CRC32C failed,"
ret = core_tpg_register(wwn, &tpg->tpg_se_tpg, SCSI_PROTOCOL_ISCSI);
if (ret < 0)
- return NULL;
+ goto free_out;
ret = iscsit_tpg_add_portal_group(tiqn, tpg);
if (ret != 0)
return &tpg->tpg_se_tpg;
out:
core_tpg_deregister(&tpg->tpg_se_tpg);
+free_out:
kfree(tpg);
return NULL;
}
#include "iscsi_target_erl2.h"
#include "iscsi_target.h"
-#define OFFLOAD_BUF_SIZE 32768
+#define OFFLOAD_BUF_SIZE 32768U
/*
* Used to dump excess datain payload for certain error recovery
if (conn->sess->sess_ops->RDMAExtensions)
return 0;
- length = (buf_len > OFFLOAD_BUF_SIZE) ? OFFLOAD_BUF_SIZE : buf_len;
+ length = min(buf_len, OFFLOAD_BUF_SIZE);
buf = kzalloc(length, GFP_ATOMIC);
if (!buf) {
memset(&iov, 0, sizeof(struct kvec));
while (offset < buf_len) {
- size = ((offset + length) > buf_len) ?
- (buf_len - offset) : length;
+ size = min(buf_len - offset, length);
iov.iov_len = size;
iov.iov_base = buf;
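
The U suffix added to OFFLOAD_BUF_SIZE is what makes the min() conversions
possible: the kernel's min() macro emits a compile-time warning when its
arguments have mismatched types, and buf_len here is a u32, so the constant
must be unsigned as well. The guard in isolation (a sketch)::

  #include <linux/kernel.h>

  #define OFFLOAD_BUF_SIZE        32768U  /* unsigned, to match a u32 */

  static u32 clamp_offload_len(u32 buf_len)
  {
          /* min() type-checks both sides; a signed 32768 would warn */
          return min(buf_len, OFFLOAD_BUF_SIZE);
  }
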
char *key, *value;
struct iscsi_param *param;
- if (iscsi_extract_key_value(start, &key, &value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_extract_key_value(start, &key, &value) < 0)
+ goto free_buffer;
pr_debug("Got key: %s=%s\n", key, value);
param = iscsi_check_key(key, phase, sender, param_list);
if (!param) {
- if (iscsi_add_notunderstood_response(key,
- value, param_list) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_add_notunderstood_response(key, value,
+ param_list) < 0)
+ goto free_buffer;
+
start += strlen(key) + strlen(value) + 2;
continue;
}
- if (iscsi_check_value(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_value(param, value) < 0)
+ goto free_buffer;
start += strlen(key) + strlen(value) + 2;
if (IS_PSTATE_PROPOSER(param)) {
- if (iscsi_check_proposer_state(param, value) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_proposer_state(param, value) < 0)
+ goto free_buffer;
+
SET_PSTATE_RESPONSE_GOT(param);
} else {
- if (iscsi_check_acceptor_state(param, value, conn) < 0) {
- kfree(tmpbuf);
- return -1;
- }
+ if (iscsi_check_acceptor_state(param, value, conn) < 0)
+ goto free_buffer;
+
SET_PSTATE_ACCEPTOR(param);
}
}
kfree(tmpbuf);
return 0;
+
+free_buffer:
+ kfree(tmpbuf);
+ return -1;
}
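
Folding the repeated kfree(tmpbuf); return -1; pairs behind one free_buffer
label is the kernel's standard centralized-exit pattern: every failure site
becomes a one-line goto and the cleanup exists in exactly one place. The
shape in isolation (do_step() is a hypothetical parsing helper)::

  #include <linux/slab.h>
  #include <linux/string.h>

  static int do_step(char *buf);          /* hypothetical helper */

  static int parse_copy(const char *in)
  {
          char *tmpbuf = kstrdup(in, GFP_KERNEL);

          if (!tmpbuf)
                  return -ENOMEM;
          if (do_step(tmpbuf) < 0)
                  goto free_buffer;
          kfree(tmpbuf);
          return 0;

  free_buffer:
          kfree(tmpbuf);
          return -1;
  }
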
int iscsi_encode_text_output(
#include "iscsi_target_tpg.h"
#include "iscsi_target_seq_pdu_list.h"
-#define OFFLOAD_BUF_SIZE 32768
-
#ifdef DEBUG
static void iscsit_dump_seq_list(struct iscsi_cmd *cmd)
{
*/
param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list);
if (!param)
- goto out;
+ goto free_pl_out;
if (iscsi_update_param_value(param, "CHAP,None") < 0)
- goto out;
+ goto free_pl_out;
tpg->tpg_attrib.authentication = 0;
pr_debug("CORE[0] - Allocated Discovery TPG\n");
return 0;
+free_pl_out:
+ iscsi_release_param_list(tpg->param_list);
out:
if (tpg->sid == 1)
core_tpg_deregister(&tpg->tpg_se_tpg);
if (!tpg)
return;
+ iscsi_release_param_list(tpg->param_list);
core_tpg_deregister(&tpg->tpg_se_tpg);
kfree(tpg);
struct iscsi_session *sess;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->conn)
sess = cmd->conn->sess;
else
{
struct iscsi_conn *conn = cmd->conn;
+ WARN_ON(!list_empty(&cmd->i_conn_node));
+
if (cmd->data_direction == DMA_TO_DEVICE) {
iscsit_stop_dataout_timer(cmd);
iscsit_free_r2ts_from_list(cmd);
{
unsigned char *md_buf;
struct t10_wwn *wwn = &tg_pt_gp->tg_pt_gp_dev->t10_wwn;
- char path[ALUA_METADATA_PATH_LEN];
+ char *path;
int len, rc;
md_buf = kzalloc(ALUA_MD_BUF_LEN, GFP_KERNEL);
return -ENOMEM;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
tg_pt_gp->tg_pt_gp_alua_access_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
- snprintf(path, ALUA_METADATA_PATH_LEN,
- "%s/alua/tpgs_%s/%s", db_root, &wwn->unit_serial[0],
- config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
-
- rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ rc = -ENOMEM;
+ path = kasprintf(GFP_KERNEL, "%s/alua/tpgs_%s/%s", db_root,
+ &wwn->unit_serial[0],
+ config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
+ if (path) {
+ rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+ }
kfree(md_buf);
return rc;
}
{
struct se_portal_group *se_tpg = lun->lun_tpg;
unsigned char *md_buf;
- char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
+ char *path;
int len, rc;
mutex_lock(&lun->lun_tg_pt_md_mutex);
goto out_unlock;
}
- memset(path, 0, ALUA_METADATA_PATH_LEN);
- memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
-
- len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
- se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg));
-
- if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL)
- snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
- se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg));
-
len = snprintf(md_buf, ALUA_MD_BUF_LEN, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&lun->lun_tg_pt_secondary_offline),
lun->lun_tg_pt_secondary_stat);
- snprintf(path, ALUA_METADATA_PATH_LEN, "%s/alua/%s/%s/lun_%llu",
- db_root, se_tpg->se_tpg_tfo->get_fabric_name(), wwn,
- lun->unpacked_lun);
+ if (se_tpg->se_tpg_tfo->tpg_get_tag != NULL) {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s+%hu/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg),
+ lun->unpacked_lun);
+ } else {
+ path = kasprintf(GFP_KERNEL, "%s/alua/%s/%s/lun_%llu",
+ db_root, se_tpg->se_tpg_tfo->get_fabric_name(),
+ se_tpg->se_tpg_tfo->tpg_get_wwn(se_tpg),
+ lun->unpacked_lun);
+ }
+ if (!path) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
rc = core_alua_write_tpg_metadata(path, md_buf, len);
+ kfree(path);
+out_free:
kfree(md_buf);
-
out_unlock:
mutex_unlock(&lun->lun_tg_pt_md_mutex);
return rc;
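
Both metadata writers now build their paths with kasprintf() instead of
fixed 512-byte stack buffers, removing the stack footprint, the silent
truncation risk, and the up-front length checks; the trade-off is a
GFP_KERNEL allocation that must be NULL-checked and freed. The shape in
isolation (write_metadata() is a hypothetical stand-in for
core_alua_write_tpg_metadata())::

  #include <linux/slab.h>

  static int write_metadata(const char *path);  /* hypothetical */

  static int build_and_write(const char *db_root, const char *name)
  {
          int rc = -ENOMEM;
          char *path = kasprintf(GFP_KERNEL, "%s/alua/%s", db_root, name);

          if (path) {
                  rc = write_metadata(path);
                  kfree(path);
          }
          return rc;
  }
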
*/
#define ALUA_DEFAULT_IMPLICIT_TRANS_SECS 0
#define ALUA_MAX_IMPLICIT_TRANS_SECS 255
-/*
- * Used by core_alua_update_tpg_primary_metadata() and
- * core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_METADATA_PATH_LEN 512
-/*
- * Used by core_alua_update_tpg_secondary_metadata()
- */
-#define ALUA_SECONDARY_METADATA_WWN_LEN 256
/* Used by core_alua_update_tpg_(primary,secondary)_metadata */
#define ALUA_MD_BUF_LEN 1024
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
- {Opt_mapped_lun, "mapped_lun=%lld"},
+ {Opt_mapped_lun, "mapped_lun=%u"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
- {Opt_target_lun, "target_lun=%lld"},
+ {Opt_target_lun, "target_lun=%u"},
{Opt_err, NULL}
};
}
break;
case Opt_sa_res_key:
- ret = kstrtoull(args->from, 0, &tmp_ll);
+ ret = match_u64(args, &tmp_ll);
if (ret < 0) {
pr_err("kstrtoull() failed for sa_res_key=\n");
goto out;
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- mapped_lun = (u64)arg;
+ mapped_lun = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Target Port
goto out;
break;
case Opt_target_lun:
- ret = match_int(args, &arg);
+ ret = match_u64(args, &tmp_ll);
if (ret)
goto out;
- target_lun = (u64)arg;
+ target_lun = (u64)tmp_ll;
break;
default:
break;
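
match_int() parses through a plain int, so mapped_lun and target_lun values
above INT_MAX were truncated before the cast to u64; match_u64() from
lib/parser.c keeps the full 64-bit range. A sketch of the fixed parse::

  #include <linux/parser.h>
  #include <linux/types.h>

  /* parse a 64-bit LUN from an already-matched substring */
  static int parse_lun(substring_t *args, u64 *lun)
  {
          u64 tmp;
          int ret = match_u64(args, &tmp);        /* full u64 range */

          if (ret)
                  return ret;
          *lun = tmp;
          return 0;
  }
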
NULL,
};
-extern struct configfs_item_operations target_core_dev_item_ops;
-
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
struct inode *inode = file->f_mapping->host;
int ret;
+	if (!nolb)
+		return 0;
+
if (cmd->se_dev->dev_attrib.pi_prot_type) {
ret = fd_do_prot_unmap(cmd, lba, nolb);
if (ret)
void *data);
/* target_core_configfs.c */
+extern struct configfs_item_operations target_core_dev_item_ops;
void target_setup_backend_cits(struct target_backend *);
/* target_core_fabric_configfs.c */
char *buf,
u32 size)
{
- if (!pr_reg->isid_present_at_reg)
+ if (!pr_reg->isid_present_at_reg) {
buf[0] = '\0';
+ return;
+ }
snprintf(buf, size, ",i,0x%s", pr_reg->pr_reg_isid);
}
break;
case PR_TYPE_WRITE_EXCLUSIVE_REGONLY:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_REGONLY:
/*
* Some commands are only allowed for registered I_T Nexuses.
break;
case PR_TYPE_WRITE_EXCLUSIVE_ALLREG:
we = 1;
+ /* fall through */
case PR_TYPE_EXCLUSIVE_ACCESS_ALLREG:
/*
* Each registered I_T Nexus is a reservation holder.
tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
if (!tidh_new) {
pr_err("Unable to allocate tidh_new\n");
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
INIT_LIST_HEAD(&tidh_new->dest_list);
tidh_new->dest_tpg = tpg;
sa_res_key, all_tg_pt, aptpl);
if (!local_pr_reg) {
kfree(tidh_new);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
tidh_new->dest_pr_reg = local_pr_reg;
/*
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
core_scsi3_nodeacl_undepend_item(dest_node_acl);
core_scsi3_tpg_undepend_item(dest_tpg);
kfree(tidh_new);
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_unmap;
}
tidh_new->dest_pr_reg = dest_pr_reg;
struct t10_wwn *wwn = &dev->t10_wwn;
struct file *file;
int flags = O_RDWR | O_CREAT | O_TRUNC;
- char path[512];
+ char *path;
u32 pr_aptpl_buf_len;
int ret;
loff_t pos = 0;
- memset(path, 0, 512);
-
- if (strlen(&wwn->unit_serial[0]) >= 512) {
- pr_err("WWN value for struct se_device does not fit"
- " into path buffer\n");
- return -EMSGSIZE;
- }
+ path = kasprintf(GFP_KERNEL, "%s/pr/aptpl_%s", db_root,
+ &wwn->unit_serial[0]);
+ if (!path)
+ return -ENOMEM;
- snprintf(path, 512, "%s/pr/aptpl_%s", db_root, &wwn->unit_serial[0]);
file = filp_open(path, flags, 0600);
if (IS_ERR(file)) {
pr_err("filp_open(%s) for APTPL metadata"
" failed\n", path);
+ kfree(path);
return PTR_ERR(file);
}
if (ret < 0)
pr_debug("Error writing APTPL metadata file: %s\n", path);
fput(file);
+ kfree(path);
return (ret < 0) ? -EIO : 0;
}
register_type, 0)) {
pr_err("Unable to allocate"
" struct t10_pr_registration\n");
- return TCM_INVALID_PARAMETER_LIST;
+ return TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
}
} else {
/*
*/
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
buf = transport_kmap_data_sg(cmd);
if (!buf) {
- ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out_put_pr_reg;
}
proto_ident = (buf[24] & 0x0f);
if (core_scsi3_alloc_registration(cmd->se_dev, dest_node_acl,
dest_lun, dest_se_deve, dest_se_deve->mapped_lun,
iport_ptr, sa_res_key, 0, aptpl, 2, 1)) {
- ret = TCM_INVALID_PARAMETER_LIST;
+ ret = TCM_INSUFFICIENT_REGISTRATION_RESOURCES;
goto out;
}
spin_lock(&dev->dev_reservation_lock);
core_scsi3_update_and_write_aptpl(cmd->se_dev, aptpl);
- transport_kunmap_data_sg(cmd);
-
core_scsi3_put_pr_reg(dest_pr_reg);
return 0;
out:
* Set the ADDITIONAL DESCRIPTOR LENGTH
*/
put_unaligned_be32(desc_len, &buf[off]);
+ off += 4;
/*
* Size of full descriptor header minus TransportID
* containing $FABRIC_MOD specific initiator device/port
spin_unlock(&se_cmd->t_state_lock);
return false;
}
+ if (se_cmd->transport_state & CMD_T_PRE_EXECUTE) {
+ if (se_cmd->scsi_status) {
+ pr_debug("Attempted to abort io tag: %llu early failure"
+ " status: 0x%02x\n", se_cmd->tag,
+ se_cmd->scsi_status);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ }
if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
pr_debug("Attempted to abort io tag: %llu already shutdown,"
" skipping\n", se_cmd->tag);
* LUN_RESET tmr..
*/
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del_init(&tmr->tmr_list);
+ if (tmr)
+ list_del_init(&tmr->tmr_list);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
cmd = tmr_p->task_cmd;
if (!cmd) {
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
struct se_device *dev, int err, bool write_pending);
-static int transport_put_cmd(struct se_cmd *cmd);
static void target_complete_ok_work(struct work_struct *work);
int init_se_kmem_caches(void)
if (transport_cmd_check_stop_to_fabric(cmd))
return 1;
if (remove && ack_kref)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
return ret;
}
{
int ret = 0, post_ret = 0;
- if (transport_check_aborted_status(cmd, 1))
- return;
-
pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
sense_reason);
target_show_cmd("-----[ ", cmd);
* For SAM Task Attribute emulation for failed struct se_cmd
*/
transport_complete_task_attr(cmd);
+
/*
* Handle special case for COMPARE_AND_WRITE failure, where the
* callback is expected to drop the per device ->caw_sem.
cmd->transport_complete_callback)
cmd->transport_complete_callback(cmd, false, &post_ret);
+ if (transport_check_aborted_status(cmd, 1))
+ return;
+
switch (sense_reason) {
case TCM_NON_EXISTENT_LUN:
case TCM_UNSUPPORTED_SCSI_OPCODE:
case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
break;
case TCM_OUT_OF_RESOURCES:
- sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- break;
+ cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
+ goto queue_status;
case TCM_RESERVATION_CONFLICT:
/*
* No SENSE Data payload for this case, set SCSI Status
cmd->orig_fe_lun, 0x2C,
ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
}
- trace_target_cmd_complete(cmd);
- ret = cmd->se_tfo->queue_status(cmd);
- if (ret)
- goto queue_full;
- goto check_stop;
+
+ goto queue_status;
default:
pr_err("Unknown transport error for CDB 0x%02x: %d\n",
cmd->t_task_cdb[0], sense_reason);
transport_cmd_check_stop_to_fabric(cmd);
return;
+queue_status:
+ trace_target_cmd_complete(cmd);
+ ret = cmd->se_tfo->queue_status(cmd);
+ if (!ret)
+ goto check_stop;
queue_full:
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
}
cmd->t_state = TRANSPORT_PROCESSING;
+ cmd->transport_state &= ~CMD_T_PRE_EXECUTE;
cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
spin_unlock_irq(&cmd->t_state_lock);
list_del(&cmd->se_delayed_node);
spin_unlock(&dev->delayed_cmd_lock);
+ cmd->transport_state |= CMD_T_SENT;
+
__target_execute_cmd(cmd, true);
if (cmd->sam_task_attr == TCM_ORDERED_TAG)
pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
dev->dev_cur_ordered_id);
}
+ cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
+
restart:
target_restart_delayed_cmds(dev);
}
ret = cmd->se_tfo->queue_data_in(cmd);
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
goto queue_full;
break;
}
- /* Fall through for DMA_TO_DEVICE */
+ /* fall through */
case DMA_NONE:
queue_status:
trace_target_cmd_complete(cmd);
cmd->t_bidi_data_nents = 0;
}
-/**
- * transport_put_cmd - release a reference to a command
- * @cmd: command to release
- *
- * This routine releases our reference to the command and frees it if possible.
- */
-static int transport_put_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
- /*
- * If this cmd has been setup with target_get_sess_cmd(), drop
- * the kref and call ->release_cmd() in kref callback.
- */
- return target_put_sess_cmd(cmd);
-}
-
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
struct scatterlist *sg = cmd->t_data_sg;
static void transport_write_pending_qf(struct se_cmd *cmd)
{
+ unsigned long flags;
int ret;
+ bool stop;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (stop) {
+ pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
+ __func__, __LINE__, cmd->tag);
+ complete_all(&cmd->t_transport_stop_comp);
+ return;
+ }
ret = cmd->se_tfo->write_pending(cmd);
if (ret) {
target_wait_free_cmd(cmd, &aborted, &tas);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
} else {
if (wait_for_tasks)
target_wait_free_cmd(cmd, &aborted, &tas);
transport_lun_remove_cmd(cmd);
if (!aborted || tas)
- ret = transport_put_cmd(cmd);
+ ret = target_put_sess_cmd(cmd);
}
/*
* If the task has been internally aborted due to TMR ABORT_TASK
ret = -ESHUTDOWN;
goto out;
}
+ se_cmd->transport_state |= CMD_T_PRE_EXECUTE;
list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
out:
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
.key = NOT_READY,
.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
},
+ [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
+ /*
+ * From spc4r22 sections 5.7.7, 5.7.8:
+ * If a PERSISTENT RESERVE OUT command with a REGISTER service action
+ * or a REGISTER AND IGNORE EXISTING KEY service action or
+ * REGISTER AND MOVE service action is attempted,
+ * but there are insufficient device server resources to complete the
+ * operation, then the command shall be terminated with CHECK CONDITION
+ * status, with the sense key set to ILLEGAL REQUEST, and the additional
+ * sense code set to INSUFFICIENT REGISTRATION RESOURCES.
+ */
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x55,
+ .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
+ },
};
static int translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
wait_queue_head_t nl_cmd_wq;
char dev_config[TCMU_CONFIG_LEN];
+
+ int nl_reply_supported;
};
#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
struct se_device *se_dev = se_cmd->se_dev;
struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
- int cmd_id;
tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
if (!tcmu_cmd)
tcmu_cmd->se_cmd = se_cmd;
tcmu_cmd->tcmu_dev = udev;
- if (udev->cmd_time_out)
- tcmu_cmd->deadline = jiffies +
- msecs_to_jiffies(udev->cmd_time_out);
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
tcmu_cmd->dbi_cnt = tcmu_cmd_get_block_cnt(tcmu_cmd);
return NULL;
}
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&udev->commands_lock);
- cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
- USHRT_MAX, GFP_NOWAIT);
- spin_unlock_irq(&udev->commands_lock);
- idr_preload_end();
-
- if (cmd_id < 0) {
- tcmu_free_cmd(tcmu_cmd);
- return NULL;
- }
- tcmu_cmd->cmd_id = cmd_id;
-
return tcmu_cmd;
}
return command_size;
}
+static int tcmu_setup_cmd_timer(struct tcmu_cmd *tcmu_cmd)
+{
+ struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
+ unsigned long tmo = udev->cmd_time_out;
+ int cmd_id;
+
+ if (tcmu_cmd->cmd_id)
+ return 0;
+
+ cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 1, USHRT_MAX, GFP_NOWAIT);
+ if (cmd_id < 0) {
+ pr_err("tcmu: Could not allocate cmd id.\n");
+ return cmd_id;
+ }
+ tcmu_cmd->cmd_id = cmd_id;
+
+ if (!tmo)
+ return 0;
+
+ tcmu_cmd->deadline = round_jiffies_up(jiffies + msecs_to_jiffies(tmo));
+ mod_timer(&udev->timeout, tcmu_cmd->deadline);
+ return 0;
+}
+
static sense_reason_t
tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
{
entry = (void *) mb + CMDR_OFF + cmd_head;
memset(entry, 0, command_size);
tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD);
- entry->hdr.cmd_id = tcmu_cmd->cmd_id;
/* Handle allocating space from the data area */
tcmu_cmd_reset_dbi_cur(tcmu_cmd);
}
entry->req.iov_bidi_cnt = iov_cnt;
+ ret = tcmu_setup_cmd_timer(tcmu_cmd);
+ if (ret) {
+ tcmu_cmd_free_data(tcmu_cmd, tcmu_cmd->dbi_cnt);
+ mutex_unlock(&udev->cmdr_lock);
+ return TCM_OUT_OF_RESOURCES;
+ }
+ entry->hdr.cmd_id = tcmu_cmd->cmd_id;
+
/*
* Recalculate the command's base size and size according
* to the actual needs
static sense_reason_t
tcmu_queue_cmd(struct se_cmd *se_cmd)
{
- struct se_device *se_dev = se_cmd->se_dev;
- struct tcmu_dev *udev = TCMU_DEV(se_dev);
struct tcmu_cmd *tcmu_cmd;
sense_reason_t ret;
ret = tcmu_queue_cmd_ring(tcmu_cmd);
if (ret != TCM_NO_SENSE) {
pr_err("TCMU: Could not queue command\n");
- spin_lock_irq(&udev->commands_lock);
- idr_remove(&udev->commands, tcmu_cmd->cmd_id);
- spin_unlock_irq(&udev->commands_lock);
tcmu_free_cmd(tcmu_cmd);
}
return 0;
}
-static void tcmu_device_timedout(unsigned long data)
+static void tcmu_device_timedout(struct timer_list *t)
{
- struct tcmu_dev *udev = (struct tcmu_dev *)data;
+ struct tcmu_dev *udev = from_timer(udev, t, timeout);
unsigned long flags;
spin_lock_irqsave(&udev->commands_lock, flags);
idr_init(&udev->commands);
spin_lock_init(&udev->commands_lock);
- setup_timer(&udev->timeout, tcmu_device_timedout,
- (unsigned long)udev);
+ timer_setup(&udev->timeout, tcmu_device_timedout, 0);
init_waitqueue_head(&udev->nl_cmd_wq);
spin_lock_init(&udev->nl_cmd_lock);
+ INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
+
return &udev->se_dev;
}
kfree(udev);
}
+static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
+{
+ if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
+ kmem_cache_free(tcmu_cmd_cache, cmd);
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static void tcmu_blocks_release(struct tcmu_dev *udev)
+{
+ int i;
+ struct page *page;
+
+ /* Try to release all block pages */
+ mutex_lock(&udev->cmdr_lock);
+ for (i = 0; i <= udev->dbi_max; i++) {
+ page = radix_tree_delete(&udev->data_blocks, i);
+ if (page) {
+ __free_page(page);
+ atomic_dec(&global_db_count);
+ }
+ }
+ mutex_unlock(&udev->cmdr_lock);
+}
+
static void tcmu_dev_kref_release(struct kref *kref)
{
struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
struct se_device *dev = &udev->se_dev;
+ struct tcmu_cmd *cmd;
+ bool all_expired = true;
+ int i;
+
+ vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
+
+ /* Upper layer should drain all requests before calling this */
+ spin_lock_irq(&udev->commands_lock);
+ idr_for_each_entry(&udev->commands, cmd, i) {
+ if (tcmu_check_and_free_pending_cmd(cmd) != 0)
+ all_expired = false;
+ }
+ idr_destroy(&udev->commands);
+ spin_unlock_irq(&udev->commands_lock);
+ WARN_ON(!all_expired);
+
+ tcmu_blocks_release(udev);
call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}
if (!tcmu_kern_cmd_reply_supported)
return;
+
+ if (udev->nl_reply_supported <= 0)
+ return;
+
relock:
spin_lock(&udev->nl_cmd_lock);
if (!tcmu_kern_cmd_reply_supported)
return 0;
+ if (udev->nl_reply_supported <= 0)
+ return 0;
+
pr_debug("sleeping for nl reply\n");
wait_for_completion(&nl_cmd->complete);
WARN_ON(udev->data_size % PAGE_SIZE);
WARN_ON(udev->data_size % DATA_BLOCK_SIZE);
- INIT_RADIX_TREE(&udev->data_blocks, GFP_KERNEL);
-
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
dev->dev_attrib.emulate_write_cache = 0;
dev->dev_attrib.hw_queue_depth = 128;
+ /* If user didn't explicitly disable netlink reply support, use
+ * module scope setting.
+ */
+ if (udev->nl_reply_supported >= 0)
+ udev->nl_reply_supported = tcmu_kern_cmd_reply_supported;
+
/*
* Get a ref in case userspace does a close on the uio device before
* LIO has initiated tcmu_free_device.
uio_unregister_device(&udev->uio_info);
err_register:
vfree(udev->mb_addr);
+ udev->mb_addr = NULL;
err_vzalloc:
kfree(info->name);
info->name = NULL;
return ret;
}
-static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
-{
- if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
- kmem_cache_free(tcmu_cmd_cache, cmd);
- return 0;
- }
- return -EINVAL;
-}
-
static bool tcmu_dev_configured(struct tcmu_dev *udev)
{
return udev->uio_info.uio_dev ? true : false;
}
-static void tcmu_blocks_release(struct tcmu_dev *udev)
-{
- int i;
- struct page *page;
-
- /* Try to release all block pages */
- mutex_lock(&udev->cmdr_lock);
- for (i = 0; i <= udev->dbi_max; i++) {
- page = radix_tree_delete(&udev->data_blocks, i);
- if (page) {
- __free_page(page);
- atomic_dec(&global_db_count);
- }
- }
- mutex_unlock(&udev->cmdr_lock);
-}
-
static void tcmu_free_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
static void tcmu_destroy_device(struct se_device *dev)
{
struct tcmu_dev *udev = TCMU_DEV(dev);
- struct tcmu_cmd *cmd;
- bool all_expired = true;
- int i;
del_timer_sync(&udev->timeout);
list_del(&udev->node);
mutex_unlock(&root_udev_mutex);
- vfree(udev->mb_addr);
-
- /* Upper layer should drain all requests before calling this */
- spin_lock_irq(&udev->commands_lock);
- idr_for_each_entry(&udev->commands, cmd, i) {
- if (tcmu_check_and_free_pending_cmd(cmd) != 0)
- all_expired = false;
- }
- idr_destroy(&udev->commands);
- spin_unlock_irq(&udev->commands_lock);
- WARN_ON(!all_expired);
-
- tcmu_blocks_release(udev);
-
tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL);
uio_unregister_device(&udev->uio_info);
enum {
Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_hw_max_sectors,
- Opt_err,
+ Opt_nl_reply_supported, Opt_err,
};
static match_table_t tokens = {
{Opt_dev_size, "dev_size=%u"},
{Opt_hw_block_size, "hw_block_size=%u"},
{Opt_hw_max_sectors, "hw_max_sectors=%u"},
+ {Opt_nl_reply_supported, "nl_reply_supported=%d"},
{Opt_err, NULL}
};
ret = tcmu_set_dev_attrib(&args[0],
&(dev->dev_attrib.hw_max_sectors));
break;
+ case Opt_nl_reply_supported:
+ arg_p = match_strdup(&args[0]);
+ if (!arg_p) {
+ ret = -ENOMEM;
+ break;
+ }
+ ret = kstrtoint(arg_p, 0, &udev->nl_reply_supported);
+ kfree(arg_p);
+ if (ret < 0)
+ pr_err("kstrtoint() failed for nl_reply_supported=\n");
+ break;
default:
break;
}
{
struct se_dev_attrib *da = container_of(to_config_group(item),
struct se_dev_attrib, da_group);
- struct tcmu_dev *udev = container_of(da->da_dev,
- struct tcmu_dev, se_dev);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
return snprintf(page, PAGE_SIZE, "%lu\n", udev->cmd_time_out / MSEC_PER_SEC);
}
}
CONFIGFS_ATTR(tcmu_, dev_size);
+static ssize_t tcmu_nl_reply_supported_show(struct config_item *item,
+ char *page)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+
+ return snprintf(page, PAGE_SIZE, "%d\n", udev->nl_reply_supported);
+}
+
+static ssize_t tcmu_nl_reply_supported_store(struct config_item *item,
+ const char *page, size_t count)
+{
+ struct se_dev_attrib *da = container_of(to_config_group(item),
+ struct se_dev_attrib, da_group);
+ struct tcmu_dev *udev = TCMU_DEV(da->da_dev);
+ s8 val;
+ int ret;
+
+ ret = kstrtos8(page, 0, &val);
+ if (ret < 0)
+ return ret;
+
+ udev->nl_reply_supported = val;
+ return count;
+}
+CONFIGFS_ATTR(tcmu_, nl_reply_supported);
+
static ssize_t tcmu_emulate_write_cache_show(struct config_item *item,
char *page)
{
&tcmu_attr_dev_config,
&tcmu_attr_dev_size,
&tcmu_attr_emulate_write_cache,
+ &tcmu_attr_nl_reply_supported,
NULL,
};
#endif /* CONFIG_ISA */
#ifndef CONFIG_CYZ_INTR
-static void cyz_poll(unsigned long);
+static void cyz_poll(struct timer_list *);
/* The Cyclades-Z polling cycle is defined by this variable */
static long cyz_polling_cycle = CZ_DEF_POLL;
#else /* CONFIG_CYZ_INTR */
-static void cyz_poll(unsigned long arg)
+static void cyz_poll(struct timer_list *unused)
{
struct cyclades_card *cinfo;
struct cyclades_port *info;
unsigned int address,
const unsigned char *data, int len,
int is_last);
-static void ipwireless_setup_timer(unsigned long data);
+static void ipwireless_setup_timer(struct timer_list *t);
static void handle_received_CTRL_packet(struct ipw_hardware *hw,
unsigned int channel_idx, const unsigned char *data, int len);
spin_lock_init(&hw->lock);
tasklet_init(&hw->tasklet, ipwireless_do_tasklet, (unsigned long) hw);
INIT_WORK(&hw->work_rx, ipw_receive_data_work);
- setup_timer(&hw->setup_timer, ipwireless_setup_timer,
- (unsigned long) hw);
+ timer_setup(&hw->setup_timer, ipwireless_setup_timer, 0);
return hw;
}
hw->init_loops = 0;
printk(KERN_INFO IPWIRELESS_PCCARD_NAME
": waiting for card to start up...\n");
- ipwireless_setup_timer((unsigned long) hw);
+ ipwireless_setup_timer(&hw->setup_timer);
}
-static void ipwireless_setup_timer(unsigned long data)
+static void ipwireless_setup_timer(struct timer_list *t)
{
- struct ipw_hardware *hw = (struct ipw_hardware *) data;
+ struct ipw_hardware *hw = from_timer(hw, t, setup_timer);
hw->init_loops++;
static int prev_card = 3; /* start servicing isi_card[0] */
static struct tty_driver *isicom_normal;
-static void isicom_tx(unsigned long _data);
+static void isicom_tx(struct timer_list *unused);
static void isicom_start(struct tty_struct *tty);
static DEFINE_TIMER(tx, isicom_tx);
* will do the rest of the work for us.
*/
-static void isicom_tx(unsigned long _data)
+static void isicom_tx(struct timer_list *unused)
{
unsigned long flags, base;
unsigned int retries;
static int moxa_tiocmget(struct tty_struct *tty);
static int moxa_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear);
-static void moxa_poll(unsigned long);
+static void moxa_poll(struct timer_list *);
static void moxa_set_tty_param(struct tty_struct *, struct ktermios *);
static void moxa_shutdown(struct tty_port *);
static int moxa_carrier_raised(struct tty_port *);
return 0;
}
-static void moxa_poll(unsigned long ignored)
+static void moxa_poll(struct timer_list *unused)
{
struct moxa_board_conf *brd;
u16 __iomem *ip;
* gsm->pending_cmd will be NULL and we just let the timer expire.
*/
-static void gsm_control_retransmit(unsigned long data)
+static void gsm_control_retransmit(struct timer_list *t)
{
- struct gsm_mux *gsm = (struct gsm_mux *)data;
+ struct gsm_mux *gsm = from_timer(gsm, t, t2_timer);
struct gsm_control *ctrl;
unsigned long flags;
spin_lock_irqsave(&gsm->control_lock, flags);
* end will get a DM response)
*/
-static void gsm_dlci_t1(unsigned long data)
+static void gsm_dlci_t1(struct timer_list *t)
{
- struct gsm_dlci *dlci = (struct gsm_dlci *)data;
+ struct gsm_dlci *dlci = from_timer(dlci, t, t1);
struct gsm_mux *gsm = dlci->gsm;
switch (dlci->state) {
}
skb_queue_head_init(&dlci->skb_list);
- setup_timer(&dlci->t1, gsm_dlci_t1, (unsigned long)dlci);
+ timer_setup(&dlci->t1, gsm_dlci_t1, 0);
tty_port_init(&dlci->port);
dlci->port.ops = &gsm_port_ops;
dlci->gsm = gsm;
struct gsm_dlci *dlci;
int i = 0;
- setup_timer(&gsm->t2_timer, gsm_control_retransmit, (unsigned long)gsm);
+ timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0);
init_waitqueue_head(&gsm->event);
spin_lock_init(&gsm->control_lock);
spin_lock_init(&gsm->tx_lock);
static void transmit_block(struct r3964_info *pInfo);
static void receive_char(struct r3964_info *pInfo, const unsigned char c);
static void receive_error(struct r3964_info *pInfo, const char flag);
-static void on_timeout(unsigned long priv);
+static void on_timeout(struct timer_list *t);
static int enable_signals(struct r3964_info *pInfo, struct pid *pid, int arg);
static int read_telegram(struct r3964_info *pInfo, struct pid *pid,
unsigned char __user * buf);
}
}
-static void on_timeout(unsigned long priv)
+static void on_timeout(struct timer_list *t)
{
- struct r3964_info *pInfo = (void *)priv;
+ struct r3964_info *pInfo = from_timer(pInfo, t, tmr);
switch (pInfo->state) {
case R3964_TX_REQUEST:
tty->disc_data = pInfo;
tty->receive_room = 65536;
- setup_timer(&pInfo->tmr, on_timeout, (unsigned long)pInfo);
+ timer_setup(&pInfo->tmr, on_timeout, 0);
return 0;
}
/****** RocketPort Local Variables ******/
-static void rp_do_poll(unsigned long dummy);
+static void rp_do_poll(struct timer_list *unused);
static struct tty_driver *rocket_driver;
/*
* The top level polling routine. Repeats every 1/100 HZ (10ms).
*/
-static void rp_do_poll(unsigned long dummy)
+static void rp_do_poll(struct timer_list *unused)
{
CONTROLLER_t *ctlp;
int ctrl, aiop, ch, line;
if (up->bugs & UART_BUG_THRE) {
pr_debug("ttyS%d - using backup timer\n", serial_index(port));
- up->timer.function = (TIMER_FUNC_TYPE)serial8250_backup_timeout;
+ up->timer.function = serial8250_backup_timeout;
mod_timer(&up->timer, jiffies +
uart_poll_timeout(port) + HZ / 5);
}
struct uart_port *port = &up->port;
del_timer_sync(&up->timer);
- up->timer.function = (TIMER_FUNC_TYPE)serial8250_timeout;
+ up->timer.function = serial8250_timeout;
if (port->irq)
serial_unlink_irq_chain(up);
}
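The 8250 hunks swap a live timer's handler by assigning timer.function directly; once every handler shares the struct timer_list * prototype, the interim TIMER_FUNC_TYPE cast can go. A sketch of the same move, with hypothetical handler names:

static void demo_backup_timeout(struct timer_list *t);

static void demo_switch_to_backup(struct timer_list *timer)
{
	del_timer_sync(timer);			/* quiesce before retargeting */
	timer->function = demo_backup_timeout;	/* same prototype, no cast */
	mod_timer(timer, jiffies + HZ / 5);
}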
static struct timer_list flush_timer;
static void
-timed_flush_handler(unsigned long ptr)
+timed_flush_handler(struct timer_list *unused)
{
struct e100_serial *info;
int i;
/* Setup the timed flush handler system */
#if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER)
- setup_timer(&flush_timer, timed_flush_handler, 0);
+ timer_setup(&flush_timer, timed_flush_handler, 0);
mod_timer(&flush_timer, jiffies + 5);
#endif
lpuart_copy_rx_to_tty(sport);
}
-static void lpuart_timer_func(unsigned long data)
+static void lpuart_timer_func(struct timer_list *t)
{
- struct lpuart_port *sport = (struct lpuart_port *)data;
+ struct lpuart_port *sport = from_timer(sport, t, lpuart_timer);
lpuart_copy_rx_to_tty(sport);
}
static void rx_dma_timer_init(struct lpuart_port *sport)
{
- setup_timer(&sport->lpuart_timer, lpuart_timer_func,
- (unsigned long)sport);
+ timer_setup(&sport->lpuart_timer, lpuart_timer_func, 0);
sport->lpuart_timer.expires = jiffies + sport->dma_rx_timeout;
add_timer(&sport->lpuart_timer);
}
* The SPI has timed out: hang up the tty. Users will then see a hangup
* and error events.
*/
-static void ifx_spi_timeout(unsigned long arg)
+static void ifx_spi_timeout(struct timer_list *t)
{
- struct ifx_spi_device *ifx_dev = (struct ifx_spi_device *)arg;
+ struct ifx_spi_device *ifx_dev = from_timer(ifx_dev, t, spi_timer);
dev_warn(&ifx_dev->spi_dev->dev, "*** SPI Timeout ***");
tty_port_tty_hangup(&ifx_dev->tty_port, false);
spin_lock_init(&ifx_dev->write_lock);
spin_lock_init(&ifx_dev->power_lock);
ifx_dev->power_status = 0;
- setup_timer(&ifx_dev->spi_timer, ifx_spi_timeout,
- (unsigned long)ifx_dev);
+ timer_setup(&ifx_dev->spi_timer, ifx_spi_timeout, 0);
ifx_dev->modem = pl_data->modem_type;
ifx_dev->use_dma = pl_data->use_dma;
ifx_dev->max_hz = pl_data->max_hz;
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void imx_timeout(unsigned long data)
+static void imx_timeout(struct timer_list *t)
{
- struct imx_port *sport = (struct imx_port *)data;
+ struct imx_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
sport->port.rs485_config = imx_rs485_config;
sport->port.rs485.flags |= SER_RS485_RTS_ON_SEND;
sport->port.flags = UPF_BOOT_AUTOCONF;
- setup_timer(&sport->timer, imx_timeout, (unsigned long)sport);
+ timer_setup(&sport->timer, imx_timeout, 0);
sport->gpios = mctrl_gpio_init(&sport->port, 0);
if (IS_ERR(sport->gpios))
* The tasklet is cheap, it does not cause wakeups when reschedules itself,
* instead it waits for the next tick.
*/
-static void kgdb_nmi_tty_receiver(unsigned long data)
+static void kgdb_nmi_tty_receiver(struct timer_list *t)
{
- struct kgdb_nmi_tty_priv *priv = (void *)data;
+ struct kgdb_nmi_tty_priv *priv = from_timer(priv, t, timer);
char ch;
priv->timer.expires = jiffies + (HZ/100);
return -ENOMEM;
INIT_KFIFO(priv->fifo);
- setup_timer(&priv->timer, kgdb_nmi_tty_receiver, (unsigned long)priv);
+ timer_setup(&priv->timer, kgdb_nmi_tty_receiver, 0);
tty_port_init(&priv->port);
priv->port.ops = &kgdb_nmi_tty_port_ops;
tty->driver_data = priv;
queue_work(s->workqueue, &s->work);
}
-static void max3100_timeout(unsigned long data)
+static void max3100_timeout(struct timer_list *t)
{
- struct max3100_port *s = (struct max3100_port *)data;
+ struct max3100_port *s = from_timer(s, t, timer);
if (s->port.state) {
max3100_dowork(s);
max3100s[i]->poll_time = 1;
max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend;
max3100s[i]->minor = i;
- setup_timer(&max3100s[i]->timer, max3100_timeout,
- (unsigned long)max3100s[i]);
+ timer_setup(&max3100s[i]->timer, max3100_timeout, 0);
dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i);
max3100s[i]->port.irq = max3100s[i]->irq;
*
* This function periodically polls the Serial MUX to check for new data.
*/
-static void mux_poll(unsigned long unused)
+static void mux_poll(struct timer_list *unused)
{
int i;
if(port_cnt > 0) {
/* Start the Mux timer */
- setup_timer(&mux_timer, mux_poll, 0UL);
+ timer_setup(&mux_timer, mux_poll, 0);
mod_timer(&mux_timer, jiffies + MUX_POLL_DELAY);
#ifdef CONFIG_SERIAL_MUX_CONSOLE
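One subtlety worth flagging: the third argument changes meaning across the conversion. setup_timer() took the data cookie there, while timer_setup() takes a flags word, which is why the 0UL cookie above becomes a plain 0. Real timer flags can be passed instead; a sketch using TIMER_DEFERRABLE (an assumption about the workload, not something these drivers do):

static struct timer_list demo_poll_timer;

static void demo_poll(struct timer_list *unused)
{
	/* ... poll the hardware ... */
	mod_timer(&demo_poll_timer, jiffies + HZ);
}

static int __init demo_init(void)
{
	/* Deferrable: polling that may slip while the CPU idles. */
	timer_setup(&demo_poll_timer, demo_poll, TIMER_DEFERRABLE);
	mod_timer(&demo_poll_timer, jiffies + HZ);
	return 0;
}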
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void pnx8xxx_timeout(unsigned long data)
+static void pnx8xxx_timeout(struct timer_list *t)
{
- struct pnx8xxx_port *sport = (struct pnx8xxx_port *)data;
+ struct pnx8xxx_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
first = 0;
for (i = 0; i < NR_PORTS; i++) {
- setup_timer(&pnx8xxx_ports[i].timer, pnx8xxx_timeout,
- (unsigned long)&pnx8xxx_ports[i]);
+ timer_setup(&pnx8xxx_ports[i].timer, pnx8xxx_timeout, 0);
pnx8xxx_ports[i].port.ops = &pnx8xxx_pops;
}
}
* This is our per-port timeout handler, for checking the
* modem status signals.
*/
-static void sa1100_timeout(unsigned long data)
+static void sa1100_timeout(struct timer_list *t)
{
- struct sa1100_port *sport = (struct sa1100_port *)data;
+ struct sa1100_port *sport = from_timer(sport, t, timer);
unsigned long flags;
if (sport->port.state) {
sa1100_ports[i].port.fifosize = 8;
sa1100_ports[i].port.line = i;
sa1100_ports[i].port.iotype = UPIO_MEM;
- setup_timer(&sa1100_ports[i].timer, sa1100_timeout,
- (unsigned long)&sa1100_ports[i]);
+ timer_setup(&sa1100_ports[i].timer, sa1100_timeout, 0);
}
/*
(SCFCR_RTRG0 | SCFCR_RTRG1)) != 0;
}
-static void rx_fifo_timer_fn(unsigned long arg)
+static void rx_fifo_timer_fn(struct timer_list *t)
{
- struct sci_port *s = (struct sci_port *)arg;
+ struct sci_port *s = from_timer(s, t, rx_fifo_timer);
struct uart_port *port = &s->port;
dev_dbg(port->dev, "Rx timed out\n");
sci->rx_fifo_timeout = r;
scif_set_rtrg(port, 1);
if (r > 0)
- setup_timer(&sci->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)sci);
+ timer_setup(&sci->rx_fifo_timer, rx_fifo_timer_fn, 0);
}
return count;
dma_async_issue_pending(chan);
}
-static void rx_timer_fn(unsigned long arg)
+static void rx_timer_fn(struct timer_list *t)
{
- struct sci_port *s = (struct sci_port *)arg;
+ struct sci_port *s = from_timer(s, t, rx_timer);
struct dma_chan *chan = s->chan_rx;
struct uart_port *port = &s->port;
struct dma_tx_state state;
dma += s->buf_len_rx;
}
- setup_timer(&s->rx_timer, rx_timer_fn, (unsigned long)s);
+ timer_setup(&s->rx_timer, rx_timer_fn, 0);
if (port->type == PORT_SCIFA || port->type == PORT_SCIFB)
sci_submit_rx(s);
if (s->rx_trigger > 1) {
if (s->rx_fifo_timeout) {
scif_set_rtrg(port, 1);
- setup_timer(&s->rx_fifo_timer, rx_fifo_timer_fn,
- (unsigned long)s);
+ timer_setup(&s->rx_fifo_timer, rx_fifo_timer_fn, 0);
} else {
if (port->type == PORT_SCIFA ||
port->type == PORT_SCIFB)
* Obviously not used in interrupt mode
*
*/
-static void sn_sal_timer_poll(unsigned long data)
+static void sn_sal_timer_poll(struct timer_list *t)
{
- struct sn_cons_port *port = (struct sn_cons_port *)data;
+ struct sn_cons_port *port = from_timer(port, t, sc_timer);
unsigned long flags;
if (!port)
* timer to poll for input and push data from the console
* buffer.
*/
- setup_timer(&port->sc_timer, sn_sal_timer_poll, (unsigned long)port);
+ timer_setup(&port->sc_timer, sn_sal_timer_poll, 0);
if (IS_RUNNING_ON_SIMULATOR())
port->sc_interrupt_timeout = 6;
static void usc_loopback_frame( struct mgsl_struct *info );
-static void mgsl_tx_timeout(unsigned long context);
+static void mgsl_tx_timeout(struct timer_list *t);
static void usc_loopmode_cancel_transmit( struct mgsl_struct * info );
memset(&info->icount, 0, sizeof(info->icount));
- setup_timer(&info->tx_timer, mgsl_tx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, mgsl_tx_timeout, 0);
/* Allocate and claim adapter resources */
retval = mgsl_claim_resources(info);
* Arguments: context pointer to device instance data
* Return Value: None
*/
-static void mgsl_tx_timeout(unsigned long context)
+static void mgsl_tx_timeout(struct timer_list *t)
{
- struct mgsl_struct *info = (struct mgsl_struct*)context;
+ struct mgsl_struct *info = from_timer(info, t, tx_timer);
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
static int alloc_tmp_rbuf(struct slgt_info *info);
static void free_tmp_rbuf(struct slgt_info *info);
-static void tx_timeout(unsigned long context);
-static void rx_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void rx_timeout(struct timer_list *t);
/*
* ioctl handlers
info->adapter_num = adapter_num;
info->port_num = port_num;
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
- setup_timer(&info->rx_timer, rx_timeout, (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
+ timer_setup(&info->rx_timer, rx_timeout, 0);
/* Copy configuration info to device instance data */
info->pdev = pdev;
/*
* transmit timeout handler
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- struct slgt_info *info = (struct slgt_info*)context;
+ struct slgt_info *info = from_timer(info, t, tx_timer);
unsigned long flags;
DBGINFO(("%s tx_timeout\n", info->device_name));
/*
* receive buffer polling timer
*/
-static void rx_timeout(unsigned long context)
+static void rx_timeout(struct timer_list *t)
{
- struct slgt_info *info = (struct slgt_info*)context;
+ struct slgt_info *info = from_timer(info, t, rx_timer);
unsigned long flags;
DBGINFO(("%s rx_timeout\n", info->device_name));
static void load_pci_memory(SLMP_INFO *info, char* dest, const char* src, unsigned short count);
static void trace_block(SLMP_INFO *info, const char* data, int count, int xmit);
-static void tx_timeout(unsigned long context);
-static void status_timeout(unsigned long context);
+static void tx_timeout(struct timer_list *t);
+static void status_timeout(struct timer_list *t);
static unsigned char read_reg(SLMP_INFO *info, unsigned char addr);
static void write_reg(SLMP_INFO *info, unsigned char addr, unsigned char val);
info->bus_type = MGSL_BUS_TYPE_PCI;
info->irq_flags = IRQF_SHARED;
- setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info);
- setup_timer(&info->status_timer, status_timeout,
- (unsigned long)info);
+ timer_setup(&info->tx_timer, tx_timeout, 0);
+ timer_setup(&info->status_timer, status_timeout, 0);
/* Store the PCI9050 misc control register value because a flaw
* in the PCI9050 prevents LCR registers from being read if
/* called when HDLC frame times out
* update stats and do tx completion processing
*/
-static void tx_timeout(unsigned long context)
+static void tx_timeout(struct timer_list *t)
{
- SLMP_INFO *info = (SLMP_INFO*)context;
+ SLMP_INFO *info = from_timer(info, t, tx_timer);
unsigned long flags;
if ( debug_level >= DEBUG_LEVEL_INFO )
/* called to periodically check the DSR/RI modem signal input status
*/
-static void status_timeout(unsigned long context)
+static void status_timeout(struct timer_list *t)
{
u16 status = 0;
- SLMP_INFO *info = (SLMP_INFO*)context;
+ SLMP_INFO *info = from_timer(info, t, status_timer);
unsigned long flags;
unsigned char delta;
return 0;
}
-static void kd_nosound(unsigned long ignored)
+static void kd_nosound(struct timer_list *unused)
{
static unsigned int zero;
static void hide_cursor(struct vc_data *vc);
static void console_callback(struct work_struct *ignored);
static void con_driver_unregister_callback(struct work_struct *ignored);
-static void blank_screen_t(unsigned long dummy);
+static void blank_screen_t(struct timer_list *unused);
static void set_palette(struct vc_data *vc);
#define vt_get_kmsg_redirect() vt_kmsg_redirect(-1)
* (console operations can still happen at irq time, but only from printk which
* has the console mutex. Not perfect yet, but better than no locking
*/
-static void blank_screen_t(unsigned long dummy)
+static void blank_screen_t(struct timer_list *unused)
{
blank_timer_expired = 1;
schedule_work(&console_work);
complete(urb->context);
}
-static void cxacru_timeout_kill(unsigned long data)
+struct cxacru_timer {
+ struct timer_list timer;
+ struct urb *urb;
+};
+
+static void cxacru_timeout_kill(struct timer_list *t)
{
- usb_unlink_urb((struct urb *) data);
+ struct cxacru_timer *timer = from_timer(timer, t, timer);
+
+ usb_unlink_urb(timer->urb);
}
static int cxacru_start_wait_urb(struct urb *urb, struct completion *done,
int *actual_length)
{
- struct timer_list timer;
+ struct cxacru_timer timer = {
+ .urb = urb,
+ };
- setup_timer(&timer, cxacru_timeout_kill, (unsigned long)urb);
- timer.expires = jiffies + msecs_to_jiffies(CMD_TIMEOUT);
- add_timer(&timer);
+ timer_setup_on_stack(&timer.timer, cxacru_timeout_kill, 0);
+ mod_timer(&timer.timer, jiffies + msecs_to_jiffies(CMD_TIMEOUT));
wait_for_completion(done);
- del_timer_sync(&timer);
+ del_timer_sync(&timer.timer);
+ destroy_timer_on_stack(&timer.timer);
if (actual_length)
*actual_length = urb->actual_length;
}
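The cxacru hunk shows the one place the conversion is not purely mechanical: the old code smuggled the URB through the .data cookie of an on-stack timer. With the cookie gone, the payload moves into a wrapper struct around the timer, and the on-stack variants handle debug-object lifetime. A sketch of the same shape, assuming a hypothetical demo_wait context:

#include <linux/completion.h>
#include <linux/timer.h>

struct demo_wait {
	struct timer_list timer;
	struct completion *done;	/* payload that used to ride in .data */
};

static void demo_wait_timeout(struct timer_list *t)
{
	struct demo_wait *w = from_timer(w, t, timer);

	complete(w->done);		/* give up waiting */
}

static void demo_wait_bounded(struct completion *done)
{
	struct demo_wait w = { .done = done };

	timer_setup_on_stack(&w.timer, demo_wait_timeout, 0);
	mod_timer(&w.timer, jiffies + msecs_to_jiffies(500));
	wait_for_completion(done);
	del_timer_sync(&w.timer);
	destroy_timer_on_stack(&w.timer);	/* mandatory for stack timers */
}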
}
-static void speedtch_status_poll(unsigned long data)
+static void speedtch_status_poll(struct timer_list *t)
{
- struct speedtch_instance_data *instance = (void *)data;
+ struct speedtch_instance_data *instance = from_timer(instance, t,
+ status_check_timer);
schedule_work(&instance->status_check_work);
atm_warn(instance->usbatm, "Too many failures - disabling line status polling\n");
}
-static void speedtch_resubmit_int(unsigned long data)
+static void speedtch_resubmit_int(struct timer_list *t)
{
- struct speedtch_instance_data *instance = (void *)data;
+ struct speedtch_instance_data *instance = from_timer(instance, t,
+ resubmit_timer);
struct urb *int_urb = instance->int_urb;
int ret;
usbatm->flags |= (use_isoc ? UDSL_USE_ISOC : 0);
INIT_WORK(&instance->status_check_work, speedtch_check_status);
- setup_timer(&instance->status_check_timer, speedtch_status_poll,
- (unsigned long)instance);
+ timer_setup(&instance->status_check_timer, speedtch_status_poll, 0);
instance->last_status = 0xff;
instance->poll_delay = MIN_POLL_DELAY;
- setup_timer(&instance->resubmit_timer, speedtch_resubmit_int,
- (unsigned long)instance);
+ timer_setup(&instance->resubmit_timer, speedtch_resubmit_int, 0);
instance->int_urb = usb_alloc_urb(0, GFP_KERNEL);
return 0;
}
-static void usbatm_tasklet_schedule(unsigned long data)
+static void usbatm_tasklet_schedule(struct timer_list *t)
{
- tasklet_schedule((struct tasklet_struct *) data);
+ struct usbatm_channel *channel = from_timer(channel, t, delay);
+
+ tasklet_schedule(&channel->tasklet);
}
static void usbatm_init_channel(struct usbatm_channel *channel)
{
spin_lock_init(&channel->lock);
INIT_LIST_HEAD(&channel->list);
- channel->delay.function = usbatm_tasklet_schedule;
- channel->delay.data = (unsigned long) &channel->tasklet;
- init_timer(&channel->delay);
+ timer_setup(&channel->delay, usbatm_tasklet_schedule, 0);
}
int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id,
EXPORT_SYMBOL_GPL(usb_hcd_poll_rh_status);
/* timer callback */
-static void rh_timer_func (unsigned long _hcd)
+static void rh_timer_func (struct timer_list *t)
{
- usb_hcd_poll_rh_status((struct usb_hcd *) _hcd);
+ struct usb_hcd *_hcd = from_timer(_hcd, t, rh_timer);
+
+ usb_hcd_poll_rh_status(_hcd);
}
/*-------------------------------------------------------------------------*/
hcd->self.bus_name = bus_name;
hcd->self.uses_dma = (sysdev->dma_mask != NULL);
- setup_timer(&hcd->rh_timer, rh_timer_func, (unsigned long)hcd);
+ timer_setup(&hcd->rh_timer, rh_timer_func, 0);
#ifdef CONFIG_PM
INIT_WORK(&hcd->wakeup_work, hcd_resume_work);
#endif
}
}
-static void dwc2_wakeup_detected(unsigned long data)
+static void dwc2_wakeup_detected(struct timer_list *t)
{
- struct dwc2_hsotg *hsotg = (struct dwc2_hsotg *)data;
+ struct dwc2_hsotg *hsotg = from_timer(hsotg, t, wkp_timer);
u32 hprt0;
dev_dbg(hsotg->dev, "%s()\n", __func__);
}
INIT_WORK(&hsotg->wf_otg, dwc2_conn_id_status_change);
- setup_timer(&hsotg->wkp_timer, dwc2_wakeup_detected,
- (unsigned long)hsotg);
+ timer_setup(&hsotg->wkp_timer, dwc2_wakeup_detected, 0);
/* Initialize the non-periodic schedule */
INIT_LIST_HEAD(&hsotg->non_periodic_sched_inactive);
*
* @work: Pointer to a qh unreserve_work.
*/
-static void dwc2_unreserve_timer_fn(unsigned long data)
+static void dwc2_unreserve_timer_fn(struct timer_list *t)
{
- struct dwc2_qh *qh = (struct dwc2_qh *)data;
+ struct dwc2_qh *qh = from_timer(qh, t, unreserve_timer);
struct dwc2_hsotg *hsotg = qh->hsotg;
unsigned long flags;
/* Initialize QH */
qh->hsotg = hsotg;
- setup_timer(&qh->unreserve_timer, dwc2_unreserve_timer_fn,
- (unsigned long)qh);
+ timer_setup(&qh->unreserve_timer, dwc2_unreserve_timer_fn, 0);
qh->ep_type = ep_type;
qh->ep_is_in = ep_is_in;
mod_timer(&udc->vbus_timer, jiffies + VBUS_POLL_TIMEOUT);
}
-static void at91_vbus_timer(unsigned long data)
+static void at91_vbus_timer(struct timer_list *t)
{
- struct at91_udc *udc = (struct at91_udc *)data;
+ struct at91_udc *udc = from_timer(udc, t, vbus_timer);
/*
* If we are polling vbus it is likely that the gpio is on an
if (udc->board.vbus_polled) {
INIT_WORK(&udc->vbus_timer_work, at91_vbus_timer_work);
- setup_timer(&udc->vbus_timer, at91_vbus_timer,
- (unsigned long)udc);
+ timer_setup(&udc->vbus_timer, at91_vbus_timer, 0);
mod_timer(&udc->vbus_timer,
jiffies + VBUS_POLL_TIMEOUT);
} else {
/* drive both sides of the transfers; looks like irq handlers to
* both drivers except the callbacks aren't in_irq().
*/
-static void dummy_timer(unsigned long _dum_hcd)
+static void dummy_timer(struct timer_list *t)
{
- struct dummy_hcd *dum_hcd = (struct dummy_hcd *) _dum_hcd;
+ struct dummy_hcd *dum_hcd = from_timer(dum_hcd, t, timer);
struct dummy *dum = dum_hcd->dum;
struct urbp *urbp, *tmp;
unsigned long flags;
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
- setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
dum_hcd->stream_en_ep = 0;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
return dummy_start_ss(dum_hcd);
spin_lock_init(&dum_hcd->dum->lock);
- setup_timer(&dum_hcd->timer, dummy_timer, (unsigned long)dum_hcd);
+ timer_setup(&dum_hcd->timer, dummy_timer, 0);
dum_hcd->rh_state = DUMMY_RH_RUNNING;
INIT_LIST_HEAD(&dum_hcd->urbp_list);
return IRQ_HANDLED;
}
-static void m66592_timer(unsigned long _m66592)
+static void m66592_timer(struct timer_list *t)
{
- struct m66592 *m66592 = (struct m66592 *)_m66592;
+ struct m66592 *m66592 = from_timer(m66592, t, timer);
unsigned long flags;
u16 tmp;
m66592->gadget.max_speed = USB_SPEED_HIGH;
m66592->gadget.name = udc_name;
- setup_timer(&m66592->timer, m66592_timer, (unsigned long)m66592);
+ timer_setup(&m66592->timer, m66592_timer, 0);
m66592->reg = reg;
ret = request_irq(ires->start, m66592_irq, IRQF_SHARED,
#define PIO_OUT_TIMEOUT (jiffies + HZ/3)
#define HALF_FULL(f) (!((f)&(UDC_NON_ISO_FIFO_FULL|UDC_NON_ISO_FIFO_EMPTY)))
-static void pio_out_timer(unsigned long _ep)
+static void pio_out_timer(struct timer_list *t)
{
- struct omap_ep *ep = (void *) _ep;
+ struct omap_ep *ep = from_timer(ep, t, timer);
unsigned long flags;
u16 stat_flg;
}
if (dbuf && addr)
epn_rxtx |= UDC_EPN_RX_DB;
- init_timer(&ep->timer);
- ep->timer.function = pio_out_timer;
- ep->timer.data = (unsigned long) ep;
+ timer_setup(&ep->timer, pio_out_timer, 0);
}
if (addr)
epn_rxtx |= UDC_EPN_RX_VALID;
nuke(&dev->ep[i], -ECONNABORTED);
}
-static void udc_watchdog(unsigned long _dev)
+static void udc_watchdog(struct timer_list *t)
{
- struct pxa25x_udc *dev = (void *)_dev;
+ struct pxa25x_udc *dev = from_timer(dev, t, timer);
local_irq_disable();
if (dev->ep0state == EP0_STALL
gpio_direction_output(dev->mach->gpio_pullup, 0);
}
- setup_timer(&dev->timer, udc_watchdog, (unsigned long)dev);
+ timer_setup(&dev->timer, udc_watchdog, 0);
the_controller = dev;
platform_set_drvdata(pdev, dev);
return IRQ_HANDLED;
}
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, timer);
unsigned long flags;
u16 tmp;
r8a66597->gadget.max_speed = USB_SPEED_HIGH;
r8a66597->gadget.name = udc_name;
- setup_timer(&r8a66597->timer, r8a66597_timer, (unsigned long)r8a66597);
+ timer_setup(&r8a66597->timer, r8a66597_timer, 0);
r8a66597->reg = reg;
if (r8a66597->pdata->on_chip) {
static void ohci_dump(struct ohci_hcd *ohci);
static void ohci_stop(struct usb_hcd *hcd);
-static void io_watchdog_func(unsigned long _ohci);
+static void io_watchdog_func(struct timer_list *t);
#include "ohci-hub.c"
#include "ohci-dbg.c"
if (ohci->hcca)
return 0;
- setup_timer(&ohci->io_watchdog, io_watchdog_func,
- (unsigned long) ohci);
+ timer_setup(&ohci->io_watchdog, io_watchdog_func, 0);
ohci->hcca = dma_alloc_coherent (hcd->self.controller,
sizeof(*ohci->hcca), &ohci->hcca_dma, GFP_KERNEL);
* the unlink list. As a result, URBs could never be dequeued and
* endpoints could never be released.
*/
-static void io_watchdog_func(unsigned long _ohci)
+static void io_watchdog_func(struct timer_list *t)
{
- struct ohci_hcd *ohci = (struct ohci_hcd *) _ohci;
+ struct ohci_hcd *ohci = from_timer(ohci, t, io_watchdog);
bool takeback_all_pending = false;
u32 status;
u32 head;
return ret;
}
-static void oxu_watchdog(unsigned long param)
+static void oxu_watchdog(struct timer_list *t)
{
- struct oxu_hcd *oxu = (struct oxu_hcd *) param;
+ struct oxu_hcd *oxu = from_timer(oxu, t, watchdog);
unsigned long flags;
spin_lock_irqsave(&oxu->lock, flags);
spin_lock_init(&oxu->lock);
- setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);
+ timer_setup(&oxu->watchdog, oxu_watchdog, 0);
/*
* hw default: 1K periodic list heads, one per frame.
spin_unlock_irqrestore(&r8a66597->lock, flags);
}
-static void r8a66597_timer(unsigned long _r8a66597)
+static void r8a66597_timer(struct timer_list *t)
{
- struct r8a66597 *r8a66597 = (struct r8a66597 *)_r8a66597;
+ struct r8a66597 *r8a66597 = from_timer(r8a66597, t, rh_timer);
unsigned long flags;
int port;
r8a66597->max_root_hub = 2;
spin_lock_init(&r8a66597->lock);
- setup_timer(&r8a66597->rh_timer, r8a66597_timer,
- (unsigned long)r8a66597);
+ timer_setup(&r8a66597->rh_timer, r8a66597_timer, 0);
r8a66597->reg = reg;
/* make sure no interrupts are pending */
}
static void
-sl811h_timer(unsigned long _sl811)
+sl811h_timer(struct timer_list *t)
{
- struct sl811 *sl811 = (void *) _sl811;
+ struct sl811 *sl811 = from_timer(sl811, t, timer);
unsigned long flags;
u8 irqstat;
u8 signaling = sl811->ctrl1 & SL11H_CTL1MASK_FORCE;
spin_lock_init(&sl811->lock);
INIT_LIST_HEAD(&sl811->async);
sl811->board = dev_get_platdata(&dev->dev);
- setup_timer(&sl811->timer, sl811h_timer, (unsigned long)sl811);
+ timer_setup(&sl811->timer, sl811h_timer, 0);
sl811->addr_reg = addr_reg;
sl811->data_reg = data_reg;
hcd->self.sg_tablesize = ~0;
spin_lock_init(&uhci->lock);
- setup_timer(&uhci->fsbr_timer, uhci_fsbr_timeout,
- (unsigned long) uhci);
+ timer_setup(&uhci->fsbr_timer, uhci_fsbr_timeout, 0);
INIT_LIST_HEAD(&uhci->idle_qh_list);
init_waitqueue_head(&uhci->waitqh);
}
}
-static void uhci_fsbr_timeout(unsigned long _uhci)
+static void uhci_fsbr_timeout(struct timer_list *t)
{
- struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci;
+ struct uhci_hcd *uhci = from_timer(uhci, t, fsbr_timer);
unsigned long flags;
spin_lock_irqsave(&uhci->lock, flags);
#endif
-static void compliance_mode_recovery(unsigned long arg)
+static void compliance_mode_recovery(struct timer_list *t)
{
struct xhci_hcd *xhci;
struct usb_hcd *hcd;
u32 temp;
int i;
- xhci = (struct xhci_hcd *)arg;
+ xhci = from_timer(xhci, t, comp_mode_recovery_timer);
for (i = 0; i < xhci->num_usb3_ports; i++) {
temp = readl(xhci->usb3_ports[i]);
static void compliance_mode_recovery_timer_init(struct xhci_hcd *xhci)
{
xhci->port_status_u0 = 0;
- setup_timer(&xhci->comp_mode_recovery_timer,
- compliance_mode_recovery, (unsigned long)xhci);
+ timer_setup(&xhci->comp_mode_recovery_timer, compliance_mode_recovery,
+ 0);
xhci->comp_mode_recovery_timer.expires = jiffies +
msecs_to_jiffies(COMP_MODE_RCVRY_MSECS);
val, reg, NULL, 0, MOS_WDR_TIMEOUT);
}
-static void mos7840_led_off(unsigned long arg)
+static void mos7840_led_off(struct timer_list *t)
{
- struct moschip_port *mcs = (struct moschip_port *) arg;
+ struct moschip_port *mcs = from_timer(mcs, t, led_timer1);
/* Turn off LED */
mos7840_set_led_async(mcs, 0x0300, MODEM_CONTROL_REGISTER);
jiffies + msecs_to_jiffies(LED_OFF_MS));
}
-static void mos7840_led_flag_off(unsigned long arg)
+static void mos7840_led_flag_off(struct timer_list *t)
{
- struct moschip_port *mcs = (struct moschip_port *) arg;
+ struct moschip_port *mcs = from_timer(mcs, t, led_timer2);
clear_bit_unlock(MOS7840_FLAG_LED_BUSY, &mcs->flags);
}
goto error;
}
- setup_timer(&mos7840_port->led_timer1, mos7840_led_off,
- (unsigned long)mos7840_port);
+ timer_setup(&mos7840_port->led_timer1, mos7840_led_off, 0);
mos7840_port->led_timer1.expires =
jiffies + msecs_to_jiffies(LED_ON_MS);
- setup_timer(&mos7840_port->led_timer2, mos7840_led_flag_off,
- (unsigned long)mos7840_port);
+ timer_setup(&mos7840_port->led_timer2, mos7840_led_flag_off,
+ 0);
mos7840_port->led_timer2.expires =
jiffies + msecs_to_jiffies(LED_OFF_MS);
mod_timer(&chip->rts51x_suspend_timer, chip->timer_expires);
}
-static void rts51x_suspend_timer_fn(unsigned long data)
+static void rts51x_suspend_timer_fn(struct timer_list *t)
{
- struct rts51x_chip *chip = (struct rts51x_chip *)data;
+ struct rts51x_chip *chip = from_timer(chip, t, rts51x_suspend_timer);
struct us_data *us = chip->us;
switch (rts51x_get_stat(chip)) {
us->proto_handler = rts51x_invoke_transport;
chip->timer_expires = 0;
- setup_timer(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn,
- (unsigned long)chip);
+ timer_setup(&chip->rts51x_suspend_timer, rts51x_suspend_timer_fn, 0);
fw5895_init(us);
/* enable autosuspend function of the usb device */
mutex_unlock(&rc->rsvs_mutex);
}
-static void uwb_cnflt_timer(unsigned long arg)
+static void uwb_cnflt_timer(struct timer_list *t)
{
- struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;
+ struct uwb_cnflt_alien *cnflt = from_timer(cnflt, t, timer);
queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}
}
INIT_LIST_HEAD(&cnflt->rc_node);
- setup_timer(&cnflt->timer, uwb_cnflt_timer, (unsigned long)cnflt);
+ timer_setup(&cnflt->timer, uwb_cnflt_timer, 0);
cnflt->rc = rc;
INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);
struct list_head list_node;
};
-static void uwb_rc_neh_timer(unsigned long arg);
+static void uwb_rc_neh_timer(struct timer_list *t);
static void uwb_rc_neh_release(struct kref *kref)
{
kref_init(&neh->kref);
INIT_LIST_HEAD(&neh->list_node);
- setup_timer(&neh->timer, uwb_rc_neh_timer, (unsigned long)neh);
+ timer_setup(&neh->timer, uwb_rc_neh_timer, 0);
neh->rc = rc;
neh->evt_type = expected_type;
EXPORT_SYMBOL_GPL(uwb_rc_neh_error);
-static void uwb_rc_neh_timer(unsigned long arg)
+static void uwb_rc_neh_timer(struct timer_list *t)
{
- struct uwb_rc_neh *neh = (struct uwb_rc_neh *)arg;
+ struct uwb_rc_neh *neh = from_timer(neh, t, timer);
struct uwb_rc *rc = neh->rc;
unsigned long flags;
#include "uwb-internal.h"
-static void uwb_rsv_timer(unsigned long arg);
+static void uwb_rsv_timer(struct timer_list *t);
static const char *rsv_states[] = {
[UWB_RSV_STATE_NONE] = "none ",
dev_dbg(dev, "put stream %d\n", rsv->stream);
}
-void uwb_rsv_backoff_win_timer(unsigned long arg)
+void uwb_rsv_backoff_win_timer(struct timer_list *t)
{
- struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
+ struct uwb_drp_backoff_win *bow = from_timer(bow, t, timer);
struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
struct device *dev = &rc->uwb_dev.dev;
INIT_LIST_HEAD(&rsv->rc_node);
INIT_LIST_HEAD(&rsv->pal_node);
kref_init(&rsv->kref);
- setup_timer(&rsv->timer, uwb_rsv_timer, (unsigned long)rsv);
+ timer_setup(&rsv->timer, uwb_rsv_timer, 0);
rsv->rc = rc;
INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);
mutex_unlock(&rc->rsvs_mutex);
}
-static void uwb_rsv_timer(unsigned long arg)
+static void uwb_rsv_timer(struct timer_list *t)
{
- struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
+ struct uwb_rsv *rsv = from_timer(rsv, t, timer);
queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}
rc->bow.can_reserve_extra_mases = true;
rc->bow.total_expired = 0;
rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
- setup_timer(&rc->bow.timer, uwb_rsv_backoff_win_timer,
- (unsigned long)&rc->bow);
+ timer_setup(&rc->bow.timer, uwb_rsv_backoff_win_timer, 0);
bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}
bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv);
void uwb_rsv_dump(char *text, struct uwb_rsv *rsv);
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available);
-void uwb_rsv_backoff_win_timer(unsigned long arg);
+void uwb_rsv_backoff_win_timer(struct timer_list *t);
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc);
int uwb_rsv_status(struct uwb_rsv *rsv);
int uwb_rsv_companion_status(struct uwb_rsv *rsv);
MODULE_PARM_DESC(use_gpio,
"Use the gpio watchdog (required by old cobalt boards).");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long unused)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
/*
* Timer tick
*/
-static void at91_ping(unsigned long data)
+static void at91_ping(struct timer_list *t)
{
- struct at91wdt *wdt = (struct at91wdt *)data;
+ struct at91wdt *wdt = from_timer(wdt, t, timer);
if (time_before(jiffies, wdt->next_heartbeat) ||
!watchdog_active(&wdt->wdd)) {
at91_wdt_reset(wdt);
"watchdog already configured differently (mr = %x expecting %x)\n",
tmp & wdt->mr_mask, wdt->mr & wdt->mr_mask);
- setup_timer(&wdt->timer, at91_ping, (unsigned long)wdt);
+ timer_setup(&wdt->timer, at91_ping, 0);
/*
* Use min_heartbeat the first time to avoid spurious watchdog reset:
.restart = bcm47xx_wdt_restart,
};
-static void bcm47xx_wdt_soft_timer_tick(unsigned long data)
+static void bcm47xx_wdt_soft_timer_tick(struct timer_list *t)
{
- struct bcm47xx_wdt *wdt = (struct bcm47xx_wdt *)data;
+ struct bcm47xx_wdt *wdt = from_timer(wdt, t, soft_timer);
u32 next_tick = min(wdt->wdd.timeout * 1000, wdt->max_timer_ms);
if (!atomic_dec_and_test(&wdt->soft_ticks)) {
struct bcm47xx_wdt *wdt = bcm47xx_wdt_get(wdd);
bcm47xx_wdt_soft_keepalive(wdd);
- bcm47xx_wdt_soft_timer_tick((unsigned long)wdt);
+ bcm47xx_wdt_soft_timer_tick(&wdt->soft_timer);
return 0;
}
if (soft) {
wdt->wdd.ops = &bcm47xx_wdt_soft_ops;
- setup_timer(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick,
- (long unsigned int)wdt);
+ timer_setup(&wdt->soft_timer, bcm47xx_wdt_soft_timer_tick, 0);
} else {
wdt->wdd.ops = &bcm47xx_wdt_hard_ops;
}
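Where a driver used to kick its handler synchronously by calling it with the cookie, it now passes the embedded timer, exactly as the timer core would (see bcm47xx_wdt_soft_timer_tick(&wdt->soft_timer) above). A sketch of that soft-watchdog pattern, with demo_hw_ping as an invented helper:

struct demo_wdt {
	struct timer_list soft_timer;
	atomic_t soft_ticks;
};

static void demo_hw_ping(struct demo_wdt *wdt);	/* hypothetical */

static void demo_soft_tick(struct timer_list *t)
{
	struct demo_wdt *wdt = from_timer(wdt, t, soft_timer);

	if (!atomic_dec_and_test(&wdt->soft_ticks)) {
		demo_hw_ping(wdt);
		mod_timer(&wdt->soft_timer, jiffies + HZ);
	}
	/* else: heartbeat budget exhausted, let the hardware dog bite */
}

static void demo_soft_keepalive(struct demo_wdt *wdt)
{
	atomic_set(&wdt->soft_ticks, 30);
	demo_soft_tick(&wdt->soft_timer);	/* invoke as the core would */
}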
die(PFX " fire", regs);
}
-static void bcm63xx_timer_tick(unsigned long unused)
+static void bcm63xx_timer_tick(struct timer_list *unused)
{
if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) {
bcm63xx_wdt_hw_start();
int ret;
struct resource *r;
- setup_timer(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0L);
+ timer_setup(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
/* generic helper functions */
-static void cpu5wdt_trigger(unsigned long unused)
+static void cpu5wdt_trigger(struct timer_list *unused)
{
if (verbose > 2)
pr_debug("trigger at %i ticks\n", ticks);
init_completion(&cpu5wdt_device.stop);
cpu5wdt_device.queue = 0;
- setup_timer(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
+ timer_setup(&cpu5wdt_device.timer, cpu5wdt_trigger, 0);
cpu5wdt_device.default_ticks = ticks;
if (!request_region(port, CPU5WDT_EXTENT, PFX)) {
MODULE_PARM_DESC(action, "after watchdog resets, generate: "
"0 = RESET(*) 1 = SMI 2 = NMI 3 = SCI");
-static void zf_ping(unsigned long data);
+static void zf_ping(struct timer_list *unused);
static int zf_action = GEN_RESET;
static unsigned long zf_is_open;
}
-static void zf_ping(unsigned long data)
+static void zf_ping(struct timer_list *unused)
{
unsigned int ctrl_reg = 0;
unsigned long flags;
{0x0000, 0},
};
-static void mixcomwd_timerfun(unsigned long d);
+static void mixcomwd_timerfun(struct timer_list *unused);
static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */
return;
}
-static void mixcomwd_timerfun(unsigned long d)
+static void mixcomwd_timerfun(struct timer_list *unused)
{
mixcomwd_ping();
mod_timer(&mixcomwd_timer, jiffies + 5 * HZ);
spin_unlock(&ddata->lock);
}
-static void mpc8xxx_wdt_timer_ping(unsigned long arg)
+static void mpc8xxx_wdt_timer_ping(struct timer_list *t)
{
- struct mpc8xxx_wdt_ddata *ddata = (void *)arg;
+ struct mpc8xxx_wdt_ddata *ddata = from_timer(ddata, t, timer);
mpc8xxx_wdt_keepalive(ddata);
/* We're pinging it twice faster than needed, just to be sure. */
}
spin_lock_init(&ddata->lock);
- setup_timer(&ddata->timer, mpc8xxx_wdt_timer_ping,
- (unsigned long)ddata);
+ timer_setup(&ddata->timer, mpc8xxx_wdt_timer_ping, 0);
ddata->wdd.info = &mpc8xxx_wdt_info,
ddata->wdd.ops = &mpc8xxx_wdt_ops,
unsigned int gstate;
} mtx1_wdt_device;
-static void mtx1_wdt_trigger(unsigned long unused)
+static void mtx1_wdt_trigger(struct timer_list *unused)
{
spin_lock(&mtx1_wdt_device.lock);
if (mtx1_wdt_device.running)
init_completion(&mtx1_wdt_device.stop);
mtx1_wdt_device.queue = 0;
clear_bit(0, &mtx1_wdt_device.inuse);
- setup_timer(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0L);
+ timer_setup(&mtx1_wdt_device.timer, mtx1_wdt_trigger, 0);
mtx1_wdt_device.default_ticks = ticks;
ret = misc_register(&mtx1_wdt_misc);
return len;
}
-static void nuc900_wdt_timer_ping(unsigned long data)
+static void nuc900_wdt_timer_ping(struct timer_list *unused)
{
if (time_before(jiffies, nuc900_wdt->next_heartbeat)) {
nuc900_wdt_keepalive();
clk_enable(nuc900_wdt->wdt_clock);
- setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
+ timer_setup(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
ret = misc_register(&nuc900wdt_miscdev);
if (ret) {
pr_info("No previous trip detected - Cold boot or reset\n");
}
-static void pcwd_timer_ping(unsigned long data)
+static void pcwd_timer_ping(struct timer_list *unused)
{
int wdrst_stat;
/* clear the "card caused reboot" flag */
pcwd_clear_status();
- setup_timer(&pcwd_private.timer, pcwd_timer_ping, 0);
+ timer_setup(&pcwd_private.timer, pcwd_timer_ping, 0);
/* Disable the board */
pcwd_stop();
/*
* Timer tick
*/
-static void pikawdt_ping(unsigned long data)
+static void pikawdt_ping(struct timer_list *unused)
{
if (time_before(jiffies, pikawdt_private.next_heartbeat) ||
(!nowayout && !pikawdt_private.open)) {
iounmap(fpga);
- setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);
+ timer_setup(&pikawdt_private.timer, pikawdt_ping, 0);
ret = misc_register(&pikawdt_miscdev);
if (ret) {
/* generic helper functions */
-static void rdc321x_wdt_trigger(unsigned long unused)
+static void rdc321x_wdt_trigger(struct timer_list *unused)
{
unsigned long flags;
u32 val;
clear_bit(0, &rdc321x_wdt_device.inuse);
- setup_timer(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
+ timer_setup(&rdc321x_wdt_device.timer, rdc321x_wdt_trigger, 0);
rdc321x_wdt_device.default_ticks = ticks;
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
static __u16 __iomem *wdtmrctl;
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
return 0;
}
-static void sh_wdt_ping(unsigned long data)
+static void sh_wdt_ping(struct timer_list *t)
{
- struct sh_wdt *wdt = (struct sh_wdt *)data;
+ struct sh_wdt *wdt = from_timer(wdt, t, timer);
unsigned long flags;
spin_lock_irqsave(&wdt->lock, flags);
return rc;
}
- setup_timer(&wdt->timer, sh_wdt_ping, (unsigned long)wdt);
+ timer_setup(&wdt->timer, sh_wdt_ping, 0);
wdt->timer.expires = next_ping_period(clock_division_ratio);
dev_info(&pdev->dev, "initialized.\n");
static struct resource wdt_res;
static void __iomem *wdt_mem;
static unsigned int mmio;
-static void wdt_timer_tick(unsigned long data);
+static void wdt_timer_tick(struct timer_list *unused);
static DEFINE_TIMER(timer, wdt_timer_tick);
/* The timer that pings the watchdog */
static unsigned long next_heartbeat; /* the next_heartbeat for the timer */
* then the external/userspace heartbeat).
* 2) the watchdog timer has been stopped by userspace.
*/
-static void wdt_timer_tick(unsigned long data)
+static void wdt_timer_tick(struct timer_list *unused)
{
if (time_before(jiffies, next_heartbeat) ||
(!watchdog_active(&wdt_dev))) {
"Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-static void wdt_timer_ping(unsigned long);
+static void wdt_timer_ping(struct timer_list *);
static DEFINE_TIMER(timer, wdt_timer_ping);
static unsigned long next_heartbeat;
static unsigned long wdt_is_open;
* Whack the dog
*/
-static void wdt_timer_ping(unsigned long data)
+static void wdt_timer_ping(struct timer_list *unused)
{
/* If we got a heartbeat pulse within the WDT_US_INTERVAL
* we agree to ping the WDT
struct page *page;
};
static LIST_HEAD(deferred_list);
-static void gnttab_handle_deferred(unsigned long);
+static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
-static void gnttab_handle_deferred(unsigned long unused)
+static void gnttab_handle_deferred(struct timer_list *unused)
{
unsigned int nr = 10;
struct deferred_entry *first = NULL;
targets := $(patsubst $(obj)/%,%, \
$(shell find $(obj) -name \*.gen.S 2>/dev/null))
-# Without this, built-in.o won't be created when it's empty, and the
-# final vmlinux link will fail.
-obj- := dummy
if (v9ses->cache)
sb->s_bdi->ra_pages = (VM_MAX_READAHEAD * 1024)/PAGE_SIZE;
- sb->s_flags |= MS_ACTIVE | MS_DIRSYNC | MS_NOATIME;
+ sb->s_flags |= SB_ACTIVE | SB_DIRSYNC | SB_NOATIME;
if (!v9ses->cache)
- sb->s_flags |= MS_SYNCHRONOUS;
+ sb->s_flags |= SB_SYNCHRONOUS;
#ifdef CONFIG_9P_FS_POSIX_ACL
if ((v9ses->flags & V9FS_ACL_MASK) == V9FS_POSIX_ACL)
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
return 0;
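The MS_* to SB_* hunks here and below are a pure rename on the kernel side: code manipulating sb->s_flags now uses the SB_* constants, leaving MS_* as the userspace mount(2) ABI. A hypothetical fill_super sketch (demo_media_writable is an invented helper):

#include <linux/fs.h>

static bool demo_media_writable(struct super_block *sb);	/* hypothetical */

static int demo_fill_super(struct super_block *sb, void *data, int silent)
{
	sb->s_flags |= SB_NODIRATIME;		/* was MS_NODIRATIME */

	if (!demo_media_writable(sb))
		sb->s_flags |= SB_RDONLY;	/* was MS_RDONLY */

	return 0;
}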
static int adfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
return parse_options(sb, data);
}
struct inode *root;
int ret = -EINVAL;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
asb = kzalloc(sizeof(*asb), GFP_KERNEL);
if (!asb)
pr_crit("error (device %s): %s(): %pV\n", sb->s_id, function, &vaf);
if (!sb_rdonly(sb))
pr_warn("Remounting filesystem read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
va_end(args);
}
int i, res = 0;
struct affs_sb_info *sbi = AFFS_SB(sb);
- if (*flags & MS_RDONLY)
+ if (*flags & SB_RDONLY)
return 0;
if (!AFFS_ROOT_TAIL(sb, sbi->s_root_bh)->bm_flag) {
pr_notice("Bitmap invalid - mounting %s read only\n", sb->s_id);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
if (affs_checksum_block(sb, bh)) {
pr_warn("Bitmap %u invalid - mounting %s read only.\n",
bm->bm_key, sb->s_id);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
goto out;
}
pr_debug("read bitmap block %d: %d\n", blk, bm->bm_key);
sb->s_magic = AFFS_SUPER_MAGIC;
sb->s_op = &affs_sops;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
sbi = kzalloc(sizeof(struct affs_sb_info), GFP_KERNEL);
if (!sbi)
if ((chksum == FS_DCFFS || chksum == MUFS_DCFFS || chksum == FS_DCOFS
|| chksum == MUFS_DCOFS) && !sb_rdonly(sb)) {
pr_notice("Dircache FS - mounting %s read only\n", sb->s_id);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
switch (chksum) {
case MUFS_FS:
/* fall thru */
case FS_OFS:
affs_set_opt(sbi->s_flags, SF_OFS);
- sb->s_flags |= MS_NOEXEC;
+ sb->s_flags |= SB_NOEXEC;
break;
case MUFS_DCOFS:
case MUFS_INTLOFS:
case FS_INTLOFS:
affs_set_opt(sbi->s_flags, SF_INTL);
affs_set_opt(sbi->s_flags, SF_OFS);
- sb->s_flags |= MS_NOEXEC;
+ sb->s_flags |= SB_NOEXEC;
break;
default:
pr_err("Unknown filesystem on device %s: %08X\n",
sig, sig[3] + '0', blocksize);
}
- sb->s_flags |= MS_NODEV | MS_NOSUID;
+ sb->s_flags |= SB_NODEV | SB_NOSUID;
sbi->s_data_blksize = sb->s_blocksize;
if (affs_test_opt(sbi->s_flags, SF_OFS))
pr_debug("%s(flags=0x%x,opts=\"%s\")\n", __func__, *flags, data);
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
memcpy(volume, sbi->s_volume, 32);
if (!parse_options(data, &uid, &gid, &mode, &reserved, &root_block,
memcpy(sbi->s_volume, volume, 32);
spin_unlock(&sbi->symlink_lock);
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (*flags & MS_RDONLY)
+ if (*flags & SB_RDONLY)
affs_free_bitmap(sb);
else
res = affs_init_bitmap(sb, flags);
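Remount paths use the same constants on the *flags word the VFS passes in, and sb_rdonly() reads the superblock's current state so a no-op remount can bail early, as the affs hunk above does. Sketch (demo_apply_rw_change is an invented helper):

static int demo_apply_rw_change(struct super_block *sb, int flags);	/* hypothetical */

static int demo_remount(struct super_block *sb, int *flags, char *data)
{
	sync_filesystem(sb);
	*flags |= SB_NODIRATIME;	/* force the option, as affs/adfs do */

	/* RO/RW state unchanged? Nothing more to do. */
	if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
		return 0;

	return demo_apply_rw_change(sb, *flags);
}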
rcu_read_lock();
cell = afs_lookup_cell_rcu(net, name, namesz);
rcu_read_unlock();
- if (!IS_ERR(cell)) {
- if (excl) {
- afs_put_cell(net, cell);
- return ERR_PTR(-EEXIST);
- }
+ if (!IS_ERR(cell))
goto wait_for_cell;
- }
}
/* Assume we're probably going to create a cell and preallocate and
if (fc->ac.error < 0)
return;
+ d_drop(new_dentry);
+
inode = afs_iget(fc->vnode->vfs_inode.i_sb, fc->key,
newfid, newstatus, newcb, fc->cbi);
if (IS_ERR(inode)) {
return;
}
- d_instantiate(new_dentry, inode);
- if (d_unhashed(new_dentry))
- d_rehash(new_dentry);
+ d_add(new_dentry, inode);
}
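The directory hunk above tightens the dentry handling around AFS's creation RPC: the dentry is unhashed with d_drop() before the request is dispatched, and on success d_add() instantiates and rehashes it in one step, replacing the d_instantiate() plus conditional d_rehash() pair. In outline, under the same assumptions:

static void demo_create_finish(struct dentry *dentry, struct inode *inode)
{
	/* dentry was d_drop()'d before the RPC went out, so a failed or
	 * interrupted operation leaves no stale, hashed negative entry. */
	if (IS_ERR(inode))
		return;

	d_add(dentry, inode);	/* d_instantiate() + d_rehash() in one call */
}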
/*
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
+ } else {
+ goto error_key;
}
key_put(key);
struct afs_fs_cursor fc;
struct afs_file_status newstatus;
struct afs_callback newcb;
- struct afs_vnode *dvnode = dvnode = AFS_FS_I(dir);
+ struct afs_vnode *dvnode = AFS_FS_I(dir);
struct afs_fid newfid;
struct key *key;
int ret;
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
+ } else {
+ goto error_key;
}
key_put(key);
if (afs_begin_vnode_operation(&fc, dvnode, key)) {
if (mutex_lock_interruptible_nested(&vnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
- return -ERESTARTSYS;
+ goto error_key;
}
while (afs_select_fileserver(&fc)) {
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
+ } else {
+ goto error_key;
}
key_put(key);
ret = afs_end_vnode_operation(&fc);
if (ret < 0)
goto error_key;
+ } else {
+ goto error_key;
}
key_put(key);
if (orig_dvnode != new_dvnode) {
if (mutex_lock_interruptible_nested(&new_dvnode->io_lock, 1) < 0) {
afs_end_vnode_operation(&fc);
- return -ERESTARTSYS;
+ goto error_key;
}
}
while (afs_select_fileserver(&fc)) {
goto error_key;
}
- key_put(key);
- _leave(" = 0");
- return 0;
-
error_key:
key_put(key);
error:
- d_drop(new_dentry);
_leave(" = %d", ret);
return ret;
}
{
struct afs_vnode *vnode =
container_of(work, struct afs_vnode, lock_work.work);
- struct file_lock *fl;
+ struct file_lock *fl, *next;
afs_lock_type_t type;
struct key *key;
int ret;
spin_lock(&vnode->lock);
- if (test_bit(AFS_VNODE_UNLOCKING, &vnode->flags)) {
+again:
+ _debug("wstate %u for %p", vnode->lock_state, vnode);
+ switch (vnode->lock_state) {
+ case AFS_VNODE_LOCK_NEED_UNLOCK:
_debug("unlock");
+ vnode->lock_state = AFS_VNODE_LOCK_UNLOCKING;
spin_unlock(&vnode->lock);
/* attempt to release the server lock; if it fails, we just
- * wait 5 minutes and it'll time out anyway */
- ret = afs_release_lock(vnode, vnode->unlock_key);
+ * wait 5 minutes and it'll expire anyway */
+ ret = afs_release_lock(vnode, vnode->lock_key);
if (ret < 0)
printk(KERN_WARNING "AFS:"
" Failed to release lock on {%x:%x} error %d\n",
vnode->fid.vid, vnode->fid.vnode, ret);
spin_lock(&vnode->lock);
- key_put(vnode->unlock_key);
- vnode->unlock_key = NULL;
- clear_bit(AFS_VNODE_UNLOCKING, &vnode->flags);
- }
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
+ vnode->lock_state = AFS_VNODE_LOCK_NONE;
+
+ if (list_empty(&vnode->pending_locks)) {
+ spin_unlock(&vnode->lock);
+ return;
+ }
+
+ /* The new front of the queue now owns the state variables. */
+ next = list_entry(vnode->pending_locks.next,
+ struct file_lock, fl_u.afs.link);
+ vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+ vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+ goto again;
- /* if we've got a lock, then it must be time to extend that lock as AFS
- * locks time out after 5 minutes */
- if (!list_empty(&vnode->granted_locks)) {
+ /* If we've already got a lock, then it must be time to extend that
+ * lock as AFS locks time out after 5 minutes.
+ */
+ case AFS_VNODE_LOCK_GRANTED:
_debug("extend");
- if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
- BUG();
- fl = list_entry(vnode->granted_locks.next,
- struct file_lock, fl_u.afs.link);
- key = key_get(afs_file_key(fl->fl_file));
+ ASSERT(!list_empty(&vnode->granted_locks));
+
+ key = key_get(vnode->lock_key);
+ vnode->lock_state = AFS_VNODE_LOCK_EXTENDING;
spin_unlock(&vnode->lock);
- ret = afs_extend_lock(vnode, key);
- clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+ ret = afs_extend_lock(vnode, key); /* RPC */
key_put(key);
- switch (ret) {
- case 0:
+
+ if (ret < 0)
+ pr_warning("AFS: Failed to extend lock on {%x:%x} error %d\n",
+ vnode->fid.vid, vnode->fid.vnode, ret);
+
+ spin_lock(&vnode->lock);
+
+ if (vnode->lock_state != AFS_VNODE_LOCK_EXTENDING)
+ goto again;
+ vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+
+ if (ret == 0)
afs_schedule_lock_extension(vnode);
- break;
- default:
- /* ummm... we failed to extend the lock - retry
- * extension shortly */
- printk(KERN_WARNING "AFS:"
- " Failed to extend lock on {%x:%x} error %d\n",
- vnode->fid.vid, vnode->fid.vnode, ret);
+ else
queue_delayed_work(afs_lock_manager, &vnode->lock_work,
HZ * 10);
- break;
- }
- _leave(" [extend]");
+ spin_unlock(&vnode->lock);
+ _leave(" [ext]");
return;
- }
- /* if we don't have a granted lock, then we must've been called back by
- * the server, and so if might be possible to get a lock we're
- * currently waiting for */
- if (!list_empty(&vnode->pending_locks)) {
+ /* If we don't have a granted lock, then we must've been called
+ * back by the server, and so it might be possible to get a
+ * lock we're currently waiting for.
+ */
+ case AFS_VNODE_LOCK_WAITING_FOR_CB:
_debug("get");
- if (test_and_set_bit(AFS_VNODE_LOCKING, &vnode->flags))
- BUG();
- fl = list_entry(vnode->pending_locks.next,
- struct file_lock, fl_u.afs.link);
- key = key_get(afs_file_key(fl->fl_file));
- type = (fl->fl_type == F_RDLCK) ?
- AFS_LOCK_READ : AFS_LOCK_WRITE;
+ key = key_get(vnode->lock_key);
+ type = vnode->lock_type;
+ vnode->lock_state = AFS_VNODE_LOCK_SETTING;
spin_unlock(&vnode->lock);
- ret = afs_set_lock(vnode, key, type);
- clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
+ ret = afs_set_lock(vnode, key, type); /* RPC */
+ key_put(key);
+
+ spin_lock(&vnode->lock);
switch (ret) {
case -EWOULDBLOCK:
_debug("blocked");
break;
case 0:
_debug("acquired");
- if (type == AFS_LOCK_READ)
- set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
- else
- set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
- ret = AFS_LOCK_GRANTED;
+ vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
+ /* Fall through */
default:
- spin_lock(&vnode->lock);
- /* the pending lock may have been withdrawn due to a
- * signal */
- if (list_entry(vnode->pending_locks.next,
- struct file_lock, fl_u.afs.link) == fl) {
- fl->fl_u.afs.state = ret;
- if (ret == AFS_LOCK_GRANTED)
- afs_grant_locks(vnode, fl);
- else
- list_del_init(&fl->fl_u.afs.link);
- wake_up(&fl->fl_wait);
- spin_unlock(&vnode->lock);
- } else {
+ /* Pass the lock or the error onto the first locker in
+ * the list - if they're looking for this type of lock.
+ * If they're not, we assume that whoever asked for it
+ * took a signal.
+ */
+ if (list_empty(&vnode->pending_locks)) {
_debug("withdrawn");
- clear_bit(AFS_VNODE_READLOCKED, &vnode->flags);
- clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
- spin_unlock(&vnode->lock);
- afs_release_lock(vnode, key);
- if (!list_empty(&vnode->pending_locks))
- afs_lock_may_be_available(vnode);
+ vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+ goto again;
}
- break;
+
+ fl = list_entry(vnode->pending_locks.next,
+ struct file_lock, fl_u.afs.link);
+ type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+ if (vnode->lock_type != type) {
+ _debug("changed");
+ vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+ goto again;
+ }
+
+ fl->fl_u.afs.state = ret;
+ if (ret == 0)
+ afs_grant_locks(vnode, fl);
+ else
+ list_del_init(&fl->fl_u.afs.link);
+ wake_up(&fl->fl_wait);
+ spin_unlock(&vnode->lock);
+ _leave(" [granted]");
+ return;
}
- key_put(key);
- _leave(" [pend]");
+
+ default:
+ /* Looks like a lock request was withdrawn. */
+ spin_unlock(&vnode->lock);
+ _leave(" [no]");
return;
}
-
- /* looks like the lock request was withdrawn on a signal */
- spin_unlock(&vnode->lock);
- _leave(" [no locks]");
}
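The rewritten lock manager replaces the old READLOCKED/WRITELOCKED/LOCKING/UNLOCKING bit flags with an explicit per-vnode state machine, where lock_key, lock_type and lock_state are owned by the lock at the front of the queue. The states the hunks above cycle through, collected as a sketch (the names are taken from the diff; the comments are one reading of it):

enum {
	AFS_VNODE_LOCK_NONE,		/* no server lock, nothing queued */
	AFS_VNODE_LOCK_SETTING,		/* SetLock RPC in flight */
	AFS_VNODE_LOCK_WAITING_FOR_CB,	/* server said -EWOULDBLOCK; wait for
					 * it to break our callback */
	AFS_VNODE_LOCK_GRANTED,		/* server lock held by this client */
	AFS_VNODE_LOCK_EXTENDING,	/* ExtendLock RPC in flight (locks
					 * expire after 5 minutes) */
	AFS_VNODE_LOCK_NEED_UNLOCK,	/* last local holder gone; release */
	AFS_VNODE_LOCK_UNLOCKING,	/* ReleaseLock RPC in flight */
};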
/*
* AF_RXRPC
* - the caller must hold the vnode lock
*/
-static void afs_defer_unlock(struct afs_vnode *vnode, struct key *key)
+static void afs_defer_unlock(struct afs_vnode *vnode)
{
- cancel_delayed_work(&vnode->lock_work);
- if (!test_and_clear_bit(AFS_VNODE_READLOCKED, &vnode->flags) &&
- !test_and_clear_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
- BUG();
- if (test_and_set_bit(AFS_VNODE_UNLOCKING, &vnode->flags))
- BUG();
- vnode->unlock_key = key_get(key);
+ _enter("");
+
+ if (vnode->lock_state == AFS_VNODE_LOCK_GRANTED ||
+ vnode->lock_state == AFS_VNODE_LOCK_EXTENDING) {
+ cancel_delayed_work(&vnode->lock_work);
+
+ vnode->lock_state = AFS_VNODE_LOCK_NEED_UNLOCK;
+ afs_lock_may_be_available(vnode);
+ }
+}
+
+/*
+ * Check that our view of the file metadata is up to date and check to see
+ * whether we think that we have a locking permit.
+ */
+static int afs_do_setlk_check(struct afs_vnode *vnode, struct key *key,
+ afs_lock_type_t type, bool can_sleep)
+{
+ afs_access_t access;
+ int ret;
+
+ /* Make sure we've got a callback on this file and that our view of the
+ * data version is up to date.
+ */
+ ret = afs_validate(vnode, key);
+ if (ret < 0)
+ return ret;
+
+ /* Check the permission set to see if we're actually going to be
+ * allowed to get a lock on this file.
+ */
+ ret = afs_check_permit(vnode, key, &access);
+ if (ret < 0)
+ return ret;
+
+ /* As a rough estimate, you need LOCK, WRITE or INSERT perm to
+ * read-lock a file and WRITE or INSERT perm to write-lock a file.
+ *
+ * We can't rely on the server to do this for us since if we want to
+ * share a read lock that we already have, we won't go the server.
+ */
+ if (type == AFS_LOCK_READ) {
+ if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE | AFS_ACE_LOCK)))
+ return -EACCES;
+ if (vnode->status.lock_count == -1 && !can_sleep)
+ return -EAGAIN; /* Write locked */
+ } else {
+ if (!(access & (AFS_ACE_INSERT | AFS_ACE_WRITE)))
+ return -EACCES;
+ if (vnode->status.lock_count != 0 && !can_sleep)
+ return -EAGAIN; /* Locked */
+ }
+
+ return 0;
+}
+
+/*
+ * Remove the front runner from the pending queue.
+ * - The caller must hold vnode->lock.
+ */
+static void afs_dequeue_lock(struct afs_vnode *vnode, struct file_lock *fl)
+{
+ struct file_lock *next;
+
+ _enter("");
+
+ /* ->lock_type, ->lock_key and ->lock_state only belong to this
+ * file_lock if we're at the front of the pending queue or if we have
+ * the lock granted or if the lock_state is NEED_UNLOCK or UNLOCKING.
+ */
+ if (vnode->granted_locks.next == &fl->fl_u.afs.link &&
+ vnode->granted_locks.prev == &fl->fl_u.afs.link) {
+ list_del_init(&fl->fl_u.afs.link);
+ afs_defer_unlock(vnode);
+ return;
+ }
+
+ if (!list_empty(&vnode->granted_locks) ||
+ vnode->pending_locks.next != &fl->fl_u.afs.link) {
+ list_del_init(&fl->fl_u.afs.link);
+ return;
+ }
+
+ list_del_init(&fl->fl_u.afs.link);
+ key_put(vnode->lock_key);
+ vnode->lock_key = NULL;
+ vnode->lock_state = AFS_VNODE_LOCK_NONE;
+
+ if (list_empty(&vnode->pending_locks))
+ return;
+
+ /* The new front of the queue now owns the state variables. */
+ next = list_entry(vnode->pending_locks.next,
+ struct file_lock, fl_u.afs.link);
+ vnode->lock_key = afs_file_key(next->fl_file);
+ vnode->lock_type = (next->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
+ vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
afs_lock_may_be_available(vnode);
}
*/
static int afs_do_setlk(struct file *file, struct file_lock *fl)
{
- struct inode *inode = file_inode(file);
+ struct inode *inode = locks_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
afs_lock_type_t type;
struct key *key = afs_file_key(file);
type = (fl->fl_type == F_RDLCK) ? AFS_LOCK_READ : AFS_LOCK_WRITE;
- spin_lock(&inode->i_lock);
-
- /* make sure we've got a callback on this file and that our view of the
- * data version is up to date */
- ret = afs_validate(vnode, key);
+ ret = afs_do_setlk_check(vnode, key, type, fl->fl_flags & FL_SLEEP);
if (ret < 0)
- goto error;
-
- if (vnode->status.lock_count != 0 && !(fl->fl_flags & FL_SLEEP)) {
- ret = -EAGAIN;
- goto error;
- }
+ return ret;
spin_lock(&vnode->lock);
- /* if we've already got a readlock on the server then we can instantly
+ /* If we've already got a readlock on the server then we instantly
* grant another readlock, irrespective of whether there are any
- * pending writelocks */
+ * pending writelocks.
+ */
if (type == AFS_LOCK_READ &&
- vnode->flags & (1 << AFS_VNODE_READLOCKED)) {
+ vnode->lock_state == AFS_VNODE_LOCK_GRANTED &&
+ vnode->lock_type == AFS_LOCK_READ) {
_debug("instant readlock");
- ASSERTCMP(vnode->flags &
- ((1 << AFS_VNODE_LOCKING) |
- (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
ASSERT(!list_empty(&vnode->granted_locks));
- goto sharing_existing_lock;
+ goto share_existing_lock;
}
- /* if there's no-one else with a lock on this vnode, then we need to
- * ask the server for a lock */
- if (list_empty(&vnode->pending_locks) &&
- list_empty(&vnode->granted_locks)) {
- _debug("not locked");
- ASSERTCMP(vnode->flags &
- ((1 << AFS_VNODE_LOCKING) |
- (1 << AFS_VNODE_READLOCKED) |
- (1 << AFS_VNODE_WRITELOCKED)), ==, 0);
- list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
- set_bit(AFS_VNODE_LOCKING, &vnode->flags);
- spin_unlock(&vnode->lock);
+ list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
- ret = afs_set_lock(vnode, key, type);
- clear_bit(AFS_VNODE_LOCKING, &vnode->flags);
- switch (ret) {
- case 0:
- _debug("acquired");
- goto acquired_server_lock;
- case -EWOULDBLOCK:
- _debug("would block");
- spin_lock(&vnode->lock);
- ASSERT(list_empty(&vnode->granted_locks));
- ASSERTCMP(vnode->pending_locks.next, ==,
- &fl->fl_u.afs.link);
- goto wait;
- default:
- spin_lock(&vnode->lock);
- list_del_init(&fl->fl_u.afs.link);
- spin_unlock(&vnode->lock);
- goto error;
- }
- }
+ if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
+ goto need_to_wait;
- /* otherwise, we need to wait for a local lock to become available */
- _debug("wait local");
- list_add_tail(&fl->fl_u.afs.link, &vnode->pending_locks);
-wait:
- if (!(fl->fl_flags & FL_SLEEP)) {
- _debug("noblock");
- ret = -EAGAIN;
- goto abort_attempt;
- }
+ /* We don't have a lock on this vnode and we aren't currently waiting
+ * for one either, so ask the server for a lock.
+ *
+ * Note that we need to be careful if we get interrupted by a signal
+ * after dispatching the request as we may still get the lock, even
+ * though we don't wait for the reply (it's not too bad a problem - the
+ * lock will expire in 10 mins anyway).
+ */
+ _debug("not locked");
+ vnode->lock_key = key_get(key);
+ vnode->lock_type = type;
+ vnode->lock_state = AFS_VNODE_LOCK_SETTING;
spin_unlock(&vnode->lock);
- /* now we need to sleep and wait for the lock manager thread to get the
- * lock from the server */
- _debug("sleep");
- ret = wait_event_interruptible(fl->fl_wait,
- fl->fl_u.afs.state <= AFS_LOCK_GRANTED);
- if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
- ret = fl->fl_u.afs.state;
- if (ret < 0)
- goto error;
- spin_lock(&vnode->lock);
- goto given_lock;
- }
-
- /* we were interrupted, but someone may still be in the throes of
- * giving us the lock */
- _debug("intr");
- ASSERTCMP(ret, ==, -ERESTARTSYS);
+ ret = afs_set_lock(vnode, key, type); /* RPC */
spin_lock(&vnode->lock);
- if (fl->fl_u.afs.state <= AFS_LOCK_GRANTED) {
- ret = fl->fl_u.afs.state;
- if (ret < 0) {
- spin_unlock(&vnode->lock);
- goto error;
- }
- goto given_lock;
- }
+ switch (ret) {
+ default:
+ goto abort_attempt;
-abort_attempt:
- /* we aren't going to get the lock, either because we're unwilling to
- * wait, or because some signal happened */
- _debug("abort");
- if (list_empty(&vnode->granted_locks) &&
- vnode->pending_locks.next == &fl->fl_u.afs.link) {
- if (vnode->pending_locks.prev != &fl->fl_u.afs.link) {
- /* kick the next pending lock into having a go */
- list_del_init(&fl->fl_u.afs.link);
- afs_lock_may_be_available(vnode);
- }
- } else {
- list_del_init(&fl->fl_u.afs.link);
+ case -EWOULDBLOCK:
+ /* The server doesn't have a lock-waiting queue, so the client
+ * will have to retry. The server will break the outstanding
+ * callbacks on a file when a lock is released.
+ */
+ _debug("would block");
+ ASSERT(list_empty(&vnode->granted_locks));
+ ASSERTCMP(vnode->pending_locks.next, ==, &fl->fl_u.afs.link);
+ vnode->lock_state = AFS_VNODE_LOCK_WAITING_FOR_CB;
+ goto need_to_wait;
+
+ case 0:
+ _debug("acquired");
+ break;
}
- spin_unlock(&vnode->lock);
- goto error;
-acquired_server_lock:
/* we've acquired a server lock, but it needs to be renewed after 5
* mins */
- spin_lock(&vnode->lock);
+ vnode->lock_state = AFS_VNODE_LOCK_GRANTED;
afs_schedule_lock_extension(vnode);
- if (type == AFS_LOCK_READ)
- set_bit(AFS_VNODE_READLOCKED, &vnode->flags);
- else
- set_bit(AFS_VNODE_WRITELOCKED, &vnode->flags);
-sharing_existing_lock:
+
+share_existing_lock:
/* the lock has been granted as far as we're concerned... */
fl->fl_u.afs.state = AFS_LOCK_GRANTED;
list_move_tail(&fl->fl_u.afs.link, &vnode->granted_locks);
+
given_lock:
/* ... but we do still need to get the VFS's blessing */
- ASSERT(!(vnode->flags & (1 << AFS_VNODE_LOCKING)));
- ASSERT((vnode->flags & ((1 << AFS_VNODE_READLOCKED) |
- (1 << AFS_VNODE_WRITELOCKED))) != 0);
+ spin_unlock(&vnode->lock);
+
ret = posix_lock_file(file, fl, NULL);
if (ret < 0)
goto vfs_rejected_lock;
- spin_unlock(&vnode->lock);
- /* again, make sure we've got a callback on this file and, again, make
+ /* Again, make sure we've got a callback on this file and, again, make
* sure that our view of the data version is up to date (we ignore
- * errors incurred here and deal with the consequences elsewhere) */
+ * errors incurred here and deal with the consequences elsewhere).
+ */
afs_validate(vnode, key);
+ _leave(" = 0");
+ return 0;
-error:
- spin_unlock(&inode->i_lock);
+need_to_wait:
+ /* We're going to have to wait. Either this client doesn't have a lock
+ * on the server yet and we need to wait for a callback to occur, or
+ * the client does have a lock on the server, but it belongs to some
+ * other process(es) and is incompatible with the lock we want.
+ */
+ ret = -EAGAIN;
+ if (fl->fl_flags & FL_SLEEP) {
+ spin_unlock(&vnode->lock);
+
+ _debug("sleep");
+ ret = wait_event_interruptible(fl->fl_wait,
+ fl->fl_u.afs.state != AFS_LOCK_PENDING);
+
+ spin_lock(&vnode->lock);
+ }
+
+ if (fl->fl_u.afs.state == AFS_LOCK_GRANTED)
+ goto given_lock;
+ if (fl->fl_u.afs.state < 0)
+ ret = fl->fl_u.afs.state;
+
+abort_attempt:
+	/* We aren't going to get the lock, either because we're unwilling to
+	 * wait or because a signal happened. */
+ _debug("abort");
+ afs_dequeue_lock(vnode, fl);
+
+error_unlock:
+ spin_unlock(&vnode->lock);
_leave(" = %d", ret);
return ret;
vfs_rejected_lock:
- /* the VFS rejected the lock we just obtained, so we have to discard
- * what we just got */
+ /* The VFS rejected the lock we just obtained, so we have to discard
+ * what we just got. We defer this to the lock manager work item to
+ * deal with.
+ */
_debug("vfs refused %d", ret);
+ spin_lock(&vnode->lock);
list_del_init(&fl->fl_u.afs.link);
if (list_empty(&vnode->granted_locks))
- afs_defer_unlock(vnode, key);
- goto abort_attempt;
+ afs_defer_unlock(vnode);
+ goto error_unlock;
}
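Every path through the rewritten function above begins with an ordinary
POSIX whole-file lock request (fl_start 0, fl_end OFFSET_MAX). A minimal
userspace sketch, not part of the patch, that drives the non-blocking
F_SETLK path; F_SETLKW would instead take the need_to_wait branch, since
it sets FL_SLEEP::

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        struct flock fl = {
            .l_type   = F_RDLCK,   /* maps to AFS_LOCK_READ on the server */
            .l_whence = SEEK_SET,
            .l_start  = 0,
            .l_len    = 0,         /* 0 = whole file, the only span AFS supports */
        };
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }
        /* F_SETLK fails with EAGAIN rather than sleeping (FL_SLEEP unset). */
        if (fcntl(fd, F_SETLK, &fl) < 0)
            perror("fcntl(F_SETLK)");
        else
            puts("read lock granted");
        pause();  /* hold the lock until interrupted */
        return 0;
    }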
/*
*/
static int afs_do_unlk(struct file *file, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
- struct key *key = afs_file_key(file);
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
int ret;
_enter("{%x:%u},%u", vnode->fid.vid, vnode->fid.vnode, fl->fl_type);
+ /* Flush all pending writes before doing anything with locks. */
+ vfs_fsync(file, 0);
+
/* only whole-file unlocks are supported */
if (fl->fl_start != 0 || fl->fl_end != OFFSET_MAX)
return -EINVAL;
- fl->fl_ops = &afs_lock_ops;
- INIT_LIST_HEAD(&fl->fl_u.afs.link);
- fl->fl_u.afs.state = AFS_LOCK_PENDING;
-
- spin_lock(&vnode->lock);
ret = posix_lock_file(file, fl, NULL);
- if (ret < 0) {
- spin_unlock(&vnode->lock);
- _leave(" = %d [vfs]", ret);
- return ret;
- }
-
- /* discard the server lock only if all granted locks are gone */
- if (list_empty(&vnode->granted_locks))
- afs_defer_unlock(vnode, key);
- spin_unlock(&vnode->lock);
- _leave(" = 0");
- return 0;
+ _leave(" = %d [%u]", ret, vnode->lock_state);
+ return ret;
}
/*
*/
static int afs_do_getlk(struct file *file, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file->f_mapping->host);
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
struct key *key = afs_file_key(file);
int ret, lock_count;
fl->fl_type = F_UNLCK;
- inode_lock(&vnode->vfs_inode);
-
/* check local lock records first */
- ret = 0;
posix_test_lock(file, fl);
if (fl->fl_type == F_UNLCK) {
/* no local locks; consult the server */
ret = afs_fetch_status(vnode, key);
if (ret < 0)
goto error;
- lock_count = vnode->status.lock_count;
- if (lock_count) {
- if (lock_count > 0)
- fl->fl_type = F_RDLCK;
- else
- fl->fl_type = F_WRLCK;
- fl->fl_start = 0;
- fl->fl_end = OFFSET_MAX;
- }
+
+ lock_count = READ_ONCE(vnode->status.lock_count);
+ if (lock_count > 0)
+ fl->fl_type = F_RDLCK;
+ else
+ fl->fl_type = F_WRLCK;
+ fl->fl_start = 0;
+ fl->fl_end = OFFSET_MAX;
}
+ ret = 0;
error:
- inode_unlock(&vnode->vfs_inode);
_leave(" = %d [%hd]", ret, fl->fl_type);
return ret;
}
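afs_do_getlk() infers the remote lock mode from the server's lock_count
field: a positive count means that many shared (read) locks, a negative
value a write lock, zero no lock at all. A hedged, standalone sketch of
that decoding rule (afs_lock_count_to_str is a hypothetical helper, not
in the patch; note the code above only distinguishes the positive case
once it knows any lock is held)::

    #include <stdio.h>

    /* Decode an AFS-style lock count: > 0 means N read locks, < 0 a
     * write lock, 0 unlocked.  Mirrors the F_RDLCK/F_WRLCK choice above. */
    static const char *afs_lock_count_to_str(long lock_count)
    {
        if (lock_count > 0)
            return "read-locked";
        if (lock_count < 0)
            return "write-locked";
        return "unlocked";
    }

    int main(void)
    {
        long samples[] = { 0, 1, 3, -1 };
        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            printf("%ld => %s\n", samples[i],
                   afs_lock_count_to_str(samples[i]));
        return 0;
    }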
*/
int afs_lock(struct file *file, int cmd, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
_enter("{%x:%u},%d,{t=%x,fl=%x,r=%Ld:%Ld}",
vnode->fid.vid, vnode->fid.vnode, cmd,
*/
int afs_flock(struct file *file, int cmd, struct file_lock *fl)
{
- struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(file));
_enter("{%x:%u},%d,{t=%x,fl=%x}",
vnode->fid.vid, vnode->fid.vnode, cmd,
*/
static void afs_fl_copy_lock(struct file_lock *new, struct file_lock *fl)
{
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+
_enter("");
+ spin_lock(&vnode->lock);
list_add(&new->fl_u.afs.link, &fl->fl_u.afs.link);
+ spin_unlock(&vnode->lock);
}
/*
*/
static void afs_fl_release_private(struct file_lock *fl)
{
+ struct afs_vnode *vnode = AFS_FS_I(locks_inode(fl->fl_file));
+
_enter("");
- list_del_init(&fl->fl_u.afs.link);
+ spin_lock(&vnode->lock);
+ afs_dequeue_lock(vnode, fl);
+ _debug("state %u for %p", vnode->lock_state, vnode);
+ spin_unlock(&vnode->lock);
}
u8 name[AFS_MAXVOLNAME + 1]; /* NUL-padded volume name */
};
+enum afs_lock_state {
+ AFS_VNODE_LOCK_NONE, /* The vnode has no lock on the server */
+ AFS_VNODE_LOCK_WAITING_FOR_CB, /* We're waiting for the server to break the callback */
+ AFS_VNODE_LOCK_SETTING, /* We're asking the server for a lock */
+ AFS_VNODE_LOCK_GRANTED, /* We have a lock on the server */
+ AFS_VNODE_LOCK_EXTENDING, /* We're extending a lock on the server */
+ AFS_VNODE_LOCK_NEED_UNLOCK, /* We need to unlock on the server */
+ AFS_VNODE_LOCK_UNLOCKING, /* We're telling the server to unlock */
+};
+
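When tracing the new lock state machine it helps to render states by
name. A hypothetical, userspace-compilable lookup table that mirrors the
enum above (the patch itself adds no such table)::

    #include <stdio.h>

    enum afs_lock_state {               /* mirrors the kernel enum above */
        AFS_VNODE_LOCK_NONE,
        AFS_VNODE_LOCK_WAITING_FOR_CB,
        AFS_VNODE_LOCK_SETTING,
        AFS_VNODE_LOCK_GRANTED,
        AFS_VNODE_LOCK_EXTENDING,
        AFS_VNODE_LOCK_NEED_UNLOCK,
        AFS_VNODE_LOCK_UNLOCKING,
    };

    static const char * const afs_lock_state_names[] = {
        [AFS_VNODE_LOCK_NONE]           = "none",
        [AFS_VNODE_LOCK_WAITING_FOR_CB] = "waiting-for-cb",
        [AFS_VNODE_LOCK_SETTING]        = "setting",
        [AFS_VNODE_LOCK_GRANTED]        = "granted",
        [AFS_VNODE_LOCK_EXTENDING]      = "extending",
        [AFS_VNODE_LOCK_NEED_UNLOCK]    = "need-unlock",
        [AFS_VNODE_LOCK_UNLOCKING]      = "unlocking",
    };

    int main(void)
    {
        printf("%s\n", afs_lock_state_names[AFS_VNODE_LOCK_GRANTED]);
        return 0;
    }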
/*
* AFS inode private data
*/
#define AFS_VNODE_ZAP_DATA 3 /* set if vnode's data should be invalidated */
#define AFS_VNODE_DELETED 4 /* set if vnode deleted on server */
#define AFS_VNODE_MOUNTPOINT 5 /* set if vnode is a mountpoint symlink */
-#define AFS_VNODE_LOCKING 6 /* set if waiting for lock on vnode */
-#define AFS_VNODE_READLOCKED 7 /* set if vnode is read-locked on the server */
-#define AFS_VNODE_WRITELOCKED 8 /* set if vnode is write-locked on the server */
-#define AFS_VNODE_UNLOCKING 9 /* set if vnode is being unlocked on the server */
-#define AFS_VNODE_AUTOCELL 10 /* set if Vnode is an auto mount point */
-#define AFS_VNODE_PSEUDODIR 11 /* set if Vnode is a pseudo directory */
+#define AFS_VNODE_AUTOCELL 6 /* set if Vnode is an auto mount point */
+#define AFS_VNODE_PSEUDODIR 7 /* set if Vnode is a pseudo directory */
struct list_head wb_keys; /* List of keys available for writeback */
struct list_head pending_locks; /* locks waiting to be granted */
struct list_head granted_locks; /* locks granted on this file */
struct delayed_work lock_work; /* work to be done in locking */
- struct key *unlock_key; /* key to be used in unlocking */
+ struct key *lock_key; /* Key to be used in lock ops */
+ enum afs_lock_state lock_state : 8;
+ afs_lock_type_t lock_type : 8;
/* outstanding callback notification on this file */
struct afs_cb_interest *cb_interest; /* Server on which this resides */
extern void afs_cache_permit(struct afs_vnode *, struct key *, unsigned int);
extern void afs_zap_permits(struct rcu_head *);
extern struct key *afs_request_key(struct afs_cell *);
+extern int afs_check_permit(struct afs_vnode *, struct key *, afs_access_t *);
extern int afs_permission(struct inode *, int);
extern void __exit afs_clean_up_permit_cache(void);
return false;
}
- if (test_bit(AFS_VNODE_READLOCKED, &vnode->flags) ||
- test_bit(AFS_VNODE_WRITELOCKED, &vnode->flags))
+ if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
fc->flags |= AFS_FS_CURSOR_CUR_ONLY;
return true;
}
case VSALVAGING: m = "being salvaged"; break;
default: m = "busy"; break;
}
-
+
pr_notice("kAFS: Volume %u '%s' is %s\n", volume->vid, volume->name, m);
}
_enter("");
- if (!cbi) {
- fc->ac.error = -ESTALE;
+ switch (fc->ac.error) {
+ case SHRT_MAX:
+ if (!cbi) {
+ fc->ac.error = -ESTALE;
+ fc->flags |= AFS_FS_CURSOR_STOP;
+ return false;
+ }
+
+ fc->cbi = afs_get_cb_interest(vnode->cb_interest);
+
+ read_lock(&cbi->server->fs_lock);
+ alist = rcu_dereference_protected(cbi->server->addresses,
+ lockdep_is_held(&cbi->server->fs_lock));
+ afs_get_addrlist(alist);
+ read_unlock(&cbi->server->fs_lock);
+ if (!alist) {
+ fc->ac.error = -ESTALE;
+ fc->flags |= AFS_FS_CURSOR_STOP;
+ return false;
+ }
+
+ fc->ac.alist = alist;
+ fc->ac.addr = NULL;
+ fc->ac.start = READ_ONCE(alist->index);
+ fc->ac.index = fc->ac.start;
+ fc->ac.error = 0;
+ fc->ac.begun = false;
+ goto iterate_address;
+
+ case 0:
+ default:
+ /* Success or local failure. Stop. */
fc->flags |= AFS_FS_CURSOR_STOP;
+ _leave(" = f [okay/local %d]", fc->ac.error);
return false;
- }
- read_lock(&cbi->server->fs_lock);
- alist = afs_get_addrlist(cbi->server->addresses);
- read_unlock(&cbi->server->fs_lock);
- if (!alist) {
- fc->ac.error = -ESTALE;
+ case -ECONNABORTED:
fc->flags |= AFS_FS_CURSOR_STOP;
+ _leave(" = f [abort]");
return false;
+
+ case -ENETUNREACH:
+ case -EHOSTUNREACH:
+ case -ECONNREFUSED:
+ case -ETIMEDOUT:
+ case -ETIME:
+ _debug("no conn");
+ goto iterate_address;
}
- fc->ac.alist = alist;
- fc->ac.error = 0;
- return true;
+iterate_address:
+ /* Iterate over the current server's address list to try and find an
+ * address on which it will respond to us.
+ */
+ if (afs_iterate_addresses(&fc->ac)) {
+ _leave(" = t");
+ return true;
+ }
+
+ afs_end_cursor(&fc->ac);
+ return false;
}
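The cursor logic above separates hard failures (stop) from transport
errors (rotate to the next address). A self-contained sketch of the same
rotate-and-retry shape over an address list, assuming the caller
supplies a probe function; all names here are illustrative, not kernel
API::

    #include <stdbool.h>
    #include <stddef.h>

    struct addr_list {
        size_t nr;          /* number of addresses */
        size_t start;       /* preferred starting index, like alist->index */
        const char **addrs;
    };

    /* Try each address at most once, beginning at ->start and wrapping,
     * until probe() succeeds; mirrors the iterate_address loop above. */
    static const char *pick_responding_addr(const struct addr_list *al,
                                            bool (*probe)(const char *addr))
    {
        for (size_t n = 0; n < al->nr; n++) {
            size_t i = (al->start + n) % al->nr;
            if (probe(al->addrs[i]))
                return al->addrs[i];
        }
        return NULL;  /* caller ends the cursor, as afs_end_cursor() does */
    }

    static bool probe_stub(const char *addr) { return addr[0] == '2'; }

    int main(void)
    {
        const char *addrs[] = { "198.51.100.1", "203.0.113.7" };
        struct addr_list al = { .nr = 2, .start = 1, .addrs = addrs };
        return pick_responding_addr(&al, probe_stub) ? 0 : 1;
    }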
/*
* permitted to be accessed with this authorisation, and if so, what access it
* is granted
*/
-static int afs_check_permit(struct afs_vnode *vnode, struct key *key,
- afs_access_t *_access)
+int afs_check_permit(struct afs_vnode *vnode, struct key *key,
+ afs_access_t *_access)
{
struct afs_permits *permits;
bool valid = false;
{
int i;
- if (refcount_dec_and_test(&slist->usage)) {
+ if (slist && refcount_dec_and_test(&slist->usage)) {
for (i = 0; i < slist->nr_servers; i++) {
afs_put_cb_interest(net, slist->servers[i].cb_interest);
afs_put_server(net, slist->servers[i].server);
if (ret < 0)
goto error_sb;
as = NULL;
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
} else {
_debug("reuse");
- ASSERTCMP(sb->s_flags, &, MS_ACTIVE);
+ ASSERTCMP(sb->s_flags, &, SB_ACTIVE);
afs_destroy_sbi(as);
as = NULL;
}
}
if (f != t) {
+ if (PageWriteback(page)) {
+ trace_afs_page_dirty(vnode, tracepoint_string("alrdy"),
+ page->index, priv);
+ goto flush_conflicting_write;
+ }
if (to < f || from > t)
goto flush_conflicting_write;
if (from < f)
pr_debug("waiting for mount name=%pd\n", path->dentry);
status = autofs4_wait(sbi, path, NFY_MOUNT);
pr_debug("mount wait done status=%d\n", status);
- ino->last_used = jiffies;
}
+ ino->last_used = jiffies;
return status;
}
*/
if (autofs_type_indirect(sbi->type) && d_unhashed(dentry)) {
struct dentry *parent = dentry->d_parent;
+ struct autofs_info *ino;
struct dentry *new;
new = d_lookup(parent, &dentry->d_name);
if (!new)
return NULL;
- if (new == dentry)
- dput(new);
- else {
- struct autofs_info *ino;
-
- ino = autofs4_dentry_ino(new);
- ino->last_used = jiffies;
- dput(path->dentry);
- path->dentry = new;
- }
+ ino = autofs4_dentry_ino(new);
+ ino->last_used = jiffies;
+ dput(path->dentry);
+ path->dentry = new;
}
return path->dentry;
}
(fs/befs/super.c)
* Tell the kernel to only mount befs read-only.
- By setting the MS_RDONLY flag in befs_read_super().
+ By setting the SB_RDONLY flag in befs_read_super().
Not that it was possible to write before. But now the kernel won't even try.
(fs/befs/super.c)
if (!sb_rdonly(sb)) {
befs_warning(sb,
"No write support. Marking filesystem read-only");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
/*
befs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- if (!(*flags & MS_RDONLY))
+ if (!(*flags & SB_RDONLY))
return -EINVAL;
return 0;
}
unsigned long len, u64 disk_start,
unsigned long compressed_len,
struct page **compressed_pages,
- unsigned long nr_pages)
+ unsigned long nr_pages,
+ unsigned int write_flags)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct bio *bio = NULL;
bdev = fs_info->fs_devices->latest_bdev;
bio = btrfs_bio_alloc(bdev, first_byte);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
refcount_set(&cb->pending_bios, 1);
bio_put(bio);
bio = btrfs_bio_alloc(bdev, first_byte);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio->bi_opf = REQ_OP_WRITE | write_flags;
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
bio_add_page(bio, page, PAGE_SIZE, 0);
if (str[4] == ':' && '1' <= str[5] && str[5] <= '9' && str[6] == 0)
return str[5] - '0';
- return 0;
+ return BTRFS_ZLIB_DEFAULT_LEVEL;
}
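The hunk above makes "zlib:N" parsing fall back to a real default level
instead of 0. A standalone sketch of the same single-digit parse with
the new fallback, assuming the same "zlib:N" syntax (constant name is a
stand-in for BTRFS_ZLIB_DEFAULT_LEVEL)::

    #include <stdio.h>

    #define ZLIB_DEFAULT_LEVEL 3  /* mirrors BTRFS_ZLIB_DEFAULT_LEVEL */

    /* Parse "zlib" or "zlib:N" (N = 1..9); anything else gets the
     * default level, matching the fixed btrfs_compress_str2level(). */
    static int compress_str2level(const char *str)
    {
        if (str[0] == 'z' && str[1] == 'l' && str[2] == 'i' && str[3] == 'b' &&
            str[4] == ':' && str[5] >= '1' && str[5] <= '9' && str[6] == '\0')
            return str[5] - '0';
        return ZLIB_DEFAULT_LEVEL;
    }

    int main(void)
    {
        printf("%d %d %d\n", compress_str2level("zlib"),
               compress_str2level("zlib:9"), compress_str2level("zlib:42"));
        return 0;  /* prints: 3 9 3 */
    }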
/* Maximum size of data before compression */
#define BTRFS_MAX_UNCOMPRESSED (SZ_128K)
+#define BTRFS_ZLIB_DEFAULT_LEVEL 3
+
struct compressed_bio {
/* number of bios pending for this compressed extent */
refcount_t pending_bios;
unsigned long len, u64 disk_start,
unsigned long compressed_len,
struct page **compressed_pages,
- unsigned long nr_pages);
+ unsigned long nr_pages,
+ unsigned int write_flags);
blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
int mirror_num, unsigned long bio_flags);
*/
static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
{
- return fs_info->sb->s_flags & MS_RDONLY || btrfs_fs_closing(fs_info);
+ return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
}
static inline void free_fs_info(struct btrfs_fs_info *fs_info)
int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, int delay_iput,
int nr);
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+ unsigned int extra_bits,
struct extent_state **cached_state, int dedupe);
int btrfs_create_subvol_root(struct btrfs_trans_handle *trans,
struct btrfs_root *new_root,
* that we don't try and read the other copies of this block, just
* return -EIO.
*/
- if (found_level == 0 && btrfs_check_leaf(root, eb)) {
+ if (found_level == 0 && btrfs_check_leaf_full(root, eb)) {
set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags);
ret = -EIO;
}
buf->len,
fs_info->dirty_metadata_batch);
#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
- if (btrfs_header_level(buf) == 0 && btrfs_check_leaf(root, buf)) {
+ /*
+	 * btrfs_mark_buffer_dirty() can be called with the item pointer set
+	 * but the item data not yet updated, so here we should only check
+	 * item pointers, not item data.
+ */
+ if (btrfs_header_level(buf) == 0 &&
+ btrfs_check_leaf_relaxed(root, buf)) {
btrfs_print_leaf(buf);
ASSERT(0);
}
goto again;
}
- /* We've already setup this transaction, go ahead and exit */
- if (block_group->cache_generation == trans->transid &&
- i_size_read(inode)) {
- dcs = BTRFS_DC_SETUP;
- goto out_put;
- }
-
/*
* We want to set the generation to 0, that way if anything goes wrong
* from here on out we know not to trust this cache when we load up next
}
WARN_ON(ret);
+ /* We've already setup this transaction, go ahead and exit */
+ if (block_group->cache_generation == trans->transid &&
+ i_size_read(inode)) {
+ dcs = BTRFS_DC_SETUP;
+ goto out_put;
+ }
+
if (i_size_read(inode) > 0) {
ret = btrfs_check_trunc_cache_free_space(fs_info,
&fs_info->global_block_rsv);
struct btrfs_bio *bbio = NULL;
int ret;
- ASSERT(!(fs_info->sb->s_flags & MS_RDONLY));
+ ASSERT(!(fs_info->sb->s_flags & SB_RDONLY));
BUG_ON(!mirror_num);
bio = btrfs_io_bio_alloc(1);
delalloc_start,
delalloc_end,
&page_started,
- nr_written);
+ nr_written, wbc);
/* File system has been set read-only */
if (ret) {
SetPageError(page);
*/
int (*fill_delalloc)(void *private_data, struct page *locked_page,
u64 start, u64 end, int *page_started,
- unsigned long *nr_written);
+ unsigned long *nr_written,
+ struct writeback_control *wbc);
int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
void (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
struct extent_state **cached_state);
static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
- u64 end, struct extent_state **cached_state)
+ u64 end, unsigned int extra_bits,
+ struct extent_state **cached_state)
{
return set_extent_bit(tree, start, end,
- EXTENT_DELALLOC | EXTENT_UPTODATE,
+ EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
NULL, cached_state, GFP_NOFS);
}
}
}
+static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
+ const u64 start,
+ const u64 len,
+ struct extent_state **cached_state)
+{
+ u64 search_start = start;
+ const u64 end = start + len - 1;
+
+ while (search_start < end) {
+ const u64 search_len = end - search_start + 1;
+ struct extent_map *em;
+ u64 em_len;
+ int ret = 0;
+
+ em = btrfs_get_extent(inode, NULL, 0, search_start,
+ search_len, 0);
+ if (IS_ERR(em))
+ return PTR_ERR(em);
+
+ if (em->block_start != EXTENT_MAP_HOLE)
+ goto next;
+
+ em_len = em->len;
+ if (em->start < search_start)
+ em_len -= search_start - em->start;
+ if (em_len > search_len)
+ em_len = search_len;
+
+ ret = set_extent_bit(&inode->io_tree, search_start,
+ search_start + em_len - 1,
+ EXTENT_DELALLOC_NEW,
+ NULL, cached_state, GFP_NOFS);
+next:
+ search_start = extent_map_end(em);
+ free_extent_map(em);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
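btrfs_find_new_delalloc_bytes() walks the extent mapping of
[start, start + len) and tags only the hole sub-ranges, clamping each
hole to the requested window. A self-contained sketch of that clamped
walk over a sorted in-memory extent array, with hypothetical types::

    #include <stdint.h>
    #include <stdio.h>

    struct em {             /* simplified extent map: [start, start + len) */
        uint64_t start;
        uint64_t len;
        int is_hole;
    };

    /* Visit each hole overlapping [start, start + len), clamped to the
     * window, in the spirit of btrfs_find_new_delalloc_bytes(). */
    static void for_each_hole(const struct em *ems, size_t n,
                              uint64_t start, uint64_t len)
    {
        uint64_t search = start, end = start + len - 1;

        for (size_t i = 0; i < n && search <= end; i++) {
            const struct em *e = &ems[i];
            uint64_t e_end = e->start + e->len;

            if (e_end <= search)
                continue;
            if (e->is_hole) {
                uint64_t lo = search > e->start ? search : e->start;
                uint64_t hi = e_end - 1 < end ? e_end - 1 : end;
                printf("hole [%llu, %llu]\n",
                       (unsigned long long)lo, (unsigned long long)hi);
            }
            search = e_end;  /* like search_start = extent_map_end(em) */
        }
    }

    int main(void)
    {
        struct em ems[] = {
            { 0, 4096, 0 }, { 4096, 8192, 1 }, { 12288, 4096, 0 },
        };
        for_each_hole(ems, 3, 2048, 12288);  /* window [2048, 14335] */
        return 0;
    }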
/*
* after copy_from_user, pages need to be dirtied and we need to make
* sure holes are created between the current EOF and the start of
u64 end_of_last_block;
u64 end_pos = pos + write_bytes;
loff_t isize = i_size_read(inode);
+ unsigned int extra_bits = 0;
start_pos = pos & ~((u64) fs_info->sectorsize - 1);
num_bytes = round_up(write_bytes + pos - start_pos,
fs_info->sectorsize);
end_of_last_block = start_pos + num_bytes - 1;
+
+ if (!btrfs_is_free_space_inode(BTRFS_I(inode))) {
+ if (start_pos >= isize &&
+ !(BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC)) {
+ /*
+ * There can't be any extents following eof in this case
+ * so just set the delalloc new bit for the range
+ * directly.
+ */
+ extra_bits |= EXTENT_DELALLOC_NEW;
+ } else {
+ err = btrfs_find_new_delalloc_bytes(BTRFS_I(inode),
+ start_pos,
+ num_bytes, cached);
+ if (err)
+ return err;
+ }
+ }
+
err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
- cached, 0);
+ extra_bits, cached, 0);
if (err)
return err;
}
-static int btrfs_find_new_delalloc_bytes(struct btrfs_inode *inode,
- const u64 start,
- const u64 len,
- struct extent_state **cached_state)
-{
- u64 search_start = start;
- const u64 end = start + len - 1;
-
- while (search_start < end) {
- const u64 search_len = end - search_start + 1;
- struct extent_map *em;
- u64 em_len;
- int ret = 0;
-
- em = btrfs_get_extent(inode, NULL, 0, search_start,
- search_len, 0);
- if (IS_ERR(em))
- return PTR_ERR(em);
-
- if (em->block_start != EXTENT_MAP_HOLE)
- goto next;
-
- em_len = em->len;
- if (em->start < search_start)
- em_len -= search_start - em->start;
- if (em_len > search_len)
- em_len = search_len;
-
- ret = set_extent_bit(&inode->io_tree, search_start,
- search_start + em_len - 1,
- EXTENT_DELALLOC_NEW,
- NULL, cached_state, GFP_NOFS);
-next:
- search_start = extent_map_end(em);
- free_extent_map(em);
- if (ret)
- return ret;
- }
- return 0;
-}
-
/*
* This function locks the extent and properly waits for data=ordered extents
 * to finish before allowing the pages to be modified if needed.
+ round_up(pos + write_bytes - start_pos,
fs_info->sectorsize) - 1;
- if (start_pos < inode->vfs_inode.i_size ||
- (inode->flags & BTRFS_INODE_PREALLOC)) {
+ if (start_pos < inode->vfs_inode.i_size) {
struct btrfs_ordered_extent *ordered;
- unsigned int clear_bits;
lock_extent_bits(&inode->io_tree, start_pos, last_pos,
cached_state);
}
if (ordered)
btrfs_put_ordered_extent(ordered);
- ret = btrfs_find_new_delalloc_bytes(inode, start_pos,
- last_pos - start_pos + 1,
- cached_state);
- clear_bits = EXTENT_DIRTY | EXTENT_DELALLOC |
- EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG;
- if (ret)
- clear_bits |= EXTENT_DELALLOC_NEW | EXTENT_LOCKED;
- clear_extent_bit(&inode->io_tree, start_pos,
- last_pos, clear_bits,
- (clear_bits & EXTENT_LOCKED) ? 1 : 0,
- 0, cached_state, GFP_NOFS);
- if (ret)
- return ret;
+ clear_extent_bit(&inode->io_tree, start_pos, last_pos,
+ EXTENT_DIRTY | EXTENT_DELALLOC |
+ EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
+ 0, 0, cached_state, GFP_NOFS);
*lockstart = start_pos;
*lockend = last_pos;
ret = 1;
len = (u64)end - (u64)start + 1;
trace_btrfs_sync_file(file, datasync);
+ btrfs_init_log_ctx(&ctx, inode);
+
/*
* We write the dirty pages in the range and wait until they complete
* out of the ->i_mutex. If so, we can flush the dirty pages by
}
trans->sync = true;
- btrfs_init_log_ctx(&ctx, inode);
-
ret = btrfs_log_dentry_safe(trans, root, dentry, start, end, &ctx);
if (ret < 0) {
/* Fallthrough and commit/free transaction. */
ret = btrfs_end_transaction(trans);
}
out:
+ ASSERT(list_empty(&ctx.list));
err = file_check_and_advance_wb_err(file);
if (!ret)
ret = err;
/* Lock all pages first so we can lock the extent safely. */
ret = io_ctl_prepare_pages(io_ctl, inode, 0);
if (ret)
- goto out;
+ goto out_unlock;
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
&cached_state);
out_nospc:
cleanup_write_cache_enospc(inode, io_ctl, &cached_state);
+out_unlock:
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
up_write(&block_group->data_rwsem);
struct page *locked_page;
u64 start;
u64 end;
+ unsigned int write_flags;
struct list_head extents;
struct btrfs_work work;
};
async_extent->ram_size,
ins.objectid,
ins.offset, async_extent->pages,
- async_extent->nr_pages)) {
+ async_extent->nr_pages,
+ async_cow->write_flags)) {
struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
struct page *p = async_extent->pages[0];
const u64 start = async_extent->start;
static int cow_file_range_async(struct inode *inode, struct page *locked_page,
u64 start, u64 end, int *page_started,
- unsigned long *nr_written)
+ unsigned long *nr_written,
+ unsigned int write_flags)
{
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct async_cow *async_cow;
async_cow->root = root;
async_cow->locked_page = locked_page;
async_cow->start = start;
+ async_cow->write_flags = write_flags;
if (BTRFS_I(inode)->flags & BTRFS_INODE_NOCOMPRESS &&
!btrfs_test_opt(fs_info, FORCE_COMPRESS))
*/
static int run_delalloc_range(void *private_data, struct page *locked_page,
u64 start, u64 end, int *page_started,
- unsigned long *nr_written)
+ unsigned long *nr_written,
+ struct writeback_control *wbc)
{
struct inode *inode = private_data;
int ret;
int force_cow = need_force_cow(inode, start, end);
+ unsigned int write_flags = wbc_to_write_flags(wbc);
if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW && !force_cow) {
ret = run_delalloc_nocow(inode, locked_page, start, end,
set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
&BTRFS_I(inode)->runtime_flags);
ret = cow_file_range_async(inode, locked_page, start, end,
- page_started, nr_written);
+ page_started, nr_written,
+ write_flags);
}
if (ret)
btrfs_cleanup_ordered_extents(inode, start, end - start + 1);
}
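run_delalloc_range() now derives bio op flags from the writeback
control, so writeback driven by an integrity sync is issued with
REQ_SYNC. A minimal stand-in for that mapping, assuming only the
sync_mode distinction (the real wbc_to_write_flags() also considers
background writeback)::

    #include <stdio.h>

    enum sync_mode { WB_SYNC_NONE, WB_SYNC_ALL };

    #define REQ_SYNC (1u << 0)  /* stand-in for the block-layer flag */

    /* Integrity writeback (WB_SYNC_ALL) is marked synchronous,
     * opportunistic background writeback is not. */
    static unsigned int wbc_flags(enum sync_mode mode)
    {
        return mode == WB_SYNC_ALL ? REQ_SYNC : 0;
    }

    int main(void)
    {
        printf("background=%#x fsync=%#x\n",
               wbc_flags(WB_SYNC_NONE), wbc_flags(WB_SYNC_ALL));
        return 0;
    }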
int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
+ unsigned int extra_bits,
struct extent_state **cached_state, int dedupe)
{
WARN_ON((end & (PAGE_SIZE - 1)) == 0);
return set_extent_delalloc(&BTRFS_I(inode)->io_tree, start, end,
- cached_state);
+ extra_bits, cached_state);
}
/* see btrfs_writepage_start_hook for details on why this is required */
goto out;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, &cached_state,
+ btrfs_set_extent_delalloc(inode, page_start, page_end, 0, &cached_state,
0);
ClearPageChecked(page);
set_page_dirty(page);
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
- ret = btrfs_set_extent_delalloc(inode, block_start, block_end,
+ ret = btrfs_set_extent_delalloc(inode, block_start, block_end, 0,
&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, block_start, block_end,
goto out_err;
btrfs_dir_item_key_to_cpu(path->nodes[0], di, location);
+ if (location->type != BTRFS_INODE_ITEM_KEY &&
+ location->type != BTRFS_ROOT_ITEM_KEY) {
+ btrfs_warn(root->fs_info,
+"%s gets something invalid in DIR_ITEM (name %s, directory ino %llu, location(%llu %u %llu))",
+ __func__, name, btrfs_ino(BTRFS_I(dir)),
+ location->objectid, location->type, location->offset);
+ goto out_err;
+ }
out:
btrfs_free_path(path);
return ret;
return inode;
}
- BUG_ON(location.type != BTRFS_ROOT_ITEM_KEY);
-
index = srcu_read_lock(&fs_info->subvol_srcu);
ret = fixup_tree_root_location(fs_info, dir, dentry,
&location, &sub_root);
EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
0, 0, &cached_state, GFP_NOFS);
- ret = btrfs_set_extent_delalloc(inode, page_start, end,
+ ret = btrfs_set_extent_delalloc(inode, page_start, end, 0,
&cached_state, 0);
if (ret) {
unlock_extent_cached(io_tree, page_start, page_end,
if (!i_done || ret)
goto out;
- if (!(inode->i_sb->s_flags & MS_ACTIVE))
+ if (!(inode->i_sb->s_flags & SB_ACTIVE))
goto out;
/*
* make sure we stop running if someone unmounts
* the FS
*/
- if (!(inode->i_sb->s_flags & MS_ACTIVE))
+ if (!(inode->i_sb->s_flags & SB_ACTIVE))
break;
if (btrfs_defrag_cancelled(fs_info)) {
nr++;
}
- btrfs_set_extent_delalloc(inode, page_start, page_end, NULL, 0);
+ btrfs_set_extent_delalloc(inode, page_start, page_end, 0, NULL,
+ 0);
set_page_dirty(page);
unlock_extent(&BTRFS_I(inode)->io_tree,
}
/*
- * Check if ino ino1 is an ancestor of inode ino2 in the given root.
+ * Check if inode ino2, or any of its ancestors, is inode ino1.
+ * Return 1 if true, 0 if false and < 0 on error.
+ */
+static int check_ino_in_path(struct btrfs_root *root,
+ const u64 ino1,
+ const u64 ino1_gen,
+ const u64 ino2,
+ const u64 ino2_gen,
+ struct fs_path *fs_path)
+{
+ u64 ino = ino2;
+
+ if (ino1 == ino2)
+ return ino1_gen == ino2_gen;
+
+ while (ino > BTRFS_FIRST_FREE_OBJECTID) {
+ u64 parent;
+ u64 parent_gen;
+ int ret;
+
+ fs_path_reset(fs_path);
+ ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
+ if (ret < 0)
+ return ret;
+ if (parent == ino1)
+ return parent_gen == ino1_gen;
+ ino = parent;
+ }
+ return 0;
+}
+
+/*
+ * Check if inode ino1 is an ancestor of inode ino2 in the given root for any
+ * possible path (in case ino2 is not a directory and has multiple hard links).
* Return 1 if true, 0 if false and < 0 on error.
*/
static int is_ancestor(struct btrfs_root *root,
const u64 ino2,
struct fs_path *fs_path)
{
- u64 ino = ino2;
- bool free_path = false;
+ bool free_fs_path = false;
int ret = 0;
+ struct btrfs_path *path = NULL;
+ struct btrfs_key key;
if (!fs_path) {
fs_path = fs_path_alloc();
if (!fs_path)
return -ENOMEM;
- free_path = true;
+ free_fs_path = true;
}
- while (ino > BTRFS_FIRST_FREE_OBJECTID) {
- u64 parent;
- u64 parent_gen;
+ path = alloc_path_for_send();
+ if (!path) {
+ ret = -ENOMEM;
+ goto out;
+ }
- fs_path_reset(fs_path);
- ret = get_first_ref(root, ino, &parent, &parent_gen, fs_path);
- if (ret < 0) {
- if (ret == -ENOENT && ino == ino2)
- ret = 0;
- goto out;
+ key.objectid = ino2;
+ key.type = BTRFS_INODE_REF_KEY;
+ key.offset = 0;
+
+ ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ while (true) {
+ struct extent_buffer *leaf = path->nodes[0];
+ int slot = path->slots[0];
+ u32 cur_offset = 0;
+ u32 item_size;
+
+ if (slot >= btrfs_header_nritems(leaf)) {
+ ret = btrfs_next_leaf(root, path);
+ if (ret < 0)
+ goto out;
+ if (ret > 0)
+ break;
+ continue;
}
- if (parent == ino1) {
- ret = parent_gen == ino1_gen ? 1 : 0;
- goto out;
+
+ btrfs_item_key_to_cpu(leaf, &key, slot);
+ if (key.objectid != ino2)
+ break;
+ if (key.type != BTRFS_INODE_REF_KEY &&
+ key.type != BTRFS_INODE_EXTREF_KEY)
+ break;
+
+ item_size = btrfs_item_size_nr(leaf, slot);
+ while (cur_offset < item_size) {
+ u64 parent;
+ u64 parent_gen;
+
+ if (key.type == BTRFS_INODE_EXTREF_KEY) {
+ unsigned long ptr;
+ struct btrfs_inode_extref *extref;
+
+ ptr = btrfs_item_ptr_offset(leaf, slot);
+ extref = (struct btrfs_inode_extref *)
+ (ptr + cur_offset);
+ parent = btrfs_inode_extref_parent(leaf,
+ extref);
+ cur_offset += sizeof(*extref);
+ cur_offset += btrfs_inode_extref_name_len(leaf,
+ extref);
+ } else {
+ parent = key.offset;
+ cur_offset = item_size;
+ }
+
+ ret = get_inode_info(root, parent, NULL, &parent_gen,
+ NULL, NULL, NULL, NULL);
+ if (ret < 0)
+ goto out;
+ ret = check_ino_in_path(root, ino1, ino1_gen,
+ parent, parent_gen, fs_path);
+ if (ret)
+ goto out;
}
- ino = parent;
+ path->slots[0]++;
}
+ ret = 0;
out:
- if (free_path)
+ btrfs_free_path(path);
+ if (free_fs_path)
fs_path_free(fs_path);
return ret;
}
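The rewritten is_ancestor() must consider every parent of ino2, because
a non-directory with several hard links has several paths; each parent
is a directory, so from there a single first-ref chain upward suffices.
A self-contained sketch of that check over an in-memory set of
child-to-parent edges, with hypothetical types and inode generations
omitted::

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    struct ref { uint64_t child, parent; };  /* one INODE_REF-like edge */

    #define ROOT_INO 256  /* stand-in for BTRFS_FIRST_FREE_OBJECTID */

    /* Walk up from 'ino' via its first parent only (directories have a
     * single parent); 0 is used as a "no parent found" sentinel. */
    static bool in_path(const struct ref *refs, size_t n,
                        uint64_t ancestor, uint64_t ino)
    {
        while (ino > ROOT_INO) {
            uint64_t parent = 0;
            for (size_t i = 0; i < n; i++)
                if (refs[i].child == ino) { parent = refs[i].parent; break; }
            if (!parent)
                return false;
            if (parent == ancestor)
                return true;
            ino = parent;
        }
        return false;
    }

    /* Like the new is_ancestor(): try every parent edge of ino2 (hard
     * links give several), not just the first ref. */
    static bool is_ancestor(const struct ref *refs, size_t n,
                            uint64_t ino1, uint64_t ino2)
    {
        if (ino1 == ino2)
            return true;
        for (size_t i = 0; i < n; i++)
            if (refs[i].child == ino2 &&
                (refs[i].parent == ino1 ||
                 in_path(refs, n, ino1, refs[i].parent)))
                return true;
        return false;
    }

    int main(void)
    {
        /* 257/258 are dirs under root; 300 is hard-linked into both. */
        struct ref refs[] = {
            { 257, ROOT_INO }, { 258, ROOT_INO },
            { 300, 257 }, { 300, 258 },
        };
        return is_ancestor(refs, 4, 257, 300) ? 0 : 1;
    }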
return;
if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
btrfs_info(fs_info, "forced readonly");
/*
* Note that a running device replace operation is not
/*
* Special case: if the error is EROFS, and we're already
- * under MS_RDONLY, then it is safe here.
+ * under SB_RDONLY, then it is safe here.
*/
if (errno == -EROFS && sb_rdonly(sb))
return;
set_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state);
/* Don't go through full error handling during mount */
- if (sb->s_flags & MS_BORN)
+ if (sb->s_flags & SB_BORN)
btrfs_handle_error(fs_info);
}
token == Opt_compress_force ||
strncmp(args[0].from, "zlib", 4) == 0) {
compress_type = "zlib";
+
info->compress_type = BTRFS_COMPRESS_ZLIB;
- info->compress_level =
- btrfs_compress_str2level(args[0].from);
+ info->compress_level = BTRFS_ZLIB_DEFAULT_LEVEL;
+ /*
+ * args[0] contains uninitialized data since
+ * for these tokens we don't expect any
+ * parameter.
+ */
+ if (token != Opt_compress &&
+ token != Opt_compress_force)
+ info->compress_level =
+ btrfs_compress_str2level(args[0].from);
btrfs_set_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, NODATACOW);
btrfs_clear_opt(info->mount_opt, NODATASUM);
break;
case Opt_acl:
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
- info->sb->s_flags |= MS_POSIXACL;
+ info->sb->s_flags |= SB_POSIXACL;
break;
#else
btrfs_err(info, "support for ACL not compiled in!");
goto out;
#endif
case Opt_noacl:
- info->sb->s_flags &= ~MS_POSIXACL;
+ info->sb->s_flags &= ~SB_POSIXACL;
break;
case Opt_notreelog:
btrfs_set_and_info(info, NOTREELOG,
/*
* Extra check for current option against current flag
*/
- if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & MS_RDONLY)) {
+ if (btrfs_test_opt(info, NOLOGREPLAY) && !(new_flags & SB_RDONLY)) {
btrfs_err(info,
"nologreplay must be used with ro mount option");
ret = -EINVAL;
sb->s_xattr = btrfs_xattr_handlers;
sb->s_time_gran = 1;
#ifdef CONFIG_BTRFS_FS_POSIX_ACL
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
sb->s_flags |= SB_I_VERSION;
sb->s_iflags |= SB_I_CGROUPWB;
}
cleancache_init_fs(sb);
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
return 0;
fail_close:
seq_puts(seq, ",flushoncommit");
if (btrfs_test_opt(info, DISCARD))
seq_puts(seq, ",discard");
- if (!(info->sb->s_flags & MS_POSIXACL))
+ if (!(info->sb->s_flags & SB_POSIXACL))
seq_puts(seq, ",noacl");
if (btrfs_test_opt(info, SPACE_CACHE))
seq_puts(seq, ",space_cache");
mnt = vfs_kern_mount(&btrfs_fs_type, flags, device_name, newargs);
if (PTR_ERR_OR_ZERO(mnt) == -EBUSY) {
- if (flags & MS_RDONLY) {
- mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~MS_RDONLY,
+ if (flags & SB_RDONLY) {
+ mnt = vfs_kern_mount(&btrfs_fs_type, flags & ~SB_RDONLY,
device_name, newargs);
} else {
- mnt = vfs_kern_mount(&btrfs_fs_type, flags | MS_RDONLY,
+ mnt = vfs_kern_mount(&btrfs_fs_type, flags | SB_RDONLY,
device_name, newargs);
if (IS_ERR(mnt)) {
root = ERR_CAST(mnt);
u64 subvol_objectid = 0;
int error = 0;
- if (!(flags & MS_RDONLY))
+ if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
error = btrfs_parse_early_options(data, mode, fs_type,
if (error)
goto error_fs_info;
- if (!(flags & MS_RDONLY) && fs_devices->rw_devices == 0) {
+ if (!(flags & SB_RDONLY) && fs_devices->rw_devices == 0) {
error = -EACCES;
goto error_close_devices;
}
bdev = fs_devices->latest_bdev;
- s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | MS_NOSEC,
+ s = sget(fs_type, btrfs_test_super, btrfs_set_super, flags | SB_NOSEC,
fs_info);
if (IS_ERR(s)) {
error = PTR_ERR(s);
if (s->s_root) {
btrfs_close_devices(fs_devices);
free_fs_info(fs_info);
- if ((flags ^ s->s_flags) & MS_RDONLY)
+ if ((flags ^ s->s_flags) & SB_RDONLY)
error = -EBUSY;
} else {
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
{
if (btrfs_raw_test_opt(old_opts, AUTO_DEFRAG) &&
(!btrfs_raw_test_opt(fs_info->mount_opt, AUTO_DEFRAG) ||
- (flags & MS_RDONLY))) {
+ (flags & SB_RDONLY))) {
/* wait for any defraggers to finish */
wait_event(fs_info->transaction_wait,
(atomic_read(&fs_info->defrag_running) == 0));
- if (flags & MS_RDONLY)
+ if (flags & SB_RDONLY)
sync_filesystem(fs_info->sb);
}
}
btrfs_resize_thread_pool(fs_info,
fs_info->thread_pool_size, old_thread_pool_size);
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out;
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
/*
* this also happens on 'umount -rf' or on shutdown, when
* the filesystem is busy.
/* avoid complains from lockdep et al. */
up(&fs_info->uuid_tree_rescan_sem);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/*
- * Setting MS_RDONLY will put the cleaner thread to
+ * Setting SB_RDONLY will put the cleaner thread to
* sleep at the next loop if it's already active.
* If it's already asleep, we'll leave unused block
* groups on disk until we're mounted read-write again
goto restore;
}
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
set_bit(BTRFS_FS_OPEN, &fs_info->flags);
}
return 0;
restore:
- /* We've hit an error - don't reset MS_RDONLY */
+ /* We've hit an error - don't reset SB_RDONLY */
if (sb_rdonly(sb))
- old_flags |= MS_RDONLY;
+ old_flags |= SB_RDONLY;
sb->s_flags = old_flags;
fs_info->mount_opt = old_opts;
fs_info->compress_type = old_compress_type;
* |--- delalloc ---|
* |--- search ---|
*/
- set_extent_delalloc(&tmp, 0, sectorsize - 1, NULL);
+ set_extent_delalloc(&tmp, 0, sectorsize - 1, 0, NULL);
start = 0;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
test_msg("Couldn't find the locked page\n");
goto out_bits;
}
- set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, NULL);
+ set_extent_delalloc(&tmp, sectorsize, max_bytes - 1, 0, NULL);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
*
* We are re-using our test_start from above since it works out well.
*/
- set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, NULL);
+ set_extent_delalloc(&tmp, max_bytes, total_dirty - 1, 0, NULL);
start = test_start;
end = 0;
found = find_lock_delalloc_range(inode, &tmp, locked_page, &start,
btrfs_test_inode_set_ops(inode);
/* [BTRFS_MAX_EXTENT_SIZE] */
- ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1,
+ ret = btrfs_set_extent_delalloc(inode, 0, BTRFS_MAX_EXTENT_SIZE - 1, 0,
NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
/* [BTRFS_MAX_EXTENT_SIZE][sectorsize] */
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE,
BTRFS_MAX_EXTENT_SIZE + sectorsize - 1,
- NULL, 0);
+ 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
ret = btrfs_set_extent_delalloc(inode, BTRFS_MAX_EXTENT_SIZE >> 1,
(BTRFS_MAX_EXTENT_SIZE >> 1)
+ sectorsize - 1,
- NULL, 0);
+ 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize,
(BTRFS_MAX_EXTENT_SIZE << 1) + 3 * sectorsize - 1,
- NULL, 0);
+ 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
*/
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
*/
ret = btrfs_set_extent_delalloc(inode,
BTRFS_MAX_EXTENT_SIZE + sectorsize,
- BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, NULL, 0);
+ BTRFS_MAX_EXTENT_SIZE + 2 * sectorsize - 1, 0, NULL, 0);
if (ret) {
test_msg("btrfs_set_extent_delalloc returned %d\n", ret);
goto out;
return ret;
}
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf)
+static int check_leaf(struct btrfs_root *root, struct extent_buffer *leaf,
+ bool check_item_data)
{
struct btrfs_fs_info *fs_info = root->fs_info;
/* No valid key type is 0, so all key should be larger than this key */
return -EUCLEAN;
}
- /* Check if the item size and content meet other criteria */
- ret = check_leaf_item(root, leaf, &key, slot);
- if (ret < 0)
- return ret;
+ if (check_item_data) {
+ /*
+ * Check if the item size and content meet other
+ * criteria
+ */
+ ret = check_leaf_item(root, leaf, &key, slot);
+ if (ret < 0)
+ return ret;
+ }
prev_key.objectid = key.objectid;
prev_key.type = key.type;
return 0;
}
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf)
+{
+ return check_leaf(root, leaf, true);
+}
+
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+ struct extent_buffer *leaf)
+{
+ return check_leaf(root, leaf, false);
+}
+
int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node)
{
unsigned long nr = btrfs_header_nritems(node);
#include "ctree.h"
#include "extent_io.h"
-int btrfs_check_leaf(struct btrfs_root *root, struct extent_buffer *leaf);
+/*
+ * Comprehensive leaf checker.
+ * Will check not only the item pointers, but also every possible member
+ * in item data.
+ */
+int btrfs_check_leaf_full(struct btrfs_root *root, struct extent_buffer *leaf);
+
+/*
+ * Less strict leaf checker.
+ * Will only check item pointers, not reading item data.
+ */
+int btrfs_check_leaf_relaxed(struct btrfs_root *root,
+ struct extent_buffer *leaf);
int btrfs_check_node(struct btrfs_root *root, struct extent_buffer *node);
#endif
if (ordered_io_err) {
ctx->io_err = -EIO;
- return 0;
+ return ctx->io_err;
}
btrfs_init_map_token(&token);
struct btrfs_device, dev_list);
list_del(&device->dev_list);
rcu_string_free(device->name);
+ bio_put(device->flush_bio);
kfree(device);
}
kfree(fs_devices);
fs_devs->num_devices--;
list_del(&dev->dev_list);
rcu_string_free(dev->name);
+ bio_put(dev->flush_bio);
kfree(dev);
}
break;
name = rcu_string_strdup(path, GFP_NOFS);
if (!name) {
+ bio_put(device->flush_bio);
kfree(device);
return -ENOMEM;
}
name = rcu_string_strdup(orig_dev->name->str,
GFP_KERNEL);
if (!name) {
+ bio_put(device->flush_bio);
kfree(device);
goto error;
}
list_del_init(&device->dev_list);
fs_devices->num_devices--;
rcu_string_free(device->name);
+ bio_put(device->flush_bio);
kfree(device);
}
key.offset = device->devid;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
- if (ret < 0)
- goto out;
-
- if (ret > 0) {
- ret = -ENOENT;
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
goto out;
}
ret = btrfs_del_item(trans, root, path);
- if (ret)
- goto out;
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+ btrfs_end_transaction(trans);
+ }
+
out:
btrfs_free_path(path);
- btrfs_commit_transaction(trans);
+ if (!ret)
+ ret = btrfs_commit_transaction(trans);
return ret;
}
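The fix above aborts the transaction on lookup or delete failure and
commits only on success, instead of committing unconditionally. A
sketch of that error-handling shape with a hypothetical transaction
API (txn_abort/txn_commit are stand-ins, not btrfs functions)::

    #include <stdio.h>

    struct txn { int aborted; };

    static void txn_abort(struct txn *t) { t->aborted = 1; }
    static int txn_commit(struct txn *t) { return t->aborted ? -1 : 0; }

    /* Two fallible steps inside a transaction: abort on the first
     * failure, commit only if everything succeeded. */
    static int remove_item(struct txn *t, int (*search)(void), int (*del)(void))
    {
        int ret = search();
        if (ret) {
            txn_abort(t);
            return ret;
        }
        ret = del();
        if (ret)
            txn_abort(t);
        if (!ret)
            ret = txn_commit(t);
        return ret;
    }

    static int ok(void)   { return 0; }
    static int fail(void) { return -2; }

    int main(void)
    {
        struct txn t = { 0 };
        printf("%d\n", remove_item(&t, ok, fail));  /* -2, aborted */
        return 0;
    }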
fs_devices = srcdev->fs_devices;
list_del_rcu(&srcdev->dev_list);
- list_del_rcu(&srcdev->dev_alloc_list);
+ list_del(&srcdev->dev_alloc_list);
fs_devices->num_devices--;
if (srcdev->missing)
fs_devices->missing_devices--;
name = rcu_string_strdup(device_path, GFP_KERNEL);
if (!name) {
+ bio_put(device->flush_bio);
kfree(device);
ret = -ENOMEM;
goto error;
trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans)) {
rcu_string_free(device->name);
+ bio_put(device->flush_bio);
kfree(device);
ret = PTR_ERR(trans);
goto error;
set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
if (seeding_dev) {
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
ret = btrfs_prepare_sprout(fs_info);
if (ret) {
btrfs_abort_transaction(trans, ret);
btrfs_sysfs_rm_device_link(fs_info->fs_devices, device);
error_trans:
if (seeding_dev)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
if (trans)
btrfs_end_transaction(trans);
rcu_string_free(device->name);
+ bio_put(device->flush_bio);
kfree(device);
error:
blkdev_put(bdev, FMODE_EXCL);
name = rcu_string_strdup(device_path, GFP_KERNEL);
if (!name) {
+ bio_put(device->flush_bio);
kfree(device);
ret = -ENOMEM;
goto error;
ret = find_next_devid(fs_info, &tmp);
if (ret) {
+ bio_put(dev->flush_bio);
kfree(dev);
return ERR_PTR(ret);
}
break;
#ifdef CONFIG_CEPH_FS_POSIX_ACL
case Opt_acl:
- fsopt->sb_flags |= MS_POSIXACL;
+ fsopt->sb_flags |= SB_POSIXACL;
break;
#endif
case Opt_noacl:
- fsopt->sb_flags &= ~MS_POSIXACL;
+ fsopt->sb_flags &= ~SB_POSIXACL;
break;
default:
BUG_ON(token);
seq_puts(m, ",nopoolperm");
#ifdef CONFIG_CEPH_FS_POSIX_ACL
- if (fsopt->sb_flags & MS_POSIXACL)
+ if (fsopt->sb_flags & SB_POSIXACL)
seq_puts(m, ",acl");
else
seq_puts(m, ",noacl");
dout("ceph_mount\n");
#ifdef CONFIG_CEPH_FS_POSIX_ACL
- flags |= MS_POSIXACL;
+ flags |= SB_POSIXACL;
#endif
err = parse_mount_options(&fsopt, &opt, flags, data, dev_name);
if (err < 0) {
#define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */
#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
-#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
+#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of SB_POSIXACL in mnt_cifs_flags */
#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
#define CIFS_MOUNT_MAP_SFM_CHR 0x800000 /* SFM/MAC mapping for illegal chars */
tcon = cifs_sb_master_tcon(cifs_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
sb->s_maxbytes = MAX_LFS_FILESIZE;
seq_puts(s, ",cifsacl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
seq_puts(s, ",dynperm");
- if (root->d_sb->s_flags & MS_POSIXACL)
+ if (root->d_sb->s_flags & SB_POSIXACL)
seq_puts(s, ",acl");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
seq_puts(s, ",mfsymlinks");
static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
return 0;
}
rc = cifs_mount(cifs_sb, volume_info);
if (rc) {
- if (!(flags & MS_SILENT))
+ if (!(flags & SB_SILENT))
cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
rc);
root = ERR_PTR(rc);
mnt_data.flags = flags;
/* BB should we make this contingent on mount parm? */
- flags |= MS_NODIRATIME | MS_NOATIME;
+ flags |= SB_NODIRATIME | SB_NOATIME;
sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
if (IS_ERR(sb)) {
goto out_super;
}
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
}
root = cifs_get_root(volume_info, sb);
CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
-#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
- MS_NODEV | MS_SYNCHRONOUS)
+#define CIFS_MS_MASK (SB_RDONLY | SB_MANDLOCK | SB_NOEXEC | SB_NOSUID | \
+ SB_NODEV | SB_SYNCHRONOUS)
struct cifs_mnt_data {
struct cifs_sb_info *cifs_sb;
}
cifs_fattr_to_inode(inode, fattr);
- if (sb->s_flags & MS_NOATIME)
+ if (sb->s_flags & SB_NOATIME)
inode->i_flags |= S_NOATIME | S_NOCMTIME;
if (inode->i_state & I_NEW) {
inode->i_ino = hash;
#ifdef CONFIG_CIFS_POSIX
if (!value)
goto out;
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
value, (const int)size,
ACL_TYPE_ACCESS, cifs_sb->local_nls,
#ifdef CONFIG_CIFS_POSIX
if (!value)
goto out;
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
rc = CIFSSMBSetPosixACL(xid, pTcon, full_path,
value, (const int)size,
ACL_TYPE_DEFAULT, cifs_sb->local_nls,
case XATTR_ACL_ACCESS:
#ifdef CONFIG_CIFS_POSIX
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
value, size, ACL_TYPE_ACCESS,
cifs_sb->local_nls,
case XATTR_ACL_DEFAULT:
#ifdef CONFIG_CIFS_POSIX
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
rc = CIFSSMBGetPosixACL(xid, pTcon, full_path,
value, size, ACL_TYPE_DEFAULT,
cifs_sb->local_nls,
static int coda_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NOATIME;
+ *flags |= SB_NOATIME;
return 0;
}
mutex_unlock(&vc->vc_mutex);
sb->s_fs_info = vc;
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
sb->s_blocksize = 4096; /* XXXXX what do we put here?? */
sb->s_blocksize_bits = 12;
sb->s_magic = CODA_SUPER_MAGIC;
static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
struct inode *root;
/* Set it all up.. */
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
sb->s_op = &cramfs_ops;
root = get_cramfs_inode(sb, cramfs_root, 0);
if (IS_ERR(root))
if (pfn != pmd_pfn(*pmdp))
goto unlock_pmd;
- if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+ if (!pmd_dirty(*pmdp)
+ && !pmd_access_permitted(*pmdp, WRITE))
goto unlock_pmd;
flush_cache_page(vma, address, pfn);
* Set the POSIX ACL flag based on whether they're enabled in the lower
* mount.
*/
- s->s_flags = flags & ~MS_POSIXACL;
- s->s_flags |= path.dentry->d_sb->s_flags & MS_POSIXACL;
+ s->s_flags = flags & ~SB_POSIXACL;
+ s->s_flags |= path.dentry->d_sb->s_flags & SB_POSIXACL;
/**
* Force a read-only eCryptfs mount when:
* 2) The ecryptfs_encrypted_view mount option is specified
*/
if (sb_rdonly(path.dentry->d_sb) || mount_crypt_stat->flags & ECRYPTFS_ENCRYPTED_VIEW_ENABLED)
- s->s_flags |= MS_RDONLY;
+ s->s_flags |= SB_RDONLY;
s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
s->s_blocksize = path.dentry->d_sb->s_blocksize;
ecryptfs_set_dentry_private(s->s_root, root_info);
root_info->lower_path = path;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
return dget(s->s_root);
out_free:
static int efs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
#ifdef DEBUG
pr_info("forcing read-only mode\n");
#endif
- s->s_flags |= MS_RDONLY;
+ s->s_flags |= SB_RDONLY;
}
s->s_op = &efs_superblock_operations;
s->s_export_op = &efs_export_ops;
* avoid bad behavior from the prior rlimits. This has to
* happen before arch_pick_mmap_layout(), which examines
* RLIMIT_STACK, but after the point of no return to avoid
- * needing to clean up the change on failure.
+ * races from other threads changing the limits. This also
+ * must be protected from races with prlimit() calls.
*/
+ task_lock(current->group_leader);
if (current->signal->rlim[RLIMIT_STACK].rlim_cur > _STK_LIM)
current->signal->rlim[RLIMIT_STACK].rlim_cur = _STK_LIM;
+ if (current->signal->rlim[RLIMIT_STACK].rlim_max > _STK_LIM)
+ current->signal->rlim[RLIMIT_STACK].rlim_max = _STK_LIM;
+ task_unlock(current->group_leader);
}
arch_pick_mmap_layout(current->mm);
}
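The change clamps both the soft and the hard RLIMIT_STACK values for a
setuid exec, and takes task_lock() so the clamp cannot race a
concurrent prlimit() call. From userspace the effect is observable
with getrlimit(2); a minimal check, assuming _STK_LIM's usual 8 MiB
value::

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit rl;

        if (getrlimit(RLIMIT_STACK, &rl)) {
            perror("getrlimit");
            return 1;
        }
        /* After a setuid exec the kernel guarantees cur <= 8 MiB and,
         * with this fix, max <= 8 MiB as well. */
        printf("stack soft=%llu hard=%llu\n",
               (unsigned long long)rl.rlim_cur,
               (unsigned long long)rl.rlim_max);
        return 0;
    }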
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
group_adjust_blocks(sb, block_group, desc, bh2, group_freed);
percpu_counter_sub(&sbi->s_freeblocks_counter, num);
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
*errp = 0;
else
ext2_release_inode(sb, block_group, is_directory);
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
brelse(bitmap_bh);
goto fail;
got:
mark_buffer_dirty(bitmap_bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bitmap_bh);
brelse(bitmap_bh);
if (test_opt(sb, ERRORS_RO)) {
ext2_msg(sb, KERN_CRIT,
"error: remounting filesystem read-only");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
}
ext2_msg(sb, KERN_ERR,
"error: revision level too high, "
"forcing read-only mode");
- res = MS_RDONLY;
+ res = SB_RDONLY;
}
if (read_only)
return res;
sbi->s_resuid = opts.s_resuid;
sbi->s_resgid = opts.s_resgid;
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
- MS_POSIXACL : 0);
+ SB_POSIXACL : 0);
sb->s_iflags |= SB_I_CGROUPWB;
if (le32_to_cpu(es->s_rev_level) == EXT2_GOOD_OLD_REV &&
ext2_msg(sb, KERN_WARNING,
"warning: mounting ext3 filesystem as ext2");
if (ext2_setup_super (sb, es, sb_rdonly(sb)))
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ext2_write_super(sb);
return 0;
"dax flag with busy inodes while remounting");
new_opts.s_mount_opt ^= EXT2_MOUNT_DAX;
}
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out_set;
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
if (le16_to_cpu(es->s_state) & EXT2_VALID_FS ||
!(sbi->s_mount_state & EXT2_VALID_FS))
goto out_set;
*/
sbi->s_mount_state = le16_to_cpu(es->s_state);
if (!ext2_setup_super (sb, es, 0))
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
spin_unlock(&sbi->s_lock);
ext2_write_super(sb);
sbi->s_mount_opt = new_opts.s_mount_opt;
sbi->s_resuid = new_opts.s_resuid;
sbi->s_resgid = new_opts.s_resgid;
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ ((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
spin_unlock(&sbi->s_lock);
return 0;
* If the filesystem has aborted, it is read-only, so return
* right away instead of dumping stack traces later on that
* will obscure the real source of the problem. We test
- * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
+ * EXT4_MF_FS_ABORTED instead of sb->s_flag's SB_RDONLY because
* the latter could be true if the filesystem is mounted
* read-only, and in that case, ext4_writepages should
* *never* be called, so if that ever happens, we would want
ext4_inode_csum_set(inode, raw_inode, ei);
spin_unlock(&ei->i_raw_lock);
- if (inode->i_sb->s_flags & MS_LAZYTIME)
+ if (inode->i_sb->s_flags & SB_LAZYTIME)
ext4_update_other_inodes_time(inode->i_sb, inode->i_ino,
bh->b_data);
* before ->s_flags update
*/
smp_wmb();
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
if (test_opt(sb, ERRORS_PANIC)) {
if (EXT4_SB(sb)->s_journal &&
* before ->s_flags update
*/
smp_wmb();
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
if (EXT4_SB(sb)->s_journal)
jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
save_error_info(sb, function, line);
sb->s_flags |= SB_I_VERSION;
return 1;
case Opt_lazytime:
- sb->s_flags |= MS_LAZYTIME;
+ sb->s_flags |= SB_LAZYTIME;
return 1;
case Opt_nolazytime:
- sb->s_flags &= ~MS_LAZYTIME;
+ sb->s_flags &= ~SB_LAZYTIME;
return 1;
}
if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
ext4_msg(sb, KERN_ERR, "revision level too high, "
"forcing read-only mode");
- res = MS_RDONLY;
+ res = SB_RDONLY;
}
if (read_only)
goto done;
if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
/* don't clear list on RO mount w/ errors */
- if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
+ if (es->s_last_orphan && !(s_flags & SB_RDONLY)) {
ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
"clearing orphan list.\n");
es->s_last_orphan = 0;
return;
}
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
}
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
/*
* Turn on quotas which were not enabled for read-only mounts if
* filesystem has quota feature, so that they are updated correctly.
*/
- if (ext4_has_feature_quota(sb) && (s_flags & MS_RDONLY)) {
+ if (ext4_has_feature_quota(sb) && (s_flags & SB_RDONLY)) {
int ret = ext4_enable_quotas(sb);
if (!ret)
}
}
#endif
- sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+ sb->s_flags = s_flags; /* Restore SB_RDONLY status */
}
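Orphan cleanup briefly lifts SB_RDONLY and sets SB_ACTIVE so that
iput() behaves normally, then restores the saved flags; the f2fs hunks
below use the same idiom. A sketch of the save/flip/restore pattern
with stand-in flag bits (the values are illustrative, not the kernel's)::

    #include <stdio.h>

    #define SB_RDONLY (1u << 0)  /* stand-in bit values */
    #define SB_ACTIVE (1u << 1)

    static unsigned int orphan_cleanup(unsigned int s_flags)
    {
        unsigned int saved = s_flags;

        s_flags &= ~SB_RDONLY;  /* allow writes during cleanup */
        s_flags |= SB_ACTIVE;   /* make iput() do real work */

        /* ... replay the orphan list here ... */

        return saved;           /* restore SB_RDONLY status */
    }

    int main(void)
    {
        unsigned int flags = SB_RDONLY;
        flags = orphan_cleanup(flags);
        printf("restored=%#x\n", flags);
        return 0;
    }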
/*
if (ext4_has_feature_readonly(sb)) {
ext4_msg(sb, KERN_INFO, "filesystem is read-only");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
return 1;
}
sb->s_iflags |= SB_I_CGROUPWB;
}
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
(ext4_has_compat_features(sb) ||
}
if (ext4_setup_super(sb, es, sb_rdonly(sb)))
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/* determine the minimum size of new large inodes, if present */
if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE &&
* the clock is set in the future, and this will cause e2fsck
* to complain and force a full file system check.
*/
- if (!(sb->s_flags & MS_RDONLY))
+ if (!(sb->s_flags & SB_RDONLY))
es->s_wtime = cpu_to_le32(get_seconds());
if (sb->s_bdev->bd_part)
es->s_kbytes_written =
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
ext4_abort(sb, "Abort forced by user");
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sb, POSIX_ACL) ? SB_POSIXACL : 0);
es = sbi->s_es;
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
}
- if (*flags & MS_LAZYTIME)
- sb->s_flags |= MS_LAZYTIME;
+ if (*flags & SB_LAZYTIME)
+ sb->s_flags |= SB_LAZYTIME;
- if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
err = -EROFS;
goto restore_opts;
}
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
err = sync_filesystem(sb);
if (err < 0)
goto restore_opts;
* First of all, the unconditional stuff we have to do
* to disable replay of the journal when we next remount
*/
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/*
* OK, test if we are remounting a valid rw partition
ext4_clear_journal_err(sb, es);
sbi->s_mount_state = le16_to_cpu(es->s_state);
if (!ext4_setup_super(sb, es, 0))
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
if (ext4_has_feature_mmp(sb))
if (ext4_multi_mount_protect(sb,
le64_to_cpu(es->s_mmp_block))) {
}
ext4_setup_system_zone(sb);
- if (sbi->s_journal == NULL && !(old_sb_flags & MS_RDONLY))
+ if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY))
ext4_commit_super(sb, 1);
#ifdef CONFIG_QUOTA
}
#endif
- *flags = (*flags & ~MS_LAZYTIME) | (sb->s_flags & MS_LAZYTIME);
+ *flags = (*flags & ~SB_LAZYTIME) | (sb->s_flags & SB_LAZYTIME);
ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
kfree(orig_data);
return 0;
if (!is_set_ckpt_flags(sbi, CP_ORPHAN_PRESENT_FLAG))
return 0;
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
- sbi->sb->s_flags &= ~MS_RDONLY;
+ sbi->sb->s_flags &= ~SB_RDONLY;
}
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
- sbi->sb->s_flags |= MS_ACTIVE;
+ sbi->sb->s_flags |= SB_ACTIVE;
/* Turn on quotas so that they are updated correctly */
- quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+ quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
start_blk = __start_cp_addr(sbi) + 1 + __cp_payload(sbi);
if (quota_enabled)
f2fs_quota_off_umount(sbi->sb);
#endif
- sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+ sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
return err;
}
static inline int f2fs_readonly(struct super_block *sb)
{
- return sb->s_flags & MS_RDONLY;
+ return sb->s_flags & SB_RDONLY;
}
static inline bool f2fs_cp_error(struct f2fs_sb_info *sbi)
cpc.reason = __get_cp_reason(sbi);
gc_more:
- if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
+ if (unlikely(!(sbi->sb->s_flags & SB_ACTIVE))) {
ret = -EINVAL;
goto stop;
}
int quota_enabled;
#endif
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
- sbi->sb->s_flags &= ~MS_RDONLY;
+ sbi->sb->s_flags &= ~SB_RDONLY;
}
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
- sbi->sb->s_flags |= MS_ACTIVE;
+ sbi->sb->s_flags |= SB_ACTIVE;
/* Turn on quotas so that they are updated correctly */
- quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
+ quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
fsync_entry_slab = f2fs_kmem_cache_create("f2fs_fsync_inode_entry",
if (quota_enabled)
f2fs_quota_off_umount(sbi->sb);
#endif
- sbi->sb->s_flags = s_flags; /* Restore MS_RDONLY status */
+ sbi->sb->s_flags = s_flags; /* Restore SB_RDONLY status */
return ret ? ret: err;
}
#endif
break;
case Opt_lazytime:
- sb->s_flags |= MS_LAZYTIME;
+ sb->s_flags |= SB_LAZYTIME;
break;
case Opt_nolazytime:
- sb->s_flags &= ~MS_LAZYTIME;
+ sb->s_flags &= ~SB_LAZYTIME;
break;
#ifdef CONFIG_QUOTA
case Opt_quota:
set_opt(sbi, INLINE_DENTRY);
set_opt(sbi, EXTENT_CACHE);
set_opt(sbi, NOHEAP);
- sbi->sb->s_flags |= MS_LAZYTIME;
+ sbi->sb->s_flags |= SB_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
if (f2fs_sb_mounted_blkzoned(sbi->sb)) {
set_opt_mode(sbi, F2FS_MOUNT_LFS);
#endif
/* recover superblocks we couldn't write due to previous RO mount */
- if (!(*flags & MS_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
+ if (!(*flags & SB_RDONLY) && is_sbi_flag_set(sbi, SBI_NEED_SB_WRITE)) {
err = f2fs_commit_super(sbi, false);
f2fs_msg(sb, KERN_INFO,
"Try to recover all the superblocks, ret: %d", err);
* Previous and new state of filesystem is RO,
* so skip checking GC and FLUSH_MERGE conditions.
*/
- if (f2fs_readonly(sb) && (*flags & MS_RDONLY))
+ if (f2fs_readonly(sb) && (*flags & SB_RDONLY))
goto skip;
#ifdef CONFIG_QUOTA
- if (!f2fs_readonly(sb) && (*flags & MS_RDONLY)) {
+ if (!f2fs_readonly(sb) && (*flags & SB_RDONLY)) {
err = dquot_suspend(sb, -1);
if (err < 0)
goto restore_opts;
} else {
/* dquot_resume needs RW */
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
if (sb_any_quota_suspended(sb)) {
dquot_resume(sb, -1);
} else if (f2fs_sb_has_quota_ino(sb)) {
* or if background_gc = off is passed in mount
* option. Also sync the filesystem.
*/
- if ((*flags & MS_RDONLY) || !test_opt(sbi, BG_GC)) {
+ if ((*flags & SB_RDONLY) || !test_opt(sbi, BG_GC)) {
if (sbi->gc_thread) {
stop_gc_thread(sbi);
need_restart_gc = true;
need_stop_gc = true;
}
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
writeback_inodes_sb(sb, WB_REASON_SYNC);
sync_inodes_sb(sb);
* We stop issue flush thread if FS is mounted as RO
* or if flush_merge is not passed in mount option.
*/
- if ((*flags & MS_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
+ if ((*flags & SB_RDONLY) || !test_opt(sbi, FLUSH_MERGE)) {
clear_opt(sbi, FLUSH_MERGE);
destroy_flush_cmd_control(sbi, false);
} else {
kfree(s_qf_names[i]);
#endif
/* Update the POSIXACL Flag */
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
return 0;
restore_gc:
sb->s_export_op = &f2fs_export_ops;
sb->s_magic = F2FS_SUPER_MAGIC;
sb->s_time_gran = 1;
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
- (test_opt(sbi, POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
+ (test_opt(sbi, POSIX_ACL) ? SB_POSIXACL : 0);
memcpy(&sb->s_uuid, raw_super->uuid, sizeof(raw_super->uuid));
/* init f2fs-specific super block info */
memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
set_buffer_uptodate(c_bh);
mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
err = sync_dirty_buffer(c_bh);
brelse(c_bh);
if (err)
}
if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
- if (sb->s_flags & MS_SYNCHRONOUS) {
+ if (sb->s_flags & SB_SYNCHRONOUS) {
err = fat_sync_bhs(bhs, nr_bhs);
if (err)
goto error;
fat_collect_bhs(bhs, &nr_bhs, &fatent);
} while (cluster != FAT_ENT_EOF);
- if (sb->s_flags & MS_SYNCHRONOUS) {
+ if (sb->s_flags & SB_SYNCHRONOUS) {
err = fat_sync_bhs(bhs, nr_bhs);
if (err)
goto error;
static int fat_remount(struct super_block *sb, int *flags, char *data)
{
- int new_rdonly;
+ bool new_rdonly;
struct msdos_sb_info *sbi = MSDOS_SB(sb);
- *flags |= MS_NODIRATIME | (sbi->options.isvfat ? 0 : MS_NOATIME);
+ *flags |= SB_NODIRATIME | (sbi->options.isvfat ? 0 : SB_NOATIME);
sync_filesystem(sb);
/* make sure we update state on remount. */
- new_rdonly = *flags & MS_RDONLY;
+ new_rdonly = *flags & SB_RDONLY;
if (new_rdonly != sb_rdonly(sb)) {
if (new_rdonly)
fat_set_state(sb, 0, 0);
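The int-to-bool change for new_rdonly is subtle: the variable is compared against sb_rdonly(), which returns bool. Because SB_RDONLY happens to be bit 0, the masked int is 0 or 1 and the old code worked by accident; for any higher-bit flag an int local would break the comparison. A sketch of the pitfall, using a hypothetical flag value:

::

    #include <linux/types.h>

    #define EXAMPLE_FLAG (1 << 10)    /* hypothetical: not bit 0 */

    static bool example(unsigned int flags)
    {
            int  as_int  = flags & EXAMPLE_FLAG;   /* 0 or 1024 */
            bool as_bool = flags & EXAMPLE_FLAG;   /* 0 or 1   */

            /*
             * Comparing as_int against a bool tests 1024 != 1 and is
             * always "different"; the bool local normalizes the mask.
             */
            return as_bool;
    }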
if (opts->unicode_xlate)
opts->utf8 = 0;
if (opts->nfs == FAT_NFS_NOSTALE_RO) {
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
sb->s_export_op = &fat_export_ops_nostale;
}
return -ENOMEM;
sb->s_fs_info = sbi;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
sb->s_magic = MSDOS_SUPER_MAGIC;
sb->s_op = &fat_sops;
sb->s_export_op = &fat_export_ops;
if (opts->errors == FAT_ERRORS_PANIC)
panic("FAT-fs (%s): fs panic from previous error\n", sb->s_id);
else if (opts->errors == FAT_ERRORS_RO && !sb_rdonly(sb)) {
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
fat_msg(sb, KERN_ERR, "Filesystem has been set read-only");
}
}
{
MSDOS_SB(sb)->dir_ops = &msdos_dir_inode_operations;
sb->s_d_op = &msdos_dentry_operations;
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
}
static int msdos_fill_super(struct super_block *sb, void *data, int silent)
static int vxfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
int ret = -EINVAL;
u32 j;
- sbp->s_flags |= MS_RDONLY;
+ sbp->s_flags |= SB_RDONLY;
infp = kzalloc(sizeof(*infp), GFP_KERNEL);
if (!infp) {
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
- if (!(inode->i_sb->s_flags & MS_ACTIVE) ||
+ if (!(inode->i_sb->s_flags & SB_ACTIVE) ||
inode->i_state & (I_WB_SWITCH | I_FREEING) ||
inode_to_wb(inode) == isw->new_wb) {
spin_unlock(&inode->i_lock);
{
truncate_inode_pages_final(&inode->i_data);
clear_inode(inode);
- if (inode->i_sb->s_flags & MS_ACTIVE) {
+ if (inode->i_sb->s_flags & SB_ACTIVE) {
struct fuse_conn *fc = get_fuse_conn(inode);
struct fuse_inode *fi = get_fuse_inode(inode);
fuse_queue_forget(fc, fi->forget, fi->nodeid, fi->nlookup);
static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- if (*flags & MS_MANDLOCK)
+ if (*flags & SB_MANDLOCK)
return -EINVAL;
return 0;
int is_bdev = sb->s_bdev != NULL;
err = -EINVAL;
- if (sb->s_flags & MS_MANDLOCK)
+ if (sb->s_flags & SB_MANDLOCK)
goto err;
- sb->s_flags &= ~(MS_NOSEC | SB_I_VERSION);
+ sb->s_flags &= ~(SB_NOSEC | SB_I_VERSION);
if (!parse_fuse_opt(data, &d, is_bdev))
goto err;
goto err_dev_free;
/* Handle umasking inside the fuse code */
- if (sb->s_flags & MS_POSIXACL)
+ if (sb->s_flags & SB_POSIXACL)
fc->dont_mask = 1;
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
fc->default_permissions = d.default_permissions;
fc->allow_other = d.allow_other;
sdp->sd_args = *args;
if (sdp->sd_args.ar_spectator) {
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
set_bit(SDF_RORECOVERY, &sdp->sd_flags);
}
if (sdp->sd_args.ar_posix_acl)
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
- sb->s_flags |= MS_NOSEC;
+ sb->s_flags |= SB_NOSEC;
sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
sb->s_d_op = &gfs2_dops;
struct gfs2_args args;
struct gfs2_sbd *sdp;
- if (!(flags & MS_RDONLY))
+ if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
bdev = blkdev_get_by_path(dev_name, mode, fs_type);
if (s->s_root) {
error = -EBUSY;
- if ((flags ^ s->s_flags) & MS_RDONLY)
+ if ((flags ^ s->s_flags) & SB_RDONLY)
goto error_super;
} else {
snprintf(s->s_id, sizeof(s->s_id), "%pg", bdev);
sb_set_blocksize(s, block_size(bdev));
- error = fill_super(s, &args, flags & MS_SILENT ? 1 : 0);
+ error = fill_super(s, &args, flags & SB_SILENT ? 1 : 0);
if (error)
goto error_super;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
bdev->bd_super = s;
}
pr_warn("gfs2 mount does not exist\n");
return ERR_CAST(s);
}
- if ((flags ^ s->s_flags) & MS_RDONLY) {
+ if ((flags ^ s->s_flags) & SB_RDONLY) {
deactivate_locked_super(s);
return ERR_PTR(-EBUSY);
}
return -EINVAL;
if (sdp->sd_args.ar_spectator)
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
- if ((sb->s_flags ^ *flags) & MS_RDONLY) {
- if (*flags & MS_RDONLY)
+ if ((sb->s_flags ^ *flags) & SB_RDONLY) {
+ if (*flags & SB_RDONLY)
error = gfs2_make_fs_ro(sdp);
else
error = gfs2_make_fs_rw(sdp);
sdp->sd_args = args;
if (sdp->sd_args.ar_posix_acl)
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
else
- sb->s_flags &= ~MS_POSIXACL;
+ sb->s_flags &= ~SB_POSIXACL;
if (sdp->sd_args.ar_nobarrier)
set_bit(SDF_NOBARRIERS, &sdp->sd_flags);
else
kfree(tr);
up_read(&sdp->sd_log_flush_lock);
- if (sdp->sd_vfs->s_flags & MS_SYNCHRONOUS)
+ if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
gfs2_log_flush(sdp, NULL, NORMAL_FLUSH);
if (alloced)
sb_end_intwrite(sdp->sd_vfs);
attrib = mdb->drAtrb;
if (!(attrib & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
if ((attrib & cpu_to_be16(HFS_SB_ATTRIB_SLOCK))) {
pr_warn("filesystem is marked locked, mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
if (!sb_rdonly(sb)) {
/* Mark the volume uncleanly unmounted in case we crash */
static int hfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ *flags |= SB_NODIRATIME;
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (!(*flags & MS_RDONLY)) {
+ if (!(*flags & SB_RDONLY)) {
if (!(HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfs is recommended. leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
} else if (HFS_SB(sb)->mdb->drAtrb & cpu_to_be16(HFS_SB_ATTRIB_SLOCK)) {
pr_warn("filesystem is marked locked, leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
}
}
return 0;
sb->s_op = &hfs_super_operations;
sb->s_xattr = hfs_xattr_handlers;
- sb->s_flags |= MS_NODIRATIME;
+ sb->s_flags |= SB_NODIRATIME;
mutex_init(&sbi->bitmap_lock);
res = hfs_mdb_get(sb);
static int hfsplus_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (!(*flags & MS_RDONLY)) {
+ if (!(*flags & SB_RDONLY)) {
struct hfsplus_vh *vhdr = HFSPLUS_SB(sb)->s_vhdr;
int force = 0;
if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
pr_warn("filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
} else if (force) {
/* nothing */
} else if (vhdr->attributes &
cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
pr_warn("filesystem is marked locked, leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
} else if (vhdr->attributes &
cpu_to_be32(HFSPLUS_VOL_JOURNALED)) {
pr_warn("filesystem is marked journaled, leaving read-only.\n");
- sb->s_flags |= MS_RDONLY;
- *flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
+ *flags |= SB_RDONLY;
}
}
return 0;
if (!(vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_UNMNT))) {
pr_warn("Filesystem was not cleanly unmounted, running fsck.hfsplus is recommended. mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
} else if (test_and_clear_bit(HFSPLUS_SB_FORCE, &sbi->flags)) {
/* nothing */
} else if (vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_SOFTLOCK)) {
pr_warn("Filesystem is marked locked, mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
} else if ((vhdr->attributes & cpu_to_be32(HFSPLUS_VOL_JOURNALED)) &&
!sb_rdonly(sb)) {
pr_warn("write access to a journaled filesystem is not supported, use the force option at your own risk, mounting read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
err = -EINVAL;
goto bail;
}
if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) {
- if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok;
+ if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & SB_RDONLY) goto ok;
hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp);
goto bail;
}
else {
pr_cont("; remounting read-only\n");
mark_dirty(s, 0);
- s->s_flags |= MS_RDONLY;
+ s->s_flags |= SB_RDONLY;
}
} else if (sb_rdonly(s))
pr_cont("; going on - but anything won't be destroyed because it's read-only\n");
sync_filesystem(s);
- *flags |= MS_NOATIME;
+ *flags |= SB_NOATIME;
hpfs_lock(s);
uid = sbi->sb_uid; gid = sbi->sb_gid;
sbi->sb_eas = eas; sbi->sb_chk = chk; sbi->sb_chkdsk = chkdsk;
sbi->sb_err = errs; sbi->sb_timeshift = timeshift;
- if (!(*flags & MS_RDONLY)) mark_dirty(s, 1);
+ if (!(*flags & SB_RDONLY)) mark_dirty(s, 1);
hpfs_unlock(s);
return 0;
goto bail4;
}
- s->s_flags |= MS_NOATIME;
+ s->s_flags |= SB_NOATIME;
/* Fill superblock stuff */
s->s_magic = HPFS_SUPER_MAGIC;
mutex_unlock(&hugetlb_fault_mutex_table[hash]);
/*
- * page_put due to reference from alloc_huge_page()
* unlock_page because locked by add_to_page_cache()
+ * page_put due to reference from alloc_huge_page()
*/
- put_page(page);
unlock_page(page);
+ put_page(page);
}
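The hugetlbfs swap is an ordering fix, not a cosmetic one: put_page() may drop the last reference taken by alloc_huge_page(), after which the page can be freed, so unlock_page() must run while the reference is still held:

::

    unlock_page(page);   /* safe: we still hold a reference */
    put_page(page);      /* may free the page; must come last */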
if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
{
if (!(inode->i_state & (I_DIRTY_ALL | I_SYNC |
I_FREEING | I_WILL_FREE)) &&
- !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE)
+ !atomic_read(&inode->i_count) && inode->i_sb->s_flags & SB_ACTIVE)
inode_lru_list_add(inode);
}
* @sb: superblock to operate on
*
* Make sure that no inodes with zero refcount are retained. This is
- * called by superblock shutdown after having MS_ACTIVE flag removed,
+ * called by superblock shutdown after having SB_ACTIVE flag removed,
* so any inode reaching zero refcount during or after that call will
* be immediately evicted.
*/
else
drop = generic_drop_inode(inode);
- if (!drop && (sb->s_flags & MS_ACTIVE)) {
+ if (!drop && (sb->s_flags & SB_ACTIVE)) {
inode_add_lru(inode);
spin_unlock(&inode->i_lock);
return;
if (flags & S_MTIME)
inode->i_mtime = *time;
- if (!(inode->i_sb->s_flags & MS_LAZYTIME) || (flags & S_VERSION))
+ if (!(inode->i_sb->s_flags & SB_LAZYTIME) || (flags & S_VERSION))
iflags |= I_DIRTY_SYNC;
__mark_inode_dirty(inode, iflags);
return 0;
if (IS_NOATIME(inode))
return false;
- if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
+ if ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode))
return false;
if (mnt->mnt_flags & MNT_NOATIME)
static int isofs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- if (!(*flags & MS_RDONLY))
+ if (!(*flags & SB_RDONLY))
return -EROFS;
return 0;
}
mutex_unlock(&c->alloc_sem);
}
- if (!(*flags & MS_RDONLY))
+ if (!(*flags & SB_RDONLY))
jffs2_start_garbage_collect_thread(c);
- *flags |= MS_NOATIME;
+ *flags |= SB_NOATIME;
return 0;
}
}
-#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & MS_RDONLY)
+#define jffs2_is_readonly(c) (OFNI_BS_2SFFJ(c)->s_flags & SB_RDONLY)
#define SECTOR_ADDR(x) ( (((unsigned long)(x) / c->sector_size) * c->sector_size) )
#ifndef CONFIG_JFFS2_FS_WRITEBUFFER
sb->s_op = &jffs2_super_operations;
sb->s_export_op = &jffs2_export_ops;
- sb->s_flags = sb->s_flags | MS_NOATIME;
+ sb->s_flags = sb->s_flags | SB_NOATIME;
sb->s_xattr = jffs2_xattr_handlers;
#ifdef CONFIG_JFFS2_FS_POSIX_ACL
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
ret = jffs2_do_fill_super(sb, data, silent);
return ret;
else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
jfs_err("ERROR: (device %s): remounting filesystem as read-only",
sb->s_id);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
/* nothing is done for continue beyond marking the superblock dirty */
return rc;
}
- if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+ if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
/*
* Invalidate any previously read metadata. fsck may have
* changed the on-disk data since we mounted r/o
ret = jfs_mount_rw(sb, 1);
/* mark the fs r/w for quota activity */
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
dquot_resume(sb, -1);
return ret;
}
- if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+ if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
rc = dquot_suspend(sb, -1);
if (rc < 0)
return rc;
sbi->flag = flag;
#ifdef CONFIG_JFS_POSIX_ACL
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
if (newLVSize) {
deactivate_locked_super(sb);
return ERR_PTR(error);
}
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
mutex_lock(&kernfs_mutex);
list_add(&info->node, &root->supers);
struct inode *root;
struct qstr d_name = QSTR_INIT(name, strlen(name));
- s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+ s = sget_userns(fs_type, NULL, set_anon_super, SB_KERNMOUNT|SB_NOUSER,
&init_user_ns, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
d_instantiate(dentry, root);
s->s_root = dentry;
s->s_d_op = dops;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
return dget(s->s_root);
Enomem:
spin_lock(&pin_fs_lock);
if (unlikely(!*mount)) {
spin_unlock(&pin_fs_lock);
- mnt = vfs_kern_mount(type, MS_KERNMOUNT, type->name, NULL);
+ mnt = vfs_kern_mount(type, SB_KERNMOUNT, type->name, NULL);
if (IS_ERR(mnt))
return PTR_ERR(mnt);
spin_lock(&pin_fs_lock);
if (ln->nrhosts == 0)
return;
- printk(KERN_WARNING "lockd: couldn't shutdown host module for net %p!\n", net);
- dprintk("lockd: %lu hosts left in net %p:\n", ln->nrhosts, net);
+ pr_warn("lockd: couldn't shutdown host module for net %x!\n",
+ net->ns.inum);
+ dprintk("lockd: %lu hosts left in net %x:\n", ln->nrhosts,
+ net->ns.inum);
} else {
if (nrhosts == 0)
return;
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
- dprintk(" %s (cnt %d use %d exp %ld net %p)\n",
+ dprintk(" %s (cnt %d use %d exp %ld net %x)\n",
host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires, host->net);
+ host->h_inuse, host->h_expires, host->net->ns.inum);
}
}
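These lockd hunks (and the matching nfsd ones further down) replace struct net pointers in log messages with net->ns.inum. Since plain %p now prints a hashed value, a raw pointer no longer correlates across log lines or with anything the administrator can see; the namespace inode number is stable and matches what userspace observes:

::

    /* %x of net->ns.inum corresponds to, e.g.:
     *   $ readlink /proc/self/ns/net
     *   net:[4026531993]
     */
    dprintk("lockd: example for net %x\n", net->ns.inum);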
mutex_lock(&nlm_host_mutex);
/* First, make all hosts eligible for gc */
- dprintk("lockd: nuking all hosts in net %p...\n", net);
+ dprintk("lockd: nuking all hosts in net %x...\n",
+ net ? net->ns.inum : 0);
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
/* Then, perform a garbage collection pass */
nlm_gc_hosts(net);
- mutex_unlock(&nlm_host_mutex);
-
nlm_complain_hosts(net);
+ mutex_unlock(&nlm_host_mutex);
}
/*
struct hlist_node *next;
struct nlm_host *host;
- dprintk("lockd: host garbage collection for net %p\n", net);
+ dprintk("lockd: host garbage collection for net %x\n",
+ net ? net->ns.inum : 0);
for_each_host(host, chain, nlm_server_hosts) {
if (net && host->net != net)
continue;
if (atomic_read(&host->h_count) || host->h_inuse
|| time_before(jiffies, host->h_expires)) {
dprintk("nlm_gc_hosts skipping %s "
- "(cnt %d use %d exp %ld net %p)\n",
+ "(cnt %d use %d exp %ld net %x)\n",
host->h_name, atomic_read(&host->h_count),
- host->h_inuse, host->h_expires, host->net);
+ host->h_inuse, host->h_expires,
+ host->net->ns.inum);
continue;
}
nlm_destroy_host_locked(host);
clnt = nsm_create(host->net, host->nodename);
if (IS_ERR(clnt)) {
dprintk("lockd: failed to create NSM upcall transport, "
- "status=%ld, net=%p\n", PTR_ERR(clnt), host->net);
+ "status=%ld, net=%x\n", PTR_ERR(clnt),
+ host->net->ns.inum);
return PTR_ERR(clnt);
}
static struct svc_rqst *nlmsvc_rqst;
unsigned long nlmsvc_timeout;
+atomic_t nlm_ntf_refcnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(nlm_ntf_wq);
+
unsigned int lockd_net_id;
/*
if (error < 0)
goto err_bind;
set_grace_period(net);
- dprintk("lockd_up_net: per-net data created; net=%p\n", net);
+ dprintk("%s: per-net data created; net=%x\n", __func__, net->ns.inum);
return 0;
err_bind:
if (ln->nlmsvc_users) {
if (--ln->nlmsvc_users == 0) {
nlm_shutdown_hosts_net(net);
+ cancel_delayed_work_sync(&ln->grace_period_end);
+ locks_end_grace(&ln->lockd_manager);
svc_shutdown_net(serv, net);
- dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
+ dprintk("%s: per-net data destroyed; net=%x\n",
+ __func__, net->ns.inum);
}
} else {
- printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
- nlmsvc_task, net);
+ pr_err("%s: no users! task=%p, net=%x\n",
+ __func__, nlmsvc_task, net->ns.inum);
BUG();
}
}
struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
struct sockaddr_in sin;
- if (event != NETDEV_DOWN)
+ if ((event != NETDEV_DOWN) ||
+ !atomic_inc_not_zero(&nlm_ntf_refcnt))
goto out;
if (nlmsvc_rqst) {
svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
(struct sockaddr *)&sin);
}
+ atomic_dec(&nlm_ntf_refcnt);
+ wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
struct inet6_ifaddr *ifa = (struct inet6_ifaddr *)ptr;
struct sockaddr_in6 sin6;
- if (event != NETDEV_DOWN)
+ if ((event != NETDEV_DOWN) ||
+ !atomic_inc_not_zero(&nlm_ntf_refcnt))
goto out;
if (nlmsvc_rqst) {
svc_age_temp_xprts_now(nlmsvc_rqst->rq_server,
(struct sockaddr *)&sin6);
}
+ atomic_dec(&nlm_ntf_refcnt);
+ wake_up(&nlm_ntf_wq);
out:
return NOTIFY_DONE;
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&lockd_inet6addr_notifier);
#endif
+ wait_event(nlm_ntf_wq, atomic_read(&nlm_ntf_refcnt) == 0);
}
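Taken together, the nlm_ntf_refcnt and nlm_ntf_wq additions close a shutdown race: an inetaddr/inet6addr notifier could still be executing on another CPU while lockd tears down nlmsvc_rqst. The idiom is a "tryget" counter plus a waitqueue: the notifier proceeds only if it can increment a nonzero count, and teardown drops its own reference and sleeps until every in-flight notifier has finished. A condensed sketch of the pattern (the real code takes its baseline reference when the service starts, and nfsd gains the same machinery later in this section):

::

    #include <linux/atomic.h>
    #include <linux/wait.h>

    static atomic_t ntf_refcnt = ATOMIC_INIT(1);   /* baseline reference */
    static DECLARE_WAIT_QUEUE_HEAD(ntf_wq);

    static int example_notifier_body(void)
    {
            if (!atomic_inc_not_zero(&ntf_refcnt))
                    return 0;            /* tearing down: do nothing */
            /* ... safe to touch the shared service state here ... */
            atomic_dec(&ntf_refcnt);
            wake_up(&ntf_wq);
            return 0;
    }

    static void example_teardown(void)
    {
            atomic_dec(&ntf_refcnt);     /* drop the baseline reference */
            /* unregister the notifiers, then wait out stragglers */
            wait_event(ntf_wq, atomic_read(&ntf_refcnt) == 0);
    }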
static void lockd_svc_exit_thread(void)
{
+ atomic_dec(&nlm_ntf_refcnt);
lockd_unregister_notifiers();
svc_exit_thread(nlmsvc_rqst);
}
goto out_rqst;
}
+ atomic_inc(&nlm_ntf_refcnt);
svc_sock_update_bufs(serv);
serv->sv_maxconn = nlm_max_connections;
static void lockd_exit_net(struct net *net)
{
+ struct lockd_net *ln = net_generic(net, lockd_net_id);
+
+ WARN_ONCE(!list_empty(&ln->lockd_manager.list),
+ "net %x %s: lockd_manager.list is not empty\n",
+ net->ns.inum, __func__);
+ WARN_ONCE(!list_empty(&ln->nsm_handles),
+ "net %x %s: nsm_handles list is not empty\n",
+ net->ns.inum, __func__);
+ WARN_ONCE(delayed_work_pending(&ln->grace_period_end),
+ "net %x %s: grace_period_end was not cancelled\n",
+ net->ns.inum, __func__);
}
static struct pernet_operations lockd_net_ops = {
{
struct nlm_host hint;
- dprintk("lockd: nlmsvc_mark_resources for net %p\n", net);
+ dprintk("lockd: %s for net %x\n", __func__, net ? net->ns.inum : 0);
hint.net = net;
nlm_traverse_files(&hint, nlmsvc_mark_host, NULL);
}
static inline bool is_remote_lock(struct file *filp)
{
- return likely(!(filp->f_path.dentry->d_sb->s_flags & MS_NOREMOTELOCK));
+ return likely(!(filp->f_path.dentry->d_sb->s_flags & SB_NOREMOTELOCK));
}
static bool lease_breaking(struct file_lock *fl)
struct mb_cache *cache = container_of(shrink, struct mb_cache,
c_shrink);
+ /* Unlikely, but not impossible */
+ if (unlikely(cache->c_entry_count < 0))
+ return 0;
return cache->c_entry_count;
}
sync_filesystem(sb);
ms = sbi->s_ms;
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
return 0;
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
if (ms->s_state & MINIX_VALID_FS ||
!(sbi->s_mount_state & MINIX_VALID_FS))
return 0;
* of the daemon to instantiate them before they can be used.
*/
if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
- LOOKUP_OPEN | LOOKUP_CREATE |
- LOOKUP_AUTOMOUNT))) {
- /* Positive dentry that isn't meant to trigger an
- * automount, EISDIR will allow it to be used,
- * otherwise there's no mount here "now" so return
- * ENOENT.
- */
- if (path->dentry->d_inode)
- return -EISDIR;
- else
- return -ENOENT;
- }
+ LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+ path->dentry->d_inode)
+ return -EISDIR;
if (path->dentry->d_sb->s_user_ns != &init_user_ns)
return -EACCES;
static int ncp_remount(struct super_block *sb, int *flags, char* data)
{
sync_filesystem(sb);
- *flags |= MS_NODIRATIME;
+ *flags |= SB_NODIRATIME;
return 0;
}
else
default_bufsize = 1024;
- sb->s_flags |= MS_NODIRATIME; /* probably even noatime */
+ sb->s_flags |= SB_NODIRATIME; /* probably even noatime */
sb->s_maxbytes = 0xFFFFFFFFU;
sb->s_blocksize = 1024; /* Eh... Is this correct? */
sb->s_blocksize_bits = 10;
/* Unhash it, so that ->d_iput() would be called */
return 1;
}
- if (!(dentry->d_sb->s_flags & MS_ACTIVE)) {
+ if (!(dentry->d_sb->s_flags & SB_ACTIVE)) {
/* Unhash it, so that ancestors of killed async unlink
* files will be cleaned up during umount */
return 1;
* Note that we only have to check the vfsmount flags here:
 * - NFS always sets S_NOATIME, so checking it would give a
* bogus result
- * - NFS never sets MS_NOATIME or MS_NODIRATIME so there is
+ * - NFS never sets SB_NOATIME or SB_NODIRATIME so there is
* no point in checking those.
*/
if ((path->mnt->mnt_flags & MNT_NOATIME) ||
#include <linux/nfs_page.h>
#include <linux/wait_bit.h>
-#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
+#define NFS_MS_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS)
extern const struct export_operations nfs_export_ops;
*/
seq_printf(m, "\n\topts:\t");
seq_puts(m, sb_rdonly(root->d_sb) ? "ro" : "rw");
- seq_puts(m, root->d_sb->s_flags & MS_SYNCHRONOUS ? ",sync" : "");
- seq_puts(m, root->d_sb->s_flags & MS_NOATIME ? ",noatime" : "");
- seq_puts(m, root->d_sb->s_flags & MS_NODIRATIME ? ",nodiratime" : "");
+ seq_puts(m, root->d_sb->s_flags & SB_SYNCHRONOUS ? ",sync" : "");
+ seq_puts(m, root->d_sb->s_flags & SB_NOATIME ? ",noatime" : "");
+ seq_puts(m, root->d_sb->s_flags & SB_NODIRATIME ? ",nodiratime" : "");
nfs_show_mount_options(m, nfss, 1);
seq_printf(m, "\n\tage:\t%lu", (jiffies - nfss->mount_time) / HZ);
/*
* noac is a special case. It implies -o sync, but that's not
* necessarily reflected in the mtab options. do_remount_sb
- * will clear MS_SYNCHRONOUS if -o sync wasn't specified in the
+ * will clear SB_SYNCHRONOUS if -o sync wasn't specified in the
* remount options, so we have to explicitly reset it.
*/
if (data->flags & NFS_MOUNT_NOAC)
- *flags |= MS_SYNCHRONOUS;
+ *flags |= SB_SYNCHRONOUS;
/* compare new mount options with old ones */
error = nfs_compare_remount_data(nfss, data);
/* The VFS shouldn't apply the umask to mode bits. We will do
* so ourselves when necessary.
*/
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
sb->s_time_gran = 1;
sb->s_export_op = &nfs_export_ops;
}
/* The VFS shouldn't apply the umask to mode bits. We will do
* so ourselves when necessary.
*/
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
}
nfs_initialise_sb(sb);
/* -o noac implies -o sync */
if (server->flags & NFS_MOUNT_NOAC)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+ sb_mntdata.mntflags |= SB_SYNCHRONOUS;
if (mount_info->cloned != NULL && mount_info->cloned->sb != NULL)
- if (mount_info->cloned->sb->s_flags & MS_SYNCHRONOUS)
- sb_mntdata.mntflags |= MS_SYNCHRONOUS;
+ if (mount_info->cloned->sb->s_flags & SB_SYNCHRONOUS)
+ sb_mntdata.mntflags |= SB_SYNCHRONOUS;
/* Get a superblock - note that we may end up sharing one that already exists */
s = sget(nfs_mod->nfs_fs, compare_super, nfs_set_super, flags, &sb_mntdata);
if (error)
goto error_splat_root;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
out:
return mntroot;
struct list_head *grace_list = net_generic(net, grace_net_id);
spin_lock(&grace_lock);
- list_add(&lm->list, grace_list);
+ if (list_empty(&lm->list))
+ list_add(&lm->list, grace_list);
+ else
+ WARN(1, "double list_add attempt detected in net %x %s\n",
+ net->ns.inum, (net == &init_net) ? "(init_net)" : "");
spin_unlock(&grace_lock);
}
EXPORT_SYMBOL_GPL(locks_start_grace);
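The list_empty() guard makes locks_start_grace() idempotent: calling list_add() on an entry that is already linked corrupts the list. Note that list_empty() only works as a "not linked" test when entries are initialized and unlinked with the _init variants, which is exactly why a later hunk adds INIT_LIST_HEAD(&nn->nfsd4_manager.list) to nfs4_state_create_net(). A sketch:

::

    #include <linux/list.h>

    static LIST_HEAD(head);

    static void example(struct list_head *entry)
    {
            INIT_LIST_HEAD(entry);    /* list_empty(entry): true, unlinked */
            list_add(entry, &head);   /* now linked; list_empty() is false */
            list_del_init(entry);     /* re-inits, so the test works again */
    }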
{
struct list_head *grace_list = net_generic(net, grace_net_id);
- BUG_ON(!list_empty(grace_list));
+ WARN_ONCE(!list_empty(grace_list),
+ "net %x %s: grace_list is not empty\n",
+ net->ns.inum, __func__);
}
static struct pernet_operations grace_net_ops = {
return NULL;
}
-static struct cache_detail svc_expkey_cache_template = {
+static const struct cache_detail svc_expkey_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPKEY_HASHMAX,
.name = "nfsd.fh",
return NULL;
}
-static struct cache_detail svc_export_cache_template = {
+static const struct cache_detail svc_export_cache_template = {
.owner = THIS_MODULE,
.hash_size = EXPORT_HASHMAX,
.name = "nfsd.export",
int rv;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- dprintk("nfsd: initializing export module (net: %p).\n", net);
+ dprintk("nfsd: initializing export module (net: %x).\n", net->ns.inum);
nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
if (IS_ERR(nn->svc_export_cache))
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
- dprintk("nfsd: shutting down export module (net: %p).\n", net);
+ dprintk("nfsd: shutting down export module (net: %x).\n", net->ns.inum);
cache_unregister_net(nn->svc_expkey_cache, net);
cache_unregister_net(nn->svc_export_cache, net);
cache_destroy_net(nn->svc_export_cache, net);
svcauth_unix_purge(net);
- dprintk("nfsd: export shutdown complete (net: %p).\n", net);
+ dprintk("nfsd: export shutdown complete (net: %x).\n", net->ns.inum);
}
u32 clverifier_counter;
struct svc_serv *nfsd_serv;
+
+ wait_queue_head_t ntf_wq;
+ atomic_t ntf_refcnt;
};
/* Simple check to find out if a given net was properly initialized */
static struct ent *idtoname_update(struct cache_detail *, struct ent *,
struct ent *);
-static struct cache_detail idtoname_cache_template = {
+static const struct cache_detail idtoname_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
.name = "nfs4.idtoname",
struct ent *);
static int nametoid_parse(struct cache_detail *, char *, int);
-static struct cache_detail nametoid_cache_template = {
+static const struct cache_detail nametoid_cache_template = {
.owner = THIS_MODULE,
.hash_size = ENT_HASHMAX,
.name = "nfs4.nametoid",
static const stateid_t currentstateid = {
.si_generation = 1,
};
+static const stateid_t close_stateid = {
+ .si_generation = 0xffffffffU,
+};
static u64 current_sessionid = 1;
#define ZERO_STATEID(stateid) (!memcmp((stateid), &zero_stateid, sizeof(stateid_t)))
#define ONE_STATEID(stateid) (!memcmp((stateid), &one_stateid, sizeof(stateid_t)))
#define CURRENT_STATEID(stateid) (!memcmp((stateid), &currentstateid, sizeof(stateid_t)))
+#define CLOSE_STATEID(stateid) (!memcmp((stateid), &close_stateid, sizeof(stateid_t)))
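close_stateid is the "invalid" special stateid of RFC 5661: a seqid of all ones with an all-zero other field. NFSv4.1 CLOSE frees the open stateid outright, so the reply carries this sentinel instead of a value the client might try to reuse, and CLOSE_STATEID() lets the server reject any incoming stateid that matches it, as the check_stateid hunks below do.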
/* forward declarations */
static bool check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner);
*/
static DEFINE_SPINLOCK(state_lock);
+enum nfsd4_st_mutex_lock_subclass {
+ OPEN_STATEID_MUTEX = 0,
+ LOCK_STATEID_MUTEX = 1,
+};
+
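The two subclasses are lockdep annotations: open and lock stateids embed st_mutex from the same lock class, and the LOCK path can take a lock stateid's mutex while already holding the associated open stateid's, which plain mutex_lock() would make lockdep report as recursive locking. mutex_lock_nested() declares the intended nesting (stateid names here are illustrative):

::

    /* nesting order: open stateid first, then the lock stateid */
    mutex_lock_nested(&open_stp->st_mutex, OPEN_STATEID_MUTEX);
    mutex_lock_nested(&lock_stp->st_mutex, LOCK_STATEID_MUTEX);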
/*
* A waitqueue for all in-progress 4.0 CLOSE operations that are waiting for
* the refcount on the open stateid to drop.
/* ignore lock owners */
if (local->st_stateowner->so_is_open_owner == 0)
continue;
- if (local->st_stateowner == &oo->oo_owner) {
+ if (local->st_stateowner != &oo->oo_owner)
+ continue;
+ if (local->st_stid.sc_type == NFS4_OPEN_STID) {
ret = local;
refcount_inc(&ret->st_stid.sc_count);
break;
return ret;
}
+static __be32
+nfsd4_verify_open_stid(struct nfs4_stid *s)
+{
+ __be32 ret = nfs_ok;
+
+ switch (s->sc_type) {
+ default:
+ break;
+ case NFS4_CLOSED_STID:
+ case NFS4_CLOSED_DELEG_STID:
+ ret = nfserr_bad_stateid;
+ break;
+ case NFS4_REVOKED_DELEG_STID:
+ ret = nfserr_deleg_revoked;
+ }
+ return ret;
+}
+
+/* Lock the stateid st_mutex, and deal with races with CLOSE */
+static __be32
+nfsd4_lock_ol_stateid(struct nfs4_ol_stateid *stp)
+{
+ __be32 ret;
+
+ mutex_lock_nested(&stp->st_mutex, LOCK_STATEID_MUTEX);
+ ret = nfsd4_verify_open_stid(&stp->st_stid);
+ if (ret != nfs_ok)
+ mutex_unlock(&stp->st_mutex);
+ return ret;
+}
+
+static struct nfs4_ol_stateid *
+nfsd4_find_and_lock_existing_open(struct nfs4_file *fp, struct nfsd4_open *open)
+{
+ struct nfs4_ol_stateid *stp;
+ for (;;) {
+ spin_lock(&fp->fi_lock);
+ stp = nfsd4_find_existing_open(fp, open);
+ spin_unlock(&fp->fi_lock);
+ if (!stp || nfsd4_lock_ol_stateid(stp) == nfs_ok)
+ break;
+ nfs4_put_stid(&stp->st_stid);
+ }
+ return stp;
+}
+
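nfsd4_find_and_lock_existing_open() is the lookup half of the CLOSE-race handling: the stateid is found and pinned under fp->fi_lock, but st_mutex can only be taken after that spinlock is dropped, and by then a concurrent CLOSE may have marked the stateid NFS4_CLOSED_STID. nfsd4_lock_ol_stateid() detects that, the reference is dropped, and the lookup restarts. The shape generalizes as find/pin, lock, revalidate, retry (names below are illustrative, not kernel API):

::

    for (;;) {
            spin_lock(&table_lock);
            obj = lookup_and_get(table, key);   /* NULL or +1 reference */
            spin_unlock(&table_lock);
            if (!obj)
                    break;                      /* nothing to race with */
            mutex_lock(&obj->mutex);
            if (obj->valid)
                    break;                      /* locked and still live */
            mutex_unlock(&obj->mutex);          /* lost a race; retry */
            put_ref(obj);
    }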
static struct nfs4_openowner *
alloc_init_open_stateowner(unsigned int strhashval, struct nfsd4_open *open,
struct nfsd4_compound_state *cstate)
stp = open->op_stp;
/* We are moving these outside of the spinlocks to avoid the warnings */
mutex_init(&stp->st_mutex);
- mutex_lock(&stp->st_mutex);
+ mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+retry:
spin_lock(&oo->oo_owner.so_client->cl_lock);
spin_lock(&fp->fi_lock);
spin_unlock(&fp->fi_lock);
spin_unlock(&oo->oo_owner.so_client->cl_lock);
if (retstp) {
- mutex_lock(&retstp->st_mutex);
+ /* Handle races with CLOSE */
+ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+ nfs4_put_stid(&retstp->st_stid);
+ goto retry;
+ }
/* To keep mutex tracking happy */
mutex_unlock(&stp->st_mutex);
stp = retstp;
struct nfs4_ol_stateid *stp = NULL;
struct nfs4_delegation *dp = NULL;
__be32 status;
+ bool new_stp = false;
/*
* Lookup file; if found, lookup stateid and check open request,
status = nfs4_check_deleg(cl, open, &dp);
if (status)
goto out;
- spin_lock(&fp->fi_lock);
- stp = nfsd4_find_existing_open(fp, open);
- spin_unlock(&fp->fi_lock);
+ stp = nfsd4_find_and_lock_existing_open(fp, open);
} else {
open->op_file = NULL;
status = nfserr_bad_stateid;
goto out;
}
+ if (!stp) {
+ stp = init_open_stateid(fp, open);
+ if (!open->op_stp)
+ new_stp = true;
+ }
+
/*
* OPEN the file, or upgrade an existing OPEN.
* If truncate fails, the OPEN fails.
+ *
+ * stp is already locked.
*/
- if (stp) {
+ if (!new_stp) {
/* Stateid was found, this is an OPEN upgrade */
- mutex_lock(&stp->st_mutex);
status = nfs4_upgrade_open(rqstp, fp, current_fh, stp, open);
if (status) {
mutex_unlock(&stp->st_mutex);
goto out;
}
} else {
- /* stp is returned locked. */
- stp = init_open_stateid(fp, open);
- /* See if we lost the race to some other thread */
- if (stp->st_access_bmap != 0) {
- status = nfs4_upgrade_open(rqstp, fp, current_fh,
- stp, open);
- if (status) {
- mutex_unlock(&stp->st_mutex);
- goto out;
- }
- goto upgrade_out;
- }
status = nfs4_get_vfs_file(rqstp, fp, current_fh, stp, open);
if (status) {
- mutex_unlock(&stp->st_mutex);
+ stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_open_stateid(stp);
+ mutex_unlock(&stp->st_mutex);
goto out;
}
if (stp->st_clnt_odstate == open->op_odstate)
open->op_odstate = NULL;
}
-upgrade_out:
+
nfs4_inc_and_copy_stateid(&open->op_stateid, &stp->st_stid);
mutex_unlock(&stp->st_mutex);
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
- nbl = list_first_entry(&nn->blocked_locks_lru,
+ nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
posix_unblock_lock(&nbl->nbl_lock);
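This one-word change (repeated below for nfs4_state_shutdown_net()) fixes a genuine bug: the entries to reap were already moved onto the local reaplist under nn->blocked_locks_lock, so the drain loop must pop from reaplist. Popping from the live blocked_locks_lru again, now without the lock, both races with concurrent users and tears down entries that were never selected. The safe shape is move-under-lock, then drain the private list locklessly (a sketch; the real laundromat moves only the expired entries rather than splicing everything):

::

    struct nfsd4_blocked_lock *nbl;
    LIST_HEAD(reaplist);

    spin_lock(&nn->blocked_locks_lock);
    list_splice_init(&nn->blocked_locks_lru, &reaplist);
    spin_unlock(&nn->blocked_locks_lock);

    while (!list_empty(&reaplist)) {
            nbl = list_first_entry(&reaplist,    /* not the live list! */
                                   struct nfsd4_blocked_lock, nbl_lru);
            list_del_init(&nbl->nbl_lru);
            posix_unblock_lock(&nbl->nbl_lock);
            free_blocked_lock(nbl);
    }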
return nfserr_old_stateid;
}
+static __be32 nfsd4_stid_check_stateid_generation(stateid_t *in, struct nfs4_stid *s, bool has_session)
+{
+ __be32 ret;
+
+ spin_lock(&s->sc_lock);
+ ret = nfsd4_verify_open_stid(s);
+ if (ret == nfs_ok)
+ ret = check_stateid_generation(in, &s->sc_stateid, has_session);
+ spin_unlock(&s->sc_lock);
+ return ret;
+}
+
static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
{
if (ols->st_stateowner->so_is_open_owner &&
struct nfs4_stid *s;
__be32 status = nfserr_bad_stateid;
- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+ CLOSE_STATEID(stateid))
return status;
/* Client debugging aid. */
if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) {
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
- status = check_stateid_generation(stateid, &s->sc_stateid, 1);
+ status = nfsd4_stid_check_stateid_generation(stateid, s, 1);
if (status)
goto out_unlock;
switch (s->sc_type) {
else if (typemask & NFS4_DELEG_STID)
typemask |= NFS4_REVOKED_DELEG_STID;
- if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
+ if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
+ CLOSE_STATEID(stateid))
return nfserr_bad_stateid;
status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
&s, nn);
if (status)
return status;
- status = check_stateid_generation(stateid, &s->sc_stateid,
+ status = nfsd4_stid_check_stateid_generation(stateid, s,
nfsd4_has_session(cstate));
if (status)
goto out;
struct nfs4_ol_stateid *stp = openlockstateid(s);
__be32 ret;
- mutex_lock(&stp->st_mutex);
+ ret = nfsd4_lock_ol_stateid(stp);
+ if (ret)
+ goto out_put_stid;
ret = check_stateid_generation(stateid, &s->sc_stateid, 1);
if (ret)
lockowner(stp->st_stateowner)))
goto out;
+ stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(stp);
ret = nfs_ok;
out:
mutex_unlock(&stp->st_mutex);
+out_put_stid:
nfs4_put_stid(s);
return ret;
}
s = find_stateid_locked(cl, stateid);
if (!s)
goto out_unlock;
+ spin_lock(&s->sc_lock);
switch (s->sc_type) {
case NFS4_DELEG_STID:
ret = nfserr_locks_held;
ret = nfserr_locks_held;
break;
case NFS4_LOCK_STID:
+ spin_unlock(&s->sc_lock);
refcount_inc(&s->sc_count);
spin_unlock(&cl->cl_lock);
ret = nfsd4_free_lock_stateid(stateid, s);
goto out;
case NFS4_REVOKED_DELEG_STID:
+ spin_unlock(&s->sc_lock);
dp = delegstateid(s);
list_del_init(&dp->dl_recall_lru);
spin_unlock(&cl->cl_lock);
goto out;
/* Default falls through and returns nfserr_bad_stateid */
}
+ spin_unlock(&s->sc_lock);
out_unlock:
spin_unlock(&cl->cl_lock);
out:
status = nfsd4_check_seqid(cstate, sop, seqid);
if (status)
return status;
- if (stp->st_stid.sc_type == NFS4_CLOSED_STID
- || stp->st_stid.sc_type == NFS4_REVOKED_DELEG_STID)
- /*
- * "Closed" stateid's exist *only* to return
- * nfserr_replay_me from the previous step, and
- * revoked delegations are kept only for free_stateid.
- */
- return nfserr_bad_stateid;
- mutex_lock(&stp->st_mutex);
+ status = nfsd4_lock_ol_stateid(stp);
+ if (status != nfs_ok)
+ return status;
status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
if (status == nfs_ok)
status = nfs4_check_fh(current_fh, &stp->st_stid);
bool unhashed;
LIST_HEAD(reaplist);
- s->st_stid.sc_type = NFS4_CLOSED_STID;
spin_lock(&clp->cl_lock);
unhashed = unhash_open_stateid(s, &reaplist);
nfsd4_bump_seqid(cstate, status);
if (status)
goto out;
+
+ stp->st_stid.sc_type = NFS4_CLOSED_STID;
nfs4_inc_and_copy_stateid(&close->cl_stateid, &stp->st_stid);
- mutex_unlock(&stp->st_mutex);
nfsd4_close_open_stateid(stp);
+ mutex_unlock(&stp->st_mutex);
+
+ /* See RFC5661 section 18.2.4 */
+ if (stp->st_stid.sc_client->cl_minorversion)
+ memcpy(&close->cl_stateid, &close_stateid,
+ sizeof(close->cl_stateid));
/* put reference from nfs4_preprocess_seqid_op */
nfs4_put_stid(&stp->st_stid);
if (status)
goto out;
dp = delegstateid(s);
- status = check_stateid_generation(stateid, &dp->dl_stid.sc_stateid, nfsd4_has_session(cstate));
+ status = nfsd4_stid_check_stateid_generation(stateid, &dp->dl_stid, nfsd4_has_session(cstate));
if (status)
goto put_stateid;
return ret;
}
-static void
+static struct nfs4_ol_stateid *
+find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
+{
+ struct nfs4_ol_stateid *lst;
+ struct nfs4_client *clp = lo->lo_owner.so_client;
+
+ lockdep_assert_held(&clp->cl_lock);
+
+ list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
+ if (lst->st_stid.sc_type != NFS4_LOCK_STID)
+ continue;
+ if (lst->st_stid.sc_file == fp) {
+ refcount_inc(&lst->st_stid.sc_count);
+ return lst;
+ }
+ }
+ return NULL;
+}
+
+static struct nfs4_ol_stateid *
init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
struct nfs4_file *fp, struct inode *inode,
struct nfs4_ol_stateid *open_stp)
{
struct nfs4_client *clp = lo->lo_owner.so_client;
+ struct nfs4_ol_stateid *retstp;
- lockdep_assert_held(&clp->cl_lock);
+ mutex_init(&stp->st_mutex);
+ mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+retry:
+ spin_lock(&clp->cl_lock);
+ spin_lock(&fp->fi_lock);
+ retstp = find_lock_stateid(lo, fp);
+ if (retstp)
+ goto out_unlock;
refcount_inc(&stp->st_stid.sc_count);
stp->st_stid.sc_type = NFS4_LOCK_STID;
stp->st_access_bmap = 0;
stp->st_deny_bmap = open_stp->st_deny_bmap;
stp->st_openstp = open_stp;
- mutex_init(&stp->st_mutex);
list_add(&stp->st_locks, &open_stp->st_locks);
list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
- spin_lock(&fp->fi_lock);
list_add(&stp->st_perfile, &fp->fi_stateids);
+out_unlock:
spin_unlock(&fp->fi_lock);
-}
-
-static struct nfs4_ol_stateid *
-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
-{
- struct nfs4_ol_stateid *lst;
- struct nfs4_client *clp = lo->lo_owner.so_client;
-
- lockdep_assert_held(&clp->cl_lock);
-
- list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
- if (lst->st_stid.sc_file == fp) {
- refcount_inc(&lst->st_stid.sc_count);
- return lst;
+ spin_unlock(&clp->cl_lock);
+ if (retstp) {
+ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+ nfs4_put_stid(&retstp->st_stid);
+ goto retry;
}
+ /* To keep mutex tracking happy */
+ mutex_unlock(&stp->st_mutex);
+ stp = retstp;
}
- return NULL;
+ return stp;
}
static struct nfs4_ol_stateid *
struct nfs4_openowner *oo = openowner(ost->st_stateowner);
struct nfs4_client *clp = oo->oo_owner.so_client;
+ *new = false;
spin_lock(&clp->cl_lock);
lst = find_lock_stateid(lo, fi);
- if (lst == NULL) {
- spin_unlock(&clp->cl_lock);
- ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
- if (ns == NULL)
- return NULL;
-
- spin_lock(&clp->cl_lock);
- lst = find_lock_stateid(lo, fi);
- if (likely(!lst)) {
- lst = openlockstateid(ns);
- init_lock_stateid(lst, lo, fi, inode, ost);
- ns = NULL;
- *new = true;
- }
- }
spin_unlock(&clp->cl_lock);
- if (ns)
+ if (lst != NULL) {
+ if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
+ goto out;
+ nfs4_put_stid(&lst->st_stid);
+ }
+ ns = nfs4_alloc_stid(clp, stateid_slab, nfs4_free_lock_stateid);
+ if (ns == NULL)
+ return NULL;
+
+ lst = init_lock_stateid(openlockstateid(ns), lo, fi, inode, ost);
+ if (lst == openlockstateid(ns))
+ *new = true;
+ else
nfs4_put_stid(ns);
+out:
return lst;
}
struct nfs4_lockowner *lo;
struct nfs4_ol_stateid *lst;
unsigned int strhashval;
- bool hashed;
lo = find_lockowner_str(cl, &lock->lk_new_owner);
if (!lo) {
goto out;
}
-retry:
lst = find_or_create_lock_stateid(lo, fi, inode, ost, new);
if (lst == NULL) {
status = nfserr_jukebox;
goto out;
}
- mutex_lock(&lst->st_mutex);
-
- /* See if it's still hashed to avoid race with FREE_STATEID */
- spin_lock(&cl->cl_lock);
- hashed = !list_empty(&lst->st_perfile);
- spin_unlock(&cl->cl_lock);
-
- if (!hashed) {
- mutex_unlock(&lst->st_mutex);
- nfs4_put_stid(&lst->st_stid);
- goto retry;
- }
status = nfs_ok;
*plst = lst;
out:
seqid_mutating_err(ntohl(status)))
lock_sop->lo_owner.so_seqid++;
- mutex_unlock(&lock_stp->st_mutex);
-
/*
* If this is a new, never-before-used stateid, and we are
* returning an error, then just go ahead and release it.
*/
- if (status && new)
+ if (status && new) {
+ lock_stp->st_stid.sc_type = NFS4_CLOSED_STID;
release_lock_stateid(lock_stp);
+ }
+
+ mutex_unlock(&lock_stp->st_mutex);
nfs4_put_stid(&lock_stp->st_stid);
}
INIT_LIST_HEAD(&nn->sessionid_hashtbl[i]);
nn->conf_name_tree = RB_ROOT;
nn->unconf_name_tree = RB_ROOT;
+ nn->boot_time = get_seconds();
+ nn->grace_ended = false;
+ nn->nfsd4_manager.block_opens = true;
+ INIT_LIST_HEAD(&nn->nfsd4_manager.list);
INIT_LIST_HEAD(&nn->client_lru);
INIT_LIST_HEAD(&nn->close_lru);
INIT_LIST_HEAD(&nn->del_recall_lru);
ret = nfs4_state_create_net(net);
if (ret)
return ret;
- nn->boot_time = get_seconds();
- nn->grace_ended = false;
- nn->nfsd4_manager.block_opens = true;
locks_start_grace(net, &nn->nfsd4_manager);
nfsd4_client_tracking_init(net);
printk(KERN_INFO "NFSD: starting %ld-second grace period (net %x)\n",
spin_unlock(&nn->blocked_locks_lock);
while (!list_empty(&reaplist)) {
- nbl = list_first_entry(&nn->blocked_locks_lru,
+ nbl = list_first_entry(&reaplist,
struct nfsd4_blocked_lock, nbl_lru);
list_del_init(&nbl->nbl_lru);
posix_unblock_lock(&nbl->nbl_lock);
nn->nfsd4_grace = 90;
nn->clverifier_counter = prandom_u32();
nn->clientid_counter = prandom_u32();
+
+ atomic_set(&nn->ntf_refcnt, 0);
+ init_waitqueue_head(&nn->ntf_wq);
return 0;
out_idmap_error:
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in sin;
- if (event != NETDEV_DOWN)
+ if ((event != NETDEV_DOWN) ||
+ !atomic_inc_not_zero(&nn->ntf_refcnt))
goto out;
if (nn->nfsd_serv) {
sin.sin_addr.s_addr = ifa->ifa_local;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin);
}
+ atomic_dec(&nn->ntf_refcnt);
+ wake_up(&nn->ntf_wq);
out:
return NOTIFY_DONE;
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
struct sockaddr_in6 sin6;
- if (event != NETDEV_DOWN)
+ if ((event != NETDEV_DOWN) ||
+ !atomic_inc_not_zero(&nn->ntf_refcnt))
goto out;
if (nn->nfsd_serv) {
sin6.sin6_scope_id = ifa->idev->dev->ifindex;
svc_age_temp_xprts_now(nn->nfsd_serv, (struct sockaddr *)&sin6);
}
-
+ atomic_dec(&nn->ntf_refcnt);
+ wake_up(&nn->ntf_wq);
out:
return NOTIFY_DONE;
}
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
+ atomic_dec(&nn->ntf_refcnt);
/* check if the notifier still has clients */
if (atomic_dec_return(&nfsd_notifier_refcount) == 0) {
unregister_inetaddr_notifier(&nfsd_inetaddr_notifier);
unregister_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
+ wait_event(nn->ntf_wq, atomic_read(&nn->ntf_refcnt) == 0);
/*
* write_ports can create the server without actually starting
register_inet6addr_notifier(&nfsd_inet6addr_notifier);
#endif
}
+ atomic_inc(&nn->ntf_refcnt);
ktime_get_real_ts64(&nn->nfssvc_boot); /* record boot time */
return 0;
}
struct the_nilfs *nilfs)
{
struct nilfs_inode_info *ii, *n;
- int during_mount = !(sci->sc_super->s_flags & MS_ACTIVE);
+ int during_mount = !(sci->sc_super->s_flags & SB_ACTIVE);
int defer_iput = false;
spin_lock(&nilfs->ns_inode_lock);
if (nilfs_test_opt(nilfs, ERRORS_RO)) {
printk(KERN_CRIT "Remounting filesystem read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
}
/* FS independent flags */
#ifdef NILFS_ATIME_DISABLE
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
#endif
nilfs_set_default_options(sb, sbp);
err = -EINVAL;
goto restore_opts;
}
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL);
err = -EINVAL;
goto restore_opts;
}
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out;
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
/* Shutting down log writer */
nilfs_detach_log_writer(sb);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/*
* Remounting a valid RW partition RDONLY, so set
goto restore_opts;
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
root = NILFS_I(d_inode(sb->s_root))->i_root;
err = nilfs_attach_log_writer(sb, root);
const char *msg = NULL;
int err;
- if (!(sd->flags & MS_RDONLY)) {
+ if (!(sd->flags & SB_RDONLY)) {
msg = "read-only option is not specified";
goto parse_error;
}
struct dentry *root_dentry;
int err, s_new = false;
- if (!(flags & MS_RDONLY))
+ if (!(flags & SB_RDONLY))
mode |= FMODE_WRITE;
sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type);
snprintf(s->s_id, sizeof(s->s_id), "%pg", sd.bdev);
sb_set_blocksize(s, block_size(sd.bdev));
- err = nilfs_fill_super(s, data, flags & MS_SILENT ? 1 : 0);
+ err = nilfs_fill_super(s, data, flags & SB_SILENT ? 1 : 0);
if (err)
goto failed_super;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
} else if (!sd.cno) {
if (nilfs_tree_is_busy(s->s_root)) {
- if ((flags ^ s->s_flags) & MS_RDONLY) {
+ if ((flags ^ s->s_flags) & SB_RDONLY) {
nilfs_msg(s, KERN_ERR,
"the device already has a %s mount.",
sb_rdonly(s) ? "read-only" : "read/write");
if (!valid_fs) {
nilfs_msg(sb, KERN_WARNING, "mounting unchecked fs");
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
nilfs_msg(sb, KERN_INFO,
"recovery required for readonly filesystem");
nilfs_msg(sb, KERN_INFO,
if (valid_fs)
goto skip_recovery;
- if (s_flags & MS_RDONLY) {
+ if (s_flags & SB_RDONLY) {
__u64 features;
if (nilfs_test_opt(nilfs, NORECOVERY)) {
err = -EROFS;
goto failed_unload;
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
} else if (nilfs_test_opt(nilfs, NORECOVERY)) {
nilfs_msg(sb, KERN_ERR,
"recovery cancelled because norecovery option was specified for a read/write mount");
/*
* If i_count is zero, the inode cannot have any watches and
- * doing an __iget/iput with MS_ACTIVE clear would actually
+ * doing an __iget/iput with SB_ACTIVE clear would actually
* evict all inodes with zero i_count from icache which is
* unnecessarily violent and may in fact be illegal to do.
*/
nsfs_mnt = kern_mount(&nsfs);
if (IS_ERR(nsfs_mnt))
panic("can't set nsfs up\n");
- nsfs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+ nsfs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
}
#ifndef NTFS_RW
/* For read-only compiled driver, enforce read-only flag. */
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
#else /* NTFS_RW */
/*
* For the read-write compiled driver, if we are remounting read-write,
* When remounting read-only, mark the volume clean if no volume errors
* have occurred.
*/
- if (sb_rdonly(sb) && !(*flags & MS_RDONLY)) {
+ if (sb_rdonly(sb) && !(*flags & SB_RDONLY)) {
static const char *es = ". Cannot remount read-write.";
/* Remounting read-write. */
NVolSetErrors(vol);
return -EROFS;
}
- } else if (!sb_rdonly(sb) && (*flags & MS_RDONLY)) {
+ } else if (!sb_rdonly(sb) && (*flags & SB_RDONLY)) {
/* Remounting read-only. */
if (!NVolErrors(vol)) {
if (ntfs_clear_volume_flags(vol, VOLUME_IS_DIRTY))
es3);
goto iput_mirr_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s",
!vol->mftmirr_ino ? es1 : es2, es3);
} else
es1, es2);
goto iput_vol_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
}
goto iput_logfile_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
es1, es2);
goto iput_root_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
/*
* Do not set NVolErrors() because ntfs_remount() might manage
* to set the dirty flag in which case all would be well.
* If (still) a read-write mount, set the NT4 compatibility flag on
* newer NTFS version volumes.
*/
- if (!(sb->s_flags & MS_RDONLY) && (vol->major_ver > 1) &&
+ if (!(sb->s_flags & SB_RDONLY) && (vol->major_ver > 1) &&
ntfs_set_volume_flags(vol, VOLUME_MOUNTED_ON_NT4)) {
static const char *es1 = "Failed to set NT4 compatibility flag";
static const char *es2 = ". Run chkdsk.";
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif
goto iput_root_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
es1, es2);
goto iput_quota_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
goto iput_quota_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
/*
es1, es2);
goto iput_usnjrnl_err_out;
}
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
} else
ntfs_warning(sb, "%s. Will not be able to remount "
goto iput_usnjrnl_err_out;
}
ntfs_error(sb, "%s. Mounting read-only%s", es1, es2);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
NVolSetErrors(vol);
}
#endif /* NTFS_RW */
lockdep_off();
ntfs_debug("Entering.");
#ifndef NTFS_RW
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
#endif /* ! NTFS_RW */
/* Allocate a new ntfs_volume and place it in sb->s_fs_info. */
sb->s_fs_info = kmalloc(sizeof(ntfs_volume), GFP_NOFS);
static void o2net_shutdown_sc(struct work_struct *work);
static void o2net_listen_data_ready(struct sock *sk);
static void o2net_sc_send_keep_req(struct work_struct *work);
-static void o2net_idle_timer(unsigned long data);
+static void o2net_idle_timer(struct timer_list *t);
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
INIT_WORK(&sc->sc_shutdown_work, o2net_shutdown_sc);
INIT_DELAYED_WORK(&sc->sc_keepalive_work, o2net_sc_send_keep_req);
- setup_timer(&sc->sc_idle_timeout, o2net_idle_timer,
- (unsigned long)sc);
+ timer_setup(&sc->sc_idle_timeout, o2net_idle_timer, 0);
sclog(sc, "alloced\n");
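The o2net hunks follow the kernel-wide timer API conversion: callbacks now receive a struct timer_list * and recover their containing object with from_timer(), a container_of() wrapper, instead of an unsigned long cookie. The shape, with a hypothetical structure:

::

    #include <linux/timer.h>

    struct foo {
            struct timer_list timer;
            int value;
    };

    static void foo_timer_fn(struct timer_list *t)
    {
            struct foo *f = from_timer(f, t, timer);  /* container_of */

            pr_info("foo: %d\n", f->value);
    }

    /* at init time, no object-to-cookie cast anymore:
     *      timer_setup(&f->timer, foo_timer_fn, 0);
     */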
/* socket shutdown does a del_timer_sync against this as it tears down.
* we can't start this timer until we've got to the point in sc buildup
* where shutdown is going to be involved */
-static void o2net_idle_timer(unsigned long data)
+static void o2net_idle_timer(struct timer_list *t)
{
- struct o2net_sock_container *sc = (struct o2net_sock_container *)data;
+ struct o2net_sock_container *sc = from_timer(sc, t, sc_idle_timeout);
struct o2net_node *nn = o2net_nn_from_num(sc->sc_node->nd_num);
#ifdef CONFIG_DEBUG_FS
unsigned long msecs = ktime_to_ms(ktime_get()) -
return 0;
if ((inode->i_flags & S_NOATIME) ||
- ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode)))
+ ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
return 0;
/*
}
/* We're going to/from readonly mode. */
- if ((bool)(*flags & MS_RDONLY) != sb_rdonly(sb)) {
+ if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
/* Disable quota accounting before remounting RO */
- if (*flags & MS_RDONLY) {
+ if (*flags & SB_RDONLY) {
ret = ocfs2_susp_quotas(osb, 0);
if (ret < 0)
goto out;
goto unlock_osb;
}
- if (*flags & MS_RDONLY) {
- sb->s_flags |= MS_RDONLY;
+ if (*flags & SB_RDONLY) {
+ sb->s_flags |= SB_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
} else {
if (osb->osb_flags & OCFS2_OSB_ERROR_FS) {
ret = -EINVAL;
goto unlock_osb;
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
}
trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
unlock_osb:
spin_unlock(&osb->osb_lock);
/* Enable quota accounting after remounting RW */
- if (!ret && !(*flags & MS_RDONLY)) {
+ if (!ret && !(*flags & SB_RDONLY)) {
if (sb_any_quota_suspended(sb))
ret = ocfs2_susp_quotas(osb, 1);
else
if (ret < 0) {
/* Return back changes... */
spin_lock(&osb->osb_lock);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
spin_unlock(&osb->osb_lock);
goto out;
if (!ocfs2_is_hard_readonly(osb))
ocfs2_set_journal_params(osb);
- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+ sb->s_flags = (sb->s_flags & ~SB_POSIXACL) |
((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ?
- MS_POSIXACL : 0);
+ SB_POSIXACL : 0);
}
out:
return ret;
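The (bool) comparison above is the standard test for whether a remount actually changes the read-only state. The same idiom, reduced to a minimal sketch for a hypothetical filesystem using the sb_rdonly() helper::

  static int myfs_remount(struct super_block *sb, int *flags, char *data)
  {
          sync_filesystem(sb);

          /* nothing to do if we stay on the same side of read-only */
          if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
                  return 0;

          if (*flags & SB_RDONLY)
                  sb->s_flags |= SB_RDONLY;       /* rw -> ro */
          else
                  sb->s_flags &= ~SB_RDONLY;      /* ro -> rw */
          return 0;
  }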
sb->s_magic = OCFS2_SUPER_MAGIC;
- sb->s_flags = (sb->s_flags & ~(MS_POSIXACL | MS_NOSEC)) |
- ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
+ sb->s_flags = (sb->s_flags & ~(SB_POSIXACL | SB_NOSEC)) |
+ ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) ? SB_POSIXACL : 0);
- /* Hard readonly mode only if: bdev_read_only, MS_RDONLY,
+ /* Hard readonly mode only if: bdev_read_only, SB_RDONLY,
* heartbeat=none */
if (bdev_read_only(sb->s_bdev)) {
if (!sb_rdonly(sb)) {
sb->s_quota_types = QTYPE_MASK_USR | QTYPE_MASK_GRP;
sb->s_xattr = ocfs2_xattr_handlers;
sb->s_time_gran = 1;
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
/* this is needed to support O_LARGEFILE */
cbits = le32_to_cpu(di->id2.i_super.s_clustersize_bits);
bbits = le32_to_cpu(di->id2.i_super.s_blocksize_bits);
return rv;
pr_crit("OCFS2: File system is now read-only.\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
ocfs2_set_ro_flag(osb, 0);
}
case OCFS2_XATTR_INDEX_POSIX_ACL_ACCESS:
case OCFS2_XATTR_INDEX_POSIX_ACL_DEFAULT:
- if (!(sb->s_flags & MS_POSIXACL))
+ if (!(sb->s_flags & SB_POSIXACL))
return 0;
break;
static int openprom_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_NOATIME;
+ *flags |= SB_NOATIME;
return 0;
}
struct op_inode_info *oi;
int ret;
- s->s_flags |= MS_NOATIME;
+ s->s_flags |= SB_NOATIME;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = OPENPROM_SUPER_MAGIC;
{
struct orangefs_sb_info_s *orangefs_sb = ORANGEFS_SB(root->d_sb);
- if (root->d_sb->s_flags & MS_POSIXACL)
+ if (root->d_sb->s_flags & SB_POSIXACL)
seq_puts(m, ",acl");
if (orangefs_sb->flags & ORANGEFS_OPT_INTR)
seq_puts(m, ",intr");
* Force any potential flags that might be set from the mount
* to zero, ie, initialize to unset.
*/
- sb->s_flags &= ~MS_POSIXACL;
+ sb->s_flags &= ~SB_POSIXACL;
orangefs_sb->flags &= ~ORANGEFS_OPT_INTR;
orangefs_sb->flags &= ~ORANGEFS_OPT_LOCAL_LOCK;
token = match_token(p, tokens, args);
switch (token) {
case Opt_acl:
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
break;
case Opt_intr:
orangefs_sb->flags |= ORANGEFS_OPT_INTR;
ret = orangefs_fill_sb(sb,
&new_op->downcall.resp.fs_mount, data,
- flags & MS_SILENT ? 1 : 0);
+ flags & SB_SILENT ? 1 : 0);
if (ret) {
d = ERR_PTR(ret);
{
struct ovl_fs *ofs = sb->s_fs_info;
- if (!(*flags & MS_RDONLY) && ovl_force_readonly(ofs))
+ if (!(*flags & SB_RDONLY) && ovl_force_readonly(ofs))
return -EROFS;
return 0;
goto out_err;
if (!ofs->workdir)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
sb->s_stack_depth = ofs->upper_mnt->mnt_sb->s_stack_depth;
sb->s_time_gran = ofs->upper_mnt->mnt_sb->s_time_gran;
/* If the upper fs is nonexistent, we mark overlayfs r/o too */
if (!ofs->upper_mnt)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
else if (ofs->upper_mnt->mnt_sb != ofs->same_sb)
ofs->same_sb = NULL;
goto out_free_oe;
if (!ofs->indexdir)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
/* Show index=off/on in /proc/mounts for any of the reasons above */
sb->s_op = &ovl_super_operations;
sb->s_xattr = ovl_xattr_handlers;
sb->s_fs_info = ofs;
- sb->s_flags |= MS_POSIXACL | MS_NOREMOTELOCK;
+ sb->s_flags |= SB_POSIXACL | SB_NOREMOTELOCK;
err = -ENOMEM;
root_dentry = d_make_root(ovl_new_inode(sb, S_IFDIR, 0));
save_stack_trace_tsk(task, &trace);
for (i = 0; i < trace.nr_entries; i++) {
- seq_printf(m, "[<%pK>] %pB\n",
- (void *)entries[i], (void *)entries[i]);
+ seq_printf(m, "[<0>] %pB\n", (void *)entries[i]);
}
unlock_trace(task);
}
/* User space would break if executables or devices appear on proc */
s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
- s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
+ s->s_flags |= SB_NODIRATIME | SB_NOSUID | SB_NOEXEC;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
s->s_magic = PROC_SUPER_MAGIC;
{
struct pid_namespace *ns;
- if (flags & MS_KERNMOUNT) {
+ if (flags & SB_KERNMOUNT) {
ns = data;
data = NULL;
} else {
static int show_sb_opts(struct seq_file *m, struct super_block *sb)
{
static const struct proc_fs_info fs_info[] = {
- { MS_SYNCHRONOUS, ",sync" },
- { MS_DIRSYNC, ",dirsync" },
- { MS_MANDLOCK, ",mand" },
- { MS_LAZYTIME, ",lazytime" },
+ { SB_SYNCHRONOUS, ",sync" },
+ { SB_DIRSYNC, ",dirsync" },
+ { SB_MANDLOCK, ",mand" },
+ { SB_LAZYTIME, ",lazytime" },
{ 0, NULL }
};
const struct proc_fs_info *fs_infop;
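The loop consuming this table is elided above; it walks the array to the zero terminator and emits the option string for each flag set on the superblock, along these lines::

  for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
          if (sb->s_flags & fs_infop->flag)
                  seq_puts(m, fs_infop->str);
  }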
static int pstore_new_entry;
-static void pstore_timefunc(unsigned long);
+static void pstore_timefunc(struct timer_list *);
static DEFINE_TIMER(pstore_timer, pstore_timefunc);
static void pstore_dowork(struct work_struct *);
pstore_get_records(1);
}
-static void pstore_timefunc(unsigned long dummy)
+static void pstore_timefunc(struct timer_list *unused)
{
if (pstore_new_entry) {
pstore_new_entry = 0;
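With the two-argument DEFINE_TIMER() there is no 'data' cookie at all, and a file-scope static timer needs no from_timer() either, since the callback can reference the timer directly. A minimal sketch with hypothetical names::

  static void my_timefunc(struct timer_list *unused);
  static DEFINE_TIMER(my_timer, my_timefunc);

  static void my_timefunc(struct timer_list *unused)
  {
          /* the timer is file-scope static, so no container lookup is needed */
          pr_info("my_timer fired\n");
          mod_timer(&my_timer, jiffies + HZ);
  }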
sync_filesystem(sb);
qs = qnx4_sb(sb);
qs->Version = QNX4_VERSION;
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
s->s_op = &qnx4_sops;
s->s_magic = QNX4_SUPER_MAGIC;
- s->s_flags |= MS_RDONLY; /* Yup, read-only yet */
+ s->s_flags |= SB_RDONLY; /* Yup, read-only yet */
/* Check the superblock signature. Since the qnx4 code is
dangerous, we should leave as quickly as possible
static int qnx6_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
}
s->s_op = &qnx6_sops;
s->s_magic = QNX6_SUPER_MAGIC;
- s->s_flags |= MS_RDONLY; /* Yup, read-only yet */
+ s->s_flags |= SB_RDONLY; /* Yup, read-only yet */
/* ease the later tree level calculations */
sbi = QNX6_SB(s);
}
/* This routine is guarded by s_umount semaphore */
-static void add_dquot_ref(struct super_block *sb, int type)
+static int add_dquot_ref(struct super_block *sb, int type)
{
struct inode *inode, *old_inode = NULL;
#ifdef CONFIG_QUOTA_DEBUG
int reserved = 0;
#endif
+ int err = 0;
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
reserved = 1;
#endif
iput(old_inode);
- __dquot_initialize(inode, type);
+ err = __dquot_initialize(inode, type);
+ if (err) {
+ iput(inode);
+ goto out;
+ }
/*
* We hold a reference to 'inode' so it couldn't have been
}
spin_unlock(&sb->s_inode_list_lock);
iput(old_inode);
-
+out:
#ifdef CONFIG_QUOTA_DEBUG
if (reserved) {
quota_error(sb, "Writes happened before quota was turned on "
"Please run quotacheck(8)");
}
#endif
+ return err;
}
/*
dqopt->flags |= dquot_state_flag(flags, type);
spin_unlock(&dq_state_lock);
- add_dquot_ref(sb, type);
-
- return 0;
+ error = add_dquot_ref(sb, type);
+ if (error)
+ dquot_disable(sb, type, flags);
+ return error;
out_file_init:
dqopt->files[type] = NULL;
iput(inode);
pr_info("VFS: Dquot-cache hash table entries: %ld (order %ld,"
" %ld bytes)\n", nr_hash, order, (PAGE_SIZE << order));
- register_shrinker(&dqcache_shrinker);
+ if (register_shrinker(&dqcache_shrinker))
+ panic("Cannot register dquot shrinker");
return 0;
}
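register_shrinker() can fail (its deferral bookkeeping is allocated at registration time), and the quota code panics here because it cannot operate without its cache shrinker. A caller able to degrade gracefully would propagate the error instead; a sketch with hypothetical names::

  static struct shrinker my_shrinker = {
          .count_objects = my_count,
          .scan_objects  = my_scan,
          .seeks         = DEFAULT_SEEKS,
  };

  static int __init my_init(void)
  {
          int err = register_shrinker(&my_shrinker);

          if (err)
                  return err;     /* fail module load rather than panic */
          return 0;
  }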
journal_end(th);
goto out_inserted_sd;
}
- } else if (inode->i_sb->s_flags & MS_POSIXACL) {
+ } else if (inode->i_sb->s_flags & SB_POSIXACL) {
reiserfs_warning(inode->i_sb, "jdm-13090",
"ACLs aren't enabled in the fs, "
"but vfs thinks they are!");
/*
* Cancel flushing of old commits. Note that neither of these works
* will be requeued because superblock is being shutdown and doesn't
- * have MS_ACTIVE set.
+ * have SB_ACTIVE set.
*/
reiserfs_cancel_old_flush(sb);
/* wait for all commits to finish */
* Avoid queueing work when sb is being shut down. Transaction
* will be flushed on journal shutdown.
*/
- if (sb->s_flags & MS_ACTIVE)
+ if (sb->s_flags & SB_ACTIVE)
queue_delayed_work(REISERFS_SB(sb)->commit_wq,
&journal->j_work, HZ / 10);
}
if (!journal->j_errno)
journal->j_errno = errno;
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
set_bit(J_ABORTED, &journal->j_state);
#ifdef CONFIG_REISERFS_CHECK
return;
reiserfs_info(sb, "Remounting filesystem read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
reiserfs_abort_journal(sb, -EIO);
}
printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
error_buf);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
reiserfs_abort_journal(sb, errno);
}
* Avoid scheduling flush when sb is being shut down. It can race
* with journal shutdown and free still queued delayed work.
*/
- if (sb_rdonly(s) || !(s->s_flags & MS_ACTIVE))
+ if (sb_rdonly(s) || !(s->s_flags & SB_ACTIVE))
return;
spin_lock(&sbi->old_work_lock);
#ifdef CONFIG_QUOTA
/* Needed for iput() to work correctly and not trash data */
- if (s->s_flags & MS_ACTIVE) {
+ if (s->s_flags & SB_ACTIVE) {
ms_active_set = 0;
} else {
ms_active_set = 1;
- s->s_flags |= MS_ACTIVE;
+ s->s_flags |= SB_ACTIVE;
}
/* Turn on quotas so that they are updated correctly */
for (i = 0; i < REISERFS_MAXQUOTAS; i++) {
reiserfs_write_lock(s);
if (ms_active_set)
/* Restore the flag back */
- s->s_flags &= ~MS_ACTIVE;
+ s->s_flags &= ~SB_ACTIVE;
#endif
pathrelse(&path);
if (done)
goto out_err_unlock;
}
- if (*mount_flags & MS_RDONLY) {
+ if (*mount_flags & SB_RDONLY) {
reiserfs_write_unlock(s);
reiserfs_xattr_init(s, *mount_flags);
/* remount read-only */
REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
/* now it is safe to call journal_begin */
- s->s_flags &= ~MS_RDONLY;
+ s->s_flags &= ~SB_RDONLY;
err = journal_begin(&th, s, 10);
if (err)
goto out_err_unlock;
/* Mount a partition which is read-only, read-write */
reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
REISERFS_SB(s)->s_mount_state = sb_umount_state(rs);
- s->s_flags &= ~MS_RDONLY;
+ s->s_flags &= ~SB_RDONLY;
set_sb_umount_state(rs, REISERFS_ERROR_FS);
if (!old_format_only(s))
set_sb_mnt_count(rs, sb_mnt_count(rs) + 1);
goto out_err_unlock;
reiserfs_write_unlock(s);
- if (!(*mount_flags & MS_RDONLY)) {
+ if (!(*mount_flags & SB_RDONLY)) {
dquot_resume(s, -1);
reiserfs_write_lock(s);
finish_unfinished(s);
if (bdev_read_only(s->s_bdev) && !sb_rdonly(s)) {
SWARN(silent, s, "clm-7000",
"Detected readonly device, marking FS readonly");
- s->s_flags |= MS_RDONLY;
+ s->s_flags |= SB_RDONLY;
}
args.objectid = REISERFS_ROOT_OBJECTID;
args.dirid = REISERFS_ROOT_PARENT_OBJECTID;
return err;
if (inode->i_size < off + len - towrite)
i_size_write(inode, off + len - towrite);
- inode->i_version++;
inode->i_mtime = inode->i_ctime = current_time(inode);
mark_inode_dirty(inode);
return len - towrite;
/*
* We need to take a copy of the mount flags since things like
- * MS_RDONLY don't get set until *after* we're called.
+ * SB_RDONLY don't get set until *after* we're called.
* mount_flags != mount_options
*/
int reiserfs_xattr_init(struct super_block *s, int mount_flags)
if (err)
goto error;
- if (d_really_is_negative(privroot) && !(mount_flags & MS_RDONLY)) {
+ if (d_really_is_negative(privroot) && !(mount_flags & SB_RDONLY)) {
inode_lock(d_inode(s->s_root));
err = create_privroot(REISERFS_SB(s)->priv_root);
inode_unlock(d_inode(s->s_root));
clear_bit(REISERFS_POSIXACL, &REISERFS_SB(s)->s_mount_opt);
}
- /* The super_block MS_POSIXACL must mirror the (no)acl mount option. */
+ /* The super_block SB_POSIXACL must mirror the (no)acl mount option. */
if (reiserfs_posixacl(s))
- s->s_flags |= MS_POSIXACL;
+ s->s_flags |= SB_POSIXACL;
else
- s->s_flags &= ~MS_POSIXACL;
+ s->s_flags &= ~SB_POSIXACL;
return err;
}
static int romfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
sb->s_maxbytes = 0xFFFFFFFF;
sb->s_magic = ROMFS_MAGIC;
- sb->s_flags |= MS_RDONLY | MS_NOATIME;
+ sb->s_flags |= SB_RDONLY | SB_NOATIME;
sb->s_op = &romfs_super_ops;
#ifdef CONFIG_ROMFS_ON_MTD
(u64) le64_to_cpu(sblk->id_table_start));
sb->s_maxbytes = MAX_LFS_FILESIZE;
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
sb->s_op = &squashfs_super_ops;
err = -ENOMEM;
static int squashfs_remount(struct super_block *sb, int *flags, char *data)
{
sync_filesystem(sb);
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
static int flags_by_sb(int s_flags)
{
int flags = 0;
- if (s_flags & MS_SYNCHRONOUS)
+ if (s_flags & SB_SYNCHRONOUS)
flags |= ST_SYNCHRONOUS;
- if (s_flags & MS_MANDLOCK)
+ if (s_flags & SB_MANDLOCK)
flags |= ST_MANDLOCK;
- if (s_flags & MS_RDONLY)
+ if (s_flags & SB_RDONLY)
flags |= ST_RDONLY;
return flags;
}
void *ns;
bool new_sb;
- if (!(flags & MS_KERNMOUNT)) {
+ if (!(flags & SB_KERNMOUNT)) {
if (!kobj_ns_current_may_mount(KOBJ_NS_TYPE_NET))
return ERR_PTR(-EPERM);
}
sync_filesystem(sb);
if (sbi->s_forced_ro)
- *flags |= MS_RDONLY;
+ *flags |= SB_RDONLY;
return 0;
}
/* set up enough so that it can read an inode */
sb->s_op = &sysv_sops;
if (sbi->s_forced_ro)
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
if (sbi->s_truncate)
sb->s_d_op = &sysv_dentry_operations;
root_inode = sysv_iget(sb, SYSV_ROOT_INO);
if (flags & S_MTIME)
inode->i_mtime = *time;
- if (!(inode->i_sb->s_flags & MS_LAZYTIME))
+ if (!(inode->i_sb->s_flags & SB_LAZYTIME))
iflags |= I_DIRTY_SYNC;
release = ui->dirty;
if (!c->ro_error) {
c->ro_error = 1;
c->no_chk_data_crc = 0;
- c->vfs_sb->s_flags |= MS_RDONLY;
+ c->vfs_sb->s_flags |= SB_RDONLY;
ubifs_warn(c, "switched to read-only mode, error %d", err);
dump_stack();
}
pr_notice("UBIFS: parse %s\n", option);
if (!strcmp(option, "sync"))
- return MS_SYNCHRONOUS;
+ return SB_SYNCHRONOUS;
return 0;
}
size_t sz;
c->ro_mount = !!sb_rdonly(c->vfs_sb);
- /* Suppress error messages while probing if MS_SILENT is set */
- c->probing = !!(c->vfs_sb->s_flags & MS_SILENT);
+ /* Suppress error messages while probing if SB_SILENT is set */
+ c->probing = !!(c->vfs_sb->s_flags & SB_SILENT);
err = init_constants_early(c);
if (err)
return err;
}
- if (c->ro_mount && !(*flags & MS_RDONLY)) {
+ if (c->ro_mount && !(*flags & SB_RDONLY)) {
if (c->ro_error) {
ubifs_msg(c, "cannot re-mount R/W due to prior errors");
return -EROFS;
err = ubifs_remount_rw(c);
if (err)
return err;
- } else if (!c->ro_mount && (*flags & MS_RDONLY)) {
+ } else if (!c->ro_mount && (*flags & SB_RDONLY)) {
if (c->ro_error) {
ubifs_msg(c, "cannot re-mount R/O due to prior errors");
return -EROFS;
*/
ubi = open_ubi(name, UBI_READONLY);
if (IS_ERR(ubi)) {
- if (!(flags & MS_SILENT))
+ if (!(flags & SB_SILENT))
pr_err("UBIFS error (pid: %d): cannot open \"%s\", error %d",
current->pid, name, (int)PTR_ERR(ubi));
return ERR_CAST(ubi);
kfree(c);
/* A new mount point for already mounted UBIFS */
dbg_gen("this ubi volume is already mounted");
- if (!!(flags & MS_RDONLY) != c1->ro_mount) {
+ if (!!(flags & SB_RDONLY) != c1->ro_mount) {
err = -EBUSY;
goto out_deact;
}
} else {
- err = ubifs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
+ err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
if (err)
goto out_deact;
/* We do not support atime */
- sb->s_flags |= MS_ACTIVE;
+ sb->s_flags |= SB_ACTIVE;
#ifndef CONFIG_UBIFS_ATIME_SUPPORT
- sb->s_flags |= MS_NOATIME;
+ sb->s_flags |= SB_NOATIME;
#else
ubifs_msg(c, "full atime support is enabled.");
#endif
* @need_recovery: %1 if the file-system needs recovery
* @replaying: %1 during journal replay
* @mounting: %1 while mounting
- * @probing: %1 while attempting to mount if MS_SILENT mount flag is set
+ * @probing: %1 while attempting to mount if SB_SILENT mount flag is set
* @remounting_rw: %1 while re-mounting from R/O mode to R/W mode
* @replay_list: temporary list used during journal replay
* @replay_buds: list of buds to replay
void ubifs_warn(const struct ubifs_info *c, const char *fmt, ...);
/*
* A conditional variant of 'ubifs_err()' which doesn't output anything
- * if probing (ie. MS_SILENT set).
+ * if probing (i.e. SB_SILENT set).
*/
#define ubifs_errc(c, fmt, ...) \
do { \
sync_filesystem(sb);
if (lvidiu) {
int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
- if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
+ if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & SB_RDONLY))
return -EACCES;
}
sbi->s_dmode = uopt.dmode;
write_unlock(&sbi->s_cred_lock);
- if ((bool)(*flags & MS_RDONLY) == sb_rdonly(sb))
+ if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
goto out_unlock;
- if (*flags & MS_RDONLY)
+ if (*flags & SB_RDONLY)
udf_close_lvid(sb);
else
udf_open_lvid(sb);
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
if (overflow) {
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
succed:
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
unlock_buffer(bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bh);
brelse(bh);
}
fs32_add(sb, &ucg->cg_u.cg_u2.cg_initediblk, uspi->s_inopb);
ubh_mark_buffer_dirty(UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
UFSD("EXIT\n");
}
ubh_mark_buffer_dirty (USPI_UBH(uspi));
ubh_mark_buffer_dirty (UCPI_UBH(ucpi));
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
ubh_sync_block(UCPI_UBH(ucpi));
ufs_mark_sb_dirty(sb);
ufs2_inode->ui_birthnsec = cpu_to_fs32(sb, ts.tv_nsec);
mark_buffer_dirty(bh);
unlock_buffer(bh);
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
sync_dirty_buffer(bh);
brelse(bh);
}
usb1->fs_clean = UFS_FSBAD;
ubh_mark_buffer_dirty(USPI_UBH(uspi));
ufs_mark_sb_dirty(sb);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
va_start(args, fmt);
vaf.fmt = fmt;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
pr_crit("panic (device %s): %s: %pV\n",
sb->s_id, function, &vaf);
va_end(args);
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=old is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=nextstep is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=nextstep-cd is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=openstep is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
if (!sb_rdonly(sb)) {
if (!silent)
pr_info("ufstype=hp is supported read-only\n");
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
break;
default:
break;
case UFS_FSACTIVE:
pr_err("%s(): fs is active\n", __func__);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
break;
case UFS_FSBAD:
pr_err("%s(): fs is bad\n", __func__);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
break;
default:
pr_err("%s(): can't grok fs_clean 0x%x\n",
__func__, usb1->fs_clean);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
break;
}
} else {
pr_err("%s(): fs needs fsck\n", __func__);
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
}
/*
return -EINVAL;
}
- if ((bool)(*mount_flags & MS_RDONLY) == sb_rdonly(sb)) {
+ if ((bool)(*mount_flags & SB_RDONLY) == sb_rdonly(sb)) {
UFS_SB(sb)->s_mount_opt = new_mount_opt;
mutex_unlock(&UFS_SB(sb)->s_lock);
return 0;
/*
 * fs was mounted as rw, remounting ro
*/
- if (*mount_flags & MS_RDONLY) {
+ if (*mount_flags & SB_RDONLY) {
ufs_put_super_internal(sb);
usb1->fs_time = cpu_to_fs32(sb, get_seconds());
if ((flags & UFS_ST_MASK) == UFS_ST_SUN
ufs_set_fs_state(sb, usb1, usb3,
UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
ubh_mark_buffer_dirty (USPI_UBH(uspi));
- sb->s_flags |= MS_RDONLY;
+ sb->s_flags |= SB_RDONLY;
} else {
/*
* fs was mounted as ro, remounting rw
mutex_unlock(&UFS_SB(sb)->s_lock);
return -EPERM;
}
- sb->s_flags &= ~MS_RDONLY;
+ sb->s_flags &= ~SB_RDONLY;
#endif
}
UFS_SB(sb)->s_mount_opt = new_mount_opt;
* something to an unlinked inode, the irele won't cause
* premature truncation and freeing of the inode, which results
* in log recovery failure. We have to evict the unreferenced
- * lru inodes after clearing MS_ACTIVE because we don't
+ * lru inodes after clearing SB_ACTIVE because we don't
* otherwise clean up the lru if there's a subsequent failure in
* xfs_mountfs, which leads to us leaking the inodes if nothing
* else (e.g. quotacheck) references the inodes before the
* mount failure occurs.
*/
- mp->m_super->s_flags |= MS_ACTIVE;
+ mp->m_super->s_flags |= SB_ACTIVE;
error = xlog_recover_finish(mp->m_log);
if (!error)
xfs_log_work_queue(mp);
- mp->m_super->s_flags &= ~MS_ACTIVE;
+ mp->m_super->s_flags &= ~SB_ACTIVE;
evict_inodes(mp->m_super);
/*
*/
if (sb_rdonly(sb))
mp->m_flags |= XFS_MOUNT_RDONLY;
- if (sb->s_flags & MS_DIRSYNC)
+ if (sb->s_flags & SB_DIRSYNC)
mp->m_flags |= XFS_MOUNT_DIRSYNC;
- if (sb->s_flags & MS_SYNCHRONOUS)
+ if (sb->s_flags & SB_SYNCHRONOUS)
mp->m_flags |= XFS_MOUNT_WSYNC;
/*
}
/* ro -> rw */
- if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
+ if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & SB_RDONLY)) {
if (mp->m_flags & XFS_MOUNT_NORECOVERY) {
xfs_warn(mp,
"ro->rw transition prohibited on norecovery mount");
}
/* rw -> ro */
- if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & MS_RDONLY)) {
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY) && (*flags & SB_RDONLY)) {
/* Free the per-AG metadata reservation pool. */
error = xfs_fs_unreserve_ag_blocks(mp);
if (error) {
#ifdef CONFIG_XFS_POSIX_ACL
# define XFS_ACL_STRING "ACLs, "
-# define set_posix_acl_flag(sb) ((sb)->s_flags |= MS_POSIXACL)
+# define set_posix_acl_flag(sb) ((sb)->s_flags |= SB_POSIXACL)
#else
# define XFS_ACL_STRING
# define set_posix_acl_flag(sb) do { } while (0)
ACPI_BUS_TYPE_THERMAL,
ACPI_BUS_TYPE_POWER_BUTTON,
ACPI_BUS_TYPE_SLEEP_BUTTON,
+ ACPI_BUS_TYPE_ECDT_EC,
ACPI_BUS_DEVICE_TYPE_COUNT
};
#define ACPI_VIDEO_HID "LNXVIDEO"
#define ACPI_BAY_HID "LNXIOBAY"
#define ACPI_DOCK_HID "LNXDOCK"
+#define ACPI_ECDT_HID "LNXEC"
/* Quirk for broken IBM BIOSes */
#define ACPI_SMBUS_IBM_HID "SMBUSIBM"
{
return 0;
}
-#ifndef __HAVE_ARCH_PMD_WRITE
+#ifndef pmd_write
static inline int pmd_write(pmd_t pmd)
{
BUG();
return 0;
}
-#endif /* __HAVE_ARCH_PMD_WRITE */
+#endif /* pmd_write */
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+#ifndef pud_write
+static inline int pud_write(pud_t pud)
+{
+ BUG();
+ return 0;
+}
+#endif /* pud_write */
+
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
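The guard switches from the __HAVE_ARCH_PMD_WRITE feature macro to the now-common convention of testing the function name itself: an architecture overrides the BUG() stub by defining the name to itself next to its inline. A sketch of the arch side, with a hypothetical page-table write bit::

  /* in the architecture's asm/pgtable.h */
  #define pmd_write pmd_write
  static inline int pmd_write(pmd_t pmd)
  {
          /* _PAGE_WRITE stands in for the arch-specific writable bit */
          return !!(pmd_val(pmd) & _PAGE_WRITE);
  }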
unsigned int ivsize);
ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
+void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
unsigned int af_alg_poll(struct file *file, struct socket *sock,
poll_table *wait);
* @hdmi: advance features of a HDMI sink.
*/
struct drm_hdmi_info hdmi;
+
+ /**
+ * @non_desktop: Non desktop display (HMD).
+ */
+ bool non_desktop;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
const struct drm_display_mode *mode,
enum hdmi_quantization_range rgb_quant_range,
- bool rgb_quant_range_selectable);
+ bool rgb_quant_range_selectable,
+ bool is_hdmi2_sink);
/**
* drm_eld_mnl - Get ELD monitor name length in bytes.
*/
struct drm_property *suggested_y_property;
+ /**
+ * @non_desktop_property: Optional connector property with a hint
+ * that device isn't a standard display, and the console/desktop,
+ * should not be displayed on it.
+ */
+ struct drm_property *non_desktop_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
*/
void ttm_pool_unpopulate(struct ttm_tt *ttm);
+/**
+ * Populates and DMA maps pages to fulfil a ttm_dma_populate() request
+ */
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+
+/**
+ * Unpopulates and DMA unmaps pages as part of a
+ * ttm_dma_unpopulate() request */
+void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
+
/**
* Output the state of pools to debugfs file
*/
int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-
#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
/**
* Initialize pool allocator.
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
-
-/**
- * Populates and DMA maps pages to fullfil a ttm_dma_populate() request
- */
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
-
-/**
- * Unpopulates and DMA unmaps pages as part of a
- * ttm_dma_unpopulate() request */
-void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
-
#else
static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob,
unsigned max_pages)
struct device *dev)
{
}
-
-static inline int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
- return -ENOMEM;
-}
-
-static inline void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
-{
-}
-
#endif
#endif
+++ /dev/null
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __MSM_BUS_IDS_H
-#define __MSM_BUS_IDS_H
-
-/* Aggregation types */
-#define AGG_SCHEME_NONE 0
-#define AGG_SCHEME_LEG 1
-#define AGG_SCHEME_1 2
-
-/* Topology related enums */
-#define MSM_BUS_FAB_DEFAULT 0
-#define MSM_BUS_FAB_APPSS 0
-#define MSM_BUS_FAB_SYSTEM 1024
-#define MSM_BUS_FAB_MMSS 2048
-#define MSM_BUS_FAB_SYSTEM_FPB 3072
-#define MSM_BUS_FAB_CPSS_FPB 4096
-
-#define MSM_BUS_FAB_BIMC 0
-#define MSM_BUS_FAB_SYS_NOC 1024
-#define MSM_BUS_FAB_MMSS_NOC 2048
-#define MSM_BUS_FAB_OCMEM_NOC 3072
-#define MSM_BUS_FAB_PERIPH_NOC 4096
-#define MSM_BUS_FAB_CONFIG_NOC 5120
-#define MSM_BUS_FAB_OCMEM_VNOC 6144
-#define MSM_BUS_FAB_MMSS_AHB 2049
-#define MSM_BUS_FAB_A0_NOC 6145
-#define MSM_BUS_FAB_A1_NOC 6146
-#define MSM_BUS_FAB_A2_NOC 6147
-#define MSM_BUS_FAB_GNOC 6148
-#define MSM_BUS_FAB_CR_VIRT 6149
-
-#define MSM_BUS_MASTER_FIRST 1
-#define MSM_BUS_MASTER_AMPSS_M0 1
-#define MSM_BUS_MASTER_AMPSS_M1 2
-#define MSM_BUS_APPSS_MASTER_FAB_MMSS 3
-#define MSM_BUS_APPSS_MASTER_FAB_SYSTEM 4
-#define MSM_BUS_SYSTEM_MASTER_FAB_APPSS 5
-#define MSM_BUS_MASTER_SPS 6
-#define MSM_BUS_MASTER_ADM_PORT0 7
-#define MSM_BUS_MASTER_ADM_PORT1 8
-#define MSM_BUS_SYSTEM_MASTER_ADM1_PORT0 9
-#define MSM_BUS_MASTER_ADM1_PORT1 10
-#define MSM_BUS_MASTER_LPASS_PROC 11
-#define MSM_BUS_MASTER_MSS_PROCI 12
-#define MSM_BUS_MASTER_MSS_PROCD 13
-#define MSM_BUS_MASTER_MSS_MDM_PORT0 14
-#define MSM_BUS_MASTER_LPASS 15
-#define MSM_BUS_SYSTEM_MASTER_CPSS_FPB 16
-#define MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB 17
-#define MSM_BUS_SYSTEM_MASTER_MMSS_FPB 18
-#define MSM_BUS_MASTER_ADM1_CI 19
-#define MSM_BUS_MASTER_ADM0_CI 20
-#define MSM_BUS_MASTER_MSS_MDM_PORT1 21
-#define MSM_BUS_MASTER_MDP_PORT0 22
-#define MSM_BUS_MASTER_MDP_PORT1 23
-#define MSM_BUS_MMSS_MASTER_ADM1_PORT0 24
-#define MSM_BUS_MASTER_ROTATOR 25
-#define MSM_BUS_MASTER_GRAPHICS_3D 26
-#define MSM_BUS_MASTER_JPEG_DEC 27
-#define MSM_BUS_MASTER_GRAPHICS_2D_CORE0 28
-#define MSM_BUS_MASTER_VFE 29
-#define MSM_BUS_MASTER_VFE0 MSM_BUS_MASTER_VFE
-#define MSM_BUS_MASTER_VPE 30
-#define MSM_BUS_MASTER_JPEG_ENC 31
-#define MSM_BUS_MASTER_GRAPHICS_2D_CORE1 32
-#define MSM_BUS_MMSS_MASTER_APPS_FAB 33
-#define MSM_BUS_MASTER_HD_CODEC_PORT0 34
-#define MSM_BUS_MASTER_HD_CODEC_PORT1 35
-#define MSM_BUS_MASTER_SPDM 36
-#define MSM_BUS_MASTER_RPM 37
-#define MSM_BUS_MASTER_MSS 38
-#define MSM_BUS_MASTER_RIVA 39
-#define MSM_BUS_MASTER_SNOC_VMEM 40
-#define MSM_BUS_MASTER_MSS_SW_PROC 41
-#define MSM_BUS_MASTER_MSS_FW_PROC 42
-#define MSM_BUS_MASTER_HMSS 43
-#define MSM_BUS_MASTER_GSS_NAV 44
-#define MSM_BUS_MASTER_PCIE 45
-#define MSM_BUS_MASTER_SATA 46
-#define MSM_BUS_MASTER_CRYPTO 47
-#define MSM_BUS_MASTER_VIDEO_CAP 48
-#define MSM_BUS_MASTER_GRAPHICS_3D_PORT1 49
-#define MSM_BUS_MASTER_VIDEO_ENC 50
-#define MSM_BUS_MASTER_VIDEO_DEC 51
-#define MSM_BUS_MASTER_LPASS_AHB 52
-#define MSM_BUS_MASTER_QDSS_BAM 53
-#define MSM_BUS_MASTER_SNOC_CFG 54
-#define MSM_BUS_MASTER_CRYPTO_CORE0 55
-#define MSM_BUS_MASTER_CRYPTO_CORE1 56
-#define MSM_BUS_MASTER_MSS_NAV 57
-#define MSM_BUS_MASTER_OCMEM_DMA 58
-#define MSM_BUS_MASTER_WCSS 59
-#define MSM_BUS_MASTER_QDSS_ETR 60
-#define MSM_BUS_MASTER_USB3 61
-#define MSM_BUS_MASTER_JPEG 62
-#define MSM_BUS_MASTER_VIDEO_P0 63
-#define MSM_BUS_MASTER_VIDEO_P1 64
-#define MSM_BUS_MASTER_MSS_PROC 65
-#define MSM_BUS_MASTER_JPEG_OCMEM 66
-#define MSM_BUS_MASTER_MDP_OCMEM 67
-#define MSM_BUS_MASTER_VIDEO_P0_OCMEM 68
-#define MSM_BUS_MASTER_VIDEO_P1_OCMEM 69
-#define MSM_BUS_MASTER_VFE_OCMEM 70
-#define MSM_BUS_MASTER_CNOC_ONOC_CFG 71
-#define MSM_BUS_MASTER_RPM_INST 72
-#define MSM_BUS_MASTER_RPM_DATA 73
-#define MSM_BUS_MASTER_RPM_SYS 74
-#define MSM_BUS_MASTER_DEHR 75
-#define MSM_BUS_MASTER_QDSS_DAP 76
-#define MSM_BUS_MASTER_TIC 77
-#define MSM_BUS_MASTER_SDCC_1 78
-#define MSM_BUS_MASTER_SDCC_3 79
-#define MSM_BUS_MASTER_SDCC_4 80
-#define MSM_BUS_MASTER_SDCC_2 81
-#define MSM_BUS_MASTER_TSIF 82
-#define MSM_BUS_MASTER_BAM_DMA 83
-#define MSM_BUS_MASTER_BLSP_2 84
-#define MSM_BUS_MASTER_USB_HSIC 85
-#define MSM_BUS_MASTER_BLSP_1 86
-#define MSM_BUS_MASTER_USB_HS 87
-#define MSM_BUS_MASTER_PNOC_CFG 88
-#define MSM_BUS_MASTER_V_OCMEM_GFX3D 89
-#define MSM_BUS_MASTER_IPA 90
-#define MSM_BUS_MASTER_QPIC 91
-#define MSM_BUS_MASTER_MDPE 92
-#define MSM_BUS_MASTER_USB_HS2 93
-#define MSM_BUS_MASTER_VPU 94
-#define MSM_BUS_MASTER_UFS 95
-#define MSM_BUS_MASTER_BCAST 96
-#define MSM_BUS_MASTER_CRYPTO_CORE2 97
-#define MSM_BUS_MASTER_EMAC 98
-#define MSM_BUS_MASTER_VPU_1 99
-#define MSM_BUS_MASTER_PCIE_1 100
-#define MSM_BUS_MASTER_USB3_1 101
-#define MSM_BUS_MASTER_CNOC_MNOC_MMSS_CFG 102
-#define MSM_BUS_MASTER_CNOC_MNOC_CFG 103
-#define MSM_BUS_MASTER_TCU_0 104
-#define MSM_BUS_MASTER_TCU_1 105
-#define MSM_BUS_MASTER_CPP 106
-#define MSM_BUS_MASTER_AUDIO 107
-#define MSM_BUS_MASTER_PCIE_2 108
-#define MSM_BUS_MASTER_VFE1 109
-#define MSM_BUS_MASTER_XM_USB_HS1 110
-#define MSM_BUS_MASTER_PCNOC_BIMC_1 111
-#define MSM_BUS_MASTER_BIMC_PCNOC 112
-#define MSM_BUS_MASTER_XI_USB_HSIC 113
-#define MSM_BUS_MASTER_SGMII 114
-#define MSM_BUS_SPMI_FETCHER 115
-#define MSM_BUS_MASTER_GNOC_BIMC 116
-#define MSM_BUS_MASTER_CRVIRT_A2NOC 117
-#define MSM_BUS_MASTER_CNOC_A2NOC 118
-#define MSM_BUS_MASTER_WLAN 119
-#define MSM_BUS_MASTER_MSS_CE 120
-#define MSM_BUS_MASTER_CDSP_PROC 121
-#define MSM_BUS_MASTER_GNOC_SNOC 122
-#define MSM_BUS_MASTER_PIMEM 123
-#define MSM_BUS_MASTER_MASTER_LAST 124
-
-#define MSM_BUS_SYSTEM_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_SYSTEM_FPB
-#define MSM_BUS_CPSS_FPB_MASTER_SYSTEM MSM_BUS_SYSTEM_MASTER_CPSS_FPB
-
-#define MSM_BUS_SNOC_MM_INT_0 10000
-#define MSM_BUS_SNOC_MM_INT_1 10001
-#define MSM_BUS_SNOC_MM_INT_2 10002
-#define MSM_BUS_SNOC_MM_INT_BIMC 10003
-#define MSM_BUS_SNOC_INT_0 10004
-#define MSM_BUS_SNOC_INT_1 10005
-#define MSM_BUS_SNOC_INT_BIMC 10006
-#define MSM_BUS_SNOC_BIMC_0_MAS 10007
-#define MSM_BUS_SNOC_BIMC_1_MAS 10008
-#define MSM_BUS_SNOC_QDSS_INT 10009
-#define MSM_BUS_PNOC_SNOC_MAS 10010
-#define MSM_BUS_PNOC_SNOC_SLV 10011
-#define MSM_BUS_PNOC_INT_0 10012
-#define MSM_BUS_PNOC_INT_1 10013
-#define MSM_BUS_PNOC_M_0 10014
-#define MSM_BUS_PNOC_M_1 10015
-#define MSM_BUS_BIMC_SNOC_MAS 10016
-#define MSM_BUS_BIMC_SNOC_SLV 10017
-#define MSM_BUS_PNOC_SLV_0 10018
-#define MSM_BUS_PNOC_SLV_1 10019
-#define MSM_BUS_PNOC_SLV_2 10020
-#define MSM_BUS_PNOC_SLV_3 10021
-#define MSM_BUS_PNOC_SLV_4 10022
-#define MSM_BUS_PNOC_SLV_8 10023
-#define MSM_BUS_PNOC_SLV_9 10024
-#define MSM_BUS_SNOC_BIMC_0_SLV 10025
-#define MSM_BUS_SNOC_BIMC_1_SLV 10026
-#define MSM_BUS_MNOC_BIMC_MAS 10027
-#define MSM_BUS_MNOC_BIMC_SLV 10028
-#define MSM_BUS_BIMC_MNOC_MAS 10029
-#define MSM_BUS_BIMC_MNOC_SLV 10030
-#define MSM_BUS_SNOC_BIMC_MAS 10031
-#define MSM_BUS_SNOC_BIMC_SLV 10032
-#define MSM_BUS_CNOC_SNOC_MAS 10033
-#define MSM_BUS_CNOC_SNOC_SLV 10034
-#define MSM_BUS_SNOC_CNOC_MAS 10035
-#define MSM_BUS_SNOC_CNOC_SLV 10036
-#define MSM_BUS_OVNOC_SNOC_MAS 10037
-#define MSM_BUS_OVNOC_SNOC_SLV 10038
-#define MSM_BUS_SNOC_OVNOC_MAS 10039
-#define MSM_BUS_SNOC_OVNOC_SLV 10040
-#define MSM_BUS_SNOC_PNOC_MAS 10041
-#define MSM_BUS_SNOC_PNOC_SLV 10042
-#define MSM_BUS_BIMC_INT_APPS_EBI 10043
-#define MSM_BUS_BIMC_INT_APPS_SNOC 10044
-#define MSM_BUS_SNOC_BIMC_2_MAS 10045
-#define MSM_BUS_SNOC_BIMC_2_SLV 10046
-#define MSM_BUS_PNOC_SLV_5 10047
-#define MSM_BUS_PNOC_SLV_7 10048
-#define MSM_BUS_PNOC_INT_2 10049
-#define MSM_BUS_PNOC_INT_3 10050
-#define MSM_BUS_PNOC_INT_4 10051
-#define MSM_BUS_PNOC_INT_5 10052
-#define MSM_BUS_PNOC_INT_6 10053
-#define MSM_BUS_PNOC_INT_7 10054
-#define MSM_BUS_BIMC_SNOC_1_MAS 10055
-#define MSM_BUS_BIMC_SNOC_1_SLV 10056
-#define MSM_BUS_PNOC_A1NOC_MAS 10057
-#define MSM_BUS_PNOC_A1NOC_SLV 10058
-#define MSM_BUS_CNOC_A1NOC_MAS 10059
-#define MSM_BUS_A0NOC_SNOC_MAS 10060
-#define MSM_BUS_A0NOC_SNOC_SLV 10061
-#define MSM_BUS_A1NOC_SNOC_SLV 10062
-#define MSM_BUS_A1NOC_SNOC_MAS 10063
-#define MSM_BUS_A2NOC_SNOC_MAS 10064
-#define MSM_BUS_A2NOC_SNOC_SLV 10065
-#define MSM_BUS_SNOC_INT_2 10066
-#define MSM_BUS_A0NOC_QDSS_INT 10067
-#define MSM_BUS_INT_LAST 10068
-
-#define MSM_BUS_INT_TEST_ID 20000
-#define MSM_BUS_INT_TEST_LAST 20050
-
-#define MSM_BUS_SLAVE_FIRST 512
-#define MSM_BUS_SLAVE_EBI_CH0 512
-#define MSM_BUS_SLAVE_EBI_CH1 513
-#define MSM_BUS_SLAVE_AMPSS_L2 514
-#define MSM_BUS_APPSS_SLAVE_FAB_MMSS 515
-#define MSM_BUS_APPSS_SLAVE_FAB_SYSTEM 516
-#define MSM_BUS_SYSTEM_SLAVE_FAB_APPS 517
-#define MSM_BUS_SLAVE_SPS 518
-#define MSM_BUS_SLAVE_SYSTEM_IMEM 519
-#define MSM_BUS_SLAVE_AMPSS 520
-#define MSM_BUS_SLAVE_MSS 521
-#define MSM_BUS_SLAVE_LPASS 522
-#define MSM_BUS_SYSTEM_SLAVE_CPSS_FPB 523
-#define MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB 524
-#define MSM_BUS_SYSTEM_SLAVE_MMSS_FPB 525
-#define MSM_BUS_SLAVE_CORESIGHT 526
-#define MSM_BUS_SLAVE_RIVA 527
-#define MSM_BUS_SLAVE_SMI 528
-#define MSM_BUS_MMSS_SLAVE_FAB_APPS 529
-#define MSM_BUS_MMSS_SLAVE_FAB_APPS_1 530
-#define MSM_BUS_SLAVE_MM_IMEM 531
-#define MSM_BUS_SLAVE_CRYPTO 532
-#define MSM_BUS_SLAVE_SPDM 533
-#define MSM_BUS_SLAVE_RPM 534
-#define MSM_BUS_SLAVE_RPM_MSG_RAM 535
-#define MSM_BUS_SLAVE_MPM 536
-#define MSM_BUS_SLAVE_PMIC1_SSBI1_A 537
-#define MSM_BUS_SLAVE_PMIC1_SSBI1_B 538
-#define MSM_BUS_SLAVE_PMIC1_SSBI1_C 539
-#define MSM_BUS_SLAVE_PMIC2_SSBI2_A 540
-#define MSM_BUS_SLAVE_PMIC2_SSBI2_B 541
-#define MSM_BUS_SLAVE_GSBI1_UART 542
-#define MSM_BUS_SLAVE_GSBI2_UART 543
-#define MSM_BUS_SLAVE_GSBI3_UART 544
-#define MSM_BUS_SLAVE_GSBI4_UART 545
-#define MSM_BUS_SLAVE_GSBI5_UART 546
-#define MSM_BUS_SLAVE_GSBI6_UART 547
-#define MSM_BUS_SLAVE_GSBI7_UART 548
-#define MSM_BUS_SLAVE_GSBI8_UART 549
-#define MSM_BUS_SLAVE_GSBI9_UART 550
-#define MSM_BUS_SLAVE_GSBI10_UART 551
-#define MSM_BUS_SLAVE_GSBI11_UART 552
-#define MSM_BUS_SLAVE_GSBI12_UART 553
-#define MSM_BUS_SLAVE_GSBI1_QUP 554
-#define MSM_BUS_SLAVE_GSBI2_QUP 555
-#define MSM_BUS_SLAVE_GSBI3_QUP 556
-#define MSM_BUS_SLAVE_GSBI4_QUP 557
-#define MSM_BUS_SLAVE_GSBI5_QUP 558
-#define MSM_BUS_SLAVE_GSBI6_QUP 559
-#define MSM_BUS_SLAVE_GSBI7_QUP 560
-#define MSM_BUS_SLAVE_GSBI8_QUP 561
-#define MSM_BUS_SLAVE_GSBI9_QUP 562
-#define MSM_BUS_SLAVE_GSBI10_QUP 563
-#define MSM_BUS_SLAVE_GSBI11_QUP 564
-#define MSM_BUS_SLAVE_GSBI12_QUP 565
-#define MSM_BUS_SLAVE_EBI2_NAND 566
-#define MSM_BUS_SLAVE_EBI2_CS0 567
-#define MSM_BUS_SLAVE_EBI2_CS1 568
-#define MSM_BUS_SLAVE_EBI2_CS2 569
-#define MSM_BUS_SLAVE_EBI2_CS3 570
-#define MSM_BUS_SLAVE_EBI2_CS4 571
-#define MSM_BUS_SLAVE_EBI2_CS5 572
-#define MSM_BUS_SLAVE_USB_FS1 573
-#define MSM_BUS_SLAVE_USB_FS2 574
-#define MSM_BUS_SLAVE_TSIF 575
-#define MSM_BUS_SLAVE_MSM_TSSC 576
-#define MSM_BUS_SLAVE_MSM_PDM 577
-#define MSM_BUS_SLAVE_MSM_DIMEM 578
-#define MSM_BUS_SLAVE_MSM_TCSR 579
-#define MSM_BUS_SLAVE_MSM_PRNG 580
-#define MSM_BUS_SLAVE_GSS 581
-#define MSM_BUS_SLAVE_SATA 582
-#define MSM_BUS_SLAVE_USB3 583
-#define MSM_BUS_SLAVE_WCSS 584
-#define MSM_BUS_SLAVE_OCIMEM 585
-#define MSM_BUS_SLAVE_SNOC_OCMEM 586
-#define MSM_BUS_SLAVE_SERVICE_SNOC 587
-#define MSM_BUS_SLAVE_QDSS_STM 588
-#define MSM_BUS_SLAVE_CAMERA_CFG 589
-#define MSM_BUS_SLAVE_DISPLAY_CFG 590
-#define MSM_BUS_SLAVE_OCMEM_CFG 591
-#define MSM_BUS_SLAVE_CPR_CFG 592
-#define MSM_BUS_SLAVE_CPR_XPU_CFG 593
-#define MSM_BUS_SLAVE_MISC_CFG 594
-#define MSM_BUS_SLAVE_MISC_XPU_CFG 595
-#define MSM_BUS_SLAVE_VENUS_CFG 596
-#define MSM_BUS_SLAVE_MISC_VENUS_CFG 597
-#define MSM_BUS_SLAVE_GRAPHICS_3D_CFG 598
-#define MSM_BUS_SLAVE_MMSS_CLK_CFG 599
-#define MSM_BUS_SLAVE_MMSS_CLK_XPU_CFG 600
-#define MSM_BUS_SLAVE_MNOC_MPU_CFG 601
-#define MSM_BUS_SLAVE_ONOC_MPU_CFG 602
-#define MSM_BUS_SLAVE_SERVICE_MNOC 603
-#define MSM_BUS_SLAVE_OCMEM 604
-#define MSM_BUS_SLAVE_SERVICE_ONOC 605
-#define MSM_BUS_SLAVE_SDCC_1 606
-#define MSM_BUS_SLAVE_SDCC_3 607
-#define MSM_BUS_SLAVE_SDCC_2 608
-#define MSM_BUS_SLAVE_SDCC_4 609
-#define MSM_BUS_SLAVE_BAM_DMA 610
-#define MSM_BUS_SLAVE_BLSP_2 611
-#define MSM_BUS_SLAVE_USB_HSIC 612
-#define MSM_BUS_SLAVE_BLSP_1 613
-#define MSM_BUS_SLAVE_USB_HS 614
-#define MSM_BUS_SLAVE_PDM 615
-#define MSM_BUS_SLAVE_PERIPH_APU_CFG 616
-#define MSM_BUS_SLAVE_PNOC_MPU_CFG 617
-#define MSM_BUS_SLAVE_PRNG 618
-#define MSM_BUS_SLAVE_SERVICE_PNOC 619
-#define MSM_BUS_SLAVE_CLK_CTL 620
-#define MSM_BUS_SLAVE_CNOC_MSS 621
-#define MSM_BUS_SLAVE_SECURITY 622
-#define MSM_BUS_SLAVE_TCSR 623
-#define MSM_BUS_SLAVE_TLMM 624
-#define MSM_BUS_SLAVE_CRYPTO_0_CFG 625
-#define MSM_BUS_SLAVE_CRYPTO_1_CFG 626
-#define MSM_BUS_SLAVE_IMEM_CFG 627
-#define MSM_BUS_SLAVE_MESSAGE_RAM 628
-#define MSM_BUS_SLAVE_BIMC_CFG 629
-#define MSM_BUS_SLAVE_BOOT_ROM 630
-#define MSM_BUS_SLAVE_CNOC_MNOC_MMSS_CFG 631
-#define MSM_BUS_SLAVE_PMIC_ARB 632
-#define MSM_BUS_SLAVE_SPDM_WRAPPER 633
-#define MSM_BUS_SLAVE_DEHR_CFG 634
-#define MSM_BUS_SLAVE_QDSS_CFG 635
-#define MSM_BUS_SLAVE_RBCPR_CFG 636
-#define MSM_BUS_SLAVE_RBCPR_QDSS_APU_CFG 637
-#define MSM_BUS_SLAVE_SNOC_MPU_CFG 638
-#define MSM_BUS_SLAVE_CNOC_ONOC_CFG 639
-#define MSM_BUS_SLAVE_CNOC_MNOC_CFG 640
-#define MSM_BUS_SLAVE_PNOC_CFG 641
-#define MSM_BUS_SLAVE_SNOC_CFG 642
-#define MSM_BUS_SLAVE_EBI1_DLL_CFG 643
-#define MSM_BUS_SLAVE_PHY_APU_CFG 644
-#define MSM_BUS_SLAVE_EBI1_PHY_CFG 645
-#define MSM_BUS_SLAVE_SERVICE_CNOC 646
-#define MSM_BUS_SLAVE_IPS_CFG 647
-#define MSM_BUS_SLAVE_QPIC 648
-#define MSM_BUS_SLAVE_DSI_CFG 649
-#define MSM_BUS_SLAVE_UFS_CFG 650
-#define MSM_BUS_SLAVE_RBCPR_CX_CFG 651
-#define MSM_BUS_SLAVE_RBCPR_MX_CFG 652
-#define MSM_BUS_SLAVE_PCIE_CFG 653
-#define MSM_BUS_SLAVE_USB_PHYS_CFG 654
-#define MSM_BUS_SLAVE_VIDEO_CAP_CFG 655
-#define MSM_BUS_SLAVE_AVSYNC_CFG 656
-#define MSM_BUS_SLAVE_CRYPTO_2_CFG 657
-#define MSM_BUS_SLAVE_VPU_CFG 658
-#define MSM_BUS_SLAVE_BCAST_CFG 659
-#define MSM_BUS_SLAVE_KLM_CFG 660
-#define MSM_BUS_SLAVE_GENI_IR_CFG 661
-#define MSM_BUS_SLAVE_OCMEM_GFX 662
-#define MSM_BUS_SLAVE_CATS_128 663
-#define MSM_BUS_SLAVE_OCMEM_64 664
-#define MSM_BUS_SLAVE_PCIE_0 665
-#define MSM_BUS_SLAVE_PCIE_1 666
-#define MSM_BUS_SLAVE_PCIE_0_CFG 667
-#define MSM_BUS_SLAVE_PCIE_1_CFG 668
-#define MSM_BUS_SLAVE_SRVC_MNOC 669
-#define MSM_BUS_SLAVE_USB_HS2 670
-#define MSM_BUS_SLAVE_AUDIO 671
-#define MSM_BUS_SLAVE_TCU 672
-#define MSM_BUS_SLAVE_APPSS 673
-#define MSM_BUS_SLAVE_PCIE_PARF 674
-#define MSM_BUS_SLAVE_USB3_PHY_CFG 675
-#define MSM_BUS_SLAVE_IPA_CFG 676
-#define MSM_BUS_SLAVE_A0NOC_SNOC 677
-#define MSM_BUS_SLAVE_A1NOC_SNOC 678
-#define MSM_BUS_SLAVE_A2NOC_SNOC 679
-#define MSM_BUS_SLAVE_HMSS_L3 680
-#define MSM_BUS_SLAVE_PIMEM_CFG 681
-#define MSM_BUS_SLAVE_DCC_CFG 682
-#define MSM_BUS_SLAVE_QDSS_RBCPR_APU_CFG 683
-#define MSM_BUS_SLAVE_PCIE_2_CFG 684
-#define MSM_BUS_SLAVE_PCIE20_AHB2PHY 685
-#define MSM_BUS_SLAVE_A0NOC_CFG 686
-#define MSM_BUS_SLAVE_A1NOC_CFG 687
-#define MSM_BUS_SLAVE_A2NOC_CFG 688
-#define MSM_BUS_SLAVE_A1NOC_MPU_CFG 689
-#define MSM_BUS_SLAVE_A2NOC_MPU_CFG 690
-#define MSM_BUS_SLAVE_A0NOC_SMMU_CFG 691
-#define MSM_BUS_SLAVE_A1NOC_SMMU_CFG 692
-#define MSM_BUS_SLAVE_A2NOC_SMMU_CFG 693
-#define MSM_BUS_SLAVE_LPASS_SMMU_CFG 694
-#define MSM_BUS_SLAVE_MMAGIC_CFG 695
-#define MSM_BUS_SLAVE_VENUS_THROTTLE_CFG 696
-#define MSM_BUS_SLAVE_SSC_CFG 697
-#define MSM_BUS_SLAVE_DSA_CFG 698
-#define MSM_BUS_SLAVE_DSA_MPU_CFG 699
-#define MSM_BUS_SLAVE_DISPLAY_THROTTLE_CFG 700
-#define MSM_BUS_SLAVE_SMMU_CPP_CFG 701
-#define MSM_BUS_SLAVE_SMMU_JPEG_CFG 702
-#define MSM_BUS_SLAVE_SMMU_MDP_CFG 703
-#define MSM_BUS_SLAVE_SMMU_ROTATOR_CFG 704
-#define MSM_BUS_SLAVE_SMMU_VENUS_CFG 705
-#define MSM_BUS_SLAVE_SMMU_VFE_CFG 706
-#define MSM_BUS_SLAVE_A0NOC_MPU_CFG 707
-#define MSM_BUS_SLAVE_VMEM_CFG 708
-#define MSM_BUS_SLAVE_CAMERA_THROTTLE_CFG 709
-#define MSM_BUS_SLAVE_VMEM 710
-#define MSM_BUS_SLAVE_AHB2PHY 711
-#define MSM_BUS_SLAVE_PIMEM 712
-#define MSM_BUS_SLAVE_SNOC_VMEM 713
-#define MSM_BUS_SLAVE_PCIE_2 714
-#define MSM_BUS_SLAVE_RBCPR_MX 715
-#define MSM_BUS_SLAVE_RBCPR_CX 716
-#define MSM_BUS_SLAVE_BIMC_PCNOC 717
-#define MSM_BUS_SLAVE_PCNOC_BIMC_1 718
-#define MSM_BUS_SLAVE_SGMII 719
-#define MSM_BUS_SLAVE_SPMI_FETCHER 720
-#define MSM_BUS_PNOC_SLV_6 721
-#define MSM_BUS_SLAVE_MMSS_SMMU_CFG 722
-#define MSM_BUS_SLAVE_WLAN 723
-#define MSM_BUS_SLAVE_CRVIRT_A2NOC 724
-#define MSM_BUS_SLAVE_CNOC_A2NOC 725
-#define MSM_BUS_SLAVE_GLM 726
-#define MSM_BUS_SLAVE_GNOC_BIMC 727
-#define MSM_BUS_SLAVE_GNOC_SNOC 728
-#define MSM_BUS_SLAVE_QM_CFG 729
-#define MSM_BUS_SLAVE_TLMM_EAST 730
-#define MSM_BUS_SLAVE_TLMM_NORTH 731
-#define MSM_BUS_SLAVE_TLMM_WEST 732
-#define MSM_BUS_SLAVE_SKL 733
-#define MSM_BUS_SLAVE_LPASS_TCM 734
-#define MSM_BUS_SLAVE_TLMM_SOUTH 735
-#define MSM_BUS_SLAVE_TLMM_CENTER 736
-#define MSM_BUS_MSS_NAV_CE_MPU_CFG 737
-#define MSM_BUS_SLAVE_A2NOC_THROTTLE_CFG 738
-#define MSM_BUS_SLAVE_CDSP 739
-#define MSM_BUS_SLAVE_CDSP_SMMU_CFG 740
-#define MSM_BUS_SLAVE_LPASS_MPU_CFG 741
-#define MSM_BUS_SLAVE_CSI_PHY_CFG 742
-#define MSM_BUS_SLAVE_LAST 743
-
-#define MSM_BUS_SYSTEM_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_SYSTEM_FPB
-#define MSM_BUS_CPSS_FPB_SLAVE_SYSTEM MSM_BUS_SYSTEM_SLAVE_CPSS_FPB
-
-/*
- * ID's used in RPM messages
- */
-#define ICBID_MASTER_APPSS_PROC 0
-#define ICBID_MASTER_MSS_PROC 1
-#define ICBID_MASTER_MNOC_BIMC 2
-#define ICBID_MASTER_SNOC_BIMC 3
-#define ICBID_MASTER_SNOC_BIMC_0 ICBID_MASTER_SNOC_BIMC
-#define ICBID_MASTER_CNOC_MNOC_MMSS_CFG 4
-#define ICBID_MASTER_CNOC_MNOC_CFG 5
-#define ICBID_MASTER_GFX3D 6
-#define ICBID_MASTER_JPEG 7
-#define ICBID_MASTER_MDP 8
-#define ICBID_MASTER_MDP0 ICBID_MASTER_MDP
-#define ICBID_MASTER_MDPS ICBID_MASTER_MDP
-#define ICBID_MASTER_VIDEO 9
-#define ICBID_MASTER_VIDEO_P0 ICBID_MASTER_VIDEO
-#define ICBID_MASTER_VIDEO_P1 10
-#define ICBID_MASTER_VFE 11
-#define ICBID_MASTER_VFE0 ICBID_MASTER_VFE
-#define ICBID_MASTER_CNOC_ONOC_CFG 12
-#define ICBID_MASTER_JPEG_OCMEM 13
-#define ICBID_MASTER_MDP_OCMEM 14
-#define ICBID_MASTER_VIDEO_P0_OCMEM 15
-#define ICBID_MASTER_VIDEO_P1_OCMEM 16
-#define ICBID_MASTER_VFE_OCMEM 17
-#define ICBID_MASTER_LPASS_AHB 18
-#define ICBID_MASTER_QDSS_BAM 19
-#define ICBID_MASTER_SNOC_CFG 20
-#define ICBID_MASTER_BIMC_SNOC 21
-#define ICBID_MASTER_BIMC_SNOC_0 ICBID_MASTER_BIMC_SNOC
-#define ICBID_MASTER_CNOC_SNOC 22
-#define ICBID_MASTER_CRYPTO 23
-#define ICBID_MASTER_CRYPTO_CORE0 ICBID_MASTER_CRYPTO
-#define ICBID_MASTER_CRYPTO_CORE1 24
-#define ICBID_MASTER_LPASS_PROC 25
-#define ICBID_MASTER_MSS 26
-#define ICBID_MASTER_MSS_NAV 27
-#define ICBID_MASTER_OCMEM_DMA 28
-#define ICBID_MASTER_PNOC_SNOC 29
-#define ICBID_MASTER_WCSS 30
-#define ICBID_MASTER_QDSS_ETR 31
-#define ICBID_MASTER_USB3 32
-#define ICBID_MASTER_USB3_0 ICBID_MASTER_USB3
-#define ICBID_MASTER_SDCC_1 33
-#define ICBID_MASTER_SDCC_3 34
-#define ICBID_MASTER_SDCC_2 35
-#define ICBID_MASTER_SDCC_4 36
-#define ICBID_MASTER_TSIF 37
-#define ICBID_MASTER_BAM_DMA 38
-#define ICBID_MASTER_BLSP_2 39
-#define ICBID_MASTER_USB_HSIC 40
-#define ICBID_MASTER_BLSP_1 41
-#define ICBID_MASTER_USB_HS 42
-#define ICBID_MASTER_USB_HS1 ICBID_MASTER_USB_HS
-#define ICBID_MASTER_PNOC_CFG 43
-#define ICBID_MASTER_SNOC_PNOC 44
-#define ICBID_MASTER_RPM_INST 45
-#define ICBID_MASTER_RPM_DATA 46
-#define ICBID_MASTER_RPM_SYS 47
-#define ICBID_MASTER_DEHR 48
-#define ICBID_MASTER_QDSS_DAP 49
-#define ICBID_MASTER_SPDM 50
-#define ICBID_MASTER_TIC 51
-#define ICBID_MASTER_SNOC_CNOC 52
-#define ICBID_MASTER_GFX3D_OCMEM 53
-#define ICBID_MASTER_GFX3D_GMEM ICBID_MASTER_GFX3D_OCMEM
-#define ICBID_MASTER_OVIRT_SNOC 54
-#define ICBID_MASTER_SNOC_OVIRT 55
-#define ICBID_MASTER_SNOC_GVIRT ICBID_MASTER_SNOC_OVIRT
-#define ICBID_MASTER_ONOC_OVIRT 56
-#define ICBID_MASTER_USB_HS2 57
-#define ICBID_MASTER_QPIC 58
-#define ICBID_MASTER_IPA 59
-#define ICBID_MASTER_DSI 60
-#define ICBID_MASTER_MDP1 61
-#define ICBID_MASTER_MDPE ICBID_MASTER_MDP1
-#define ICBID_MASTER_VPU_PROC 62
-#define ICBID_MASTER_VPU 63
-#define ICBID_MASTER_VPU0 ICBID_MASTER_VPU
-#define ICBID_MASTER_CRYPTO_CORE2 64
-#define ICBID_MASTER_PCIE_0 65
-#define ICBID_MASTER_PCIE_1 66
-#define ICBID_MASTER_SATA 67
-#define ICBID_MASTER_UFS 68
-#define ICBID_MASTER_USB3_1 69
-#define ICBID_MASTER_VIDEO_OCMEM 70
-#define ICBID_MASTER_VPU1 71
-#define ICBID_MASTER_VCAP 72
-#define ICBID_MASTER_EMAC 73
-#define ICBID_MASTER_BCAST 74
-#define ICBID_MASTER_MMSS_PROC 75
-#define ICBID_MASTER_SNOC_BIMC_1 76
-#define ICBID_MASTER_SNOC_PCNOC 77
-#define ICBID_MASTER_AUDIO 78
-#define ICBID_MASTER_MM_INT_0 79
-#define ICBID_MASTER_MM_INT_1 80
-#define ICBID_MASTER_MM_INT_2 81
-#define ICBID_MASTER_MM_INT_BIMC 82
-#define ICBID_MASTER_MSS_INT 83
-#define ICBID_MASTER_PCNOC_CFG 84
-#define ICBID_MASTER_PCNOC_INT_0 85
-#define ICBID_MASTER_PCNOC_INT_1 86
-#define ICBID_MASTER_PCNOC_M_0 87
-#define ICBID_MASTER_PCNOC_M_1 88
-#define ICBID_MASTER_PCNOC_S_0 89
-#define ICBID_MASTER_PCNOC_S_1 90
-#define ICBID_MASTER_PCNOC_S_2 91
-#define ICBID_MASTER_PCNOC_S_3 92
-#define ICBID_MASTER_PCNOC_S_4 93
-#define ICBID_MASTER_PCNOC_S_6 94
-#define ICBID_MASTER_PCNOC_S_7 95
-#define ICBID_MASTER_PCNOC_S_8 96
-#define ICBID_MASTER_PCNOC_S_9 97
-#define ICBID_MASTER_QDSS_INT 98
-#define ICBID_MASTER_SNOC_INT_0 99
-#define ICBID_MASTER_SNOC_INT_1 100
-#define ICBID_MASTER_SNOC_INT_BIMC 101
-#define ICBID_MASTER_TCU_0 102
-#define ICBID_MASTER_TCU_1 103
-#define ICBID_MASTER_BIMC_INT_0 104
-#define ICBID_MASTER_BIMC_INT_1 105
-#define ICBID_MASTER_CAMERA 106
-#define ICBID_MASTER_RICA 107
-#define ICBID_MASTER_SNOC_BIMC_2 108
-#define ICBID_MASTER_BIMC_SNOC_1 109
-#define ICBID_MASTER_A0NOC_SNOC 110
-#define ICBID_MASTER_A1NOC_SNOC 111
-#define ICBID_MASTER_A2NOC_SNOC 112
-#define ICBID_MASTER_PIMEM 113
-#define ICBID_MASTER_SNOC_VMEM 114
-#define ICBID_MASTER_CPP 115
-#define ICBID_MASTER_CNOC_A1NOC 116
-#define ICBID_MASTER_PNOC_A1NOC 117
-#define ICBID_MASTER_HMSS 118
-#define ICBID_MASTER_PCIE_2 119
-#define ICBID_MASTER_ROTATOR 120
-#define ICBID_MASTER_VENUS_VMEM 121
-#define ICBID_MASTER_DCC 122
-#define ICBID_MASTER_MCDMA 123
-#define ICBID_MASTER_PCNOC_INT_2 124
-#define ICBID_MASTER_PCNOC_INT_3 125
-#define ICBID_MASTER_PCNOC_INT_4 126
-#define ICBID_MASTER_PCNOC_INT_5 127
-#define ICBID_MASTER_PCNOC_INT_6 128
-#define ICBID_MASTER_PCNOC_S_5 129
-#define ICBID_MASTER_SENSORS_AHB 130
-#define ICBID_MASTER_SENSORS_PROC 131
-#define ICBID_MASTER_QSPI 132
-#define ICBID_MASTER_VFE1 133
-#define ICBID_MASTER_SNOC_INT_2 134
-#define ICBID_MASTER_SMMNOC_BIMC 135
-#define ICBID_MASTER_CRVIRT_A1NOC 136
-#define ICBID_MASTER_XM_USB_HS1 137
-#define ICBID_MASTER_XI_USB_HS1 138
-#define ICBID_MASTER_PCNOC_BIMC_1 139
-#define ICBID_MASTER_BIMC_PCNOC 140
-#define ICBID_MASTER_XI_HSIC 141
-#define ICBID_MASTER_SGMII 142
-#define ICBID_MASTER_SPMI_FETCHER 143
-#define ICBID_MASTER_GNOC_BIMC 144
-#define ICBID_MASTER_CRVIRT_A2NOC 145
-#define ICBID_MASTER_CNOC_A2NOC 146
-#define ICBID_MASTER_WLAN 147
-#define ICBID_MASTER_MSS_CE 148
-#define ICBID_MASTER_CDSP_PROC 149
-#define ICBID_MASTER_GNOC_SNOC 150
-
-#define ICBID_SLAVE_EBI1 0
-#define ICBID_SLAVE_APPSS_L2 1
-#define ICBID_SLAVE_BIMC_SNOC 2
-#define ICBID_SLAVE_BIMC_SNOC_0 ICBID_SLAVE_BIMC_SNOC
-#define ICBID_SLAVE_CAMERA_CFG 3
-#define ICBID_SLAVE_DISPLAY_CFG 4
-#define ICBID_SLAVE_OCMEM_CFG 5
-#define ICBID_SLAVE_CPR_CFG 6
-#define ICBID_SLAVE_CPR_XPU_CFG 7
-#define ICBID_SLAVE_MISC_CFG 8
-#define ICBID_SLAVE_MISC_XPU_CFG 9
-#define ICBID_SLAVE_VENUS_CFG 10
-#define ICBID_SLAVE_GFX3D_CFG 11
-#define ICBID_SLAVE_MMSS_CLK_CFG 12
-#define ICBID_SLAVE_MMSS_CLK_XPU_CFG 13
-#define ICBID_SLAVE_MNOC_MPU_CFG 14
-#define ICBID_SLAVE_ONOC_MPU_CFG 15
-#define ICBID_SLAVE_MNOC_BIMC 16
-#define ICBID_SLAVE_SERVICE_MNOC 17
-#define ICBID_SLAVE_OCMEM 18
-#define ICBID_SLAVE_GMEM ICBID_SLAVE_OCMEM
-#define ICBID_SLAVE_SERVICE_ONOC 19
-#define ICBID_SLAVE_APPSS 20
-#define ICBID_SLAVE_LPASS 21
-#define ICBID_SLAVE_USB3 22
-#define ICBID_SLAVE_USB3_0 ICBID_SLAVE_USB3
-#define ICBID_SLAVE_WCSS 23
-#define ICBID_SLAVE_SNOC_BIMC 24
-#define ICBID_SLAVE_SNOC_BIMC_0 ICBID_SLAVE_SNOC_BIMC
-#define ICBID_SLAVE_SNOC_CNOC 25
-#define ICBID_SLAVE_IMEM 26
-#define ICBID_SLAVE_OCIMEM ICBID_SLAVE_IMEM
-#define ICBID_SLAVE_SNOC_OVIRT 27
-#define ICBID_SLAVE_SNOC_GVIRT ICBID_SLAVE_SNOC_OVIRT
-#define ICBID_SLAVE_SNOC_PNOC 28
-#define ICBID_SLAVE_SNOC_PCNOC ICBID_SLAVE_SNOC_PNOC
-#define ICBID_SLAVE_SERVICE_SNOC 29
-#define ICBID_SLAVE_QDSS_STM 30
-#define ICBID_SLAVE_SDCC_1 31
-#define ICBID_SLAVE_SDCC_3 32
-#define ICBID_SLAVE_SDCC_2 33
-#define ICBID_SLAVE_SDCC_4 34
-#define ICBID_SLAVE_TSIF 35
-#define ICBID_SLAVE_BAM_DMA 36
-#define ICBID_SLAVE_BLSP_2 37
-#define ICBID_SLAVE_USB_HSIC 38
-#define ICBID_SLAVE_BLSP_1 39
-#define ICBID_SLAVE_USB_HS 40
-#define ICBID_SLAVE_USB_HS1 ICBID_SLAVE_USB_HS
-#define ICBID_SLAVE_PDM 41
-#define ICBID_SLAVE_PERIPH_APU_CFG 42
-#define ICBID_SLAVE_PNOC_MPU_CFG 43
-#define ICBID_SLAVE_PRNG 44
-#define ICBID_SLAVE_PNOC_SNOC 45
-#define ICBID_SLAVE_PCNOC_SNOC ICBID_SLAVE_PNOC_SNOC
-#define ICBID_SLAVE_SERVICE_PNOC 46
-#define ICBID_SLAVE_CLK_CTL 47
-#define ICBID_SLAVE_CNOC_MSS 48
-#define ICBID_SLAVE_PCNOC_MSS ICBID_SLAVE_CNOC_MSS
-#define ICBID_SLAVE_SECURITY 49
-#define ICBID_SLAVE_TCSR 50
-#define ICBID_SLAVE_TLMM 51
-#define ICBID_SLAVE_CRYPTO_0_CFG 52
-#define ICBID_SLAVE_CRYPTO_1_CFG 53
-#define ICBID_SLAVE_IMEM_CFG 54
-#define ICBID_SLAVE_MESSAGE_RAM 55
-#define ICBID_SLAVE_BIMC_CFG 56
-#define ICBID_SLAVE_BOOT_ROM 57
-#define ICBID_SLAVE_CNOC_MNOC_MMSS_CFG 58
-#define ICBID_SLAVE_PMIC_ARB 59
-#define ICBID_SLAVE_SPDM_WRAPPER 60
-#define ICBID_SLAVE_DEHR_CFG 61
-#define ICBID_SLAVE_MPM 62
-#define ICBID_SLAVE_QDSS_CFG 63
-#define ICBID_SLAVE_RBCPR_CFG 64
-#define ICBID_SLAVE_RBCPR_CX_CFG ICBID_SLAVE_RBCPR_CFG
-#define ICBID_SLAVE_RBCPR_QDSS_APU_CFG 65
-#define ICBID_SLAVE_CNOC_MNOC_CFG 66
-#define ICBID_SLAVE_SNOC_MPU_CFG 67
-#define ICBID_SLAVE_CNOC_ONOC_CFG 68
-#define ICBID_SLAVE_PNOC_CFG 69
-#define ICBID_SLAVE_SNOC_CFG 70
-#define ICBID_SLAVE_EBI1_DLL_CFG 71
-#define ICBID_SLAVE_PHY_APU_CFG 72
-#define ICBID_SLAVE_EBI1_PHY_CFG 73
-#define ICBID_SLAVE_RPM 74
-#define ICBID_SLAVE_CNOC_SNOC 75
-#define ICBID_SLAVE_SERVICE_CNOC 76
-#define ICBID_SLAVE_OVIRT_SNOC 77
-#define ICBID_SLAVE_OVIRT_OCMEM 78
-#define ICBID_SLAVE_USB_HS2 79
-#define ICBID_SLAVE_QPIC 80
-#define ICBID_SLAVE_IPS_CFG 81
-#define ICBID_SLAVE_DSI_CFG 82
-#define ICBID_SLAVE_USB3_1 83
-#define ICBID_SLAVE_PCIE_0 84
-#define ICBID_SLAVE_PCIE_1 85
-#define ICBID_SLAVE_PSS_SMMU_CFG 86
-#define ICBID_SLAVE_CRYPTO_2_CFG 87
-#define ICBID_SLAVE_PCIE_0_CFG 88
-#define ICBID_SLAVE_PCIE_1_CFG 89
-#define ICBID_SLAVE_SATA_CFG 90
-#define ICBID_SLAVE_SPSS_GENI_IR 91
-#define ICBID_SLAVE_UFS_CFG 92
-#define ICBID_SLAVE_AVSYNC_CFG 93
-#define ICBID_SLAVE_VPU_CFG 94
-#define ICBID_SLAVE_USB_PHY_CFG 95
-#define ICBID_SLAVE_RBCPR_MX_CFG 96
-#define ICBID_SLAVE_PCIE_PARF 97
-#define ICBID_SLAVE_VCAP_CFG 98
-#define ICBID_SLAVE_EMAC_CFG 99
-#define ICBID_SLAVE_BCAST_CFG 100
-#define ICBID_SLAVE_KLM_CFG 101
-#define ICBID_SLAVE_DISPLAY_PWM 102
-#define ICBID_SLAVE_GENI 103
-#define ICBID_SLAVE_SNOC_BIMC_1 104
-#define ICBID_SLAVE_AUDIO 105
-#define ICBID_SLAVE_CATS_0 106
-#define ICBID_SLAVE_CATS_1 107
-#define ICBID_SLAVE_MM_INT_0 108
-#define ICBID_SLAVE_MM_INT_1 109
-#define ICBID_SLAVE_MM_INT_2 110
-#define ICBID_SLAVE_MM_INT_BIMC 111
-#define ICBID_SLAVE_MMU_MODEM_XPU_CFG 112
-#define ICBID_SLAVE_MSS_INT 113
-#define ICBID_SLAVE_PCNOC_INT_0 114
-#define ICBID_SLAVE_PCNOC_INT_1 115
-#define ICBID_SLAVE_PCNOC_M_0 116
-#define ICBID_SLAVE_PCNOC_M_1 117
-#define ICBID_SLAVE_PCNOC_S_0 118
-#define ICBID_SLAVE_PCNOC_S_1 119
-#define ICBID_SLAVE_PCNOC_S_2 120
-#define ICBID_SLAVE_PCNOC_S_3 121
-#define ICBID_SLAVE_PCNOC_S_4 122
-#define ICBID_SLAVE_PCNOC_S_6 123
-#define ICBID_SLAVE_PCNOC_S_7 124
-#define ICBID_SLAVE_PCNOC_S_8 125
-#define ICBID_SLAVE_PCNOC_S_9 126
-#define ICBID_SLAVE_PRNG_XPU_CFG 127
-#define ICBID_SLAVE_QDSS_INT 128
-#define ICBID_SLAVE_RPM_XPU_CFG 129
-#define ICBID_SLAVE_SNOC_INT_0 130
-#define ICBID_SLAVE_SNOC_INT_1 131
-#define ICBID_SLAVE_SNOC_INT_BIMC 132
-#define ICBID_SLAVE_TCU 133
-#define ICBID_SLAVE_BIMC_INT_0 134
-#define ICBID_SLAVE_BIMC_INT_1 135
-#define ICBID_SLAVE_RICA_CFG 136
-#define ICBID_SLAVE_SNOC_BIMC_2 137
-#define ICBID_SLAVE_BIMC_SNOC_1 138
-#define ICBID_SLAVE_PNOC_A1NOC 139
-#define ICBID_SLAVE_SNOC_VMEM 140
-#define ICBID_SLAVE_A0NOC_SNOC 141
-#define ICBID_SLAVE_A1NOC_SNOC 142
-#define ICBID_SLAVE_A2NOC_SNOC 143
-#define ICBID_SLAVE_A0NOC_CFG 144
-#define ICBID_SLAVE_A0NOC_MPU_CFG 145
-#define ICBID_SLAVE_A0NOC_SMMU_CFG 146
-#define ICBID_SLAVE_A1NOC_CFG 147
-#define ICBID_SLAVE_A1NOC_MPU_CFG 148
-#define ICBID_SLAVE_A1NOC_SMMU_CFG 149
-#define ICBID_SLAVE_A2NOC_CFG 150
-#define ICBID_SLAVE_A2NOC_MPU_CFG 151
-#define ICBID_SLAVE_A2NOC_SMMU_CFG 152
-#define ICBID_SLAVE_AHB2PHY 153
-#define ICBID_SLAVE_CAMERA_THROTTLE_CFG 154
-#define ICBID_SLAVE_DCC_CFG 155
-#define ICBID_SLAVE_DISPLAY_THROTTLE_CFG 156
-#define ICBID_SLAVE_DSA_CFG 157
-#define ICBID_SLAVE_DSA_MPU_CFG 158
-#define ICBID_SLAVE_SSC_MPU_CFG 159
-#define ICBID_SLAVE_HMSS_L3 160
-#define ICBID_SLAVE_LPASS_SMMU_CFG 161
-#define ICBID_SLAVE_MMAGIC_CFG 162
-#define ICBID_SLAVE_PCIE20_AHB2PHY 163
-#define ICBID_SLAVE_PCIE_2 164
-#define ICBID_SLAVE_PCIE_2_CFG 165
-#define ICBID_SLAVE_PIMEM 166
-#define ICBID_SLAVE_PIMEM_CFG 167
-#define ICBID_SLAVE_QDSS_RBCPR_APU_CFG 168
-#define ICBID_SLAVE_RBCPR_CX 169
-#define ICBID_SLAVE_RBCPR_MX 170
-#define ICBID_SLAVE_SMMU_CPP_CFG 171
-#define ICBID_SLAVE_SMMU_JPEG_CFG 172
-#define ICBID_SLAVE_SMMU_MDP_CFG 173
-#define ICBID_SLAVE_SMMU_ROTATOR_CFG 174
-#define ICBID_SLAVE_SMMU_VENUS_CFG 175
-#define ICBID_SLAVE_SMMU_VFE_CFG 176
-#define ICBID_SLAVE_SSC_CFG 177
-#define ICBID_SLAVE_VENUS_THROTTLE_CFG 178
-#define ICBID_SLAVE_VMEM 179
-#define ICBID_SLAVE_VMEM_CFG 180
-#define ICBID_SLAVE_QDSS_MPU_CFG 181
-#define ICBID_SLAVE_USB3_PHY_CFG 182
-#define ICBID_SLAVE_IPA_CFG 183
-#define ICBID_SLAVE_PCNOC_INT_2 184
-#define ICBID_SLAVE_PCNOC_INT_3 185
-#define ICBID_SLAVE_PCNOC_INT_4 186
-#define ICBID_SLAVE_PCNOC_INT_5 187
-#define ICBID_SLAVE_PCNOC_INT_6 188
-#define ICBID_SLAVE_PCNOC_S_5 189
-#define ICBID_SLAVE_QSPI 190
-#define ICBID_SLAVE_A1NOC_MS_MPU_CFG 191
-#define ICBID_SLAVE_A2NOC_MS_MPU_CFG 192
-#define ICBID_SLAVE_MODEM_Q6_SMMU_CFG 193
-#define ICBID_SLAVE_MSS_MPU_CFG 194
-#define ICBID_SLAVE_MSS_PROC_MS_MPU_CFG 195
-#define ICBID_SLAVE_SKL 196
-#define ICBID_SLAVE_SNOC_INT_2 197
-#define ICBID_SLAVE_SMMNOC_BIMC 198
-#define ICBID_SLAVE_CRVIRT_A1NOC 199
-#define ICBID_SLAVE_SGMII 200
-#define ICBID_SLAVE_QHS4_APPS 201
-#define ICBID_SLAVE_BIMC_PCNOC 202
-#define ICBID_SLAVE_PCNOC_BIMC_1 203
-#define ICBID_SLAVE_SPMI_FETCHER 204
-#define ICBID_SLAVE_MMSS_SMMU_CFG 205
-#define ICBID_SLAVE_WLAN 206
-#define ICBID_SLAVE_CRVIRT_A2NOC 207
-#define ICBID_SLAVE_CNOC_A2NOC 208
-#define ICBID_SLAVE_GLM 209
-#define ICBID_SLAVE_GNOC_BIMC 210
-#define ICBID_SLAVE_GNOC_SNOC 211
-#define ICBID_SLAVE_QM_CFG 212
-#define ICBID_SLAVE_TLMM_EAST 213
-#define ICBID_SLAVE_TLMM_NORTH 214
-#define ICBID_SLAVE_TLMM_WEST 215
-#define ICBID_SLAVE_LPASS_TCM 216
-#define ICBID_SLAVE_TLMM_SOUTH 217
-#define ICBID_SLAVE_TLMM_CENTER 218
-#define ICBID_SLAVE_MSS_NAV_CE_MPU_CFG 219
-#define ICBID_SLAVE_A2NOC_THROTTLE_CFG 220
-#define ICBID_SLAVE_CDSP 221
-#define ICBID_SLAVE_CDSP_SMMU_CFG 222
-#define ICBID_SLAVE_LPASS_MPU_CFG 223
-#define ICBID_SLAVE_CSI_PHY_CFG 224
-#endif
#include <linux/list.h>
#include <linux/jump_label.h>
+#include <linux/irqchip/arm-gic-v4.h>
+
#define VGIC_V3_MAX_CPUS 255
#define VGIC_V2_MAX_CPUS 8
#define VGIC_NR_IRQS_LEGACY 256
/* Only needed for the legacy KVM_CREATE_IRQCHIP */
bool can_emulate_gicv2;
+ /* Hardware has GICv4? */
+ bool has_gicv4;
+
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
bool hw; /* Tied to HW IRQ */
struct kref refcount; /* Used for LPIs */
u32 hwintid; /* HW INTID number */
+ unsigned int host_irq; /* linux irq corresponding to hwintid */
union {
u8 targets; /* GICv2 target VCPUs mask */
u32 mpidr; /* GICv3 target VCPU */
/* used by vgic-debug */
struct vgic_state_iter *iter;
+
+ /*
+ * GICv4 ITS per-VM data, containing the IRQ domain, the VPE
+ * array, the property table pointer as well as allocation
+ * data. This essentially ties the Linux IRQ core and ITS
+ * together, and avoids leaking KVM's data structures anywhere
+ * else.
+ */
+ struct its_vm its_vm;
};
struct vgic_v2_cpu_if {
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
u64 vgic_lr[VGIC_V3_MAX_LRS];
+
+ /*
+ * GICv4 ITS per-VPE data, containing the doorbell IRQ, the
+ * pending table pointer, the its_vm pointer and a few other
+	 * HW specific things. As with the its_vm structure, this ties
+	 * the Linux IRQ subsystem and the ITS together.
+ */
+ struct its_vpe its_vpe;
};
struct vgic_cpu {
int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
bool level, void *owner);
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq);
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq);
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+ u32 vintid);
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid);
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner);
+struct kvm_kernel_irq_routing_entry;
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
+ struct kvm_kernel_irq_routing_entry *irq_entry);
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
+ struct kvm_kernel_irq_routing_entry *irq_entry);
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+
#endif /* __KVM_ARM_VGIC_H */
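The v4 forwarding hooks above are meant to be driven from the irqbypass
glue. A minimal sketch of the producer-side callback, assuming the usual
kvm_kernel_irqfd container (the exact wiring lives in the arm arch code):

	int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
					     struct irq_bypass_producer *prod)
	{
		struct kvm_kernel_irqfd *irqfd =
			container_of(cons, struct kvm_kernel_irqfd, consumer);

		/* forward the host MSI directly to the vLPI */
		return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
						  &irqfd->irq_entry);
	}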
* functions that access data on eBPF program stack
*/
ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */
+ ARG_PTR_TO_MEM_OR_NULL, /* pointer to valid memory or NULL */
ARG_PTR_TO_UNINIT_MEM, /* pointer to memory does not need to be initialized,
* helper function must fill all bytes or clear
* them in error case.
extern const struct bpf_verifier_ops xdp_analyzer_ops;
struct bpf_prog *bpf_prog_get(u32 ufd);
-struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
- struct net_device *netdev);
+ bool attach_drv);
struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
- enum bpf_prog_type type)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
enum bpf_prog_type type,
- struct net_device *netdev)
+ bool attach_drv)
{
return ERR_PTR(-EOPNOTSUPP);
}
}
#endif /* CONFIG_BPF_SYSCALL */
+static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
+ enum bpf_prog_type type)
+{
+ return bpf_prog_get_type_dev(ufd, type, false);
+}
+
int bpf_prog_offload_compile(struct bpf_prog *prog);
void bpf_prog_offload_destroy(struct bpf_prog *prog);
-u32 bpf_prog_offload_ifindex(struct bpf_prog *prog);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
};
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
- int converted_op_size; /* the valid value width after perceived conversion */
+ bool seen; /* this insn was processed by the verifier */
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env);
#else
-int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
+static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env)
{
return -EOPNOTSUPP;
}
/* Unreachable code */
#ifdef CONFIG_STACK_VALIDATION
+/*
+ * These macros help objtool understand GCC code flow for unreachable code.
+ * The __COUNTER__ based labels are a hack to make each instance of the macros
+ * unique, to convince GCC not to merge duplicate inline asm statements.
+ */
#define annotate_reachable() ({ \
- asm("%c0:\n\t" \
- ".pushsection .discard.reachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.reachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define annotate_unreachable() ({ \
- asm("%c0:\n\t" \
- ".pushsection .discard.unreachable\n\t" \
- ".long %c0b - .\n\t" \
- ".popsection\n\t" : : "i" (__COUNTER__)); \
+ asm volatile("%c0:\n\t" \
+ ".pushsection .discard.unreachable\n\t" \
+ ".long %c0b - .\n\t" \
+ ".popsection\n\t" : : "i" (__COUNTER__)); \
})
#define ASM_UNREACHABLE \
"999:\n\t" \
*/
#define __IS_FLG(inode, flg) ((inode)->i_sb->s_flags & (flg))
-static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & MS_RDONLY; }
+static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags & SB_RDONLY; }
#define IS_RDONLY(inode) sb_rdonly((inode)->i_sb)
#define IS_SYNC(inode) (__IS_FLG(inode, SB_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
static inline int vfs_fstatat(int dfd, const char __user *filename,
struct kstat *stat, int flags)
{
- return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+ return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+ stat, STATX_BASIC_STATS);
}
static inline int vfs_fstat(int fd, struct kstat *stat)
{
return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}
+static inline bool vma_is_fsdax(struct vm_area_struct *vma)
+{
+ struct inode *inode;
+
+ if (!vma->vm_file)
+ return false;
+ if (!vma_is_dax(vma))
+ return false;
+ inode = file_inode(vma->vm_file);
+	if (S_ISCHR(inode->i_mode))
+		return false;	/* device-dax */
+ return true;
+}
+
static inline int iocb_flags(struct file *file)
{
int res = 0;
+++ /dev/null
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef LINUX_HTIRQ_H
-#define LINUX_HTIRQ_H
-
-struct pci_dev;
-struct irq_data;
-
-struct ht_irq_msg {
- u32 address_lo; /* low 32 bits of the ht irq message */
- u32 address_hi; /* high 32 bits of the it irq message */
-};
-
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
- struct ht_irq_msg *msg);
-
-struct ht_irq_cfg {
- struct pci_dev *dev;
- /* Update callback used to cope with buggy hardware */
- ht_irq_update_t *update;
- unsigned pos;
- unsigned idx;
- struct ht_irq_msg msg;
-};
-
-/* Helper functions.. */
-void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-void mask_ht_irq(struct irq_data *data);
-void unmask_ht_irq(struct irq_data *data);
-
-/* The arch hook for getting things started */
-int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
- ht_irq_update_t *update);
-void arch_teardown_ht_irq(unsigned int irq);
-
-/* For drivers of buggy hardware */
-int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
-
-#endif /* LINUX_HTIRQ_H */
}
#endif
-#ifndef pud_write
-static inline int pud_write(pud_t pud)
-{
- BUG();
- return 0;
-}
-#endif
-
#define HUGETLB_ANON_FILE "anon_hugepage"
enum {
* IRQD_MANAGED_SHUTDOWN - Interrupt was shutdown due to empty affinity
* mask. Applies only to affinity managed irqs.
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
+ * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
IRQD_IRQ_STARTED = (1 << 22),
IRQD_MANAGED_SHUTDOWN = (1 << 23),
IRQD_SINGLE_TARGET = (1 << 24),
+ IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
__irqd_to_state(d) |= IRQD_AFFINITY_SET;
}
+static inline bool irqd_trigger_type_was_set(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET;
+}
+
static inline u32 irqd_get_trigger_type(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_TRIGGER_MASK;
}
/*
- * Must only be called inside irq_chip.irq_set_type() functions.
+ * Must only be called inside irq_chip.irq_set_type() functions or
+ * from the DT/ACPI setup code.
*/
static inline void irqd_set_trigger_type(struct irq_data *d, u32 type)
{
__irqd_to_state(d) &= ~IRQD_TRIGGER_MASK;
__irqd_to_state(d) |= type & IRQD_TRIGGER_MASK;
+ __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET;
}
static inline bool irqd_is_level_type(struct irq_data *d)
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
+struct irq_domain_ops;
int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
#endif
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + 1)
-#ifndef CONFIG_64BIT
-# define KALLSYM_FMT "%08lx"
-#else
-# define KALLSYM_FMT "%016lx"
-#endif
-
struct module;
#ifdef CONFIG_KALLSYMS
const void *data; /* Raw data */
size_t datalen; /* Raw datalen */
size_t quotalen; /* Quota length for proposed payload */
- time_t expiry; /* Expiry time of key */
+ time64_t expiry; /* Expiry time of key */
} __randomize_layout;
typedef int (*request_key_actor_t)(struct key_construction *key,
#include <linux/atomic.h>
#include <linux/assoc_array.h>
#include <linux/refcount.h>
+#include <linux/time64.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
struct key_user *user; /* owner of this key */
void *security; /* security data for this key */
union {
- time_t expiry; /* time at which key expires (or 0) */
- time_t revoked_at; /* time at which key was revoked */
+ time64_t expiry; /* time at which key expires (or 0) */
+ time64_t revoked_at; /* time at which key was revoked */
};
- time_t last_used_at; /* last time used for LRU keyring discard */
+ time64_t last_used_at; /* last time used for LRU keyring discard */
kuid_t uid;
kgid_t gid;
key_perm_t perm; /* access permissions */
#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) { \
.work = KTHREAD_WORK_INIT((dwork).work, (fn)), \
- .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\
- (TIMER_DATA_TYPE)&(dwork.timer), \
+ .timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,\
TIMER_IRQSAFE), \
}
#define kthread_init_delayed_work(dwork, fn) \
do { \
kthread_init_work(&(dwork)->work, (fn)); \
- __setup_timer(&(dwork)->timer, \
- (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn,\
- (TIMER_DATA_TYPE)&(dwork)->timer, \
- TIMER_IRQSAFE); \
+ __init_timer(&(dwork)->timer, \
+ kthread_delayed_work_timer_fn, \
+ TIMER_IRQSAFE); \
} while (0)
int kthread_worker_fn(void *worker_ptr);
unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
+void kvm_sigset_activate(struct kvm_vcpu *vcpu);
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);
+
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
new_page = __alloc_pages_nodemask(gfp_mask, order,
preferred_nid, nodemask);
- if (new_page && PageTransHuge(page))
+ if (new_page && PageTransHuge(new_page))
prep_transhuge_page(new_page);
return new_page;
struct vm_operations_struct {
void (*open)(struct vm_area_struct * area);
void (*close)(struct vm_area_struct * area);
+ int (*split)(struct vm_area_struct * area, unsigned long addr);
int (*mremap)(struct vm_area_struct * area);
int (*fault)(struct vm_fault *vmf);
int (*huge_fault)(struct vm_fault *vmf, enum page_entry_size pe_size);
unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
+#ifdef CONFIG_FS_DAX
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas);
+#else
+static inline long get_user_pages_longterm(unsigned long start,
+ unsigned long nr_pages, unsigned int gup_flags,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ return get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+}
+#endif /* CONFIG_FS_DAX */
+
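Callers that keep pages pinned indefinitely (RDMA-style registrations) are
expected to use the new helper so that filesystem-DAX mappings are refused
rather than pinned. A minimal sketch, assuming addr, npages and pages are
prepared by the caller:

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages_longterm(addr, npages, FOLL_WRITE, pages, NULL);
	up_read(&current->mm->mmap_sem);
	if (ret < 0)
		return ret;	/* -EOPNOTSUPP signals a filesystem-DAX VMA */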
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages);
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
+ NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_ESP_BIT,
+ NETIF_F_GSO_UDP_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP)
+#define NETIF_F_GSO_UDP __NETIF_F(GSO_UDP)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
static inline void pcie_ecrc_get_policy(char *str) { }
#endif
-#ifdef CONFIG_HT_IRQ
-/* The functions a driver should call */
-int ht_create_irq(struct pci_dev *dev, int idx);
-void ht_destroy_irq(unsigned int irq);
-#endif /* CONFIG_HT_IRQ */
-
#ifdef CONFIG_PCI_ATS
/* Address Translation Service */
void pci_ats_init(struct pci_dev *dev);
* conditions between the inactive timer handler and the wakeup
* code.
*/
- int dl_throttled : 1;
- int dl_boosted : 1;
- int dl_yielded : 1;
- int dl_non_contending : 1;
+ unsigned int dl_throttled : 1;
+ unsigned int dl_boosted : 1;
+ unsigned int dl_yielded : 1;
+ unsigned int dl_non_contending : 1;
/*
* Bandwidth enforcement timer. Each -deadline task has its
SKB_GSO_SCTP = 1 << 14,
SKB_GSO_ESP = 1 << 15,
+
+ SKB_GSO_UDP = 1 << 16,
};
#if BITS_PER_LONG > 32
extern int cache_register_net(struct cache_detail *cd, struct net *net);
extern void cache_unregister_net(struct cache_detail *cd, struct net *net);
-extern struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net);
+extern struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net);
extern void cache_destroy_net(struct cache_detail *cd, struct net *net);
extern void sunrpc_init_cache_detail(struct cache_detail *cd);
extern void update_vsyscall(struct timekeeper *tk);
extern void update_vsyscall_tz(void);
-#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
-
-extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
- struct clocksource *c, u32 mult,
- u64 cycle_last);
-extern void update_vsyscall_tz(void);
-
#else
static inline void update_vsyscall(struct timekeeper *tk)
extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
-/*
- * PPS accessor
- */
-extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
- struct timespec64 *ts_real);
-
/*
* struct system_time_snapshot - simultaneous raw/real time capture with
* counter value
*/
struct hlist_node entry;
unsigned long expires;
- void (*function)(unsigned long);
- unsigned long data;
+ void (*function)(struct timer_list *);
u32 flags;
#ifdef CONFIG_LOCKDEP
#define TIMER_TRACE_FLAGMASK (TIMER_MIGRATING | TIMER_DEFERRABLE | TIMER_PINNED | TIMER_IRQSAFE)
-#define TIMER_DATA_TYPE unsigned long
-#define TIMER_FUNC_TYPE void (*)(TIMER_DATA_TYPE)
-
-#define __TIMER_INITIALIZER(_function, _data, _flags) { \
+#define __TIMER_INITIALIZER(_function, _flags) { \
.entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
- .data = (_data), \
.flags = (_flags), \
__TIMER_LOCKDEP_MAP_INITIALIZER( \
__FILE__ ":" __stringify(__LINE__)) \
#define DEFINE_TIMER(_name, _function) \
struct timer_list _name = \
- __TIMER_INITIALIZER((TIMER_FUNC_TYPE)_function, 0, 0)
+ __TIMER_INITIALIZER(_function, 0)
-void init_timer_key(struct timer_list *timer, unsigned int flags,
+/*
+ * LOCKDEP and DEBUG timer interfaces.
+ */
+void init_timer_key(struct timer_list *timer,
+ void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key);
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
extern void init_timer_on_stack_key(struct timer_list *timer,
+ void (*func)(struct timer_list *),
unsigned int flags, const char *name,
struct lock_class_key *key);
-extern void destroy_timer_on_stack(struct timer_list *timer);
#else
-static inline void destroy_timer_on_stack(struct timer_list *timer) { }
static inline void init_timer_on_stack_key(struct timer_list *timer,
- unsigned int flags, const char *name,
+ void (*func)(struct timer_list *),
+ unsigned int flags,
+ const char *name,
struct lock_class_key *key)
{
- init_timer_key(timer, flags, name, key);
+ init_timer_key(timer, func, flags, name, key);
}
#endif
#ifdef CONFIG_LOCKDEP
-#define __init_timer(_timer, _flags) \
+#define __init_timer(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_key((_timer), (_flags), #_timer, &__key); \
+ init_timer_key((_timer), (_fn), (_flags), #_timer, &__key);\
} while (0)
-#define __init_timer_on_stack(_timer, _flags) \
+#define __init_timer_on_stack(_timer, _fn, _flags) \
do { \
static struct lock_class_key __key; \
- init_timer_on_stack_key((_timer), (_flags), #_timer, &__key); \
+ init_timer_on_stack_key((_timer), (_fn), (_flags), \
+ #_timer, &__key); \
} while (0)
#else
-#define __init_timer(_timer, _flags) \
- init_timer_key((_timer), (_flags), NULL, NULL)
-#define __init_timer_on_stack(_timer, _flags) \
- init_timer_on_stack_key((_timer), (_flags), NULL, NULL)
+#define __init_timer(_timer, _fn, _flags) \
+ init_timer_key((_timer), (_fn), (_flags), NULL, NULL)
+#define __init_timer_on_stack(_timer, _fn, _flags) \
+ init_timer_on_stack_key((_timer), (_fn), (_flags), NULL, NULL)
#endif
-#define init_timer(timer) \
- __init_timer((timer), 0)
-
-#define __setup_timer(_timer, _fn, _data, _flags) \
- do { \
- __init_timer((_timer), (_flags)); \
- (_timer)->function = (_fn); \
- (_timer)->data = (_data); \
- } while (0)
-
-#define __setup_timer_on_stack(_timer, _fn, _data, _flags) \
- do { \
- __init_timer_on_stack((_timer), (_flags)); \
- (_timer)->function = (_fn); \
- (_timer)->data = (_data); \
- } while (0)
-
-#define setup_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), 0)
-#define setup_pinned_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), TIMER_PINNED)
-#define setup_deferrable_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE)
-#define setup_pinned_deferrable_timer(timer, fn, data) \
- __setup_timer((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
-#define setup_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), 0)
-#define setup_pinned_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), TIMER_PINNED)
-#define setup_deferrable_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE)
-#define setup_pinned_deferrable_timer_on_stack(timer, fn, data) \
- __setup_timer_on_stack((timer), (fn), (data), TIMER_DEFERRABLE | TIMER_PINNED)
+/**
+ * timer_setup - prepare a timer for first use
+ * @timer: the timer in question
+ * @callback: the function to call when timer expires
+ * @flags: any TIMER_* flags
+ *
+ * Regular timer initialization should use either DEFINE_TIMER() above,
+ * or timer_setup(). For timers on the stack, timer_setup_on_stack() must
+ * be used and must be balanced with a call to destroy_timer_on_stack().
+ */
+#define timer_setup(timer, callback, flags) \
+ __init_timer((timer), (callback), (flags))
-#ifndef CONFIG_LOCKDEP
-static inline void timer_setup(struct timer_list *timer,
- void (*callback)(struct timer_list *),
- unsigned int flags)
-{
- __setup_timer(timer, (TIMER_FUNC_TYPE)callback,
- (TIMER_DATA_TYPE)timer, flags);
-}
+#define timer_setup_on_stack(timer, callback, flags) \
+ __init_timer_on_stack((timer), (callback), (flags))
-static inline void timer_setup_on_stack(struct timer_list *timer,
- void (*callback)(struct timer_list *),
- unsigned int flags)
-{
- __setup_timer_on_stack(timer, (TIMER_FUNC_TYPE)callback,
- (TIMER_DATA_TYPE)timer, flags);
-}
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void destroy_timer_on_stack(struct timer_list *timer);
#else
-/*
- * Under LOCKDEP, the timer lock_class_key (set up in __init_timer) needs
- * to be tied to the caller's context, so an inline (above) won't work. We
- * do want to keep the inline for argument type checking, though.
- */
-# define timer_setup(timer, callback, flags) \
- __setup_timer((timer), (TIMER_FUNC_TYPE)(callback), \
- (TIMER_DATA_TYPE)(timer), (flags))
-# define timer_setup_on_stack(timer, callback, flags) \
- __setup_timer_on_stack((timer), \
- (TIMER_FUNC_TYPE)(callback), \
- (TIMER_DATA_TYPE)(timer), (flags))
+static inline void destroy_timer_on_stack(struct timer_list *timer) { }
#endif
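With the callback now receiving the timer pointer itself, users recover
their containing object with from_timer() (defined just below) instead of
carrying it in ->data. A usage sketch with a hypothetical struct foo:

	struct foo {
		struct timer_list timer;
		int pending;
	};

	static void foo_timeout(struct timer_list *t)
	{
		struct foo *foo = from_timer(foo, t, timer);

		foo->pending = 0;
	}

	static void foo_start(struct foo *foo)
	{
		timer_setup(&foo->timer, foo_timeout, 0);
		mod_timer(&foo->timer, jiffies + HZ);
	}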
#define from_timer(var, callback_timer, timer_fieldname) \
const struct virtio_net_hdr *hdr,
bool little_endian)
{
- unsigned short gso_type = 0;
+ unsigned int gso_type = 0;
if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_TCPV6:
gso_type = SKB_GSO_TCPV6;
break;
+ case VIRTIO_NET_HDR_GSO_UDP:
+ gso_type = SKB_GSO_UDP;
+ break;
default:
return -EINVAL;
}
#define __DELAYED_WORK_INITIALIZER(n, f, tflags) { \
.work = __WORK_INITIALIZER((n).work, (f)), \
- .timer = __TIMER_INITIALIZER((TIMER_FUNC_TYPE)delayed_work_timer_fn,\
- (TIMER_DATA_TYPE)&(n.timer), \
+ .timer = __TIMER_INITIALIZER(delayed_work_timer_fn,\
(tflags) | TIMER_IRQSAFE), \
}
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
- __setup_timer(&(_work)->timer, \
- (TIMER_FUNC_TYPE)delayed_work_timer_fn, \
- (TIMER_DATA_TYPE)&(_work)->timer, \
- (_tflags) | TIMER_IRQSAFE); \
+ __init_timer(&(_work)->timer, \
+ delayed_work_timer_fn, \
+ (_tflags) | TIMER_IRQSAFE); \
} while (0)
#define __INIT_DELAYED_WORK_ONSTACK(_work, _func, _tflags) \
do { \
INIT_WORK_ONSTACK(&(_work)->work, (_func)); \
- __setup_timer_on_stack(&(_work)->timer, \
- (TIMER_FUNC_TYPE)delayed_work_timer_fn,\
- (TIMER_DATA_TYPE)&(_work)->timer,\
- (_tflags) | TIMER_IRQSAFE); \
+ __init_timer_on_stack(&(_work)->timer, \
+ delayed_work_timer_fn, \
+ (_tflags) | TIMER_IRQSAFE); \
} while (0)
#define INIT_DELAYED_WORK(_work, _func) \
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
-void laptop_mode_timer_fn(unsigned long data);
+void laptop_mode_timer_fn(struct timer_list *t);
#else
static inline void laptop_sync_completion(void) { }
#endif
__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr);
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);
int ip6_dst_hoplimit(struct dst_entry *dst);
* ieee80211_nullfunc_get - retrieve a nullfunc template
* @hw: pointer obtained from ieee80211_alloc_hw().
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
+ * @qos_ok: QoS NDP is acceptable to the caller; this should be set
+ *	if at all possible
*
 * Creates a Nullfunc template which can, for example, be uploaded to
 * hardware. The template must be updated after association so that the
 * correct BSSID and address are used.
*
+ * If @qos_ok is set and the association is to an AP with QoS/WMM, the
+ * returned packet will be QoS NDP.
+ *
* Note: Caller (or hardware) is responsible for setting the
* &IEEE80211_FCTL_PM bit as well as Duration and Sequence Control fields.
*
* Return: The nullfunc template. %NULL on error.
*/
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ bool qos_ok);
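A driver whose hardware can transmit QoS null frames would pass true here;
a hypothetical call site:

	skb = ieee80211_nullfunc_get(hw, vif, true);
	if (!skb)
		return;
	/* update BSSID/address after association, then hand to hardware */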
/**
* ieee80211_probereq_get - retrieve a Probe Request template
/* This uses the crypto implementation of crc32c, which is either
* implemented w/ hardware support or resolves to __crc32c_le().
*/
- return crc32c(sum, buff, len);
+ return (__force __wsum)crc32c((__force __u32)sum, buff, len);
}
static inline __wsum sctp_csum_combine(__wsum csum, __wsum csum2,
int offset, int len)
{
- return __crc32c_le_combine(csum, csum2, len);
+ return (__force __wsum)__crc32c_le_combine((__force __u32)csum,
+ (__force __u32)csum2, len);
}
static inline __le32 sctp_compute_cksum(const struct sk_buff *skb,
unsigned int offset)
{
struct sctphdr *sh = sctp_hdr(skb);
- __le32 ret, old = sh->checksum;
const struct skb_checksum_ops ops = {
.update = sctp_csum_update,
.combine = sctp_csum_combine,
};
+ __le32 old = sh->checksum;
+ __wsum new;
sh->checksum = 0;
- ret = cpu_to_le32(~__skb_checksum(skb, offset, skb->len - offset,
- ~(__u32)0, &ops));
+ new = ~__skb_checksum(skb, offset, skb->len - offset, ~(__wsum)0, &ops);
sh->checksum = old;
- return ret;
+ return cpu_to_le32((__force __u32)new);
}
#endif /* __sctp_checksum_h__ */
*/
int sctp_offload_init(void);
+/*
+ * sctp/stream_sched.c
+ */
+void sctp_sched_ops_init(void);
+
/*
* sctp/stream.c
*/
int sctp_sched_init_sid(struct sctp_stream *stream, __u16 sid, gfp_t gfp);
struct sctp_sched_ops *sctp_sched_ops_from_stream(struct sctp_stream *stream);
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+ struct sctp_sched_ops *sched_ops);
+void sctp_sched_ops_prio_init(void);
+void sctp_sched_ops_rr_init(void);
+
#endif /* __sctp_stream_sched_h__ */
struct scsi_lun;
struct scsi_sense_hdr;
+typedef unsigned int __bitwise blist_flags_t;
+
struct scsi_mode_data {
__u32 length;
__u16 block_descriptor_length;
unsigned char current_tag; /* current tag */
struct scsi_target *sdev_target; /* used only for single_lun */
- unsigned int sdev_bflags; /* black/white flags as also found in
+ blist_flags_t sdev_bflags; /* black/white flags as also found in
* scsi_devinfo.[hc]. For now used only to
* pass settings from slave_alloc to scsi
* core. */
*/
/* Only scan LUN 0 */
-#define BLIST_NOLUN ((__force __u32 __bitwise)(1 << 0))
+#define BLIST_NOLUN ((__force blist_flags_t)(1 << 0))
/* Known to have LUNs, force scanning.
* DEPRECATED: Use max_luns=N */
-#define BLIST_FORCELUN ((__force __u32 __bitwise)(1 << 1))
+#define BLIST_FORCELUN ((__force blist_flags_t)(1 << 1))
/* Flag for broken handshaking */
-#define BLIST_BORKEN ((__force __u32 __bitwise)(1 << 2))
+#define BLIST_BORKEN ((__force blist_flags_t)(1 << 2))
/* unlock by special command */
-#define BLIST_KEY ((__force __u32 __bitwise)(1 << 3))
+#define BLIST_KEY ((__force blist_flags_t)(1 << 3))
/* Do not use LUNs in parallel */
-#define BLIST_SINGLELUN ((__force __u32 __bitwise)(1 << 4))
+#define BLIST_SINGLELUN ((__force blist_flags_t)(1 << 4))
/* Buggy Tagged Command Queuing */
-#define BLIST_NOTQ ((__force __u32 __bitwise)(1 << 5))
+#define BLIST_NOTQ ((__force blist_flags_t)(1 << 5))
/* Non consecutive LUN numbering */
-#define BLIST_SPARSELUN ((__force __u32 __bitwise)(1 << 6))
+#define BLIST_SPARSELUN ((__force blist_flags_t)(1 << 6))
/* Avoid LUNS >= 5 */
-#define BLIST_MAX5LUN ((__force __u32 __bitwise)(1 << 7))
+#define BLIST_MAX5LUN ((__force blist_flags_t)(1 << 7))
/* Treat as (removable) CD-ROM */
-#define BLIST_ISROM ((__force __u32 __bitwise)(1 << 8))
+#define BLIST_ISROM ((__force blist_flags_t)(1 << 8))
/* LUNs past 7 on a SCSI-2 device */
-#define BLIST_LARGELUN ((__force __u32 __bitwise)(1 << 9))
+#define BLIST_LARGELUN ((__force blist_flags_t)(1 << 9))
/* override additional length field */
-#define BLIST_INQUIRY_36 ((__force __u32 __bitwise)(1 << 10))
+#define BLIST_INQUIRY_36 ((__force blist_flags_t)(1 << 10))
/* do not do automatic start on add */
-#define BLIST_NOSTARTONADD ((__force __u32 __bitwise)(1 << 12))
+#define BLIST_NOSTARTONADD ((__force blist_flags_t)(1 << 12))
/* try REPORT_LUNS even for SCSI-2 devs (if HBA supports more than 8 LUNs) */
-#define BLIST_REPORTLUN2 ((__force __u32 __bitwise)(1 << 17))
+#define BLIST_REPORTLUN2 ((__force blist_flags_t)(1 << 17))
/* don't try REPORT_LUNS scan (SCSI-3 devs) */
-#define BLIST_NOREPORTLUN ((__force __u32 __bitwise)(1 << 18))
+#define BLIST_NOREPORTLUN ((__force blist_flags_t)(1 << 18))
/* don't use PREVENT-ALLOW commands */
-#define BLIST_NOT_LOCKABLE ((__force __u32 __bitwise)(1 << 19))
+#define BLIST_NOT_LOCKABLE ((__force blist_flags_t)(1 << 19))
/* device is actually for RAID config */
-#define BLIST_NO_ULD_ATTACH ((__force __u32 __bitwise)(1 << 20))
+#define BLIST_NO_ULD_ATTACH ((__force blist_flags_t)(1 << 20))
/* select without ATN */
-#define BLIST_SELECT_NO_ATN ((__force __u32 __bitwise)(1 << 21))
+#define BLIST_SELECT_NO_ATN ((__force blist_flags_t)(1 << 21))
/* retry HARDWARE_ERROR */
-#define BLIST_RETRY_HWERROR ((__force __u32 __bitwise)(1 << 22))
+#define BLIST_RETRY_HWERROR ((__force blist_flags_t)(1 << 22))
/* maximum 512 sector cdb length */
-#define BLIST_MAX_512 ((__force __u32 __bitwise)(1 << 23))
+#define BLIST_MAX_512 ((__force blist_flags_t)(1 << 23))
/* Disable T10 PI (DIF) */
-#define BLIST_NO_DIF ((__force __u32 __bitwise)(1 << 25))
+#define BLIST_NO_DIF ((__force blist_flags_t)(1 << 25))
/* Ignore SBC-3 VPD pages */
-#define BLIST_SKIP_VPD_PAGES ((__force __u32 __bitwise)(1 << 26))
+#define BLIST_SKIP_VPD_PAGES ((__force blist_flags_t)(1 << 26))
/* Attempt to read VPD pages */
-#define BLIST_TRY_VPD_PAGES ((__force __u32 __bitwise)(1 << 28))
+#define BLIST_TRY_VPD_PAGES ((__force blist_flags_t)(1 << 28))
/* don't try to issue RSOC */
-#define BLIST_NO_RSOC ((__force __u32 __bitwise)(1 << 29))
+#define BLIST_NO_RSOC ((__force blist_flags_t)(1 << 29))
/* maximum 1024 sector cdb length */
-#define BLIST_MAX_1024 ((__force __u32 __bitwise)(1 << 30))
+#define BLIST_MAX_1024 ((__force blist_flags_t)(1 << 30))
/* Use UNMAP limit for WRITE SAME */
-#define BLIST_UNMAP_LIMIT_WS ((__force __u32 __bitwise)(1 << 31))
+#define BLIST_UNMAP_LIMIT_WS ((__force blist_flags_t)(1 << 31))
#endif
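The point of the __bitwise typedef is that sparse can now flag accidental
mixing of blist flags with plain integers, e.g.:

	blist_flags_t flags = BLIST_NOLUN | BLIST_SPARSELUN;	/* fine */
	unsigned int raw = flags;	/* sparse warns about the conversion */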
void snd_ctl_sync_vmaster(struct snd_kcontrol *kctl, bool hook_only);
#define snd_ctl_sync_vmaster_hook(kctl) snd_ctl_sync_vmaster(kctl, true)
int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
- int (*func)(struct snd_kcontrol *, void *),
+ int (*func)(struct snd_kcontrol *vslave,
+ struct snd_kcontrol *slave,
+ void *arg),
void *arg);
/*
TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a),
TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b),
TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
+ TCM_INSUFFICIENT_REGISTRATION_RESOURCES = R(0x1d),
#undef R
};
#define CMD_T_STOP (1 << 5)
#define CMD_T_TAS (1 << 10)
#define CMD_T_FABRIC_STOP (1 << 11)
+#define CMD_T_PRE_EXECUTE (1 << 12)
spinlock_t t_state_lock;
struct kref cmd_kref;
struct completion t_transport_stop_comp;
rxrpc_conn_put_client,
rxrpc_conn_put_service,
rxrpc_conn_queued,
+ rxrpc_conn_reap_service,
rxrpc_conn_seen,
};
enum rxrpc_timer_trace {
rxrpc_timer_begin,
+ rxrpc_timer_exp_ack,
+ rxrpc_timer_exp_hard,
+ rxrpc_timer_exp_idle,
+ rxrpc_timer_exp_keepalive,
+ rxrpc_timer_exp_lost_ack,
+ rxrpc_timer_exp_normal,
+ rxrpc_timer_exp_ping,
+ rxrpc_timer_exp_resend,
rxrpc_timer_expired,
rxrpc_timer_init_for_reply,
rxrpc_timer_init_for_send_reply,
+ rxrpc_timer_restart,
rxrpc_timer_set_for_ack,
+ rxrpc_timer_set_for_hard,
+ rxrpc_timer_set_for_idle,
+ rxrpc_timer_set_for_keepalive,
+ rxrpc_timer_set_for_lost_ack,
+ rxrpc_timer_set_for_normal,
rxrpc_timer_set_for_ping,
rxrpc_timer_set_for_resend,
rxrpc_timer_set_for_send,
enum rxrpc_propose_ack_trace {
rxrpc_propose_ack_client_tx_end,
rxrpc_propose_ack_input_data,
+ rxrpc_propose_ack_ping_for_keepalive,
rxrpc_propose_ack_ping_for_lost_ack,
rxrpc_propose_ack_ping_for_lost_reply,
rxrpc_propose_ack_ping_for_params,
EM(rxrpc_conn_put_client, "PTc") \
EM(rxrpc_conn_put_service, "PTs") \
EM(rxrpc_conn_queued, "QUE") \
+ EM(rxrpc_conn_reap_service, "RPs") \
E_(rxrpc_conn_seen, "SEE")
#define rxrpc_client_traces \
#define rxrpc_timer_traces \
EM(rxrpc_timer_begin, "Begin ") \
EM(rxrpc_timer_expired, "*EXPR*") \
+ EM(rxrpc_timer_exp_ack, "ExpAck") \
+ EM(rxrpc_timer_exp_hard, "ExpHrd") \
+ EM(rxrpc_timer_exp_idle, "ExpIdl") \
+ EM(rxrpc_timer_exp_keepalive, "ExpKA ") \
+ EM(rxrpc_timer_exp_lost_ack, "ExpLoA") \
+ EM(rxrpc_timer_exp_normal, "ExpNml") \
+ EM(rxrpc_timer_exp_ping, "ExpPng") \
+ EM(rxrpc_timer_exp_resend, "ExpRsn") \
EM(rxrpc_timer_init_for_reply, "IniRpl") \
EM(rxrpc_timer_init_for_send_reply, "SndRpl") \
+ EM(rxrpc_timer_restart, "Restrt") \
EM(rxrpc_timer_set_for_ack, "SetAck") \
+ EM(rxrpc_timer_set_for_hard, "SetHrd") \
+ EM(rxrpc_timer_set_for_idle, "SetIdl") \
+ EM(rxrpc_timer_set_for_keepalive, "KeepAl") \
+ EM(rxrpc_timer_set_for_lost_ack, "SetLoA") \
+ EM(rxrpc_timer_set_for_normal, "SetNml") \
EM(rxrpc_timer_set_for_ping, "SetPng") \
EM(rxrpc_timer_set_for_resend, "SetRTx") \
- E_(rxrpc_timer_set_for_send, "SetTx ")
+ E_(rxrpc_timer_set_for_send, "SetSnd")
#define rxrpc_propose_ack_traces \
EM(rxrpc_propose_ack_client_tx_end, "ClTxEnd") \
EM(rxrpc_propose_ack_input_data, "DataIn ") \
+ EM(rxrpc_propose_ack_ping_for_keepalive, "KeepAlv") \
EM(rxrpc_propose_ack_ping_for_lost_ack, "LostAck") \
EM(rxrpc_propose_ack_ping_for_lost_reply, "LostRpl") \
EM(rxrpc_propose_ack_ping_for_params, "Params ") \
TRACE_EVENT(rxrpc_timer,
TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now, unsigned long now_j),
+ unsigned long now),
- TP_ARGS(call, why, now, now_j),
+ TP_ARGS(call, why, now),
TP_STRUCT__entry(
__field(struct rxrpc_call *, call )
__field(enum rxrpc_timer_trace, why )
- __field_struct(ktime_t, now )
- __field_struct(ktime_t, expire_at )
- __field_struct(ktime_t, ack_at )
- __field_struct(ktime_t, resend_at )
- __field(unsigned long, now_j )
- __field(unsigned long, timer )
+ __field(long, now )
+ __field(long, ack_at )
+ __field(long, ack_lost_at )
+ __field(long, resend_at )
+ __field(long, ping_at )
+ __field(long, expect_rx_by )
+ __field(long, expect_req_by )
+ __field(long, expect_term_by )
+ __field(long, timer )
),
TP_fast_assign(
- __entry->call = call;
- __entry->why = why;
- __entry->now = now;
- __entry->expire_at = call->expire_at;
- __entry->ack_at = call->ack_at;
- __entry->resend_at = call->resend_at;
- __entry->now_j = now_j;
- __entry->timer = call->timer.expires;
+ __entry->call = call;
+ __entry->why = why;
+ __entry->now = now;
+ __entry->ack_at = call->ack_at;
+ __entry->ack_lost_at = call->ack_lost_at;
+ __entry->resend_at = call->resend_at;
+ __entry->expect_rx_by = call->expect_rx_by;
+ __entry->expect_req_by = call->expect_req_by;
+ __entry->expect_term_by = call->expect_term_by;
+ __entry->timer = call->timer.expires;
),
- TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
+ TP_printk("c=%p %s a=%ld la=%ld r=%ld xr=%ld xq=%ld xt=%ld t=%ld",
__entry->call,
__print_symbolic(__entry->why, rxrpc_timer_traces),
- ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
- ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
- ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
- __entry->timer - __entry->now_j)
+ __entry->ack_at - __entry->now,
+ __entry->ack_lost_at - __entry->now,
+ __entry->resend_at - __entry->now,
+ __entry->expect_rx_by - __entry->now,
+ __entry->expect_req_by - __entry->now,
+ __entry->expect_term_by - __entry->now,
+ __entry->timer - __entry->now)
);
TRACE_EVENT(rxrpc_rx_lose,
memcpy(&__entry->sum, summary, sizeof(__entry->sum));
),
- TP_printk("c=%p %08x %s %08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
+ TP_printk("c=%p r=%08x %s q=%08x %s cw=%u ss=%u nr=%u,%u nw=%u,%u r=%u b=%u u=%u d=%u l=%x%s%s%s",
__entry->call,
__entry->ack_serial,
__print_symbolic(__entry->sum.ack_reason, rxrpc_ack_names),
* RUNNING (we will not have dequeued if state != RUNNING).
*/
if (preempt)
- return TASK_STATE_MAX;
+ return TASK_REPORT_MAX;
- return task_state_index(p);
+ return 1 << task_state_index(p);
}
#endif /* CREATE_TRACE_POINTS */
{ 0x40, "P" }, { 0x80, "I" }) :
"R",
- __entry->prev_state & TASK_STATE_MAX ? "+" : "",
+ __entry->prev_state & TASK_REPORT_MAX ? "+" : "",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
#define PTR_CHECK_DEV ((1 << PTR_DEV_BITS) - 1)
-#define PTR(gen, offset, dev) \
+#define MAKE_PTR(gen, offset, dev) \
((((__u64) dev) << 51) | ((__u64) offset) << 8 | gen)
/* Bkey utility code */
#define BFS_FILEBLOCKS(ip) \
((ip)->i_sblock == 0 ? 0 : (le32_to_cpu((ip)->i_eblock) + 1) - le32_to_cpu((ip)->i_sblock))
#define BFS_UNCLEAN(bfs_sb, sb) \
- ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & MS_RDONLY))
+ ((le32_to_cpu(bfs_sb->s_from) != -1) && (le32_to_cpu(bfs_sb->s_to) != -1) && !(sb->s_flags & SB_RDONLY))
#endif /* _LINUX_BFS_FS_H */
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
- __u32 prog_target_ifindex; /* ifindex of netdev to prep for */
+ __u32 prog_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
#define BPF_TAG_SIZE 8
-enum bpf_prog_status {
- BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
-};
-
struct bpf_prog_info {
__u32 type;
__u32 id;
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
- __u32 ifindex;
- __u32 status;
} __attribute__((aligned(8)));
struct bpf_map_info {
};
struct kfd_ioctl_set_scratch_backing_va_args {
- uint64_t va_addr; /* to KFD */
- uint32_t gpu_id; /* to KFD */
- uint32_t pad;
+ __u64 va_addr; /* to KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 pad;
};
struct kfd_ioctl_get_tile_config_args {
/* to KFD: pointer to tile array */
- uint64_t tile_config_ptr;
+ __u64 tile_config_ptr;
/* to KFD: pointer to macro tile array */
- uint64_t macro_tile_config_ptr;
+ __u64 macro_tile_config_ptr;
/* to KFD: array size allocated by user mode
* from KFD: array size filled by kernel
*/
- uint32_t num_tile_configs;
+ __u32 num_tile_configs;
/* to KFD: array size allocated by user mode
* from KFD: array size filled by kernel
*/
- uint32_t num_macro_tile_configs;
+ __u32 num_macro_tile_configs;
- uint32_t gpu_id; /* to KFD */
- uint32_t gb_addr_config; /* from KFD */
- uint32_t num_banks; /* from KFD */
- uint32_t num_ranks; /* from KFD */
+ __u32 gpu_id; /* to KFD */
+ __u32 gb_addr_config; /* from KFD */
+ __u32 num_banks; /* from KFD */
+ __u32 num_ranks; /* from KFD */
/* struct size can be extended later if needed
* without breaking ABI compatibility
*/
RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */
RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */
RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */
+ RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */
RXRPC__SUPPORTED
};
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/* AF_VSOCK sock_diag(7) interface for querying open sockets */
#ifndef _UAPI__VM_SOCKETS_DIAG_H__
void *data)
{
struct ipc_namespace *ns;
- if (flags & MS_KERNMOUNT) {
+ if (flags & SB_KERNMOUNT) {
ns = data;
data = NULL;
} else {
struct net *net = current->nsproxy->net_ns;
struct bpf_dev_offload *offload;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
+ if (attr->prog_type != BPF_PROG_TYPE_SCHED_CLS &&
+ attr->prog_type != BPF_PROG_TYPE_XDP)
+ return -EINVAL;
if (attr->prog_flags)
return -EINVAL;
init_waitqueue_head(&offload->verifier_done);
rtnl_lock();
- offload->netdev = __dev_get_by_index(net, attr->prog_target_ifindex);
+ offload->netdev = __dev_get_by_index(net, attr->prog_ifindex);
if (!offload->netdev) {
rtnl_unlock();
kfree(offload);
struct bpf_dev_offload *offload = prog->aux->offload;
struct netdev_bpf data = {};
+ /* Caution - if netdev is destroyed before the program, this function
+ * will be called twice.
+ */
+
data.offload.prog = prog;
if (offload->verifier_running)
return bpf_prog_offload_translate(prog);
}
-u32 bpf_prog_offload_ifindex(struct bpf_prog *prog)
-{
- struct bpf_dev_offload *offload = prog->aux->offload;
- u32 ifindex;
-
- rtnl_lock();
- ifindex = offload->netdev ? offload->netdev->ifindex : 0;
- rtnl_unlock();
-
- return ifindex;
-}
-
const struct bpf_prog_ops bpf_offload_prog_ops = {
};
switch (event) {
case NETDEV_UNREGISTER:
+ /* ignore namespace changes */
+ if (netdev->reg_state != NETREG_UNREGISTERING)
+ break;
+
list_for_each_entry_safe(offload, tmp, &bpf_prog_offload_devs,
offloads) {
if (offload->netdev == netdev)
}
EXPORT_SYMBOL_GPL(bpf_prog_inc_not_zero);
-static bool bpf_prog_can_attach(struct bpf_prog *prog,
- enum bpf_prog_type *attach_type,
- struct net_device *netdev)
+static bool bpf_prog_get_ok(struct bpf_prog *prog,
+ enum bpf_prog_type *attach_type, bool attach_drv)
{
- struct bpf_dev_offload *offload = prog->aux->offload;
+ /* not an attachment, just a refcount inc, always allow */
+ if (!attach_type)
+ return true;
if (prog->type != *attach_type)
return false;
- if (offload && offload->netdev != netdev)
+ if (bpf_prog_is_dev_bound(prog->aux) && !attach_drv)
return false;
return true;
}
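Callers now state whether a device-bound program is acceptable instead of
passing the target netdev down. The XDP attach path, for instance, does
roughly this (sketch; attach_drv is true only when attaching through the
driver's own ndo_bpf):

	prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
				     bpf_op == ops->ndo_bpf);
	if (IS_ERR(prog))
		return PTR_ERR(prog);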
static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *attach_type,
- struct net_device *netdev)
+ bool attach_drv)
{
struct fd f = fdget(ufd);
struct bpf_prog *prog;
prog = ____bpf_prog_get(f);
if (IS_ERR(prog))
return prog;
- if (attach_type && !bpf_prog_can_attach(prog, attach_type, netdev)) {
+ if (!bpf_prog_get_ok(prog, attach_type, attach_drv)) {
prog = ERR_PTR(-EINVAL);
goto out;
}
struct bpf_prog *bpf_prog_get(u32 ufd)
{
- return __bpf_prog_get(ufd, NULL, NULL);
+ return __bpf_prog_get(ufd, NULL, false);
}
-struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
-{
- struct bpf_prog *prog = __bpf_prog_get(ufd, &type, NULL);
-
- if (!IS_ERR(prog))
- trace_bpf_prog_get_type(prog);
- return prog;
-}
-EXPORT_SYMBOL_GPL(bpf_prog_get_type);
-
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
- struct net_device *netdev)
+ bool attach_drv)
{
- struct bpf_prog *prog = __bpf_prog_get(ufd, &type, netdev);
+ struct bpf_prog *prog = __bpf_prog_get(ufd, &type, attach_drv);
if (!IS_ERR(prog))
trace_bpf_prog_get_type(prog);
EXPORT_SYMBOL_GPL(bpf_prog_get_type_dev);
/* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD prog_target_ifindex
+#define BPF_PROG_LOAD_LAST_FIELD prog_ifindex
static int bpf_prog_load(union bpf_attr *attr)
{
atomic_set(&prog->aux->refcnt, 1);
prog->gpl_compatible = is_gpl ? 1 : 0;
- if (attr->prog_target_ifindex) {
+ if (attr->prog_ifindex) {
err = bpf_prog_offload_init(prog, attr);
if (err)
goto free_prog;
return -EFAULT;
}
- if (bpf_prog_is_dev_bound(prog->aux)) {
- info.status |= BPF_PROG_STATUS_DEV_BOUND;
- info.ifindex = bpf_prog_offload_ifindex(prog);
- }
-
done:
if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
if (type != expected_type)
goto err_type;
} else if (arg_type == ARG_PTR_TO_MEM ||
+ arg_type == ARG_PTR_TO_MEM_OR_NULL ||
arg_type == ARG_PTR_TO_UNINIT_MEM) {
expected_type = PTR_TO_STACK;
/* One exception here. In case function allows for NULL to be
* passed in as argument, it's a SCALAR_VALUE type. Final test
* happens during stack boundary checking.
*/
- if (register_is_null(*reg))
+ if (register_is_null(*reg) &&
+ arg_type == ARG_PTR_TO_MEM_OR_NULL)
/* final test in check_stack_boundary() */;
else if (!type_is_pkt_pointer(type) &&
type != PTR_TO_MAP_VALUE &&
return err;
regs = cur_regs(env);
+ env->insn_aux_data[insn_idx].seen = true;
if (class == BPF_ALU || class == BPF_ALU64) {
err = check_alu_op(env, insn);
if (err)
return err;
insn_idx++;
+ env->insn_aux_data[insn_idx].seen = true;
} else {
verbose(env, "invalid BPF_LD mode\n");
return -EINVAL;
u32 off, u32 cnt)
{
struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+ int i;
if (cnt == 1)
return 0;
memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
memcpy(new_data + off + cnt - 1, old_data + off,
sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+ for (i = off; i < off + cnt - 1; i++)
+ new_data[i].seen = true;
env->insn_aux_data = new_data;
vfree(old_data);
return 0;
return new_prog;
}
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+ struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+ struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+ struct bpf_insn *insn = env->prog->insnsi;
+ const int insn_cnt = env->prog->len;
+ int i;
+
+ for (i = 0; i < insn_cnt; i++) {
+ if (aux_data[i].seen)
+ continue;
+ memcpy(insn + i, &nop, sizeof(nop));
+ }
+}
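For illustration only (not part of this patch), a program where the
verifier can prove one arm of a branch unreachable; the instruction in the
untaken arm is never marked seen and is rewritten to the mov64 r0,r0 nop
above:

	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),		/* r0 = 0 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),	/* always taken */
		BPF_MOV64_IMM(BPF_REG_0, 1),		/* dead: becomes a nop */
		BPF_EXIT_INSN(),
	};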
+
/* convert load instructions that access fields of 'struct __sk_buff'
* into sequence of instructions that access fields of 'struct sk_buff'
*/
while (!pop_stack(env, NULL, NULL));
free_states(env);
+ if (ret == 0)
+ sanitize_dead_code(env);
+
if (ret == 0)
/* program is valid, convert *(u32*)(ctx + off) accesses */
ret = convert_ctx_accesses(env);
ns_inode = ns_path.dentry->d_inode;
ns_link_info->dev = new_encode_dev(ns_inode->i_sb->s_dev);
ns_link_info->ino = ns_inode->i_ino;
+ path_put(&ns_path);
}
}
* set the trigger type must match. Also all must
* agree on ONESHOT.
*/
- unsigned int oldtype = irqd_get_trigger_type(&desc->irq_data);
+ unsigned int oldtype;
+
+ /*
+ * If nobody did set the configuration before, inherit
+ * the one provided by the requester.
+ */
+ if (irqd_trigger_type_was_set(&desc->irq_data)) {
+ oldtype = irqd_get_trigger_type(&desc->irq_data);
+ } else {
+ oldtype = new->flags & IRQF_TRIGGER_MASK;
+ irqd_set_trigger_type(&desc->irq_data, oldtype);
+ }
if (!((old->flags & new->flags) & IRQF_SHARED) ||
(oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
{
struct cpumap *cm = this_cpu_ptr(m->maps);
- return m->global_available - cpudown ? cm->available : 0;
+	return m->global_available - (cpudown ? cm->available : 0);
}
/**
static int irqfixup __read_mostly;
#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
-static void poll_spurious_irqs(unsigned long dummy);
+static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
return ok;
}
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_spurious_irqs(struct timer_list *unused)
{
struct irq_desc *desc;
int i;
return 0;
}
-late_initcall(jump_label_test);
+early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */
#endif /* HAVE_JUMP_LABEL */
static int s_show(struct seq_file *m, void *p)
{
- unsigned long value;
+ void *value;
struct kallsym_iter *iter = m->private;
/* Some debugging symbols have no name. Ignore them. */
if (!iter->name[0])
return 0;
- value = iter->show_value ? iter->value : 0;
+ value = iter->show_value ? (void *)iter->value : NULL;
if (iter->module_name[0]) {
char type;
*/
type = iter->exported ? toupper(iter->type) :
tolower(iter->type);
- seq_printf(m, KALLSYM_FMT " %c %s\t[%s]\n", value,
+ seq_printf(m, "%px %c %s\t[%s]\n", value,
type, iter->name, iter->module_name);
} else
- seq_printf(m, KALLSYM_FMT " %c %s\n", value,
+ seq_printf(m, "%px %c %s\n", value,
iter->type, iter->name);
return 0;
}
struct timer_list *timer = &dwork->timer;
struct kthread_work *work = &dwork->work;
- WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)kthread_delayed_work_timer_fn);
+ WARN_ON_ONCE(timer->function != kthread_delayed_work_timer_fn);
/*
* If @delay is 0, queue @dwork->work immediately. This is for
{
struct module *mod = list_entry(p, struct module, list);
char buf[MODULE_FLAGS_BUF_SIZE];
- unsigned long value;
+ void *value;
/* We always ignore unformed modules. */
if (mod->state == MODULE_STATE_UNFORMED)
mod->state == MODULE_STATE_COMING ? "Loading" :
"Live");
/* Used by oprofile and other similar tools. */
- value = m->private ? 0 : (unsigned long)mod->core_layout.base;
- seq_printf(m, " 0x" KALLSYM_FMT, value);
+ value = m->private ? NULL : mod->core_layout.base;
+ seq_printf(m, " 0x%px", value);
/* Taints info */
if (mod->taints)
local_bh_enable();
}
-static void padata_reorder_timer(unsigned long arg)
+static void padata_reorder_timer(struct timer_list *t)
{
- struct parallel_data *pd = (struct parallel_data *)arg;
+ struct parallel_data *pd = from_timer(pd, t, timer);
unsigned int weight;
int target_cpu, cpu;
padata_init_pqueues(pd);
padata_init_squeues(pd);
- setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
+ timer_setup(&pd->timer, padata_reorder_timer, 0);
atomic_set(&pd->seq_nr, -1);
atomic_set(&pd->reorder_objects, 0);
atomic_set(&pd->refcnt, 0);
config GENERIC_TIME_VSYSCALL
bool
-# Timekeeping vsyscall support
-config GENERIC_TIME_VSYSCALL_OLD
- bool
-
# Old style timekeeping
config ARCH_USES_GETTIMEOFFSET
bool
spin_unlock_irqrestore(&watchdog_lock, flags);
}
-static void clocksource_watchdog(unsigned long data)
+static void clocksource_watchdog(struct timer_list *unused)
{
struct clocksource *cs;
u64 csnow, wdnow, cslast, wdlast, delta;
{
if (watchdog_running || !watchdog || list_empty(&watchdog_list))
return;
- init_timer(&watchdog_timer);
- watchdog_timer.function = clocksource_watchdog;
+ timer_setup(&watchdog_timer, clocksource_watchdog, 0);
watchdog_timer.expires = jiffies + WATCHDOG_INTERVAL;
add_timer_on(&watchdog_timer, cpumask_first(cpu_online_mask));
watchdog_running = 1;
update_fast_timekeeper(&tkr_dummy, &tk_fast_raw);
}
-#ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD
-#warning Please contact your maintainers, as GENERIC_TIME_VSYSCALL_OLD compatibity will disappear soon.
-
-static inline void update_vsyscall(struct timekeeper *tk)
-{
- struct timespec xt, wm;
-
- xt = timespec64_to_timespec(tk_xtime(tk));
- wm = timespec64_to_timespec(tk->wall_to_monotonic);
- update_vsyscall_old(&xt, &wm, tk->tkr_mono.clock, tk->tkr_mono.mult,
- tk->tkr_mono.cycle_last);
-}
-
-static inline void old_vsyscall_fixup(struct timekeeper *tk)
-{
- s64 remainder;
-
- /*
- * Store only full nanoseconds into xtime_nsec after rounding
- * it up and add the remainder to the error difference.
- * XXX - This is necessary to avoid small 1ns inconsistnecies caused
- * by truncating the remainder in vsyscalls. However, it causes
- * additional work to be done in timekeeping_adjust(). Once
- * the vsyscall implementations are converted to use xtime_nsec
- * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD
- * users are removed, this can be killed.
- */
- remainder = tk->tkr_mono.xtime_nsec & ((1ULL << tk->tkr_mono.shift) - 1);
- if (remainder != 0) {
- tk->tkr_mono.xtime_nsec -= remainder;
- tk->tkr_mono.xtime_nsec += 1ULL << tk->tkr_mono.shift;
- tk->ntp_error += remainder << tk->ntp_error_shift;
- tk->ntp_error -= (1ULL << tk->tkr_mono.shift) << tk->ntp_error_shift;
- }
-}
-#else
-#define old_vsyscall_fixup(tk)
-#endif
-
static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
/* correct the clock when NTP error is too big */
timekeeping_adjust(tk, offset);
- /*
- * XXX This can be killed once everyone converts
- * to the new update_vsyscall.
- */
- old_vsyscall_fixup(tk);
-
/*
* Finally, make sure that after the rounding
* xtime_nsec isn't larger than NSEC_PER_SEC
debug_object_assert_init(timer, &timer_debug_descr);
}
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
+static void do_init_timer(struct timer_list *timer,
+ void (*func)(struct timer_list *),
+ unsigned int flags,
const char *name, struct lock_class_key *key);
-void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
+void init_timer_on_stack_key(struct timer_list *timer,
+ void (*func)(struct timer_list *),
+ unsigned int flags,
const char *name, struct lock_class_key *key)
{
debug_object_init_on_stack(timer, &timer_debug_descr);
- do_init_timer(timer, flags, name, key);
+ do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
debug_timer_assert_init(timer);
}
-static void do_init_timer(struct timer_list *timer, unsigned int flags,
+static void do_init_timer(struct timer_list *timer,
+ void (*func)(struct timer_list *),
+ unsigned int flags,
const char *name, struct lock_class_key *key)
{
timer->entry.pprev = NULL;
+ timer->function = func;
timer->flags = flags | raw_smp_processor_id();
lockdep_init_map(&timer->lockdep_map, name, key, 0);
}
/**
* init_timer_key - initialize a timer
* @timer: the timer to be initialized
+ * @func: timer callback function
* @flags: timer flags
* @name: name of the timer
* @key: lockdep class key of the fake lock used for tracking timer
* init_timer_key() must be done to a timer prior calling *any* of the
* other timer functions.
*/
-void init_timer_key(struct timer_list *timer, unsigned int flags,
+void init_timer_key(struct timer_list *timer,
+ void (*func)(struct timer_list *), unsigned int flags,
const char *name, struct lock_class_key *key)
{
debug_init(timer);
- do_init_timer(timer, flags, name, key);
+ do_init_timer(timer, func, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
* add_timer - start a timer
* @timer: the timer to be added
*
- * The kernel will do a ->function(->data) callback from the
+ * The kernel will do a ->function(@timer) callback from the
* timer interrupt at the ->expires point in the future. The
* current time is 'jiffies'.
*
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
+ * The timer's ->expires and ->function fields must be set prior to
+ * calling this function.
*
* Timers with an ->expires field in the past will be executed in the next
* timer tick.
EXPORT_SYMBOL(del_timer_sync);
#endif
-static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
- unsigned long data)
+static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list *))
{
int count = preempt_count();
lock_map_acquire(&lockdep_map);
trace_timer_expire_entry(timer);
- fn(data);
+ fn(timer);
trace_timer_expire_exit(timer);
lock_map_release(&lockdep_map);
{
while (!hlist_empty(head)) {
struct timer_list *timer;
- void (*fn)(unsigned long);
- unsigned long data;
+ void (*fn)(struct timer_list *);
timer = hlist_entry(head->first, struct timer_list, entry);
detach_timer(timer, true);
fn = timer->function;
- data = timer->data;
if (timer->flags & TIMER_IRQSAFE) {
raw_spin_unlock(&base->lock);
- call_timer_fn(timer, fn, data);
+ call_timer_fn(timer, fn);
raw_spin_lock(&base->lock);
} else {
raw_spin_unlock_irq(&base->lock);
- call_timer_fn(timer, fn, data);
+ call_timer_fn(timer, fn);
raw_spin_lock_irq(&base->lock);
}
}
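
This is the conversion pattern applied throughout the remainder of this
series: the callback now receives the struct timer_list pointer itself, and
from_timer() (a container_of() wrapper) recovers the enclosing object,
replacing the old (unsigned long) data cookie. A minimal sketch of a
converted user; the struct and field names here are hypothetical::

    struct my_dev {
            struct timer_list poll_timer;
            int pending;
    };

    static void my_dev_poll(struct timer_list *t)
    {
            /* recover the containing object from its timer_list member */
            struct my_dev *dev = from_timer(dev, t, poll_timer);

            dev->pending = 0;
    }

    /* at setup time the callback is bound once; no context cast needed */
    timer_setup(&dev->poll_timer, my_dev_poll, 0);
    mod_timer(&dev->poll_timer, jiffies + HZ);
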
{
struct proc_dir_entry *pe;
- pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
if (!pe)
return -ENOMEM;
return 0;
return ret;
if (copy_to_user(arg, &buts, sizeof(buts))) {
- blk_trace_remove(q);
+ __blk_trace_remove(q);
return -EFAULT;
}
return 0;
return ret;
if (copy_to_user(arg, &buts.name, ARRAY_SIZE(buts.name))) {
- blk_trace_remove(q);
+ __blk_trace_remove(q);
return -EFAULT;
}
*
**/
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
- u32 what, int error, union kernfs_node_id *cgid)
+ u32 what, int error)
{
struct blk_trace *bt = q->blk_trace;
return;
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
- bio_op(bio), bio->bi_opf, what, error, 0, NULL, cgid);
+ bio_op(bio), bio->bi_opf, what, error, 0, NULL,
+ blk_trace_bio_get_cgid(q, bio));
}
static void blk_add_trace_bio_bounce(void *ignore,
struct request_queue *q, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0,
- blk_trace_bio_get_cgid(q, bio));
+ blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
}
static void blk_add_trace_bio_complete(void *ignore,
struct request_queue *q, struct bio *bio,
int error)
{
- blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error,
- blk_trace_bio_get_cgid(q, bio));
+ blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
}
static void blk_add_trace_bio_backmerge(void *ignore,
struct request *rq,
struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0,
- blk_trace_bio_get_cgid(q, bio));
+ blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
}
static void blk_add_trace_bio_frontmerge(void *ignore,
struct request *rq,
struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0,
- blk_trace_bio_get_cgid(q, bio));
+ blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
}
static void blk_add_trace_bio_queue(void *ignore,
struct request_queue *q, struct bio *bio)
{
- blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0,
- blk_trace_bio_get_cgid(q, bio));
+ blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
}
static void blk_add_trace_getrq(void *ignore,
struct bio *bio, int rw)
{
if (bio)
- blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0,
- blk_trace_bio_get_cgid(q, bio));
+ blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
else {
struct blk_trace *bt = q->blk_trace;
struct bio *bio, int rw)
{
if (bio)
- blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0,
- blk_trace_bio_get_cgid(q, bio));
+ blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
else {
struct blk_trace *bt = q->blk_trace;
BPF_CALL_3(bpf_probe_read, void *, dst, u32, size, const void *, unsafe_ptr)
{
- int ret = 0;
-
- if (unlikely(size == 0))
- goto out;
+ int ret;
ret = probe_kernel_read(dst, unsafe_ptr, size);
if (unlikely(ret < 0))
memset(dst, 0, size);
- out:
return ret;
}
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
- .arg5_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_UNINIT_MEM,
- .arg2_type = ARG_CONST_SIZE,
+ .arg2_type = ARG_CONST_SIZE_OR_ZERO,
.arg3_type = ARG_ANYTHING,
};
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
- .arg5_type = ARG_CONST_SIZE,
+ .arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
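
Relaxing these helper prototypes from ARG_CONST_SIZE to
ARG_CONST_SIZE_OR_ZERO lets the verifier accept a length whose proven range
includes zero, so BPF programs no longer need a separate non-zero test
before such calls. A hedged sketch of program code this enables, where len
is only known to lie in [0, sizeof(buf)]::

    char buf[64];

    if (len <= sizeof(buf))         /* no extra "len > 0" check required */
            bpf_probe_read(buf, len, unsafe_ptr);
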
BPF_CALL_3(bpf_get_stackid_tp, void *, tp_buff, struct bpf_map *, map,
struct work_struct *work = &dwork->work;
WARN_ON_ONCE(!wq);
- WARN_ON_ONCE(timer->function != (TIMER_FUNC_TYPE)delayed_work_timer_fn);
+ WARN_ON_ONCE(timer->function != delayed_work_timer_fn);
WARN_ON_ONCE(timer_pending(timer));
WARN_ON_ONCE(!list_empty(&work->entry));
}
core_initcall(prandom_init);
-static void __prandom_timer(unsigned long dontcare);
+static void __prandom_timer(struct timer_list *unused);
static DEFINE_TIMER(seed_timer, __prandom_timer);
-static void __prandom_timer(unsigned long dontcare)
+static void __prandom_timer(struct timer_list *unused)
{
u32 entropy;
unsigned long expires;
#define PAD_SIZE 16
#define FILL_CHAR '$'
-#define PTR1 ((void*)0x01234567)
-#define PTR2 ((void*)(long)(int)0xfedcba98)
-
-#if BITS_PER_LONG == 64
-#define PTR1_ZEROES "000000000"
-#define PTR1_SPACES " "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fffffffffedcba98"
-#define PTR_WIDTH 16
-#else
-#define PTR1_ZEROES "0"
-#define PTR1_SPACES " "
-#define PTR1_STR "1234567"
-#define PTR2_STR "fedcba98"
-#define PTR_WIDTH 8
-#endif
-#define PTR_WIDTH_STR stringify(PTR_WIDTH)
-
static unsigned total_tests __initdata;
static unsigned failed_tests __initdata;
static char *test_buffer __initdata;
test("a | | ", "%-3.s|%-3.0s|%-3.*s", "a", "b", 0, "c");
}
+#define PLAIN_BUF_SIZE 64 /* leave some space so we don't oops */
+
+#if BITS_PER_LONG == 64
+
+#define PTR_WIDTH 16
+#define PTR ((void *)0xffff0123456789ab)
+#define PTR_STR "ffff0123456789ab"
+#define ZEROS "00000000" /* hex 32 zero bits */
+
+static int __init
+plain_format(void)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int nchars;
+
+ nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+ if (nchars != PTR_WIDTH || strncmp(buf, ZEROS, strlen(ZEROS)) != 0)
+ return -1;
+
+ return 0;
+}
+
+#else
+
+#define PTR_WIDTH 8
+#define PTR ((void *)0x456789ab)
+#define PTR_STR "456789ab"
+
+static int __init
+plain_format(void)
+{
+ /* Format is implicitly tested for 32 bit machines by plain_hash() */
+ return 0;
+}
+
+#endif /* BITS_PER_LONG == 64 */
+
+static int __init
+plain_hash(void)
+{
+ char buf[PLAIN_BUF_SIZE];
+ int nchars;
+
+ nchars = snprintf(buf, PLAIN_BUF_SIZE, "%p", PTR);
+
+ if (nchars != PTR_WIDTH || strncmp(buf, PTR_STR, PTR_WIDTH) == 0)
+ return -1;
+
+ return 0;
+}
+
+/*
+ * We can't use test() to test %p because we don't know what output to expect
+ * after an address is hashed.
+ */
static void __init
plain(void)
{
- test(PTR1_ZEROES PTR1_STR " " PTR2_STR, "%p %p", PTR1, PTR2);
- /*
- * The field width is overloaded for some %p extensions to
- * pass another piece of information. For plain pointers, the
- * behaviour is slightly odd: One cannot pass either the 0
- * flag nor a precision to %p without gcc complaining, and if
- * one explicitly gives a field width, the number is no longer
- * zero-padded.
- */
- test("|" PTR1_STR PTR1_SPACES " | " PTR1_SPACES PTR1_STR "|",
- "|%-*p|%*p|", PTR_WIDTH+2, PTR1, PTR_WIDTH+2, PTR1);
- test("|" PTR2_STR " | " PTR2_STR "|",
- "|%-*p|%*p|", PTR_WIDTH+2, PTR2, PTR_WIDTH+2, PTR2);
+ int err;
- /*
- * Unrecognized %p extensions are treated as plain %p, but the
- * alphanumeric suffix is ignored (that is, does not occur in
- * the output.)
- */
- test("|"PTR1_ZEROES PTR1_STR"|", "|%p0y|", PTR1);
- test("|"PTR2_STR"|", "|%p0y|", PTR2);
+ err = plain_hash();
+ if (err) {
+ pr_warn("plain 'p' does not appear to be hashed\n");
+ failed_tests++;
+ return;
+ }
+
+ err = plain_format();
+ if (err) {
+ pr_warn("hashing plain 'p' has unexpected format\n");
+ failed_tests++;
+ }
}
static void __init
static void __init
kernel_ptr(void)
{
+ /* We can't test this without access to kptr_restrict. */
}
static void __init
#include <linux/uuid.h>
#include <linux/of.h>
#include <net/addrconf.h>
+#include <linux/siphash.h>
+#include <linux/compiler.h>
#ifdef CONFIG_BLOCK
#include <linux/blkdev.h>
#endif
return string(buf, end, uuid, spec);
}
+int kptr_restrict __read_mostly;
+
+static noinline_for_stack
+char *restricted_pointer(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ switch (kptr_restrict) {
+ case 0:
+ /* Always print %pK values */
+ break;
+ case 1: {
+ const struct cred *cred;
+
+ /*
+ * kptr_restrict==1 cannot be used in IRQ context
+ * because its test for CAP_SYSLOG would be meaningless.
+ */
+ if (in_irq() || in_serving_softirq() || in_nmi())
+ return string(buf, end, "pK-error", spec);
+
+ /*
+ * Only print the real pointer value if the current
+ * process has CAP_SYSLOG and is running with the
+ * same credentials it started with. This is because
+ * access to files is checked at open() time, but %pK
+ * checks permission at read() time. We don't want to
+ * leak pointer values if a binary opens a file using
+ * %pK and then elevates privileges before reading it.
+ */
+ cred = current_cred();
+ if (!has_capability_noaudit(current, CAP_SYSLOG) ||
+ !uid_eq(cred->euid, cred->uid) ||
+ !gid_eq(cred->egid, cred->gid))
+ ptr = NULL;
+ break;
+ }
+ case 2:
+ default:
+ /* Always print 0's for %pK */
+ ptr = NULL;
+ break;
+ }
+
+ return number(buf, end, (unsigned long)ptr, spec);
+}
+
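+
The semantics are unchanged from the open-coded %pK handling this factors
out (removed from pointer() further below). Illustratively, a reader of a
%pK value sees, depending on the sysctl (values made up)::

    kptr_restrict == 0:  ffff8800deadbeef   (always the real value)
    kptr_restrict == 1:  real value only with CAP_SYSLOG and unchanged
                         credentials; otherwise 0000000000000000
    kptr_restrict == 2:  0000000000000000   (always zeroed)
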
static noinline_for_stack
char *netdev_bits(char *buf, char *end, const void *addr, const char *fmt)
{
return widen_string(buf, buf - buf_start, end, spec);
}
-int kptr_restrict __read_mostly;
+static noinline_for_stack
+char *pointer_string(char *buf, char *end, const void *ptr,
+ struct printf_spec spec)
+{
+ spec.base = 16;
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = 2 * sizeof(ptr);
+ spec.flags |= ZEROPAD;
+ }
+
+ return number(buf, end, (unsigned long int)ptr, spec);
+}
+
+static bool have_filled_random_ptr_key __read_mostly;
+static siphash_key_t ptr_key __read_mostly;
+
+static void fill_random_ptr_key(struct random_ready_callback *unused)
+{
+ get_random_bytes(&ptr_key, sizeof(ptr_key));
+ /*
+ * have_filled_random_ptr_key==true is dependent on get_random_bytes().
+ * ptr_to_id() needs to see have_filled_random_ptr_key==true
+ * after get_random_bytes() returns.
+ */
+ smp_mb();
+ WRITE_ONCE(have_filled_random_ptr_key, true);
+}
+
+static struct random_ready_callback random_ready = {
+ .func = fill_random_ptr_key
+};
+
+static int __init initialize_ptr_random(void)
+{
+ int ret = add_random_ready_callback(&random_ready);
+
+ if (!ret) {
+ return 0;
+ } else if (ret == -EALREADY) {
+ fill_random_ptr_key(&random_ready);
+ return 0;
+ }
+
+ return ret;
+}
+early_initcall(initialize_ptr_random);
+
+/* Maps a pointer to a 32 bit unique identifier. */
+static char *ptr_to_id(char *buf, char *end, void *ptr, struct printf_spec spec)
+{
+ unsigned long hashval;
+ const int default_width = 2 * sizeof(ptr);
+
+ if (unlikely(!have_filled_random_ptr_key)) {
+ spec.field_width = default_width;
+ /* string length must be less than default_width */
+ return string(buf, end, "(ptrval)", spec);
+ }
+
+#ifdef CONFIG_64BIT
+ hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
+ /*
+ * Mask off the first 32 bits, this makes explicit that we have
+ * modified the address (and 32 bits is plenty for a unique ID).
+ */
+ hashval = hashval & 0xffffffff;
+#else
+ hashval = (unsigned long)siphash_1u32((u32)ptr, &ptr_key);
+#endif
+
+ spec.flags |= SMALL;
+ if (spec.field_width == -1) {
+ spec.field_width = default_width;
+ spec.flags |= ZEROPAD;
+ }
+ spec.base = 16;
+
+ return number(buf, end, hashval, spec);
+}
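
Note the fallback above: until the random core can supply the siphash key,
ptr_to_id() prints the literal "(ptrval)" string rather than a hash, so
early-boot %p output degrades gracefully instead of leaking an address.
Illustratively (the hash varies per boot)::

    pr_info("dev at %p\n", dev);
    /* before the key is ready:  dev at         (ptrval)
     * after random init:        dev at 00000000f39ab2c1
     */
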
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* c major compatible string
* C full compatible string
*
+ * - 'x' For printing the address. Equivalent to "%lx".
+ *
* ** Please update also Documentation/printk-formats.txt when making changes **
*
* Note: The difference between 'S' and 'F' is that on ia64 and ppc64
* function pointers are really function descriptors, which contain a
* pointer to the real address.
+ *
+ * Note: The default behaviour (unadorned %p) is to hash the address,
+ * rendering it useful as a unique identifier.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return buf;
}
case 'K':
- switch (kptr_restrict) {
- case 0:
- /* Always print %pK values */
- break;
- case 1: {
- const struct cred *cred;
-
- /*
- * kptr_restrict==1 cannot be used in IRQ context
- * because its test for CAP_SYSLOG would be meaningless.
- */
- if (in_irq() || in_serving_softirq() || in_nmi()) {
- if (spec.field_width == -1)
- spec.field_width = default_width;
- return string(buf, end, "pK-error", spec);
- }
-
- /*
- * Only print the real pointer value if the current
- * process has CAP_SYSLOG and is running with the
- * same credentials it started with. This is because
- * access to files is checked at open() time, but %pK
- * checks permission at read() time. We don't want to
- * leak pointer values if a binary opens a file using
- * %pK and then elevates privileges before reading it.
- */
- cred = current_cred();
- if (!has_capability_noaudit(current, CAP_SYSLOG) ||
- !uid_eq(cred->euid, cred->uid) ||
- !gid_eq(cred->egid, cred->gid))
- ptr = NULL;
- break;
- }
- case 2:
- default:
- /* Always print 0's for %pK */
- ptr = NULL;
+ if (!kptr_restrict)
break;
- }
- break;
-
+ return restricted_pointer(buf, end, ptr, spec);
case 'N':
return netdev_bits(buf, end, ptr, fmt);
case 'a':
case 'F':
return device_node_string(buf, end, ptr, spec, fmt + 1);
}
+ case 'x':
+ return pointer_string(buf, end, ptr, spec);
}
- spec.flags |= SMALL;
- if (spec.field_width == -1) {
- spec.field_width = default_width;
- spec.flags |= ZEROPAD;
- }
- spec.base = 16;
- return number(buf, end, (unsigned long) ptr, spec);
+ /* default is to _not_ leak addresses, hash before printing */
+ return ptr_to_id(buf, end, ptr, spec);
}
/*
.release = single_release,
};
-static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
+static int bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
+ if (!bdi_debug_root)
+ return -ENOMEM;
+
bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
+ if (!bdi->debug_dir)
+ return -ENOMEM;
+
bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
bdi, &bdi_debug_stats_fops);
+ if (!bdi->debug_stats) {
+ debugfs_remove(bdi->debug_dir);
+ return -ENOMEM;
+ }
+
+ return 0;
}
static void bdi_debug_unregister(struct backing_dev_info *bdi)
static inline void bdi_debug_init(void)
{
}
-static inline void bdi_debug_register(struct backing_dev_info *bdi,
+static inline int bdi_debug_register(struct backing_dev_info *bdi,
const char *name)
{
+ return 0;
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
if (IS_ERR(dev))
return PTR_ERR(dev);
+ if (bdi_debug_register(bdi, dev_name(dev))) {
+ device_destroy(bdi_class, dev->devt);
+ return -ENOMEM;
+ }
cgwb_bdi_register(bdi);
bdi->dev = dev;
- bdi_debug_register(bdi, dev_name(dev));
set_bit(WB_registered, &bdi->wb.state);
spin_lock_bh(&bdi_lock);
ret = -EFAULT;
goto out;
}
+
+ /*
+ * While get_vaddr_frames() could be used for transient (kernel
+ * controlled lifetime) pinning of memory pages all current
+ * users establish long term (userspace controlled lifetime)
+ * page pinning. Treat get_vaddr_frames() like
+ * get_user_pages_longterm() and disallow it for filesystem-dax
+ * mappings.
+ */
+ if (vma_is_fsdax(vma))
+ return -EOPNOTSUPP;
+
if (!(vma->vm_flags & (VM_IO | VM_PFNMAP))) {
vec->got_ref = true;
vec->is_pfns = false;
*/
static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
{
- return pte_write(pte) ||
+ return pte_access_permitted(pte, WRITE) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}
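
This substitution, repeated for the pmd/pud variants in the hunks below, is
more than cosmetic: the *_access_permitted() helpers test the write bit and
also give the architecture a veto (for example protection-key enforcement),
where a bare pte_write() looked only at the entry itself. Sketched::

    /* old: entry bit only */
    if ((flags & FOLL_WRITE) && !pte_write(pte))
            goto unlock;

    /* new: entry bit plus any arch-specific access policy */
    if (!pte_access_permitted(pte, flags & FOLL_WRITE))
            goto unlock;
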
}
EXPORT_SYMBOL(get_user_pages);
+#ifdef CONFIG_FS_DAX
+/*
+ * This is the same as get_user_pages() in that it assumes we are
+ * operating on the current task's mm, but it goes further to validate
+ * that the vmas associated with the address range are suitable for
+ * longterm elevated page reference counts. For example, filesystem-dax
+ * mappings are subject to the lifetime enforced by the filesystem and
+ * we need guarantees that longterm users like RDMA and V4L2 only
+ * establish mappings that have a kernel enforced revocation mechanism.
+ *
+ * "longterm" == userspace controlled elevated page count lifetime.
+ * Contrast this to iov_iter_get_pages() usages which are transient.
+ */
+long get_user_pages_longterm(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas_arg)
+{
+ struct vm_area_struct **vmas = vmas_arg;
+ struct vm_area_struct *vma_prev = NULL;
+ long rc, i;
+
+ if (!pages)
+ return -EINVAL;
+
+ if (!vmas) {
+ vmas = kcalloc(nr_pages, sizeof(struct vm_area_struct *),
+ GFP_KERNEL);
+ if (!vmas)
+ return -ENOMEM;
+ }
+
+ rc = get_user_pages(start, nr_pages, gup_flags, pages, vmas);
+
+ for (i = 0; i < rc; i++) {
+ struct vm_area_struct *vma = vmas[i];
+
+ if (vma == vma_prev)
+ continue;
+
+ vma_prev = vma;
+
+ if (vma_is_fsdax(vma))
+ break;
+ }
+
+ /*
+ * Either get_user_pages() failed, or the vma validation
+	 * succeeded; in either case we don't need to put_page() before
+ * returning.
+ */
+ if (i >= rc)
+ goto out;
+
+ for (i = 0; i < rc; i++)
+ put_page(pages[i]);
+ rc = -EOPNOTSUPP;
+out:
+ if (vmas != vmas_arg)
+ kfree(vmas);
+ return rc;
+}
+EXPORT_SYMBOL(get_user_pages_longterm);
+#endif /* CONFIG_FS_DAX */
+
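
A caller sketch, assuming a driver pinning user memory for long-lived DMA
(the surrounding names are hypothetical)::

    struct page **pages;
    long pinned;

    pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
    if (!pages)
            return -ENOMEM;

    /* filesystem-dax vmas are rejected with -EOPNOTSUPP */
    pinned = get_user_pages_longterm(start, nr_pages, FOLL_WRITE,
                                     pages, NULL);
    if (pinned < 0) {
            kfree(pages);
            return pinned;
    }
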
/**
* populate_vma_page_range() - populate a range of pages in the vma.
* @vma: target vma
if (pmd_protnone(pmd))
return hmm_vma_walk_clear(start, end, walk);
- if (write_fault && !pmd_write(pmd))
+ if (!pmd_access_permitted(pmd, write_fault))
return hmm_vma_walk_clear(start, end, walk);
pfn = pmd_pfn(pmd) + pte_index(addr);
- flag |= pmd_write(pmd) ? HMM_PFN_WRITE : 0;
+ flag |= pmd_access_permitted(pmd, WRITE) ? HMM_PFN_WRITE : 0;
for (; addr < end; addr += PAGE_SIZE, i++, pfn++)
pfns[i] = hmm_pfn_t_from_pfn(pfn) | flag;
return 0;
continue;
}
- if (write_fault && !pte_write(pte))
+ if (!pte_access_permitted(pte, write_fault))
goto fault;
pfns[i] = hmm_pfn_t_from_pfn(pte_pfn(pte)) | flag;
- pfns[i] |= pte_write(pte) ? HMM_PFN_WRITE : 0;
+ pfns[i] |= pte_access_permitted(pte, WRITE) ? HMM_PFN_WRITE : 0;
continue;
fault:
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
static void touch_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd)
+ pmd_t *pmd, int flags)
{
pmd_t _pmd;
- /*
- * We should set the dirty bit only for FOLL_WRITE but for now
- * the dirty bit in the pmd is meaningless. And if the dirty
- * bit will become meaningful and we'll only set it with
- * FOLL_WRITE, an atomic set_bit will be required on the pmd to
- * set the young bit, instead of the current set_pmd_at.
- */
- _pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
+ _pmd = pmd_mkyoung(*pmd);
+ if (flags & FOLL_WRITE)
+ _pmd = pmd_mkdirty(_pmd);
if (pmdp_set_access_flags(vma, addr & HPAGE_PMD_MASK,
- pmd, _pmd, 1))
+ pmd, _pmd, flags & FOLL_WRITE))
update_mmu_cache_pmd(vma, addr, pmd);
}
*/
WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
- if (flags & FOLL_WRITE && !pmd_write(*pmd))
+ if (!pmd_access_permitted(*pmd, flags & FOLL_WRITE))
return NULL;
if (pmd_present(*pmd) && pmd_devmap(*pmd))
return NULL;
if (flags & FOLL_TOUCH)
- touch_pmd(vma, addr, pmd);
+ touch_pmd(vma, addr, pmd, flags);
/*
* device mapped pages can only be returned if the
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
static void touch_pud(struct vm_area_struct *vma, unsigned long addr,
- pud_t *pud)
+ pud_t *pud, int flags)
{
pud_t _pud;
- /*
- * We should set the dirty bit only for FOLL_WRITE but for now
- * the dirty bit in the pud is meaningless. And if the dirty
- * bit will become meaningful and we'll only set it with
- * FOLL_WRITE, an atomic set_bit will be required on the pud to
- * set the young bit, instead of the current set_pud_at.
- */
- _pud = pud_mkyoung(pud_mkdirty(*pud));
+ _pud = pud_mkyoung(*pud);
+ if (flags & FOLL_WRITE)
+ _pud = pud_mkdirty(_pud);
if (pudp_set_access_flags(vma, addr & HPAGE_PUD_MASK,
- pud, _pud, 1))
+ pud, _pud, flags & FOLL_WRITE))
update_mmu_cache_pud(vma, addr, pud);
}
assert_spin_locked(pud_lockptr(mm, pud));
- if (flags & FOLL_WRITE && !pud_write(*pud))
+ if (!pud_access_permitted(*pud, flags & FOLL_WRITE))
return NULL;
if (pud_present(*pud) && pud_devmap(*pud))
return NULL;
if (flags & FOLL_TOUCH)
- touch_pud(vma, addr, pud);
+ touch_pud(vma, addr, pud, flags);
/*
* device mapped pages can only be returned if the
*/
static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
{
- return pmd_write(pmd) ||
+ return pmd_access_permitted(pmd, WRITE) ||
((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
}
page = pmd_page(*pmd);
VM_BUG_ON_PAGE(!PageHead(page) && !is_zone_device_page(page), page);
if (flags & FOLL_TOUCH)
- touch_pmd(vma, addr, pmd);
+ touch_pmd(vma, addr, pmd, flags);
if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
/*
* We don't mlock() pte-mapped THPs. This way we can avoid
}
}
+static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ if (addr & ~(huge_page_mask(hstate_vma(vma))))
+ return -EINVAL;
+ return 0;
+}
+
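
The check itself is not new (the open-coded hugetlb test removed from
__split_vma() further below did the same); it now lives behind a generic
vm_ops hook that any mapping type can implement. Sketched from userspace::

    /* assume addr maps a 2MB hugetlb page */
    munmap(addr + 4096, 4096);      /* ->split() rejects this: -EINVAL */
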
/*
* We cannot handle pagefaults against hugetlb pages at all. They cause
* handle_mm_fault() to try to instantiate regular-sized pages in the
.fault = hugetlb_vm_op_fault,
.open = hugetlb_vm_op_open,
.close = hugetlb_vm_op_close,
+ .split = hugetlb_vm_op_split,
};
static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
pte_t *pte = NULL;
pgd = pgd_offset(mm, addr);
- p4d = p4d_offset(pgd, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ if (!p4d)
+ return NULL;
pud = pud_alloc(mm, p4d, addr);
if (pud) {
if (sz == PUD_SIZE) {
pr_err("BUG: KASAN: %s in %pS\n",
bug_type, (void *)info->ip);
- pr_err("%s of size %zu at addr %p by task %s/%d\n",
+ pr_err("%s of size %zu at addr %px by task %s/%d\n",
info->is_write ? "Write" : "Read", info->access_size,
info->access_addr, current->comm, task_pid_nr(current));
}
const char *rel_type;
int rel_bytes;
- pr_err("The buggy address belongs to the object at %p\n"
+ pr_err("The buggy address belongs to the object at %px\n"
" which belongs to the cache %s of size %d\n",
object, cache->name, cache->object_size);
}
pr_err("The buggy address is located %d bytes %s of\n"
- " %d-byte region [%p, %p)\n",
+ " %d-byte region [%px, %px)\n",
rel_bytes, rel_type, cache->object_size, (void *)object_addr,
(void *)(object_addr + cache->object_size));
}
char shadow_buf[SHADOW_BYTES_PER_ROW];
snprintf(buffer, sizeof(buffer),
- (i == 0) ? ">%p: " : " %p: ", kaddr);
+ (i == 0) ? ">%px: " : " %px: ", kaddr);
/*
* We should not pass a shadow pointer to generic
* function, because generic functions may try to
if (page_count(page) == 0)
continue;
scan_block(page, page + 1, NULL);
+ if (!(pfn % (MAX_SCAN_SIZE / sizeof(*page))))
+ cond_resched();
}
}
put_online_mems();
{
struct file *file = vma->vm_file;
+ *prev = vma;
#ifdef CONFIG_SWAP
if (!file) {
- *prev = vma;
force_swapin_readahead(vma, start, end);
return 0;
}
if (shmem_mapping(file->f_mapping)) {
- *prev = vma;
force_shm_swapin_readahead(vma, start, end,
file->f_mapping);
return 0;
return 0;
}
- *prev = vma;
start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
if (end > vma->vm_end)
end = vma->vm_end;
memcg_check_events(memcg, page);
if (!mem_cgroup_is_root(memcg))
- css_put(&memcg->css);
+ css_put_many(&memcg->css, nr_entries);
}
/**
if (unlikely(!pte_same(*vmf->pte, entry)))
goto unlock;
if (vmf->flags & FAULT_FLAG_WRITE) {
- if (!pte_write(entry))
+ if (!pte_access_permitted(entry, WRITE))
return do_wp_page(vmf);
entry = pte_mkdirty(entry);
}
/* NUMA case for anonymous PUDs would go here */
- if (dirty && !pud_write(orig_pud)) {
+ if (dirty && !pud_access_permitted(orig_pud, WRITE)) {
ret = wp_huge_pud(&vmf, orig_pud);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
return do_huge_pmd_numa_page(&vmf, orig_pmd);
- if (dirty && !pmd_write(orig_pmd)) {
+ if (dirty && !pmd_access_permitted(orig_pmd, WRITE)) {
ret = wp_huge_pmd(&vmf, orig_pmd);
if (!(ret & VM_FAULT_FALLBACK))
return ret;
goto out;
pte = *ptep;
- if ((flags & FOLL_WRITE) && !pte_write(pte))
+ if (!pte_access_permitted(pte, flags & FOLL_WRITE))
goto unlock;
*prot = pgprot_val(pte_pgprot(pte));
struct vm_area_struct *new;
int err;
- if (is_vm_hugetlb_page(vma) && (addr &
- ~(huge_page_mask(hstate_vma(vma)))))
- return -EINVAL;
+ if (vma->vm_ops && vma->vm_ops->split) {
+ err = vma->vm_ops->split(vma, addr);
+ if (err)
+ return err;
+ }
new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
if (!new)
*/
set_bit(MMF_UNSTABLE, &mm->flags);
- tlb_gather_mmu(&tlb, mm, 0, -1);
for (vma = mm->mmap ; vma; vma = vma->vm_next) {
if (!can_madv_dontneed_vma(vma))
continue;
* we do not want to block exit_mmap by keeping mm ref
* count elevated without a good reason.
*/
- if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
+ if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+ tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
NULL);
+ tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+ }
}
- tlb_finish_mmu(&tlb, 0, -1);
pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
task_pid_nr(tsk), tsk->comm,
K(get_mm_counter(mm, MM_ANONPAGES)),
else
bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE;
- if (unlikely(bg_thresh >= thresh)) {
- pr_warn("vm direct limit must be set greater than background limit.\n");
+ if (bg_thresh >= thresh)
bg_thresh = thresh / 2;
- }
-
tsk = current;
if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32;
}
#ifdef CONFIG_BLOCK
-void laptop_mode_timer_fn(unsigned long data)
+void laptop_mode_timer_fn(struct timer_list *t)
{
- struct request_queue *q = (struct request_queue *)data;
+ struct backing_dev_info *backing_dev_info =
+ from_timer(backing_dev_info, t, laptop_mode_wb_timer);
- wakeup_flusher_threads_bdi(q->backing_dev_info, WB_REASON_LAPTOP_TIMER);
+ wakeup_flusher_threads_bdi(backing_dev_info, WB_REASON_LAPTOP_TIMER);
}
/*
if (WARN_ON_ONCE(!mm_percpu_wq))
return;
- /* Workqueues cannot recurse */
- if (current->flags & PF_WQ_WORKER)
- return;
-
/*
* Do not drain if one is already in progress unless it's specific to
* a zone. Such callers are primarily CMA and memory hotplug and need
/*
* In case of -EBUSY, we'd like to know which page causes problem.
- * So, just fall through. We will check it in test_pages_isolated().
+ * So, just fall through. test_pages_isolated() has a tracepoint
+ * which will report the busy page.
+ *
+ * It is possible that busy pages could become available before
+ * the call to test_pages_isolated, and the range will actually be
+	 * allocated. So, if we fall through, be sure to clear ret so that
+ * -EBUSY is not accidentally used or returned to caller.
*/
ret = __alloc_contig_migrate_range(&cc, start, end);
if (ret && ret != -EBUSY)
goto done;
+	ret = 0;
/*
* Pages from [start, end) are within a MAX_ORDER_NR_PAGES
* tmpfs instance, limiting inodes to one per page of lowmem;
* but the internal instance is left unlimited.
*/
- if (!(sb->s_flags & MS_KERNMOUNT)) {
+ if (!(sb->s_flags & SB_KERNMOUNT)) {
sbinfo->max_blocks = shmem_default_max_blocks();
sbinfo->max_inodes = shmem_default_max_inodes();
if (shmem_parse_options(data, sbinfo, false)) {
goto failed;
}
} else {
- sb->s_flags |= MS_NOUSER;
+ sb->s_flags |= SB_NOUSER;
}
sb->s_export_op = &shmem_export_ops;
- sb->s_flags |= MS_NOSEC;
+ sb->s_flags |= SB_NOSEC;
#else
- sb->s_flags |= MS_NOUSER;
+ sb->s_flags |= SB_NOUSER;
#endif
spin_lock_init(&sbinfo->stat_lock);
sb->s_xattr = shmem_xattr_handlers;
#endif
#ifdef CONFIG_TMPFS_POSIX_ACL
- sb->s_flags |= MS_POSIXACL;
+ sb->s_flags |= SB_POSIXACL;
#endif
uuid_gen(&sb->s_uuid);
mod_timer(&app->join_timer, jiffies + delay);
}
-static void garp_join_timer(unsigned long data)
+static void garp_join_timer(struct timer_list *t)
{
- struct garp_applicant *app = (struct garp_applicant *)data;
+ struct garp_applicant *app = from_timer(app, t, join_timer);
spin_lock(&app->lock);
garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->garp_port->applicants[appl->type], app);
- setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
+ timer_setup(&app->join_timer, garp_join_timer, 0);
garp_join_timer_arm(app);
return 0;
mod_timer(&app->join_timer, jiffies + delay);
}
-static void mrp_join_timer(unsigned long data)
+static void mrp_join_timer(struct timer_list *t)
{
- struct mrp_applicant *app = (struct mrp_applicant *)data;
+ struct mrp_applicant *app = from_timer(app, t, join_timer);
spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_TX);
jiffies + msecs_to_jiffies(mrp_periodic_time));
}
-static void mrp_periodic_timer(unsigned long data)
+static void mrp_periodic_timer(struct timer_list *t)
{
- struct mrp_applicant *app = (struct mrp_applicant *)data;
+ struct mrp_applicant *app = from_timer(app, t, periodic_timer);
spin_lock(&app->lock);
mrp_mad_event(app, MRP_EVENT_PERIODIC);
spin_lock_init(&app->lock);
skb_queue_head_init(&app->queue);
rcu_assign_pointer(dev->mrp_port->applicants[appl->type], app);
- setup_timer(&app->join_timer, mrp_join_timer, (unsigned long)app);
+ timer_setup(&app->join_timer, mrp_join_timer, 0);
mrp_join_timer_arm(app);
- setup_timer(&app->periodic_timer, mrp_periodic_timer,
- (unsigned long)app);
+ timer_setup(&app->periodic_timer, mrp_periodic_timer, 0);
mrp_periodic_timer_arm(app);
return 0;
}
/* Handle the timer event */
-static void aarp_expire_timeout(unsigned long unused)
+static void aarp_expire_timeout(struct timer_list *unused)
{
int ct;
aarp_dl = register_snap_client(aarp_snap_id, aarp_rcv);
if (!aarp_dl)
printk(KERN_CRIT "Unable to register AARP with SNAP.\n");
- setup_timer(&aarp_timer, aarp_expire_timeout, 0);
+ timer_setup(&aarp_timer, aarp_expire_timeout, 0);
aarp_timer.expires = jiffies + sysctl_aarp_expiry_time;
add_timer(&aarp_timer);
register_netdevice_notifier(&aarp_notifier);
return s;
}
-static void atalk_destroy_timer(unsigned long data)
+static void atalk_destroy_timer(struct timer_list *t)
{
- struct sock *sk = (struct sock *)data;
+ struct sock *sk = from_timer(sk, t, sk_timer);
if (sk_has_allocations(sk)) {
sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
skb_queue_purge(&sk->sk_receive_queue);
if (sk_has_allocations(sk)) {
- setup_timer(&sk->sk_timer, atalk_destroy_timer,
- (unsigned long)sk);
+ timer_setup(&sk->sk_timer, atalk_destroy_timer, 0);
sk->sk_timer.expires = jiffies + SOCK_DESTROY_TIME;
add_timer(&sk->sk_timer);
} else
else
send_to_lecd(priv, l_arp_xmt, mac_to_find, NULL, NULL);
entry->timer.expires = jiffies + (1 * HZ);
- entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_arp;
+ entry->timer.function = lec_arp_expire_arp;
add_timer(&entry->timer);
found = priv->mcast_vcc;
}
entry->old_recv_push = old_push;
entry->status = ESI_UNKNOWN;
entry->timer.expires = jiffies + priv->vcc_timeout_period;
- entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_vcc;
+ entry->timer.function = lec_arp_expire_vcc;
hlist_add_head(&entry->next, &priv->lec_no_forward);
add_timer(&entry->timer);
dump_arp_table(priv);
entry->status = ESI_UNKNOWN;
hlist_add_head(&entry->next, &priv->lec_arp_empty_ones);
entry->timer.expires = jiffies + priv->vcc_timeout_period;
- entry->timer.function = (TIMER_FUNC_TYPE)lec_arp_expire_vcc;
+ entry->timer.function = lec_arp_expire_vcc;
add_timer(&entry->timer);
pr_debug("After vcc was added\n");
dump_arp_table(priv);
struct mpoa_client *mpcs = NULL; /* FIXME */
static struct atm_mpoa_qos *qos_head = NULL;
-static DEFINE_TIMER(mpc_timer, NULL);
+static DEFINE_TIMER(mpc_timer, mpc_cache_check);
static struct mpoa_client *find_mpc_by_itfnum(int itf)
{
mpc_timer.expires = jiffies + (MPC_P2 * HZ);
checking_time = mpc_timer.expires;
- mpc_timer.function = (TIMER_FUNC_TYPE)mpc_cache_check;
add_timer(&mpc_timer);
}
* Switch to Slow Start, set the ss_threshold to half of the current cwnd and
* reset the cwnd to 3*MSS
*/
-static void batadv_tp_sender_timeout(unsigned long arg)
+static void batadv_tp_sender_timeout(struct timer_list *t)
{
- struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
+ struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
struct batadv_priv *bat_priv = tp_vars->bat_priv;
if (atomic_read(&tp_vars->sending) == 0)
atomic64_set(&tp_vars->tot_sent, 0);
kref_get(&tp_vars->refcount);
- setup_timer(&tp_vars->timer, batadv_tp_sender_timeout,
- (unsigned long)tp_vars);
+ timer_setup(&tp_vars->timer, batadv_tp_sender_timeout, 0);
tp_vars->bat_priv = bat_priv;
tp_vars->start_time = jiffies;
* reached without received ack
- * @arg: address of the related tp_vars
+ * @t: address to timer_list inside tp_vars
*/
-static void batadv_tp_receiver_shutdown(unsigned long arg)
+static void batadv_tp_receiver_shutdown(struct timer_list *t)
{
- struct batadv_tp_vars *tp_vars = (struct batadv_tp_vars *)arg;
+ struct batadv_tp_vars *tp_vars = from_timer(tp_vars, t, timer);
struct batadv_tp_unacked *un, *safe;
struct batadv_priv *bat_priv;
hlist_add_head_rcu(&tp_vars->list, &bat_priv->tp_list);
kref_get(&tp_vars->refcount);
- setup_timer(&tp_vars->timer, batadv_tp_receiver_shutdown,
- (unsigned long)tp_vars);
+ timer_setup(&tp_vars->timer, batadv_tp_receiver_shutdown, 0);
batadv_tp_reset_receiver_timer(tp_vars);
}
}
-static void hidp_idle_timeout(unsigned long arg)
+static void hidp_idle_timeout(struct timer_list *t)
{
- struct hidp_session *session = (struct hidp_session *) arg;
+ struct hidp_session *session = from_timer(session, t, timer);
/* The HIDP user-space API only contains calls to add and remove
* devices. There is no way to forward events of any kind. Therefore,
/* device management */
INIT_WORK(&session->dev_init, hidp_session_dev_work);
- setup_timer(&session->timer, hidp_idle_timeout,
- (unsigned long)session);
+ timer_setup(&session->timer, hidp_idle_timeout, 0);
/* session data */
mutex_init(&session->report_mutex);
d->out);
}
-static void rfcomm_session_timeout(unsigned long arg)
+static void rfcomm_session_timeout(struct timer_list *t)
{
- struct rfcomm_session *s = (void *) arg;
+ struct rfcomm_session *s = from_timer(s, t, timer);
BT_DBG("session %p state %ld", s, s->state);
}
/* ---- RFCOMM DLCs ---- */
-static void rfcomm_dlc_timeout(unsigned long arg)
+static void rfcomm_dlc_timeout(struct timer_list *t)
{
- struct rfcomm_dlc *d = (void *) arg;
+ struct rfcomm_dlc *d = from_timer(d, t, timer);
BT_DBG("dlc %p state %ld", d, d->state);
if (!d)
return NULL;
- setup_timer(&d->timer, rfcomm_dlc_timeout, (unsigned long)d);
+ timer_setup(&d->timer, rfcomm_dlc_timeout, 0);
skb_queue_head_init(&d->tx_queue);
mutex_init(&d->lock);
BT_DBG("session %p sock %p", s, sock);
- setup_timer(&s->timer, rfcomm_session_timeout, (unsigned long) s);
+ timer_setup(&s->timer, rfcomm_session_timeout, 0);
INIT_LIST_HEAD(&s->dlcs);
s->state = state;
#define SCO_CONN_TIMEOUT (HZ * 40)
#define SCO_DISCONN_TIMEOUT (HZ * 2)
-static void sco_sock_timeout(unsigned long arg)
+static void sco_sock_timeout(struct timer_list *t)
{
- struct sock *sk = (struct sock *)arg;
+ struct sock *sk = from_timer(sk, t, sk_timer);
BT_DBG("sock %p state %d", sk, sk->sk_state);
sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT;
- setup_timer(&sk->sk_timer, sco_sock_timeout, (unsigned long)sk);
+ timer_setup(&sk->sk_timer, sco_sock_timeout, 0);
bt_sock_link(&sco_sk_list, sk);
return sk;
seq_putc(m, '\n');
- if (net->can.can_stattimer.function == (TIMER_FUNC_TYPE)can_stat_update) {
+ if (net->can.can_stattimer.function == can_stat_update) {
seq_printf(m, " %8ld %% total match ratio (RXMR)\n",
can_stats->total_rx_match_ratio);
user_reset = 1;
- if (net->can.can_stattimer.function == (TIMER_FUNC_TYPE)can_stat_update) {
+ if (net->can.can_stattimer.function == can_stat_update) {
seq_printf(m, "Scheduled statistic reset #%ld.\n",
can_pstats->stats_reset + 1);
} else {
static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
{
if (tx_path)
- return skb->ip_summed != CHECKSUM_PARTIAL;
+ return skb->ip_summed != CHECKSUM_PARTIAL &&
+ skb->ip_summed != CHECKSUM_UNNECESSARY;
return skb->ip_summed == CHECKSUM_NONE;
}
__dev_xdp_attached(dev, bpf_op, NULL))
return -EBUSY;
- if (bpf_op == ops->ndo_bpf)
- prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
- dev);
- else
- prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
+ prog = bpf_prog_get_type_dev(fd, BPF_PROG_TYPE_XDP,
+ bpf_op == ops->ndo_bpf);
if (IS_ERR(prog))
return PTR_ERR(prog);
+
+ if (!(flags & XDP_FLAGS_HW_MODE) &&
+ bpf_prog_is_dev_bound(prog->aux)) {
+ NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported");
+ bpf_prog_put(prog);
+ return -EINVAL;
+ }
}
err = dev_xdp_install(dev, bpf_op, extack, flags, prog);
* in the event that more drops will arrive during the
* hysteresis period.
*/
-static void sched_send_work(unsigned long _data)
+static void sched_send_work(struct timer_list *t)
{
- struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
+ struct per_cpu_dm_data *data = from_timer(data, t, send_timer);
schedule_work(&data->dm_alert_work);
}
for_each_possible_cpu(cpu) {
data = &per_cpu(dm_cpu_data, cpu);
INIT_WORK(&data->dm_alert_work, send_dm_alert);
- setup_timer(&data->send_timer, sched_send_work,
- (unsigned long)data);
+ timer_setup(&data->send_timer, sched_send_work, 0);
spin_lock_init(&data->lock);
reset_per_cpu_data(data);
}
.gpl_only = false,
.pkt_access = true,
.ret_type = RET_INTEGER,
- .arg1_type = ARG_PTR_TO_MEM,
+ .arg1_type = ARG_PTR_TO_MEM_OR_NULL,
.arg2_type = ARG_CONST_SIZE_OR_ZERO,
- .arg3_type = ARG_PTR_TO_MEM,
+ .arg3_type = ARG_PTR_TO_MEM_OR_NULL,
.arg4_type = ARG_CONST_SIZE_OR_ZERO,
.arg5_type = ARG_ANYTHING,
};
}
-static void est_timer(unsigned long arg)
+static void est_timer(struct timer_list *t)
{
- struct net_rate_estimator *est = (struct net_rate_estimator *)arg;
+ struct net_rate_estimator *est = from_timer(est, t, timer);
struct gnet_stats_basic_packed b;
u64 rate, brate;
}
est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
- setup_timer(&est->timer, est_timer, (unsigned long)est);
+ timer_setup(&est->timer, est_timer, 0);
mod_timer(&est->timer, est->next_jiffies);
rcu_assign_pointer(*rate_est, est);
#define PNEIGH_HASHMASK 0xF
-static void neigh_timer_handler(unsigned long arg);
+static void neigh_timer_handler(struct timer_list *t);
static void __neigh_notify(struct neighbour *n, int type, int flags,
u32 pid);
static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
n->output = neigh_blackhole;
seqlock_init(&n->hh.hh_lock);
n->parms = neigh_parms_clone(&tbl->parms);
- setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
+ timer_setup(&n->timer, neigh_timer_handler, 0);
NEIGH_CACHE_STAT_INC(tbl, allocs);
n->tbl = tbl;
/* Called when a timer expires for a neighbour entry. */
-static void neigh_timer_handler(unsigned long arg)
+static void neigh_timer_handler(struct timer_list *t)
{
unsigned long now, next;
- struct neighbour *neigh = (struct neighbour *)arg;
+ struct neighbour *neigh = from_timer(neigh, t, timer);
unsigned int state;
int notify = 0;
}
EXPORT_SYMBOL(neigh_direct_output);
-static void neigh_proxy_process(unsigned long arg)
+static void neigh_proxy_process(struct timer_list *t)
{
- struct neigh_table *tbl = (struct neigh_table *)arg;
+ struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
long sched_next = 0;
unsigned long now = jiffies;
struct sk_buff *skb, *n;
INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
tbl->parms.reachable_time);
- setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
+ timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
skb_queue_head_init_class(&tbl->proxy_queue,
&neigh_table_proxy_queue_class);
struct sk_buff *skb,
const void *daddr);
static int dn_route_input(struct sk_buff *);
-static void dn_run_flush(unsigned long dummy);
+static void dn_run_flush(struct timer_list *unused);
static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned int dn_rt_hash_mask;
return dn_rt_hash_mask & (unsigned int)tmp;
}
-static void dn_dst_check_expire(unsigned long dummy)
+static void dn_dst_check_expire(struct timer_list *unused)
{
int i;
struct dn_route *rt;
return 0;
}
-static void dn_run_flush(unsigned long dummy)
+static void dn_run_flush(struct timer_list *unused)
{
int i;
struct dn_route *rt, *next;
kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
dst_entries_init(&dn_dst_ops);
- setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
+ timer_setup(&dn_route_timer, dn_dst_check_expire, 0);
dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
add_timer(&dn_route_timer);
#define SLOW_INTERVAL (HZ/2)
-static void dn_slow_timer(unsigned long arg);
+static void dn_slow_timer(struct timer_list *t);
void dn_start_slow_timer(struct sock *sk)
{
- setup_timer(&sk->sk_timer, dn_slow_timer, (unsigned long)sk);
+ timer_setup(&sk->sk_timer, dn_slow_timer, 0);
sk_reset_timer(sk, &sk->sk_timer, jiffies + SLOW_INTERVAL);
}
sk_stop_timer(sk, &sk->sk_timer);
}
-static void dn_slow_timer(unsigned long arg)
+static void dn_slow_timer(struct timer_list *t)
{
- struct sock *sk = (struct sock *)arg;
+ struct sock *sk = from_timer(sk, t, sk_timer);
struct dn_scp *scp = DN_SK(sk);
bh_lock_sock(sk);
INIT_LIST_HEAD(&dst->list);
list_add_tail(&dsa_tree_list, &dst->list);
- /* Initialize the reference counter to the number of switches, not 1 */
kref_init(&dst->refcount);
- refcount_set(&dst->refcount.refcount, 0);
return dst;
}
kfree(dst);
}
-static struct dsa_switch_tree *dsa_tree_touch(int index)
+static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
- struct dsa_switch_tree *dst;
-
- dst = dsa_tree_find(index);
- if (!dst)
- dst = dsa_tree_alloc(index);
+ if (dst)
+ kref_get(&dst->refcount);
return dst;
}
-static void dsa_tree_get(struct dsa_switch_tree *dst)
+static struct dsa_switch_tree *dsa_tree_touch(int index)
{
- kref_get(&dst->refcount);
+ struct dsa_switch_tree *dst;
+
+ dst = dsa_tree_find(index);
+ if (dst)
+ return dsa_tree_get(dst);
+ else
+ return dsa_tree_alloc(index);
}
static void dsa_tree_release(struct kref *ref)
static void dsa_tree_put(struct dsa_switch_tree *dst)
{
- kref_put(&dst->refcount, dsa_tree_release);
+ if (dst)
+ kref_put(&dst->refcount, dsa_tree_release);
}
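
The net effect of this refactor: a new tree now starts life with one
reference held by its creator (the refcount is no longer forced to zero
after kref_init()), dsa_tree_get() returns the tree so calls can be
chained, and both get and put tolerate NULL. A usage sketch::

    dst = dsa_tree_touch(index);    /* new tree at refcount 1, or existing +1 */
    if (!dst)
            return -ENOMEM;

    /* ... use the tree ... */

    dsa_tree_put(dst);              /* NULL-safe */
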
static bool dsa_port_is_dsa(struct dsa_port *port)
mutex_lock(&dsa2_mutex);
err = dsa_switch_probe(ds);
+ dsa_tree_put(ds->dst);
mutex_unlock(&dsa2_mutex);
return err;
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features)
{
- bool fixedid = false, gso_partial, encap;
+ bool udpfrag = false, fixedid = false, gso_partial, encap;
struct sk_buff *segs = ERR_PTR(-EINVAL);
const struct net_offload *ops;
+ unsigned int offset = 0;
struct iphdr *iph;
int proto, tot_len;
int nhoff;
segs = ERR_PTR(-EPROTONOSUPPORT);
if (!skb->encapsulation || encap) {
+ udpfrag = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP);
fixedid = !!(skb_shinfo(skb)->gso_type & SKB_GSO_TCP_FIXEDID);
/* fixed ID is invalid if DF bit is not set */
skb = segs;
do {
iph = (struct iphdr *)(skb_mac_header(skb) + nhoff);
- if (skb_is_gso(skb)) {
+ if (udpfrag) {
+ iph->frag_off = htons(offset >> 3);
+ if (skb->next)
+ iph->frag_off |= htons(IP_MF);
+ offset += skb->len - nhoff - ihl;
+ tot_len = skb->len - nhoff;
+ } else if (skb_is_gso(skb)) {
if (!fixedid) {
iph->id = htons(id);
id += skb_shinfo(skb)->gso_segs;
return ip_local_out(net, skb->sk, skb);
}
-static void igmp_gq_timer_expire(unsigned long data)
+static void igmp_gq_timer_expire(struct timer_list *t)
{
- struct in_device *in_dev = (struct in_device *)data;
+ struct in_device *in_dev = from_timer(in_dev, t, mr_gq_timer);
in_dev->mr_gq_running = 0;
igmpv3_send_report(in_dev, NULL);
in_dev_put(in_dev);
}
-static void igmp_ifc_timer_expire(unsigned long data)
+static void igmp_ifc_timer_expire(struct timer_list *t)
{
- struct in_device *in_dev = (struct in_device *)data;
+ struct in_device *in_dev = from_timer(in_dev, t, mr_ifc_timer);
igmpv3_send_cr(in_dev);
if (in_dev->mr_ifc_count) {
}
-static void igmp_timer_expire(unsigned long data)
+static void igmp_timer_expire(struct timer_list *t)
{
- struct ip_mc_list *im = (struct ip_mc_list *)data;
+ struct ip_mc_list *im = from_timer(im, t, timer);
struct in_device *in_dev = im->interface;
spin_lock(&im->lock);
refcount_set(&im->refcnt, 1);
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
- setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
+ timer_setup(&im->timer, igmp_timer_expire, 0);
im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
#endif
ASSERT_RTNL();
#ifdef CONFIG_IP_MULTICAST
- setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
- (unsigned long)in_dev);
- setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
- (unsigned long)in_dev);
+ timer_setup(&in_dev->mr_gq_timer, igmp_gq_timer_expire, 0);
+ timer_setup(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire, 0);
in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
#endif
int cmd);
static void igmpmsg_netlink_event(struct mr_table *mrt, struct sk_buff *pkt);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
-static void ipmr_expire_process(unsigned long arg);
+static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
#define ipmr_for_each_table(mrt, net) \
INIT_LIST_HEAD(&mrt->mfc_cache_list);
INIT_LIST_HEAD(&mrt->mfc_unres_queue);
- setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
- (unsigned long)mrt);
+ timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
mrt->mroute_reg_vif_num = -1;
#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES
}
/* Timer process for the unresolved queue. */
-static void ipmr_expire_process(unsigned long arg)
+static void ipmr_expire_process(struct timer_list *t)
{
- struct mr_table *mrt = (struct mr_table *)arg;
+ struct mr_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
unsigned long now;
unsigned long expires;
struct mfc_cache *c, *next;
}
EXPORT_SYMBOL(skb_udp_tunnel_segment);
-static struct sk_buff *udp4_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features)
+static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ unsigned int mss;
+ __wsum csum;
+ struct udphdr *uh;
+ struct iphdr *iph;
if (skb->encapsulation &&
(skb_shinfo(skb)->gso_type &
- (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM)))
+ (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))) {
segs = skb_udp_tunnel_segment(skb, features, false);
+ goto out;
+ }
+
+ if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+ goto out;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
+
+ /* Do software UFO. Complete and fill in the UDP checksum as
+ * HW cannot do checksum of UDP packets sent as multiple
+ * IP fragments.
+ */
+ uh = udp_hdr(skb);
+ iph = ip_hdr(skb);
+
+ uh->check = 0;
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ uh->check = udp_v4_check(skb->len, iph->saddr, iph->daddr, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* If there is no outer header we can fake a checksum offload
+ * due to the fact that we have already done the checksum in
+ * software prior to segmenting the frame.
+ */
+ if (!skb->encap_hdr_csum)
+ features |= NETIF_F_HW_CSUM;
+
+ /* Fragment the skb. IP headers of the fragments are updated in
+ * inet_gso_segment()
+ */
+ segs = skb_segment(skb, features);
+out:
return segs;
}
static const struct net_offload udpv4_offload = {
.callbacks = {
- .gso_segment = udp4_tunnel_segment,
+ .gso_segment = udp4_ufo_fragment,
.gro_receive = udp4_gro_receive,
.gro_complete = udp4_gro_complete,
},
static void addrconf_dad_work(struct work_struct *w);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id);
static void addrconf_dad_run(struct inet6_dev *idev);
-static void addrconf_rs_timer(unsigned long data);
+static void addrconf_rs_timer(struct timer_list *t);
static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
rwlock_init(&ndev->lock);
ndev->dev = dev;
INIT_LIST_HEAD(&ndev->addr_list);
- setup_timer(&ndev->rs_timer, addrconf_rs_timer,
- (unsigned long)ndev);
+ timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
if (ndev->cnf.stable_secret.initialized)
return 0;
}
-static void addrconf_rs_timer(unsigned long data)
+static void addrconf_rs_timer(struct timer_list *t)
{
- struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct inet6_dev *idev = from_timer(idev, t, rs_timer);
struct net_device *dev = idev->dev;
struct in6_addr lladdr;
* result of redirects, path MTU changes, etc.
*/
-static void fib6_gc_timer_cb(unsigned long arg);
+static void fib6_gc_timer_cb(struct timer_list *t);
#define FOR_WALKERS(net, w) \
list_for_each_entry(w, &(net)->ipv6.fib6_walkers, lh)
spin_unlock_bh(&net->ipv6.fib6_gc_lock);
}
-static void fib6_gc_timer_cb(unsigned long arg)
+static void fib6_gc_timer_cb(struct timer_list *t)
{
- fib6_run_gc(0, (struct net *)arg, true);
+ struct net *arg = from_timer(arg, t, ipv6.ip6_fib_timer);
+
+ fib6_run_gc(0, arg, true);
}
static int __net_init fib6_net_init(struct net *net)
spin_lock_init(&net->ipv6.fib6_gc_lock);
rwlock_init(&net->ipv6.fib6_walker_lock);
INIT_LIST_HEAD(&net->ipv6.fib6_walkers);
- setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
+ timer_setup(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, 0);
net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
if (!net->ipv6.rt6_stats)
static atomic_t fl_size = ATOMIC_INIT(0);
static struct ip6_flowlabel __rcu *fl_ht[FL_HASH_MASK+1];
-static void ip6_fl_gc(unsigned long dummy);
+static void ip6_fl_gc(struct timer_list *unused);
static DEFINE_TIMER(ip6_fl_gc_timer, ip6_fl_gc);
/* FL hash table lock: it protects only of GC */
spin_unlock_bh(&ip6_fl_lock);
}
-static void ip6_fl_gc(unsigned long dummy)
+static void ip6_fl_gc(struct timer_list *unused)
{
int i;
unsigned long now = jiffies;
static int ip6mr_rtm_dumproute(struct sk_buff *skb,
struct netlink_callback *cb);
static void mroute_clean_tables(struct mr6_table *mrt, bool all);
-static void ipmr_expire_process(unsigned long arg);
+static void ipmr_expire_process(struct timer_list *t);
#ifdef CONFIG_IPV6_MROUTE_MULTIPLE_TABLES
#define ip6mr_for_each_table(mrt, net) \
INIT_LIST_HEAD(&mrt->mfc6_unres_queue);
- setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
- (unsigned long)mrt);
+ timer_setup(&mrt->ipmr_expire_timer, ipmr_expire_process, 0);
#ifdef CONFIG_IPV6_PIMSM_V2
mrt->mroute_reg_vif_num = -1;
mod_timer(&mrt->ipmr_expire_timer, jiffies + expires);
}
-static void ipmr_expire_process(unsigned long arg)
+static void ipmr_expire_process(struct timer_list *t)
{
- struct mr6_table *mrt = (struct mr6_table *)arg;
+ struct mr6_table *mrt = from_timer(mrt, t, ipmr_expire_timer);
if (!spin_trylock(&mfc_unres_lock)) {
mod_timer(&mrt->ipmr_expire_timer, jiffies + 1);
static void igmp6_join_group(struct ifmcaddr6 *ma);
static void igmp6_leave_group(struct ifmcaddr6 *ma);
-static void igmp6_timer_handler(unsigned long data);
+static void igmp6_timer_handler(struct timer_list *t);
-static void mld_gq_timer_expire(unsigned long data);
-static void mld_ifc_timer_expire(unsigned long data);
+static void mld_gq_timer_expire(struct timer_list *t);
+static void mld_ifc_timer_expire(struct timer_list *t);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
if (!mc)
return NULL;
- setup_timer(&mc->mca_timer, igmp6_timer_handler, (unsigned long)mc);
+ timer_setup(&mc->mca_timer, igmp6_timer_handler, 0);
mc->mca_addr = *addr;
mc->idev = idev; /* reference taken by caller */
}
}
-static void mld_dad_timer_expire(unsigned long data)
+static void mld_dad_timer_expire(struct timer_list *t)
{
- struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct inet6_dev *idev = from_timer(idev, t, mc_dad_timer);
mld_send_initial_cr(idev);
if (idev->mc_dad_count) {
}
}
-static void mld_gq_timer_expire(unsigned long data)
+static void mld_gq_timer_expire(struct timer_list *t)
{
- struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct inet6_dev *idev = from_timer(idev, t, mc_gq_timer);
idev->mc_gq_running = 0;
mld_send_report(idev, NULL);
in6_dev_put(idev);
}
-static void mld_ifc_timer_expire(unsigned long data)
+static void mld_ifc_timer_expire(struct timer_list *t)
{
- struct inet6_dev *idev = (struct inet6_dev *)data;
+ struct inet6_dev *idev = from_timer(idev, t, mc_ifc_timer);
mld_send_cr(idev);
if (idev->mc_ifc_count) {
mld_ifc_start_timer(idev, 1);
}
-static void igmp6_timer_handler(unsigned long data)
+static void igmp6_timer_handler(struct timer_list *t)
{
- struct ifmcaddr6 *ma = (struct ifmcaddr6 *) data;
+ struct ifmcaddr6 *ma = from_timer(ma, t, mca_timer);
if (mld_in_v1_mode(ma->idev))
igmp6_send(&ma->mca_addr, ma->idev->dev, ICMPV6_MGM_REPORT);
write_lock_bh(&idev->lock);
spin_lock_init(&idev->mc_lock);
idev->mc_gq_running = 0;
- setup_timer(&idev->mc_gq_timer, mld_gq_timer_expire,
- (unsigned long)idev);
+ timer_setup(&idev->mc_gq_timer, mld_gq_timer_expire, 0);
idev->mc_tomb = NULL;
idev->mc_ifc_count = 0;
- setup_timer(&idev->mc_ifc_timer, mld_ifc_timer_expire,
- (unsigned long)idev);
- setup_timer(&idev->mc_dad_timer, mld_dad_timer_expire,
- (unsigned long)idev);
+ timer_setup(&idev->mc_ifc_timer, mld_ifc_timer_expire, 0);
+ timer_setup(&idev->mc_dad_timer, mld_dad_timer_expire, 0);
ipv6_mc_reset(idev);
write_unlock_bh(&idev->lock);
}
return id;
}
+/* This function exists only for tap drivers that must support broken
+ * clients requesting UFO without specifying an IPv6 fragment ID.
+ *
+ * This is similar to ipv6_select_ident() but we use an independent hash
+ * seed to limit information leakage.
+ *
+ * The network header must be set before calling this.
+ */
+__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb)
+{
+ static u32 ip6_proxy_idents_hashrnd __read_mostly;
+ struct in6_addr buf[2];
+ struct in6_addr *addrs;
+ u32 id;
+
+ addrs = skb_header_pointer(skb,
+ skb_network_offset(skb) +
+ offsetof(struct ipv6hdr, saddr),
+ sizeof(buf), buf);
+ if (!addrs)
+ return 0;
+
+ net_get_random_once(&ip6_proxy_idents_hashrnd,
+ sizeof(ip6_proxy_idents_hashrnd));
+
+ id = __ipv6_select_ident(net, ip6_proxy_idents_hashrnd,
+ &addrs[1], &addrs[0]);
+ return htonl(id);
+}
+EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
+
__be32 ipv6_select_ident(struct net *net,
const struct in6_addr *daddr,
const struct in6_addr *saddr)
&match->rt6i_siblings, rt6i_siblings) {
route_choosen--;
if (route_choosen == 0) {
+ struct inet6_dev *idev = sibling->rt6i_idev;
+
+ if (!netif_carrier_ok(sibling->dst.dev) &&
+ idev->cnf.ignore_routes_with_linkdown)
+ break;
if (rt6_score_route(sibling, oif, strict) < 0)
break;
match = sibling;
{
struct net_device *dev = rt->dst.dev;
- if (rt->rt6i_flags & RTF_LOCAL) {
+ if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
/* for copies of local routes, dst->dev needs to be the
* device if it is a master device, the master device if
* device is enslaved, and the loopback as the default
#include <net/ip6_checksum.h>
#include "ip6_offload.h"
-static struct sk_buff *udp6_tunnel_segment(struct sk_buff *skb,
- netdev_features_t features)
+static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
+ netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ unsigned int mss;
+ unsigned int unfrag_ip6hlen, unfrag_len;
+ struct frag_hdr *fptr;
+ u8 *packet_start, *prevhdr;
+ u8 nexthdr;
+ u8 frag_hdr_sz = sizeof(struct frag_hdr);
+ __wsum csum;
+ int tnl_hlen;
+ int err;
+
+ mss = skb_shinfo(skb)->gso_size;
+ if (unlikely(skb->len <= mss))
+ goto out;
if (skb->encapsulation && skb_shinfo(skb)->gso_type &
(SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
segs = skb_udp_tunnel_segment(skb, features, true);
+ else {
+ const struct ipv6hdr *ipv6h;
+ struct udphdr *uh;
+
+ if (!pskb_may_pull(skb, sizeof(struct udphdr)))
+ goto out;
+
+ /* Do software UFO. Complete and fill in the UDP checksum as HW cannot
+ * do checksum of UDP packets sent as multiple IP fragments.
+ */
+
+ uh = udp_hdr(skb);
+ ipv6h = ipv6_hdr(skb);
+
+ uh->check = 0;
+ csum = skb_checksum(skb, 0, skb->len, 0);
+ uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
+ &ipv6h->daddr, csum);
+ if (uh->check == 0)
+ uh->check = CSUM_MANGLED_0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* If there is no outer header we can fake a checksum offload
+ * due to the fact that we have already done the checksum in
+ * software prior to segmenting the frame.
+ */
+ if (!skb->encap_hdr_csum)
+ features |= NETIF_F_HW_CSUM;
+
+ /* Check if there is enough headroom to insert fragment header. */
+ tnl_hlen = skb_tnl_header_len(skb);
+ if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
+ if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
+ goto out;
+ }
+
+ /* Find the unfragmentable header and shift it left by frag_hdr_sz
+ * bytes to insert fragment header.
+ */
+ err = ip6_find_1stfragopt(skb, &prevhdr);
+ if (err < 0)
+ return ERR_PTR(err);
+ unfrag_ip6hlen = err;
+ nexthdr = *prevhdr;
+ *prevhdr = NEXTHDR_FRAGMENT;
+ unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
+ unfrag_ip6hlen + tnl_hlen;
+ packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
+ memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);
+
+ SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
+ skb->mac_header -= frag_hdr_sz;
+ skb->network_header -= frag_hdr_sz;
+
+ fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
+ fptr->nexthdr = nexthdr;
+ fptr->reserved = 0;
+ fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb);
+
+ /* Fragment the skb. ipv6 header and the remaining fields of the
+ * fragment header are updated in ipv6_gso_segment()
+ */
+ segs = skb_segment(skb, features);
+ }
+out:
return segs;
}
static const struct net_offload udpv6_offload = {
.callbacks = {
- .gso_segment = udp6_tunnel_segment,
+ .gso_segment = udp6_ufo_fragment,
.gro_receive = udp6_gro_receive,
.gro_complete = udp6_gro_complete,
},
{
del_timer(&lapb->t1timer);
- lapb->t1timer.function = (TIMER_FUNC_TYPE)lapb_t1timer_expiry;
+ lapb->t1timer.function = lapb_t1timer_expiry;
lapb->t1timer.expires = jiffies + lapb->t1;
add_timer(&lapb->t1timer);
{
del_timer(&lapb->t2timer);
- lapb->t2timer.function = (TIMER_FUNC_TYPE)lapb_t2timer_expiry;
+ lapb->t2timer.function = lapb_t2timer_expiry;
lapb->t2timer.expires = jiffies + lapb->t2;
add_timer(&lapb->t2timer);
mutex_lock(&sta->ampdu_mlme.mtx);
for (i = 0; i < IEEE80211_NUM_TIDS; i++) {
- ___ieee80211_stop_tx_ba_session(sta, i, reason);
___ieee80211_stop_rx_ba_session(sta, i, WLAN_BACK_RECIPIENT,
WLAN_REASON_QSTA_LEAVE_QBSS,
reason != AGG_STOP_DESTROY_STA &&
}
mutex_unlock(&sta->ampdu_mlme.mtx);
+ for (i = 0; i < IEEE80211_NUM_TIDS; i++)
+ ___ieee80211_stop_tx_ba_session(sta, i, reason);
+
/* stopping might queue the work again - so cancel only afterwards */
cancel_work_sync(&sta->ampdu_mlme.work);
struct mesh_path *mpath;
u8 ttl, flags, hopcount;
const u8 *orig_addr;
- u32 orig_sn, metric, metric_txsta, interval;
+ u32 orig_sn, new_metric, orig_metric, last_hop_metric, interval;
bool root_is_gate;
ttl = rann->rann_ttl;
interval = le32_to_cpu(rann->rann_interval);
hopcount = rann->rann_hopcount;
hopcount++;
- metric = le32_to_cpu(rann->rann_metric);
+ orig_metric = le32_to_cpu(rann->rann_metric);
/* Ignore our own RANNs */
if (ether_addr_equal(orig_addr, sdata->vif.addr))
return;
}
- metric_txsta = airtime_link_metric_get(local, sta);
+ last_hop_metric = airtime_link_metric_get(local, sta);
+ new_metric = orig_metric + last_hop_metric;
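+	/* Clamp on u32 wrap-around so the accumulated path metric can never
+	 * overflow past MAX_METRIC.
+	 */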
+ if (new_metric < orig_metric)
+ new_metric = MAX_METRIC;
mpath = mesh_path_lookup(sdata, orig_addr);
if (!mpath) {
}
if (!(SN_LT(mpath->sn, orig_sn)) &&
- !(mpath->sn == orig_sn && metric < mpath->rann_metric)) {
+ !(mpath->sn == orig_sn && new_metric < mpath->rann_metric)) {
rcu_read_unlock();
return;
}
}
mpath->sn = orig_sn;
- mpath->rann_metric = metric + metric_txsta;
+ mpath->rann_metric = new_metric;
mpath->is_root = true;
/* Recording RANNs sender address to send individually
* addressed PREQs destined for root mesh STA */
mesh_path_sel_frame_tx(MPATH_RANN, flags, orig_addr,
orig_sn, 0, NULL, 0, broadcast_addr,
hopcount, ttl, interval,
- metric + metric_txsta, 0, sdata);
+ new_metric, 0, sdata);
}
rcu_read_unlock();
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif);
+ skb = ieee80211_nullfunc_get(&local->hw, &sdata->vif, true);
if (!skb)
return;
EXPORT_SYMBOL(ieee80211_pspoll_get);
struct sk_buff *ieee80211_nullfunc_get(struct ieee80211_hw *hw,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ bool qos_ok)
{
struct ieee80211_hdr_3addr *nullfunc;
struct ieee80211_sub_if_data *sdata;
struct ieee80211_if_managed *ifmgd;
struct ieee80211_local *local;
struct sk_buff *skb;
+ bool qos = false;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION))
return NULL;
ifmgd = &sdata->u.mgd;
local = sdata->local;
- skb = dev_alloc_skb(local->hw.extra_tx_headroom + sizeof(*nullfunc));
+ if (qos_ok) {
+ struct sta_info *sta;
+
+ rcu_read_lock();
+ sta = sta_info_get(sdata, ifmgd->bssid);
+ qos = sta && sta->sta.wme;
+ rcu_read_unlock();
+ }
+
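+	/* The extra two bytes leave room for the QoS control field appended
+	 * below when building a QoS nullfunc frame.
+	 */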
+ skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+ sizeof(*nullfunc) + 2);
if (!skb)
return NULL;
nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS);
+ if (qos) {
+		__le16 qoshdr = cpu_to_le16(7);
+
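+		/* The QoS nullfunc subtype is a superset of the plain nullfunc
+		 * bits; the BUILD_BUG_ON pins that down so OR-ing the subtype
+		 * into frame_control below is sufficient.
+		 */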
+ BUILD_BUG_ON((IEEE80211_STYPE_QOS_NULLFUNC |
+ IEEE80211_STYPE_NULLFUNC) !=
+ IEEE80211_STYPE_QOS_NULLFUNC);
+ nullfunc->frame_control |=
+ cpu_to_le16(IEEE80211_STYPE_QOS_NULLFUNC);
+ skb->priority = 7;
+ skb_set_queue_mapping(skb, IEEE80211_AC_VO);
+		skb_put_data(skb, &qoshdr, sizeof(qoshdr));
+ }
+
memcpy(nullfunc->addr1, ifmgd->bssid, ETH_ALEN);
memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
memcpy(nullfunc->addr3, ifmgd->bssid, ETH_ALEN);
nd->handler(nd);
}
-static void ncsi_channel_monitor(unsigned long data)
+static void ncsi_channel_monitor(struct timer_list *t)
{
- struct ncsi_channel *nc = (struct ncsi_channel *)data;
+ struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
struct ncsi_package *np = nc->package;
struct ncsi_dev_priv *ndp = np->ndp;
struct ncsi_channel_mode *ncm;
nc->package = np;
nc->state = NCSI_CHANNEL_INACTIVE;
nc->monitor.enabled = false;
- setup_timer(&nc->monitor.timer,
- ncsi_channel_monitor, (unsigned long)nc);
+ timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
spin_lock_init(&nc->lock);
INIT_LIST_HEAD(&nc->link);
for (index = 0; index < NCSI_CAP_MAX; index++)
return NULL;
}
-static void ncsi_request_timeout(unsigned long data)
+static void ncsi_request_timeout(struct timer_list *t)
{
- struct ncsi_request *nr = (struct ncsi_request *)data;
+ struct ncsi_request *nr = from_timer(nr, t, timer);
struct ncsi_dev_priv *ndp = nr->ndp;
unsigned long flags;
for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
ndp->requests[i].id = i;
ndp->requests[i].ndp = ndp;
- setup_timer(&ndp->requests[i].timer,
- ncsi_request_timeout,
- (unsigned long)&ndp->requests[i]);
+ timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
}
spin_lock_irqsave(&ncsi_dev_lock, flags);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);
-static void nf_ct_expectation_timed_out(unsigned long ul_expect)
+static void nf_ct_expectation_timed_out(struct timer_list *t)
{
- struct nf_conntrack_expect *exp = (void *)ul_expect;
+ struct nf_conntrack_expect *exp = from_timer(exp, t, timeout);
spin_lock_bh(&nf_conntrack_expect_lock);
nf_ct_unlink_expect(exp);
/* two references : one for hash insert, one for the timer */
refcount_add(2, &exp->use);
- setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
- (unsigned long)exp);
+ timer_setup(&exp->timeout, nf_ct_expectation_timed_out, 0);
helper = rcu_dereference_protected(master_help->helper,
lockdep_is_held(&nf_conntrack_expect_lock));
if (helper) {
call_rcu_bh(&inst->rcu, nfulnl_instance_free_rcu);
}
-static void nfulnl_timer(unsigned long data);
+static void nfulnl_timer(struct timer_list *t);
static struct nfulnl_instance *
instance_create(struct net *net, u_int16_t group_num,
/* needs to be two, since we _put() after creation */
refcount_set(&inst->use, 2);
- setup_timer(&inst->timer, nfulnl_timer, (unsigned long)inst);
+ timer_setup(&inst->timer, nfulnl_timer, 0);
inst->net = get_net(net);
inst->peer_user_ns = user_ns;
}
static void
-nfulnl_timer(unsigned long data)
+nfulnl_timer(struct timer_list *t)
{
- struct nfulnl_instance *inst = (struct nfulnl_instance *)data;
+ struct nfulnl_instance *inst = from_timer(inst, t, timer);
spin_lock_bh(&inst->lock);
if (inst->skb)
sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
}
-static void idletimer_tg_expired(unsigned long data)
+static void idletimer_tg_expired(struct timer_list *t)
{
- struct idletimer_tg *timer = (struct idletimer_tg *) data;
+ struct idletimer_tg *timer = from_timer(timer, t, timer);
pr_debug("timer %s expired\n", timer->attr.attr.name);
list_add(&info->timer->entry, &idletimer_tg_list);
- setup_timer(&info->timer->timer, idletimer_tg_expired,
- (unsigned long) info->timer);
+ timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
info->timer->refcnt = 1;
mod_timer(&info->timer->timer,
return XT_CONTINUE;
}
-static void led_timeout_callback(unsigned long data)
+static void led_timeout_callback(struct timer_list *t)
{
- struct xt_led_info_internal *ledinternal = (struct xt_led_info_internal *)data;
+ struct xt_led_info_internal *ledinternal = from_timer(ledinternal, t,
+ timer);
led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
}
/* See if we need to set up a timer */
if (ledinfo->delay > 0)
- setup_timer(&ledinternal->timer, led_timeout_callback,
- (unsigned long)ledinternal);
+ timer_setup(&ledinternal->timer, led_timeout_callback, 0);
list_add_tail(&ledinternal->list, &xt_led_triggers);
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
- sk->sk_timer.function = (TIMER_FUNC_TYPE)nr_destroy_timer;
+ sk->sk_timer.function = nr_destroy_timer;
sk->sk_timer.expires = jiffies + 2 * HZ;
add_timer(&sk->sk_timer);
} else
#include <net/netrom.h>
#include <linux/init.h>
-static void nr_loopback_timer(unsigned long);
+static void nr_loopback_timer(struct timer_list *);
static struct sk_buff_head loopback_queue;
static DEFINE_TIMER(loopback_timer, nr_loopback_timer);
return 1;
}
-static void nr_loopback_timer(unsigned long param)
+static void nr_loopback_timer(struct timer_list *unused)
{
struct sk_buff *skb;
ax25_address *nr_dest;
timer_setup(&nr->idletimer, nr_idletimer_expiry, 0);
/* initialized by sock_init_data */
- sk->sk_timer.function = (TIMER_FUNC_TYPE)nr_heartbeat_expiry;
+ sk->sk_timer.function = nr_heartbeat_expiry;
}
void nr_start_t1timer(struct sock *sk)
}
/* NCI command timer function */
-static void nci_cmd_timer(unsigned long arg)
+static void nci_cmd_timer(struct timer_list *t)
{
- struct nci_dev *ndev = (void *) arg;
+ struct nci_dev *ndev = from_timer(ndev, t, cmd_timer);
atomic_set(&ndev->cmd_cnt, 1);
queue_work(ndev->cmd_wq, &ndev->cmd_work);
}
/* NCI data exchange timer function */
-static void nci_data_timer(unsigned long arg)
+static void nci_data_timer(struct timer_list *t)
{
- struct nci_dev *ndev = (void *) arg;
+ struct nci_dev *ndev = from_timer(ndev, t, data_timer);
set_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
queue_work(ndev->rx_wq, &ndev->rx_work);
skb_queue_head_init(&ndev->rx_q);
skb_queue_head_init(&ndev->tx_q);
- setup_timer(&ndev->cmd_timer, nci_cmd_timer,
- (unsigned long) ndev);
- setup_timer(&ndev->data_timer, nci_data_timer,
- (unsigned long) ndev);
+ timer_setup(&ndev->cmd_timer, nci_cmd_timer, 0);
+ timer_setup(&ndev->data_timer, nci_data_timer, 0);
mutex_init(&ndev->req_lock);
INIT_LIST_HEAD(&ndev->conn_info_list);
const struct dp_upcall_info *upcall_info,
uint32_t cutlen)
{
+ unsigned int gso_type = skb_shinfo(skb)->gso_type;
+ struct sw_flow_key later_key;
struct sk_buff *segs, *nskb;
int err;
if (segs == NULL)
return -EINVAL;
+ if (gso_type & SKB_GSO_UDP) {
+ /* The initial flow key extracted by ovs_flow_key_extract()
+ * in this case is for a first fragment, so we need to
+ * properly mark later fragments.
+ */
+ later_key = *key;
+ later_key.ip.frag = OVS_FRAG_TYPE_LATER;
+ }
+
/* Queue all of the segments. */
skb = segs;
do {
+ if (gso_type & SKB_GSO_UDP && skb != segs)
+ key = &later_key;
+
err = queue_userspace_packet(dp, skb, key, upcall_info, cutlen);
if (err)
break;
key->ip.frag = OVS_FRAG_TYPE_LATER;
return 0;
}
- if (nh->frag_off & htons(IP_MF))
+ if (nh->frag_off & htons(IP_MF) ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
key->ip.frag = OVS_FRAG_TYPE_FIRST;
else
key->ip.frag = OVS_FRAG_TYPE_NONE;
if (key->ip.frag == OVS_FRAG_TYPE_LATER)
return 0;
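+	/* A UFO skb carries the first fragment's headers, so key it as a
+	 * first fragment.
+	 */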
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ key->ip.frag = OVS_FRAG_TYPE_FIRST;
+
/* Transport layer. */
if (key->ip.proto == NEXTHDR_TCP) {
if (tcphdr_ok(skb)) {
#define MAX_ACTIONS_BUFSIZE (32 * 1024)
-static struct sw_flow_actions *nla_alloc_flow_actions(int size, bool log)
+static struct sw_flow_actions *nla_alloc_flow_actions(int size)
{
struct sw_flow_actions *sfa;
- if (size > MAX_ACTIONS_BUFSIZE) {
- OVS_NLERR(log, "Flow action size %u bytes exceeds max", size);
- return ERR_PTR(-EINVAL);
- }
+ WARN_ON_ONCE(size > MAX_ACTIONS_BUFSIZE);
sfa = kmalloc(sizeof(*sfa) + size, GFP_KERNEL);
if (!sfa)
new_acts_size = ksize(*sfa) * 2;
if (new_acts_size > MAX_ACTIONS_BUFSIZE) {
- if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size)
+ if ((MAX_ACTIONS_BUFSIZE - next_offset) < req_size) {
+ OVS_NLERR(log, "Flow action size exceeds max %u",
+ MAX_ACTIONS_BUFSIZE);
return ERR_PTR(-EMSGSIZE);
+ }
new_acts_size = MAX_ACTIONS_BUFSIZE;
}
- acts = nla_alloc_flow_actions(new_acts_size, log);
+ acts = nla_alloc_flow_actions(new_acts_size);
if (IS_ERR(acts))
return (void *)acts;
{
int err;
- *sfa = nla_alloc_flow_actions(nla_len(attr), log);
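+	/* Clamp the initial allocation to the hard cap; the resize path
+	 * reports -EMSGSIZE if more room is ever needed.
+	 */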
+ *sfa = nla_alloc_flow_actions(min(nla_len(attr), MAX_ACTIONS_BUFSIZE));
if (IS_ERR(*sfa))
return PTR_ERR(*sfa);
atomic_long_set(&rollover->num, 0);
atomic_long_set(&rollover->num_huge, 0);
atomic_long_set(&rollover->num_failed, 0);
- po->rollover = rollover;
}
if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
__dev_remove_pack(&po->prot_hook);
po->fanout = match;
+ po->rollover = rollover;
+ rollover = NULL;
refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
__fanout_link(sk, po);
err = 0;
}
out:
- if (err && rollover) {
- kfree_rcu(rollover, rcu);
- po->rollover = NULL;
- }
+ kfree(rollover);
mutex_unlock(&fanout_mutex);
return err;
}
list_del(&f->list);
else
f = NULL;
-
- if (po->rollover) {
- kfree_rcu(po->rollover, rcu);
- po->rollover = NULL;
- }
}
mutex_unlock(&fanout_mutex);
synchronize_net();
if (f) {
+ kfree(po->rollover);
fanout_release_data(f);
kfree(f);
}
if (need_rehook) {
if (po->running) {
rcu_read_unlock();
+ /* prevents packet_notifier() from calling
+ * register_prot_hook()
+ */
+ po->num = 0;
__unregister_prot_hook(sk, true);
rcu_read_lock();
dev_curr = po->prot_hook.dev;
dev->ifindex);
}
+ BUG_ON(po->running);
po->num = proto;
po->prot_hook.type = proto;
void *data = &val;
union tpacket_stats_u st;
struct tpacket_rollover_stats rstats;
- struct packet_rollover *rollover;
if (level != SOL_PACKET)
return -ENOPROTOOPT;
0);
break;
case PACKET_ROLLOVER_STATS:
- rcu_read_lock();
- rollover = rcu_dereference(po->rollover);
- if (rollover) {
- rstats.tp_all = atomic_long_read(&rollover->num);
- rstats.tp_huge = atomic_long_read(&rollover->num_huge);
- rstats.tp_failed = atomic_long_read(&rollover->num_failed);
- data = &rstats;
- lv = sizeof(rstats);
- }
- rcu_read_unlock();
- if (!rollover)
+ if (!po->rollover)
return -EINVAL;
+ rstats.tp_all = atomic_long_read(&po->rollover->num);
+ rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
+ rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
+ data = &rstats;
+ lv = sizeof(rstats);
break;
case PACKET_TX_HAS_OFF:
val = po->tp_tx_has_off;
struct packet_rollover {
int sock;
- struct rcu_head rcu;
atomic_long_t num;
atomic_long_t num_huge;
atomic_long_t num_failed;
{
del_timer(&neigh->ftimer);
- neigh->ftimer.function = (TIMER_FUNC_TYPE)rose_ftimer_expiry;
+ neigh->ftimer.function = rose_ftimer_expiry;
neigh->ftimer.expires =
jiffies + msecs_to_jiffies(sysctl_rose_link_fail_timeout);
{
del_timer(&neigh->t0timer);
- neigh->t0timer.function = (TIMER_FUNC_TYPE)rose_t0timer_expiry;
+ neigh->t0timer.function = rose_t0timer_expiry;
neigh->t0timer.expires =
jiffies + msecs_to_jiffies(sysctl_rose_restart_request_timeout);
{
del_timer(&sk->sk_timer);
- sk->sk_timer.function = (TIMER_FUNC_TYPE)rose_heartbeat_expiry;
+ sk->sk_timer.function = rose_heartbeat_expiry;
sk->sk_timer.expires = jiffies + 5 * HZ;
add_timer(&sk->sk_timer);
del_timer(&rose->timer);
- rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t1;
add_timer(&rose->timer);
del_timer(&rose->timer);
- rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t2;
add_timer(&rose->timer);
del_timer(&rose->timer);
- rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->t3;
add_timer(&rose->timer);
del_timer(&rose->timer);
- rose->timer.function = (TIMER_FUNC_TYPE)rose_timer_expiry;
+ rose->timer.function = rose_timer_expiry;
rose->timer.expires = jiffies + rose->hb;
add_timer(&rose->timer);
del_timer(&rose->idletimer);
if (rose->idle > 0) {
- rose->idletimer.function = (TIMER_FUNC_TYPE)rose_idletimer_expiry;
+ rose->idletimer.function = rose_idletimer_expiry;
rose->idletimer.expires = jiffies + rose->idle;
add_timer(&rose->idletimer);
bool upgrade)
{
struct rxrpc_conn_parameters cp;
+ struct rxrpc_call_params p;
struct rxrpc_call *call;
struct rxrpc_sock *rx = rxrpc_sk(sock->sk);
int ret;
if (key && !key->payload.data[0])
key = NULL; /* a no-security key */
+ memset(&p, 0, sizeof(p));
+ p.user_call_ID = user_call_ID;
+ p.tx_total_len = tx_total_len;
+
memset(&cp, 0, sizeof(cp));
cp.local = rx->local;
cp.key = key;
cp.exclusive = false;
cp.upgrade = upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, tx_total_len,
- gfp);
+ call = rxrpc_new_client_call(rx, &cp, srx, &p, gfp);
/* The socket has been unlocked. */
if (!IS_ERR(call)) {
call->notify_rx = notify_rx;
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
+ /* We want to kill off all connections from a service socket
+ * as fast as possible because we can't share these; client
+ * sockets, on the other hand, can share an endpoint.
+ */
+ switch (sk->sk_state) {
+ case RXRPC_SERVER_BOUND:
+ case RXRPC_SERVER_BOUND2:
+ case RXRPC_SERVER_LISTENING:
+ case RXRPC_SERVER_LISTEN_DISABLED:
+ rx->local->service_closed = true;
+ break;
+ }
+
spin_lock_bh(&sk->sk_receive_queue.lock);
sk->sk_state = RXRPC_CLOSE;
spin_unlock_bh(&sk->sk_receive_queue.lock);
rxrpc_release_calls_on_socket(rx);
flush_workqueue(rxrpc_workqueue);
rxrpc_purge_queue(&sk->sk_receive_queue);
+ rxrpc_queue_work(&rx->local->rxnet->service_conn_reaper);
+ rxrpc_queue_work(&rx->local->rxnet->client_conn_reaper);
rxrpc_put_local(rx->local);
rx->local = NULL;
struct list_head conn_proc_list; /* List of conns in this namespace for proc */
struct list_head service_conns; /* Service conns in this namespace */
rwlock_t conn_lock; /* Lock for ->conn_proc_list, ->service_conns */
- struct delayed_work service_conn_reaper;
+ struct work_struct service_conn_reaper;
+ struct timer_list service_conn_reap_timer;
unsigned int nr_client_conns;
unsigned int nr_active_client_conns;
bool kill_all_client_conns;
+ bool live;
spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */
spinlock_t client_conn_discard_lock; /* Prevent multiple discarders */
struct list_head waiting_client_conns;
struct list_head active_client_conns;
struct list_head idle_client_conns;
- struct delayed_work client_conn_reaper;
+ struct work_struct client_conn_reaper;
+ struct timer_list client_conn_reap_timer;
struct list_head local_endpoints;
struct mutex local_mutex; /* Lock for ->local_endpoints */
rwlock_t services_lock; /* lock for services list */
int debug_id; /* debug ID for printks */
bool dead;
+ bool service_closed; /* Service socket closed */
struct sockaddr_rxrpc srx; /* local address */
};
RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */
RXRPC_CONN_COUNTED, /* Counted by rxrpc_nr_client_conns */
RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */
+ RXRPC_CONN_FINAL_ACK_0, /* Need final ACK for channel 0 */
+ RXRPC_CONN_FINAL_ACK_1, /* Need final ACK for channel 1 */
+ RXRPC_CONN_FINAL_ACK_2, /* Need final ACK for channel 2 */
+ RXRPC_CONN_FINAL_ACK_3, /* Need final ACK for channel 3 */
};
+#define RXRPC_CONN_FINAL_ACK_MASK ((1UL << RXRPC_CONN_FINAL_ACK_0) | \
+ (1UL << RXRPC_CONN_FINAL_ACK_1) | \
+ (1UL << RXRPC_CONN_FINAL_ACK_2) | \
+ (1UL << RXRPC_CONN_FINAL_ACK_3))
+
/*
* Events that can be raised upon a connection.
*/
#define RXRPC_ACTIVE_CHANS_MASK ((1 << RXRPC_MAXCALLS) - 1)
struct list_head waiting_calls; /* Calls waiting for channels */
struct rxrpc_channel {
+ unsigned long final_ack_at; /* Time at which to issue final ACK */
struct rxrpc_call __rcu *call; /* Active call */
u32 call_id; /* ID of current call */
u32 call_counter; /* Call ID counter */
};
} channels[RXRPC_MAXCALLS];
+ struct timer_list timer; /* Conn event timer */
struct work_struct processor; /* connection event processor */
union {
struct rb_node client_node; /* Node in local->client_conns */
enum rxrpc_call_event {
RXRPC_CALL_EV_ACK, /* need to generate ACK */
RXRPC_CALL_EV_ABORT, /* need to generate abort */
- RXRPC_CALL_EV_TIMER, /* Timer expired */
RXRPC_CALL_EV_RESEND, /* Tx resend required */
RXRPC_CALL_EV_PING, /* Ping send required */
+ RXRPC_CALL_EV_EXPIRED, /* Expiry occurred */
+ RXRPC_CALL_EV_ACK_LOST, /* ACK may be lost, send ping */
};
/*
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct mutex user_mutex; /* User access mutex */
- ktime_t ack_at; /* When deferred ACK needs to happen */
- ktime_t resend_at; /* When next resend needs to happen */
- ktime_t ping_at; /* When next to send a ping */
- ktime_t expire_at; /* When the call times out */
+ unsigned long ack_at; /* When deferred ACK needs to happen */
+ unsigned long ack_lost_at; /* When ACK is figured as lost */
+ unsigned long resend_at; /* When next resend needs to happen */
+ unsigned long ping_at; /* When next to send a ping */
+ unsigned long keepalive_at; /* When next to send a keepalive ping */
+ unsigned long expect_rx_by; /* When we expect to get a packet by */
+ unsigned long expect_req_by; /* When we expect to get a request DATA packet by */
+ unsigned long expect_term_by; /* When we expect call termination by */
+ u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
+ u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
struct timer_list timer; /* Combined event timer */
struct work_struct processor; /* Event processor */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
ktime_t acks_latest_ts; /* Timestamp of latest ACK received */
rxrpc_serial_t acks_latest; /* serial number of latest ACK received */
rxrpc_seq_t acks_lowest_nak; /* Lowest NACK in the buffer (or ==tx_hard_ack) */
+ rxrpc_seq_t acks_lost_top; /* tx_top at the time lost-ack ping sent */
+ rxrpc_serial_t acks_lost_ping; /* Serial number of probe ACK */
};
/*
u8 cumulative_acks;
};
+/*
+ * sendmsg() cmsg-specified parameters.
+ */
+enum rxrpc_command {
+ RXRPC_CMD_SEND_DATA, /* send data message */
+ RXRPC_CMD_SEND_ABORT, /* request abort generation */
+ RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
+ RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
+};
+
+struct rxrpc_call_params {
+ s64 tx_total_len; /* Total Tx data length (if send data) */
+ unsigned long user_call_ID; /* User's call ID */
+ struct {
+ u32 hard; /* Maximum lifetime (sec) */
+ u32 idle; /* Max time since last data packet (msec) */
+ u32 normal; /* Max time since last call packet (msec) */
+ } timeouts;
+ u8 nr_timeouts; /* Number of timeouts specified */
+};
+
+struct rxrpc_send_params {
+ struct rxrpc_call_params call;
+ u32 abort_code; /* Abort code to Tx (if abort) */
+ enum rxrpc_command command : 8; /* The command to implement */
+ bool exclusive; /* Shared or exclusive call */
+ bool upgrade; /* If the connection is upgradeable */
+};
+
#include <trace/events/rxrpc.h>
/*
/*
* call_event.c
*/
-void __rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
enum rxrpc_propose_ack_trace);
void rxrpc_process_call(struct work_struct *);
+static inline void rxrpc_reduce_call_timer(struct rxrpc_call *call,
+ unsigned long expire_at,
+ unsigned long now,
+ enum rxrpc_timer_trace why)
+{
+ trace_rxrpc_timer(call, why, now);
+ timer_reduce(&call->timer, expire_at);
+}
+
/*
* call_object.c
*/
extern struct kmem_cache *rxrpc_call_jar;
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long);
-struct rxrpc_call *rxrpc_alloc_call(gfp_t);
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *, gfp_t);
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_conn_parameters *,
struct sockaddr_rxrpc *,
- unsigned long, s64, gfp_t);
+ struct rxrpc_call_params *, gfp_t);
int rxrpc_retry_client_call(struct rxrpc_sock *,
struct rxrpc_call *,
struct rxrpc_conn_parameters *,
*/
extern unsigned int rxrpc_max_client_connections;
extern unsigned int rxrpc_reap_client_connections;
-extern unsigned int rxrpc_conn_idle_client_expiry;
-extern unsigned int rxrpc_conn_idle_client_fast_expiry;
+extern unsigned long rxrpc_conn_idle_client_expiry;
+extern unsigned long rxrpc_conn_idle_client_fast_expiry;
extern struct idr rxrpc_client_conn_ids;
void rxrpc_destroy_client_conn_ids(void);
* conn_object.c
*/
extern unsigned int rxrpc_connection_expiry;
+extern unsigned int rxrpc_closed_conn_expiry;
struct rxrpc_connection *rxrpc_alloc_connection(gfp_t);
struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *,
rxrpc_put_service_conn(conn);
}
+static inline void rxrpc_reduce_conn_timer(struct rxrpc_connection *conn,
+ unsigned long expire_at)
+{
+ timer_reduce(&conn->timer, expire_at);
+}
+
/*
* conn_service.c
*/
* misc.c
*/
extern unsigned int rxrpc_max_backlog __read_mostly;
-extern unsigned int rxrpc_requested_ack_delay;
-extern unsigned int rxrpc_soft_ack_delay;
-extern unsigned int rxrpc_idle_ack_delay;
+extern unsigned long rxrpc_requested_ack_delay;
+extern unsigned long rxrpc_soft_ack_delay;
+extern unsigned long rxrpc_idle_ack_delay;
extern unsigned int rxrpc_rx_window_size;
extern unsigned int rxrpc_rx_mtu;
extern unsigned int rxrpc_rx_jumbo_max;
-extern unsigned int rxrpc_resend_timeout;
+extern unsigned long rxrpc_resend_timeout;
extern const s8 rxrpc_ack_priority[];
/*
* output.c
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *, bool);
+int rxrpc_send_ack_packet(struct rxrpc_call *, bool, rxrpc_serial_t *);
int rxrpc_send_abort_packet(struct rxrpc_call *);
int rxrpc_send_data_packet(struct rxrpc_call *, struct sk_buff *, bool);
void rxrpc_reject_packets(struct rxrpc_local *);
/* Now it gets complicated, because calls get registered with the
* socket here, particularly if a user ID is preassigned by the user.
*/
- call = rxrpc_alloc_call(gfp);
+ call = rxrpc_alloc_call(rx, gfp);
if (!call)
return -ENOMEM;
call->flags |= (1 << RXRPC_CALL_IS_SERVICE);
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-/*
- * Set the timer
- */
-void __rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now)
-{
- unsigned long t_j, now_j = jiffies;
- ktime_t t;
- bool queue = false;
-
- if (call->state < RXRPC_CALL_COMPLETE) {
- t = call->expire_at;
- if (!ktime_after(t, now)) {
- trace_rxrpc_timer(call, why, now, now_j);
- queue = true;
- goto out;
- }
-
- if (!ktime_after(call->resend_at, now)) {
- call->resend_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
- queue = true;
- } else if (ktime_before(call->resend_at, t)) {
- t = call->resend_at;
- }
-
- if (!ktime_after(call->ack_at, now)) {
- call->ack_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
- queue = true;
- } else if (ktime_before(call->ack_at, t)) {
- t = call->ack_at;
- }
-
- if (!ktime_after(call->ping_at, now)) {
- call->ping_at = call->expire_at;
- if (!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
- queue = true;
- } else if (ktime_before(call->ping_at, t)) {
- t = call->ping_at;
- }
-
- t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
- t_j += jiffies;
-
- /* We have to make sure that the calculated jiffies value falls
- * at or after the nsec value, or we may loop ceaselessly
- * because the timer times out, but we haven't reached the nsec
- * timeout yet.
- */
- t_j++;
-
- if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
- mod_timer(&call->timer, t_j);
- trace_rxrpc_timer(call, why, now, now_j);
- }
- }
-
-out:
- if (queue)
- rxrpc_queue_call(call);
-}
-
-/*
- * Set the timer
- */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
- ktime_t now)
-{
- read_lock_bh(&call->state_lock);
- __rxrpc_set_timer(call, why, now);
- read_unlock_bh(&call->state_lock);
-}
-
/*
* Propose a PING ACK be sent.
*/
!test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
rxrpc_queue_call(call);
} else {
- ktime_t now = ktime_get_real();
- ktime_t ping_at = ktime_add_ms(now, rxrpc_idle_ack_delay);
+ unsigned long now = jiffies;
+ unsigned long ping_at = now + rxrpc_idle_ack_delay;
- if (ktime_before(ping_at, call->ping_at)) {
- call->ping_at = ping_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_ping, now);
+ if (time_before(ping_at, call->ping_at)) {
+ WRITE_ONCE(call->ping_at, ping_at);
+ rxrpc_reduce_call_timer(call, ping_at, now,
+ rxrpc_timer_set_for_ping);
}
}
}
enum rxrpc_propose_ack_trace why)
{
enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
- unsigned int expiry = rxrpc_soft_ack_delay;
- ktime_t now, ack_at;
+ unsigned long expiry = rxrpc_soft_ack_delay;
s8 prior = rxrpc_ack_priority[ack_reason];
/* Pings are handled specially because we don't want to accidentally
background)
rxrpc_queue_call(call);
} else {
- now = ktime_get_real();
- ack_at = ktime_add_ms(now, expiry);
- if (ktime_before(ack_at, call->ack_at)) {
- call->ack_at = ack_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
+ unsigned long now = jiffies, ack_at;
+
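+		/* Base the delayed-ACK delay on the measured RTT once we have
+		 * a sample; fall back to the fixed expiry otherwise.
+		 */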
+ if (call->peer->rtt_usage > 0)
+ ack_at = nsecs_to_jiffies(call->peer->rtt);
+ else
+ ack_at = expiry;
+
+		ack_at += now;
+ if (time_before(ack_at, call->ack_at)) {
+ WRITE_ONCE(call->ack_at, ack_at);
+ rxrpc_reduce_call_timer(call, ack_at, now,
+ rxrpc_timer_set_for_ack);
}
}
/*
* Perform retransmission of NAK'd and unack'd packets.
*/
-static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
+static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
struct rxrpc_skb_priv *sp;
struct sk_buff *skb;
+ unsigned long resend_at;
rxrpc_seq_t cursor, seq, top;
- ktime_t max_age, oldest, ack_ts;
+ ktime_t now, max_age, oldest, ack_ts, timeout, min_timeo;
int ix;
u8 annotation, anno_type, retrans = 0, unacked = 0;
_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);
- max_age = ktime_sub_ms(now, rxrpc_resend_timeout);
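+	/* Retransmit after roughly 1.5 RTTs once we have at least two
+	 * samples, but never sooner than four jiffies.
+	 */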
+ if (call->peer->rtt_usage > 1)
+ timeout = ns_to_ktime(call->peer->rtt * 3 / 2);
+ else
+		timeout = ns_to_ktime(jiffies_to_nsecs(rxrpc_resend_timeout));
+ min_timeo = ns_to_ktime((1000000000 / HZ) * 4);
+ if (ktime_before(timeout, min_timeo))
+ timeout = min_timeo;
+
+ now = ktime_get_real();
+ max_age = ktime_sub(now, timeout);
spin_lock_bh(&call->lock);
ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
}
- call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
+	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
+ resend_at += jiffies + rxrpc_resend_timeout;
+ WRITE_ONCE(call->resend_at, resend_at);
if (unacked)
rxrpc_congestion_timeout(call);
* retransmitting data.
*/
if (!retrans) {
- rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+ rxrpc_reduce_call_timer(call, resend_at, now,
+ rxrpc_timer_set_for_resend);
spin_unlock_bh(&call->lock);
ack_ts = ktime_sub(now, call->acks_latest_ts);
if (ktime_to_ns(ack_ts) < call->peer->rtt)
goto out;
rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
rxrpc_propose_ack_ping_for_lost_ack);
- rxrpc_send_ack_packet(call, true);
+ rxrpc_send_ack_packet(call, true, NULL);
goto out;
}
{
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, processor);
- ktime_t now;
+ rxrpc_serial_t *send_ack;
+ unsigned long now, next, t;
rxrpc_see_call(call);
goto out_put;
}
- now = ktime_get_real();
- if (ktime_before(call->expire_at, now)) {
+ /* Work out if any timeouts tripped */
+ now = jiffies;
+ t = READ_ONCE(call->expect_rx_by);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
+ t = READ_ONCE(call->expect_req_by);
+ if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
+ time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
+ t = READ_ONCE(call->expect_term_by);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
+ set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
+ }
+
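+	/* Each soft timeout that has tripped is disarmed by pushing it into
+	 * the far future; cmpxchg avoids clobbering a concurrent re-arm.
+	 */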
+ t = READ_ONCE(call->ack_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
+ cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_ACK, &call->events);
+ }
+
+ t = READ_ONCE(call->ack_lost_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
+ cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
+ }
+
+ t = READ_ONCE(call->keepalive_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
+ cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, true,
+ rxrpc_propose_ack_ping_for_keepalive);
+ set_bit(RXRPC_CALL_EV_PING, &call->events);
+ }
+
+ t = READ_ONCE(call->ping_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
+ cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_PING, &call->events);
+ }
+
+ t = READ_ONCE(call->resend_at);
+ if (time_after_eq(now, t)) {
+ trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
+ cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
+ set_bit(RXRPC_CALL_EV_RESEND, &call->events);
+ }
+
+ /* Process events */
+ if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
set_bit(RXRPC_CALL_EV_ABORT, &call->events);
goto recheck_state;
}
- if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
+ send_ack = NULL;
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
+ call->acks_lost_top = call->tx_top;
+ rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, 0, true, false,
+ rxrpc_propose_ack_ping_for_lost_ack);
+ send_ack = &call->acks_lost_ping;
+ }
+
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
+ send_ack) {
if (call->ackr_reason) {
- rxrpc_send_ack_packet(call, false);
+ rxrpc_send_ack_packet(call, false, send_ack);
goto recheck_state;
}
}
if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
- rxrpc_send_ack_packet(call, true);
+ rxrpc_send_ack_packet(call, true, NULL);
goto recheck_state;
}
goto recheck_state;
}
- rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
+ /* Make sure the timer is restarted */
+ next = call->expect_rx_by;
+
+#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }
+
+ set(call->expect_req_by);
+ set(call->expect_term_by);
+ set(call->ack_at);
+ set(call->ack_lost_at);
+ set(call->resend_at);
+ set(call->keepalive_at);
+ set(call->ping_at);
+
+ now = jiffies;
+ if (time_after_eq(now, next))
+ goto recheck_state;
+
+ rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);
/* other events may have been raised since we started checking */
if (call->events && call->state < RXRPC_CALL_COMPLETE) {
struct kmem_cache *rxrpc_call_jar;
-static void rxrpc_call_timer_expired(unsigned long _call)
+static void rxrpc_call_timer_expired(struct timer_list *t)
{
- struct rxrpc_call *call = (struct rxrpc_call *)_call;
+ struct rxrpc_call *call = from_timer(call, t, timer);
_enter("%d", call->debug_id);
- if (call->state < RXRPC_CALL_COMPLETE)
- rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
+ if (call->state < RXRPC_CALL_COMPLETE) {
+ trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
+ rxrpc_queue_call(call);
+ }
}
+static struct lock_class_key rxrpc_call_user_mutex_lock_class_key;
+
/*
* find an extant server call
* - called in process context with IRQs enabled
/*
* allocate a new call
*/
-struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
+struct rxrpc_call *rxrpc_alloc_call(struct rxrpc_sock *rx, gfp_t gfp)
{
struct rxrpc_call *call;
goto nomem_2;
mutex_init(&call->user_mutex);
- setup_timer(&call->timer, rxrpc_call_timer_expired,
- (unsigned long)call);
+
+ /* Prevent lockdep reporting a deadlock false positive between the afs
+ * filesystem and sys_sendmsg() via the mmap sem.
+ */
+ if (rx->sk.sk_kern_sock)
+ lockdep_set_class(&call->user_mutex,
+ &rxrpc_call_user_mutex_lock_class_key);
+
+ timer_setup(&call->timer, rxrpc_call_timer_expired, 0);
INIT_WORK(&call->processor, &rxrpc_process_call);
INIT_LIST_HEAD(&call->link);
INIT_LIST_HEAD(&call->chan_wait_link);
atomic_set(&call->usage, 1);
call->debug_id = atomic_inc_return(&rxrpc_debug_id);
call->tx_total_len = -1;
+ call->next_rx_timo = 20 * HZ;
+ call->next_req_timo = 1 * HZ;
memset(&call->sock_node, 0xed, sizeof(call->sock_node));
/*
* Allocate a new client call.
*/
-static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
+static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
+ struct sockaddr_rxrpc *srx,
gfp_t gfp)
{
struct rxrpc_call *call;
_enter("");
- call = rxrpc_alloc_call(gfp);
+ call = rxrpc_alloc_call(rx, gfp);
if (!call)
return ERR_PTR(-ENOMEM);
call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
*/
static void rxrpc_start_call_timer(struct rxrpc_call *call)
{
- ktime_t now = ktime_get_real(), expire_at;
-
- expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
- call->expire_at = expire_at;
- call->ack_at = expire_at;
- call->ping_at = expire_at;
- call->resend_at = expire_at;
- call->timer.expires = jiffies + LONG_MAX / 2;
- rxrpc_set_timer(call, rxrpc_timer_begin, now);
+ unsigned long now = jiffies;
+ unsigned long j = now + MAX_JIFFY_OFFSET;
+
+ call->ack_at = j;
+ call->ack_lost_at = j;
+ call->resend_at = j;
+ call->ping_at = j;
+ call->expect_rx_by = j;
+ call->expect_req_by = j;
+ call->expect_term_by = j;
+ call->timer.expires = now;
}
/*
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
struct rxrpc_conn_parameters *cp,
struct sockaddr_rxrpc *srx,
- unsigned long user_call_ID,
- s64 tx_total_len,
+ struct rxrpc_call_params *p,
gfp_t gfp)
__releases(&rx->sk.sk_lock.slock)
{
const void *here = __builtin_return_address(0);
int ret;
- _enter("%p,%lx", rx, user_call_ID);
+ _enter("%p,%lx", rx, p->user_call_ID);
- call = rxrpc_alloc_client_call(srx, gfp);
+ call = rxrpc_alloc_client_call(rx, srx, gfp);
if (IS_ERR(call)) {
release_sock(&rx->sk);
_leave(" = %ld", PTR_ERR(call));
return call;
}
- call->tx_total_len = tx_total_len;
+ call->tx_total_len = p->tx_total_len;
trace_rxrpc_call(call, rxrpc_call_new_client, atomic_read(&call->usage),
- here, (const void *)user_call_ID);
+ here, (const void *)p->user_call_ID);
/* We need to protect a partially set up call against the user as we
* will be acting outside the socket lock.
parent = *pp;
xcall = rb_entry(parent, struct rxrpc_call, sock_node);
- if (user_call_ID < xcall->user_call_ID)
+ if (p->user_call_ID < xcall->user_call_ID)
pp = &(*pp)->rb_left;
- else if (user_call_ID > xcall->user_call_ID)
+ else if (p->user_call_ID > xcall->user_call_ID)
pp = &(*pp)->rb_right;
else
goto error_dup_user_ID;
}
rcu_assign_pointer(call->socket, rx);
- call->user_call_ID = user_call_ID;
+ call->user_call_ID = p->user_call_ID;
__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);
rxrpc_get_call(call, rxrpc_call_got_userid);
rb_link_node(&call->sock_node, parent, pp);
__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
-__read_mostly unsigned int rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
-__read_mostly unsigned int rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
+__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
/*
* We use machine-unique IDs for our client connections.
trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);
+ /* Cancel the final ACK on the previous call if it hasn't been sent yet
+ * as the DATA packet will implicitly ACK it.
+ */
+ clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+
write_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_TX_LASTQ, &call->flags))
call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
_enter("{%d,%lx},", call->debug_id, call->user_call_ID);
- rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper.work);
+ rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
rxrpc_cull_active_client_conns(rxnet);
ret = rxrpc_get_client_conn(call, cp, srx, gfp);
}
}
+/*
+ * Set the reap timer.
+ */
+static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
+{
+ unsigned long now = jiffies;
+ unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
+
+ if (rxnet->live)
+ timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
+}
+
/*
* Disconnect a client call.
*/
goto out_2;
}
+ /* Schedule the final ACK to be transmitted in a short while so that it
+ * can be skipped if we find a follow-on call. The first DATA packet
+ * of the follow on call will implicitly ACK this call.
+ */
+ if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
+ unsigned long final_ack_at = jiffies + 2;
+
+ WRITE_ONCE(chan->final_ack_at, final_ack_at);
+ smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
+ set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
+ rxrpc_reduce_conn_timer(conn, final_ack_at);
+ }
+
/* Things are more complex and we need the cache lock. We might be
* able to simply idle the conn or it might now be lurking on the wait
* list. It might even get moved back to the active list whilst we're
list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
if (rxnet->idle_client_conns.next == &conn->cache_link &&
!rxnet->kill_all_client_conns)
- queue_delayed_work(rxrpc_workqueue,
- &rxnet->client_conn_reaper,
- rxrpc_conn_idle_client_expiry);
+ rxrpc_set_client_reap_timer(rxnet);
} else {
trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
{
struct rxrpc_connection *conn;
struct rxrpc_net *rxnet =
- container_of(to_delayed_work(work),
- struct rxrpc_net, client_conn_reaper);
+ container_of(work, struct rxrpc_net, client_conn_reaper);
unsigned long expiry, conn_expires_at, now;
unsigned int nr_conns;
bool did_discard = false;
expiry = rxrpc_conn_idle_client_expiry;
if (nr_conns > rxrpc_reap_client_connections)
expiry = rxrpc_conn_idle_client_fast_expiry;
+ if (conn->params.local->service_closed)
+ expiry = rxrpc_closed_conn_expiry * HZ;
conn_expires_at = conn->idle_timestamp + expiry;
*/
_debug("not yet");
if (!rxnet->kill_all_client_conns)
- queue_delayed_work(rxrpc_workqueue,
- &rxnet->client_conn_reaper,
- conn_expires_at - now);
+ timer_reduce(&rxnet->client_conn_reap_timer,
+ conn_expires_at);
out:
spin_unlock(&rxnet->client_conn_cache_lock);
rxnet->kill_all_client_conns = true;
spin_unlock(&rxnet->client_conn_cache_lock);
- cancel_delayed_work(&rxnet->client_conn_reaper);
+ del_timer_sync(&rxnet->client_conn_reap_timer);
- if (!queue_delayed_work(rxrpc_workqueue, &rxnet->client_conn_reaper, 0))
+ if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
_debug("destroy: queue failed");
_leave("");
* Retransmit terminal ACK or ABORT of the previous call.
*/
static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ unsigned int channel)
{
- struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL;
struct rxrpc_channel *chan;
struct msghdr msg;
struct kvec iov;
_enter("%d", conn->debug_id);
- chan = &conn->channels[sp->hdr.cid & RXRPC_CHANNELMASK];
+ chan = &conn->channels[channel];
/* If the last call got moved on whilst we were waiting to run, just
* ignore this packet.
call_id = READ_ONCE(chan->last_call);
/* Sync with __rxrpc_disconnect_call() */
smp_rmb();
- if (call_id != sp->hdr.callNumber)
+ if (skb && call_id != sp->hdr.callNumber)
return;
msg.msg_name = &conn->params.peer->srx.transport;
msg.msg_controllen = 0;
msg.msg_flags = 0;
- pkt.whdr.epoch = htonl(sp->hdr.epoch);
- pkt.whdr.cid = htonl(sp->hdr.cid);
- pkt.whdr.callNumber = htonl(sp->hdr.callNumber);
+ pkt.whdr.epoch = htonl(conn->proto.epoch);
+ pkt.whdr.cid = htonl(conn->proto.cid);
+ pkt.whdr.callNumber = htonl(call_id);
pkt.whdr.seq = 0;
pkt.whdr.type = chan->last_type;
pkt.whdr.flags = conn->out_clientflag;
mtu = conn->params.peer->if_mtu;
mtu -= conn->params.peer->hdrsize;
pkt.ack.bufferSpace = 0;
- pkt.ack.maxSkew = htons(skb->priority);
- pkt.ack.firstPacket = htonl(chan->last_seq);
- pkt.ack.previousPacket = htonl(chan->last_seq - 1);
- pkt.ack.serial = htonl(sp->hdr.serial);
- pkt.ack.reason = RXRPC_ACK_DUPLICATE;
+ pkt.ack.maxSkew = htons(skb ? skb->priority : 0);
+ pkt.ack.firstPacket = htonl(chan->last_seq + 1);
+ pkt.ack.previousPacket = htonl(chan->last_seq);
+ pkt.ack.serial = htonl(skb ? sp->hdr.serial : 0);
+ pkt.ack.reason = skb ? RXRPC_ACK_DUPLICATE : RXRPC_ACK_IDLE;
pkt.ack.nAcks = 0;
pkt.info.rxMTU = htonl(rxrpc_rx_mtu);
pkt.info.maxMTU = htonl(mtu);
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
case RXRPC_PACKET_TYPE_ACK:
- rxrpc_conn_retransmit_call(conn, skb);
+ rxrpc_conn_retransmit_call(conn, skb,
+ sp->hdr.cid & RXRPC_CHANNELMASK);
return 0;
case RXRPC_PACKET_TYPE_BUSY:
_leave(" [aborted]");
}
+/*
+ * Process delayed final ACKs that we haven't subsumed into a subsequent call.
+ */
+static void rxrpc_process_delayed_final_acks(struct rxrpc_connection *conn)
+{
+ unsigned long j = jiffies, next_j;
+ unsigned int channel;
+ bool set;
+
+again:
+ next_j = j + LONG_MAX;
+ set = false;
+ for (channel = 0; channel < RXRPC_MAXCALLS; channel++) {
+ struct rxrpc_channel *chan = &conn->channels[channel];
+ unsigned long ack_at;
+
+ if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags))
+ continue;
+
+ smp_rmb(); /* vs rxrpc_disconnect_client_call */
+ ack_at = READ_ONCE(chan->final_ack_at);
+
+ if (time_before(j, ack_at)) {
+ if (time_before(ack_at, next_j)) {
+ next_j = ack_at;
+ set = true;
+ }
+ continue;
+ }
+
+ if (test_and_clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel,
+ &conn->flags))
+ rxrpc_conn_retransmit_call(conn, NULL, channel);
+ }
+
+ j = jiffies;
+ if (time_before_eq(next_j, j))
+ goto again;
+ if (set)
+ rxrpc_reduce_conn_timer(conn, next_j);
+}
+
/*
* connection-level event processor
*/
if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events))
rxrpc_secure_connection(conn);
+ /* Process delayed ACKs whose time has come. */
+ if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
+ rxrpc_process_delayed_final_acks(conn);
+
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
/*
* Time till a connection expires after last use (in seconds).
*/
-unsigned int rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_connection_expiry = 10 * 60;
+unsigned int __read_mostly rxrpc_closed_conn_expiry = 10;
static void rxrpc_destroy_connection(struct rcu_head *);
+static void rxrpc_connection_timer(struct timer_list *timer)
+{
+ struct rxrpc_connection *conn =
+ container_of(timer, struct rxrpc_connection, timer);
+
+ rxrpc_queue_conn(conn);
+}
+
/*
* allocate a new connection
*/
INIT_LIST_HEAD(&conn->cache_link);
spin_lock_init(&conn->channel_lock);
INIT_LIST_HEAD(&conn->waiting_calls);
+ timer_setup(&conn->timer, &rxrpc_connection_timer, 0);
INIT_WORK(&conn->processor, &rxrpc_process_connection);
INIT_LIST_HEAD(&conn->proc_link);
INIT_LIST_HEAD(&conn->link);
return conn;
}
+/*
+ * Set the service connection reap timer.
+ */
+static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet,
+ unsigned long reap_at)
+{
+ if (rxnet->live)
+ timer_reduce(&rxnet->service_conn_reap_timer, reap_at);
+}
+
/*
* Release a service connection
*/
void rxrpc_put_service_conn(struct rxrpc_connection *conn)
{
- struct rxrpc_net *rxnet;
const void *here = __builtin_return_address(0);
int n;
n = atomic_dec_return(&conn->usage);
trace_rxrpc_conn(conn, rxrpc_conn_put_service, n, here);
ASSERTCMP(n, >=, 0);
- if (n == 0) {
- rxnet = conn->params.local->rxnet;
- rxrpc_queue_delayed_work(&rxnet->service_conn_reaper, 0);
- }
+ if (n == 1)
+ rxrpc_set_service_reap_timer(conn->params.local->rxnet,
+ jiffies + rxrpc_connection_expiry);
}
/*
_net("DESTROY CONN %d", conn->debug_id);
+ del_timer_sync(&conn->timer);
rxrpc_purge_queue(&conn->rx_queue);
conn->security->clear(conn);
{
struct rxrpc_connection *conn, *_p;
struct rxrpc_net *rxnet =
- container_of(to_delayed_work(work),
- struct rxrpc_net, service_conn_reaper);
- unsigned long reap_older_than, earliest, idle_timestamp, now;
+ container_of(work, struct rxrpc_net, service_conn_reaper);
+ unsigned long expire_at, earliest, idle_timestamp, now;
LIST_HEAD(graveyard);
_enter("");
now = jiffies;
- reap_older_than = now - rxrpc_connection_expiry * HZ;
- earliest = ULONG_MAX;
+ earliest = now + MAX_JIFFY_OFFSET;
write_lock(&rxnet->conn_lock);
list_for_each_entry_safe(conn, _p, &rxnet->service_conns, link) {
if (conn->state == RXRPC_CONN_SERVICE_PREALLOC)
continue;
- idle_timestamp = READ_ONCE(conn->idle_timestamp);
- _debug("reap CONN %d { u=%d,t=%ld }",
- conn->debug_id, atomic_read(&conn->usage),
- (long)reap_older_than - (long)idle_timestamp);
-
- if (time_after(idle_timestamp, reap_older_than)) {
- if (time_before(idle_timestamp, earliest))
- earliest = idle_timestamp;
- continue;
+ if (rxnet->live) {
+ idle_timestamp = READ_ONCE(conn->idle_timestamp);
+ expire_at = idle_timestamp + rxrpc_connection_expiry * HZ;
+ if (conn->params.local->service_closed)
+ expire_at = idle_timestamp + rxrpc_closed_conn_expiry * HZ;
+
+ _debug("reap CONN %d { u=%d,t=%ld }",
+ conn->debug_id, atomic_read(&conn->usage),
+ (long)expire_at - (long)now);
+
+ if (time_before(now, expire_at)) {
+ if (time_before(expire_at, earliest))
+ earliest = expire_at;
+ continue;
+ }
}
/* The usage count sits at 1 whilst the object is unused on the
*/
if (atomic_cmpxchg(&conn->usage, 1, 0) != 1)
continue;
+ trace_rxrpc_conn(conn, rxrpc_conn_reap_service, 0, 0);
if (rxrpc_conn_is_client(conn))
BUG();
}
write_unlock(&rxnet->conn_lock);
- if (earliest != ULONG_MAX) {
- _debug("reschedule reaper %ld", (long) earliest - now);
+ if (earliest != now + MAX_JIFFY_OFFSET) {
+ _debug("reschedule reaper %ld", (long)earliest - (long)now);
ASSERT(time_after(earliest, now));
- rxrpc_queue_delayed_work(&rxnet->client_conn_reaper,
- earliest - now);
+ rxrpc_set_service_reap_timer(rxnet, earliest);
}
while (!list_empty(&graveyard)) {
rxrpc_destroy_all_client_connections(rxnet);
- rxrpc_connection_expiry = 0;
- cancel_delayed_work(&rxnet->client_conn_reaper);
- rxrpc_queue_delayed_work(&rxnet->client_conn_reaper, 0);
+ del_timer_sync(&rxnet->service_conn_reap_timer);
+ rxrpc_queue_work(&rxnet->service_conn_reaper);
flush_workqueue(rxrpc_workqueue);
write_lock(&rxnet->conn_lock);
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
struct rxrpc_ack_summary summary = { 0 };
+ unsigned long now, timo;
rxrpc_seq_t top = READ_ONCE(call->tx_top);
if (call->ackr_reason) {
spin_lock_bh(&call->lock);
call->ackr_reason = 0;
- call->resend_at = call->expire_at;
- call->ack_at = call->expire_at;
spin_unlock_bh(&call->lock);
- rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
- ktime_get_real());
+ now = jiffies;
+ timo = now + MAX_JIFFY_OFFSET;
+ WRITE_ONCE(call->resend_at, timo);
+ WRITE_ONCE(call->ack_at, timo);
+ trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
}
if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
if (state >= RXRPC_CALL_COMPLETE)
return;
+ if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST) {
+ unsigned long timo = READ_ONCE(call->next_req_timo);
+ unsigned long now, expect_req_by;
+
+ if (timo) {
+ now = jiffies;
+ expect_req_by = now + timo;
+ WRITE_ONCE(call->expect_req_by, expect_req_by);
+ rxrpc_reduce_call_timer(call, expect_req_by, now,
+ rxrpc_timer_set_for_idle);
+ }
+ }
+
/* Received data implicitly ACKs all of the request packets we sent
* when we're acting as a client.
*/
orig_serial, ack_serial, sent_at, resp_time);
}
+/*
+ * Process the response to a ping that we sent to find out if we lost an ACK.
+ *
+ * If we got back a ping response that indicates a lower tx_top than what we
+ * had at the time of the ping transmission, we adjudge all the DATA packets
+ * sent between the response tx_top and the ping-time tx_top to have been lost.
+ */
+static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
+{
+ rxrpc_seq_t top, bottom, seq;
+ bool resend = false;
+
+ spin_lock_bh(&call->lock);
+
+ bottom = call->tx_hard_ack + 1;
+ top = call->acks_lost_top;
+ if (before(bottom, top)) {
+ for (seq = bottom; before_eq(seq, top); seq++) {
+ int ix = seq & RXRPC_RXTX_BUFF_MASK;
+ u8 annotation = call->rxtx_annotations[ix];
+ u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;
+
+ if (anno_type != RXRPC_TX_ANNO_UNACK)
+ continue;
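+			/* Re-flag this still-unacked packet for retransmission. */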
+ annotation &= ~RXRPC_TX_ANNO_MASK;
+ annotation |= RXRPC_TX_ANNO_RETRANS;
+ call->rxtx_annotations[ix] = annotation;
+ resend = true;
+ }
+ }
+
+ spin_unlock_bh(&call->lock);
+
+ if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+ rxrpc_queue_call(call);
+}
+
/*
* Process a ping response.
*/
smp_rmb();
ping_serial = call->ping_serial;
+ if (orig_serial == call->acks_lost_ping)
+ rxrpc_input_check_for_lost_ack(call);
+
if (!test_bit(RXRPC_CALL_PINGING, &call->flags) ||
before(orig_serial, ping_serial))
return;
struct sk_buff *skb, u16 skew)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned long timo;
_enter("%p,%p", call, skb);
+ timo = READ_ONCE(call->next_rx_timo);
+ if (timo) {
+ unsigned long now = jiffies, expect_rx_by;
+
+		expect_rx_by = now + timo;
+ WRITE_ONCE(call->expect_rx_by, expect_rx_by);
+ rxrpc_reduce_call_timer(call, expect_rx_by, now,
+ rxrpc_timer_set_for_normal);
+ }
+
switch (sp->hdr.type) {
case RXRPC_PACKET_TYPE_DATA:
rxrpc_input_data(call, skb, skew);
*/
unsigned int rxrpc_max_backlog __read_mostly = 10;
-/*
- * Maximum lifetime of a call (in mx).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * 1000;
-
/*
* How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in ms).
+ * packet with RXRPC_REQUEST_ACK set (in jiffies).
*/
-unsigned int rxrpc_requested_ack_delay = 1;
+unsigned long rxrpc_requested_ack_delay = 1;
/*
- * How long to wait before scheduling an ACK with subtype DELAY (in ms).
+ * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
*
* We use this when we've received new data packets. If those packets aren't
* all consumed within this time we will send a DELAY ACK if an ACK was not
* requested to let the sender know it doesn't need to resend.
*/
-unsigned int rxrpc_soft_ack_delay = 1 * 1000;
+unsigned long rxrpc_soft_ack_delay = HZ;
/*
- * How long to wait before scheduling an ACK with subtype IDLE (in ms).
+ * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
*
* We use this when we've consumed some previously soft-ACK'd packets when
* further packets aren't immediately received to decide when to send an IDLE
 * ACK to let the other end know that it can free up its Tx buffer space.
*/
-unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
+unsigned long rxrpc_idle_ack_delay = HZ / 2;
/*
* Receive window size in packets. This indicates the maximum number of
/*
- * Time till packet resend (in milliseconds).
+ * Time till packet resend (in jiffies).
*/
-unsigned int rxrpc_resend_timeout = 4 * 1000;
+unsigned long rxrpc_resend_timeout = 4 * HZ;
const s8 rxrpc_ack_priority[] = {
[0] = 0,
unsigned int rxrpc_net_id;
+static void rxrpc_client_conn_reap_timeout(struct timer_list *timer)
+{
+ struct rxrpc_net *rxnet =
+ container_of(timer, struct rxrpc_net, client_conn_reap_timer);
+
+ if (rxnet->live)
+ rxrpc_queue_work(&rxnet->client_conn_reaper);
+}
+
+static void rxrpc_service_conn_reap_timeout(struct timer_list *timer)
+{
+ struct rxrpc_net *rxnet =
+ container_of(timer, struct rxrpc_net, service_conn_reap_timer);
+
+ if (rxnet->live)
+ rxrpc_queue_work(&rxnet->service_conn_reaper);
+}
+
/*
* Initialise a per-network namespace record.
*/
struct rxrpc_net *rxnet = rxrpc_net(net);
int ret;
+ rxnet->live = true;
get_random_bytes(&rxnet->epoch, sizeof(rxnet->epoch));
rxnet->epoch |= RXRPC_RANDOM_EPOCH;
INIT_LIST_HEAD(&rxnet->conn_proc_list);
INIT_LIST_HEAD(&rxnet->service_conns);
rwlock_init(&rxnet->conn_lock);
- INIT_DELAYED_WORK(&rxnet->service_conn_reaper,
- rxrpc_service_connection_reaper);
+ INIT_WORK(&rxnet->service_conn_reaper,
+ rxrpc_service_connection_reaper);
+ timer_setup(&rxnet->service_conn_reap_timer,
+ rxrpc_service_conn_reap_timeout, 0);
rxnet->nr_client_conns = 0;
rxnet->nr_active_client_conns = 0;
INIT_LIST_HEAD(&rxnet->waiting_client_conns);
INIT_LIST_HEAD(&rxnet->active_client_conns);
INIT_LIST_HEAD(&rxnet->idle_client_conns);
- INIT_DELAYED_WORK(&rxnet->client_conn_reaper,
- rxrpc_discard_expired_client_conns);
+ INIT_WORK(&rxnet->client_conn_reaper,
+ rxrpc_discard_expired_client_conns);
+ timer_setup(&rxnet->client_conn_reap_timer,
+ rxrpc_client_conn_reap_timeout, 0);
INIT_LIST_HEAD(&rxnet->local_endpoints);
mutex_init(&rxnet->local_mutex);
return 0;
err_proc:
+ rxnet->live = false;
return ret;
}
{
struct rxrpc_net *rxnet = rxrpc_net(net);
+ rxnet->live = false;
rxrpc_destroy_all_calls(rxnet);
rxrpc_destroy_all_connections(rxnet);
rxrpc_destroy_all_locals(rxnet);
__be32 abort_code;
};
+/*
+ * Arrange for a keepalive ping a certain time after we last transmitted. This
+ * lets the far side know we're still interested in this call and helps keep
+ * the route through any intervening firewall open.
+ *
+ * Receiving a response to the ping will prevent the ->expect_rx_by timer from
+ * expiring.
+ */
+static void rxrpc_set_keepalive(struct rxrpc_call *call)
+{
+ unsigned long now = jiffies, keepalive_at = call->next_rx_timo / 6;
+
+ keepalive_at += now;
+ WRITE_ONCE(call->keepalive_at, keepalive_at);
+ rxrpc_reduce_call_timer(call, keepalive_at, now,
+ rxrpc_timer_set_for_keepalive);
+}
+
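A quick worked example of the 1/6 ratio above (the 30 second window is an invented value for illustration):

    /* Illustration only: with next_rx_timo = 30 * HZ, each transmission
     * schedules a keepalive ping 5 seconds later, so several pings can
     * be attempted before the expect_rx_by timer could expire. */
    unsigned long keepalive_at = jiffies + (30 * HZ) / 6;  /* now + 5s */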
/*
* Fill out an ACK packet.
*/
/*
* Send an ACK call packet.
*/
-int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping)
+int rxrpc_send_ack_packet(struct rxrpc_call *call, bool ping,
+ rxrpc_serial_t *_serial)
{
struct rxrpc_connection *conn = NULL;
struct rxrpc_ack_buffer *pkt;
ntohl(pkt->ack.firstPacket),
ntohl(pkt->ack.serial),
pkt->ack.reason, pkt->ack.nAcks);
+ if (_serial)
+ *_serial = serial;
if (ping) {
call->ping_serial = serial;
call->ackr_seen = top;
spin_unlock_bh(&call->lock);
}
+
+ rxrpc_set_keepalive(call);
}
out:
* ACKs if a DATA packet appears to have been lost.
*/
if (!(sp->hdr.flags & RXRPC_LAST_PACKET) &&
- (retrans ||
+ (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events) ||
+ retrans ||
call->cong_mode == RXRPC_CALL_SLOW_START ||
(call->peer->rtt_usage < 3 && sp->hdr.seq & 1) ||
ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
if (whdr.flags & RXRPC_REQUEST_ACK) {
call->peer->rtt_last_req = now;
trace_rxrpc_rtt_tx(call, rxrpc_rtt_tx_data, serial);
+ if (call->peer->rtt_usage > 1) {
+ unsigned long nowj = jiffies, ack_lost_at;
+
+ ack_lost_at = nsecs_to_jiffies(2 * call->peer->rtt);
+ if (ack_lost_at < 1)
+ ack_lost_at = 1;
+
+ ack_lost_at += nowj;
+ WRITE_ONCE(call->ack_lost_at, ack_lost_at);
+ rxrpc_reduce_call_timer(call, ack_lost_at, nowj,
+ rxrpc_timer_set_for_lost_ack);
+ }
}
}
+
+ rxrpc_set_keepalive(call);
+
_leave(" = %d [%u]", ret, call->peer->maxdata);
return ret;
trace_rxrpc_receive(call, rxrpc_receive_end, 0, call->rx_top);
ASSERTCMP(call->rx_hard_ack, ==, call->rx_top);
+#if 0 // TODO: May want to transmit final ACK under some circumstances anyway
if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) {
rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, 0, serial, true, false,
rxrpc_propose_ack_terminal_ack);
- rxrpc_send_ack_packet(call, false);
+ rxrpc_send_ack_packet(call, false, NULL);
}
+#endif
write_lock_bh(&call->state_lock);
case RXRPC_CALL_SERVER_RECV_REQUEST:
call->tx_phase = true;
call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
- call->ack_at = call->expire_at;
+ call->expect_req_by = jiffies + MAX_JIFFY_OFFSET;
write_unlock_bh(&call->state_lock);
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial, false, true,
rxrpc_propose_ack_processing_op);
after_eq(top, call->ackr_seen + 2) ||
(hard_ack == top && after(hard_ack, call->ackr_consumed)))
rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, 0, serial,
- true, false,
+ true, true,
rxrpc_propose_ack_rotate_rx);
- if (call->ackr_reason)
- rxrpc_send_ack_packet(call, false);
+ if (call->ackr_reason && call->ackr_reason != RXRPC_ACK_DELAY)
+ rxrpc_send_ack_packet(call, false, NULL);
}
}
#include <net/af_rxrpc.h>
#include "ar-internal.h"
-enum rxrpc_command {
- RXRPC_CMD_SEND_DATA, /* send data message */
- RXRPC_CMD_SEND_ABORT, /* request abort generation */
- RXRPC_CMD_ACCEPT, /* [server] accept incoming call */
- RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */
-};
-
-struct rxrpc_send_params {
- s64 tx_total_len; /* Total Tx data length (if send data) */
- unsigned long user_call_ID; /* User's call ID */
- u32 abort_code; /* Abort code to Tx (if abort) */
- enum rxrpc_command command : 8; /* The command to implement */
- bool exclusive; /* Shared or exclusive call */
- bool upgrade; /* If the connection is upgradeable */
-};
-
/*
* Wait for space to appear in the Tx queue or a signal to occur.
*/
rxrpc_notify_end_tx_t notify_end_tx)
{
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
+ unsigned long now;
rxrpc_seq_t seq = sp->hdr.seq;
int ret, ix;
u8 annotation = RXRPC_TX_ANNO_UNACK;
break;
case RXRPC_CALL_SERVER_ACK_REQUEST:
call->state = RXRPC_CALL_SERVER_SEND_REPLY;
- call->ack_at = call->expire_at;
+ now = jiffies;
+ WRITE_ONCE(call->ack_at, now + MAX_JIFFY_OFFSET);
if (call->ackr_reason == RXRPC_ACK_DELAY)
call->ackr_reason = 0;
- __rxrpc_set_timer(call, rxrpc_timer_init_for_send_reply,
- ktime_get_real());
+ trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now);
if (!last)
break;
/* Fall through */
_debug("need instant resend %d", ret);
rxrpc_instant_resend(call, ix);
} else {
- ktime_t now = ktime_get_real(), resend_at;
-
- resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
-
- if (ktime_before(resend_at, call->resend_at)) {
- call->resend_at = resend_at;
- rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
- }
+ unsigned long now = jiffies, resend_at;
+
+ if (call->peer->rtt_usage > 1)
+ resend_at = nsecs_to_jiffies(call->peer->rtt * 3 / 2);
+ else
+ resend_at = rxrpc_resend_timeout;
+ if (resend_at < 1)
+ resend_at = 1;
+
+	resend_at += now;
+ WRITE_ONCE(call->resend_at, resend_at);
+ rxrpc_reduce_call_timer(call, resend_at, now,
+ rxrpc_timer_set_for_send);
}
rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
do {
/* Check to see if there's a ping ACK to reply to. */
if (call->ackr_reason == RXRPC_ACK_PING_RESPONSE)
- rxrpc_send_ack_packet(call, false);
+ rxrpc_send_ack_packet(call, false, NULL);
if (!skb) {
size_t size, chunk, max, space;
if (msg->msg_flags & MSG_CMSG_COMPAT) {
if (len != sizeof(u32))
return -EINVAL;
- p->user_call_ID = *(u32 *)CMSG_DATA(cmsg);
+ p->call.user_call_ID = *(u32 *)CMSG_DATA(cmsg);
} else {
if (len != sizeof(unsigned long))
return -EINVAL;
- p->user_call_ID = *(unsigned long *)
+ p->call.user_call_ID = *(unsigned long *)
CMSG_DATA(cmsg);
}
got_user_ID = true;
break;
case RXRPC_TX_LENGTH:
- if (p->tx_total_len != -1 || len != sizeof(__s64))
+ if (p->call.tx_total_len != -1 || len != sizeof(__s64))
+ return -EINVAL;
+ p->call.tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
+ if (p->call.tx_total_len < 0)
return -EINVAL;
- p->tx_total_len = *(__s64 *)CMSG_DATA(cmsg);
- if (p->tx_total_len < 0)
+ break;
+
+ case RXRPC_SET_CALL_TIMEOUT:
+ if (len & 3 || len < 4 || len > 12)
return -EINVAL;
+ memcpy(&p->call.timeouts, CMSG_DATA(cmsg), len);
+ p->call.nr_timeouts = len / 4;
+ if (p->call.timeouts.hard > INT_MAX / HZ)
+ return -ERANGE;
+ if (p->call.nr_timeouts >= 2 && p->call.timeouts.idle > 60 * 60 * 1000)
+ return -ERANGE;
+ if (p->call.nr_timeouts >= 3 && p->call.timeouts.normal > 60 * 60 * 1000)
+ return -ERANGE;
break;
default:
if (!got_user_ID)
return -EINVAL;
- if (p->tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
+ if (p->call.tx_total_len != -1 && p->command != RXRPC_CMD_SEND_DATA)
return -EINVAL;
_leave(" = 0");
return 0;
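A hypothetical userspace counterpart to the parsing above: a single cmsg whose payload is one to three u32s, in the (hard, idle, normal) order implied by the memcpy() into p->call.timeouts, with values interpreted by the kernel-side conversion shown further below. The helper name and the numeric values are invented for illustration:

    #include <stdint.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/rxrpc.h>

    static void set_call_timeouts(struct msghdr *msg, void *buf, size_t buflen)
    {
            uint32_t timeouts[3] = { 10000, 2000, 5000 };  /* hard, idle, normal */
            struct cmsghdr *cmsg;

            memset(buf, 0, buflen);
            msg->msg_control = buf;
            msg->msg_controllen = buflen;
            cmsg = CMSG_FIRSTHDR(msg);
            cmsg->cmsg_level = SOL_RXRPC;
            cmsg->cmsg_type = RXRPC_SET_CALL_TIMEOUT;
            cmsg->cmsg_len = CMSG_LEN(sizeof(timeouts));   /* data len: 4, 8 or 12 */
            memcpy(CMSG_DATA(cmsg), timeouts, sizeof(timeouts));
            msg->msg_controllen = cmsg->cmsg_len;
    }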
cp.exclusive = rx->exclusive | p->exclusive;
cp.upgrade = p->upgrade;
cp.service_id = srx->srx_service;
- call = rxrpc_new_client_call(rx, &cp, srx, p->user_call_ID,
- p->tx_total_len, GFP_KERNEL);
+ call = rxrpc_new_client_call(rx, &cp, srx, &p->call, GFP_KERNEL);
/* The socket is now unlocked */
_leave(" = %p\n", call);
{
enum rxrpc_call_state state;
struct rxrpc_call *call;
+ unsigned long now, j;
int ret;
struct rxrpc_send_params p = {
- .tx_total_len = -1,
- .user_call_ID = 0,
- .abort_code = 0,
- .command = RXRPC_CMD_SEND_DATA,
- .exclusive = false,
- .upgrade = true,
+ .call.tx_total_len = -1,
+ .call.user_call_ID = 0,
+ .call.nr_timeouts = 0,
+ .abort_code = 0,
+ .command = RXRPC_CMD_SEND_DATA,
+ .exclusive = false,
+ .upgrade = false,
};
_enter("");
ret = -EINVAL;
if (rx->sk.sk_state != RXRPC_SERVER_LISTENING)
goto error_release_sock;
- call = rxrpc_accept_call(rx, p.user_call_ID, NULL);
+ call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL);
/* The socket is now unlocked. */
if (IS_ERR(call))
return PTR_ERR(call);
- rxrpc_put_call(call, rxrpc_call_put);
- return 0;
+ ret = 0;
+ goto out_put_unlock;
}
- call = rxrpc_find_call_by_user_ID(rx, p.user_call_ID);
+ call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID);
if (!call) {
ret = -EBADSLT;
if (p.command != RXRPC_CMD_SEND_DATA)
goto error_put;
}
- if (p.tx_total_len != -1) {
+ if (p.call.tx_total_len != -1) {
ret = -EINVAL;
if (call->tx_total_len != -1 ||
call->tx_pending ||
call->tx_top != 0)
goto error_put;
- call->tx_total_len = p.tx_total_len;
+ call->tx_total_len = p.call.tx_total_len;
+ }
+ }
+
+ switch (p.call.nr_timeouts) {
+ case 3:
+ j = msecs_to_jiffies(p.call.timeouts.normal);
+ if (p.call.timeouts.normal > 0 && j == 0)
+ j = 1;
+ WRITE_ONCE(call->next_rx_timo, j);
+ /* Fall through */
+ case 2:
+ j = msecs_to_jiffies(p.call.timeouts.idle);
+ if (p.call.timeouts.idle > 0 && j == 0)
+ j = 1;
+ WRITE_ONCE(call->next_req_timo, j);
+ /* Fall through */
+ case 1:
+ if (p.call.timeouts.hard > 0) {
+ j = msecs_to_jiffies(p.call.timeouts.hard);
+ now = jiffies;
+ j += now;
+ WRITE_ONCE(call->expect_term_by, j);
+ rxrpc_reduce_call_timer(call, j, now,
+ rxrpc_timer_set_for_hard);
}
+ break;
}
state = READ_ONCE(call->state);
ret = rxrpc_send_data(rx, call, msg, len, NULL);
}
+out_put_unlock:
mutex_unlock(&call->user_mutex);
error_put:
rxrpc_put_call(call, rxrpc_call_put);
static const unsigned int thirtytwo = 32;
static const unsigned int n_65535 = 65535;
static const unsigned int n_max_acks = RXRPC_RXTX_BUFF_SIZE - 1;
+static const unsigned long one_jiffy = 1;
+static const unsigned long max_jiffies = MAX_JIFFY_OFFSET;
/*
* RxRPC operating parameters.
* information on the individual parameters.
*/
static struct ctl_table rxrpc_sysctl_table[] = {
- /* Values measured in milliseconds */
+ /* Values measured in milliseconds but used in jiffies */
{
.procname = "req_ack_delay",
.data = &rxrpc_requested_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&zero,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "soft_ack_delay",
.data = &rxrpc_soft_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_ack_delay",
.data = &rxrpc_idle_ack_delay,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
- },
- {
- .procname = "resend_timeout",
- .data = &rxrpc_resend_timeout,
- .maxlen = sizeof(unsigned int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_conn_expiry",
.data = &rxrpc_conn_idle_client_expiry,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
{
.procname = "idle_conn_fast_expiry",
.data = &rxrpc_conn_idle_client_fast_expiry,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_ms_jiffies,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
-
- /* Values measured in seconds but used in jiffies */
{
- .procname = "max_call_lifetime",
- .data = &rxrpc_max_call_lifetime,
- .maxlen = sizeof(unsigned int),
+ .procname = "resend_timeout",
+ .data = &rxrpc_resend_timeout,
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec,
- .extra1 = (void *)&one,
+ .proc_handler = proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&one_jiffy,
+ .extra2 = (void *)&max_jiffies,
},
/* Non-time values */
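With proc_doulongvec_ms_jiffies_minmax the userspace interface stays in milliseconds while the variable itself now holds jiffies; a hypothetical round trip through one of the files above:

    /* Hypothetical round trip (exact jiffy count depends on CONFIG_HZ):
     *   # echo 500 > /proc/sys/net/rxrpc/idle_ack_delay
     *        => rxrpc_idle_ack_delay = msecs_to_jiffies(500)  (HZ / 2)
     *   # cat /proc/sys/net/rxrpc/idle_ack_delay
     *   500
     */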
const struct iphdr *iph;
u16 ul;
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ return 1;
+
/*
* Support both UDP and UDPLITE checksum algorithms, Don't use
* udph->len to get the real length without any protocol check,
const struct ipv6hdr *ip6h;
u16 ul;
+ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
+ return 1;
+
/*
* Support both UDP and UDPLITE checksum algorithms, Don't use
* udph->len to get the real length without any protocol check,
static void tcf_chain_flush(struct tcf_chain *chain)
{
- struct tcf_proto *tp;
+ struct tcf_proto *tp = rtnl_dereference(chain->filter_chain);
tcf_chain_head_change(chain, NULL);
- while ((tp = rtnl_dereference(chain->filter_chain)) != NULL) {
+ while (tp) {
RCU_INIT_POINTER(chain->filter_chain, tp->next);
- tcf_chain_put(chain);
tcf_proto_destroy(tp);
+ tp = rtnl_dereference(chain->filter_chain);
+ tcf_chain_put(chain);
}
}
struct tcf_chain *chain, *tmp;
rtnl_lock();
- /* Only chain 0 should be still here. */
+
+ /* At this point, all the chains should have refcnt == 1. */
list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
tcf_chain_put(chain);
rtnl_unlock();
}
/* XXX: Standalone actions are not allowed to jump to any chain, and bound
- * actions should be all removed after flushing. However, filters are now
- * destroyed in tc filter workqueue with RTNL lock, they can not race here.
+ * actions should be all removed after flushing.
*/
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
struct tcf_block_ext_info *ei)
{
- struct tcf_chain *chain, *tmp;
+ struct tcf_chain *chain;
- list_for_each_entry_safe(chain, tmp, &block->chain_list, list)
+ /* Hold a refcnt for all chains, except 0, so that they don't disappear
+ * while we are iterating.
+ */
+ list_for_each_entry(chain, &block->chain_list, list)
+ if (chain->index)
+ tcf_chain_hold(chain);
+
+ list_for_each_entry(chain, &block->chain_list, list)
tcf_chain_flush(chain);
tcf_block_offload_unbind(block, q, ei);
return 0;
}
-static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
- tcf_exts_destroy(&prog->exts);
- tcf_exts_put_net(&prog->exts);
-
if (cls_bpf_is_ebpf(prog))
bpf_prog_put(prog->filter);
else
kfree(prog->bpf_name);
kfree(prog->bpf_ops);
+}
+
+static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
+{
+ tcf_exts_destroy(&prog->exts);
+ tcf_exts_put_net(&prog->exts);
+
+ cls_bpf_free_parms(prog);
kfree(prog);
}
{
struct bpf_prog *fp;
char *name = NULL;
+ bool skip_sw;
u32 bpf_fd;
bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
+ skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;
- if (gen_flags & TCA_CLS_FLAGS_SKIP_SW)
- fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS,
- qdisc_dev(tp->q));
- else
- fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
+ fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
if (IS_ERR(fp))
return PTR_ERR(fp);
goto errout_idr;
ret = cls_bpf_offload(tp, prog, oldprog);
- if (ret) {
- if (!oldprog)
- idr_remove_ext(&head->handle_idr, prog->handle);
- __cls_bpf_delete_prog(prog);
- return ret;
- }
+ if (ret)
+ goto errout_parms;
if (!tc_in_hw(prog->gen_flags))
prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
*arg = prog;
return 0;
+errout_parms:
+ cls_bpf_free_parms(prog);
errout_idr:
if (!oldprog)
idr_remove_ext(&head->handle_idr, prog->handle);
if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
return -EINVAL;
+ err = tcf_block_get(&q->link.block, &q->link.filter_list, sch);
+ if (err)
+ goto put_rtab;
+
err = qdisc_class_hash_init(&q->clhash);
if (err < 0)
- goto put_rtab;
+ goto put_block;
q->link.sibling = &q->link;
q->link.common.classid = sch->handle;
cbq_addprio(q, &q->link);
return 0;
+put_block:
+ tcf_block_put(q->link.block);
+
put_rtab:
qdisc_put_rtab(q->link.R_tab);
return err;
int i;
int err;
+ q->sch = sch;
timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);
err = tcf_block_get(&q->block, &q->filter_list, sch);
INIT_LIST_HEAD(&sctp_address_families);
sctp_v4_pf_init();
sctp_v6_pf_init();
+ sctp_sched_ops_init();
status = register_pernet_subsys(&sctp_defaults_ops);
if (status)
list_for_each_entry(chunk, &t->transmitted, transmitted_list)
cb(chunk);
- list_for_each_entry(chunk, &q->retransmit, list)
+ list_for_each_entry(chunk, &q->retransmit, transmitted_list)
cb(chunk);
- list_for_each_entry(chunk, &q->sacked, list)
+ list_for_each_entry(chunk, &q->sacked, transmitted_list)
cb(chunk);
- list_for_each_entry(chunk, &q->abandoned, list)
+ list_for_each_entry(chunk, &q->abandoned, transmitted_list)
cb(chunk);
list_for_each_entry(chunk, &q->out_chunk_list, list)
*/
/* Mark as failed send. */
- sctp_chunk_fail(ch, SCTP_ERROR_INV_STRM);
+ sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
if (asoc->peer.prsctp_capable &&
SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
asoc->sent_cnt_removable--;
return retval;
}
+static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
+ __u16 str_nums, __be16 *str_list)
+{
+ struct sctp_association *asoc;
+ __u16 i;
+
+ asoc = container_of(stream, struct sctp_association, stream);
+ if (!asoc->outqueue.out_qlen)
+ return true;
+
+ if (!str_nums)
+ return false;
+
+ for (i = 0; i < str_nums; i++) {
+ __u16 sid = ntohs(str_list[i]);
+
+ if (stream->out[sid].ext &&
+ !list_empty(&stream->out[sid].ext->outq))
+ return false;
+ }
+
+ return true;
+}
+
int sctp_send_reset_streams(struct sctp_association *asoc,
struct sctp_reset_streams *params)
{
for (i = 0; i < str_nums; i++)
nstr_list[i] = htons(str_list[i]);
+ if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
+ retval = -EAGAIN;
+ goto out;
+ }
+
chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
kfree(nstr_list);
if (asoc->strreset_outstanding)
return -EINPROGRESS;
+ if (!sctp_outq_is_empty(&asoc->outqueue))
+ return -EAGAIN;
+
chunk = sctp_make_strreset_tsnreq(asoc);
if (!chunk)
return -ENOMEM;
flags = SCTP_STREAM_RESET_INCOMING_SSN;
}
- nums = (ntohs(param.p->length) - sizeof(*outreq)) / 2;
+ nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
if (nums) {
str_p = outreq->list_of_streams;
for (i = 0; i < nums; i++) {
goto out;
}
- nums = (ntohs(param.p->length) - sizeof(*inreq)) / 2;
+ nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
str_p = inreq->list_of_streams;
for (i = 0; i < nums; i++) {
if (ntohs(str_p[i]) >= stream->outcnt) {
}
}
+ if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
+ result = SCTP_STRRESET_IN_PROGRESS;
+ asoc->strreset_inseq--;
+ goto err;
+ }
+
chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
if (!chunk)
goto out;
i = asoc->strreset_inseq - request_seq - 1;
result = asoc->strreset_result[i];
if (result == SCTP_STRRESET_PERFORMED) {
- next_tsn = asoc->next_tsn;
+ next_tsn = asoc->ctsn_ack_point + 1;
init_tsn =
sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
}
goto err;
}
+
+ if (!sctp_outq_is_empty(&asoc->outqueue)) {
+ result = SCTP_STRRESET_IN_PROGRESS;
+ goto err;
+ }
+
asoc->strreset_inseq++;
if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
goto out;
}
- /* G3: The same processing as though a SACK chunk with no gap report
- * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
- * received MUST be performed.
+ /* G4: The same processing as though a FWD-TSN chunk (as defined in
+ * [RFC3758]) with all streams affected and a new cumulative TSN
+ * ACK of the Receiver's Next TSN minus 1 were received MUST be
+ * performed.
*/
max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
sctp_ulpq_reasm_flushtsn(&asoc->ulpq, max_tsn_seen);
sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
init_tsn, GFP_ATOMIC);
- /* G4: The same processing as though a FWD-TSN chunk (as defined in
- * [RFC3758]) with all streams affected and a new cumulative TSN
- * ACK of the Receiver's Next TSN minus 1 were received MUST be
- * performed.
+ /* G3: The same processing as though a SACK chunk with no gap report
+ * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
+ * received MUST be performed.
*/
sctp_outq_free(&asoc->outqueue);
outreq = (struct sctp_strreset_outreq *)req;
str_p = outreq->list_of_streams;
- nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) / 2;
+ nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
+ sizeof(__u16);
if (result == SCTP_STRRESET_PERFORMED) {
if (nums) {
inreq = (struct sctp_strreset_inreq *)req;
str_p = inreq->list_of_streams;
- nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) / 2;
+ nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
+ sizeof(__u16);
*evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
nums, str_p, GFP_ATOMIC);
if (result == SCTP_STRRESET_PERFORMED) {
__u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
&asoc->peer.tsn_map);
+ LIST_HEAD(temp);
sctp_ulpq_reasm_flushtsn(&asoc->ulpq, mtsn);
sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC);
SCTP_TSN_MAP_INITIAL,
stsn, GFP_ATOMIC);
+ /* Clean up sacked and abandoned queues only. As the
+ * out_chunk_list may not be empty, splice it to temp,
+ * then get it back after sctp_outq_free is done.
+ */
+ list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
sctp_outq_free(&asoc->outqueue);
+ list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
asoc->next_tsn = rtsn;
asoc->ctsn_ack_point = asoc->next_tsn - 1;
.unsched_all = sctp_sched_fcfs_unsched_all,
};
+static void sctp_sched_ops_fcfs_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_FCFS, &sctp_sched_fcfs);
+}
+
/* API to other parts of the stack */
-extern struct sctp_sched_ops sctp_sched_prio;
-extern struct sctp_sched_ops sctp_sched_rr;
+static struct sctp_sched_ops *sctp_sched_ops[SCTP_SS_MAX + 1];
-static struct sctp_sched_ops *sctp_sched_ops[] = {
- &sctp_sched_fcfs,
- &sctp_sched_prio,
- &sctp_sched_rr,
-};
+void sctp_sched_ops_register(enum sctp_sched_type sched,
+ struct sctp_sched_ops *sched_ops)
+{
+ sctp_sched_ops[sched] = sched_ops;
+}
+
+void sctp_sched_ops_init(void)
+{
+ sctp_sched_ops_fcfs_init();
+ sctp_sched_ops_prio_init();
+ sctp_sched_ops_rr_init();
+}
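With the writable exported array gone, adding a scheduler means a file-local ops table plus one registration call from sctp_sched_ops_init(). A hypothetical extra scheduler (the SCTP_SS_DUMMY value and the ops contents are invented here) would follow the same shape:

    static struct sctp_sched_ops sctp_sched_dummy = {
            /* .set, .get, .init, .free, .enqueue, .dequeue, ... */
    };

    void sctp_sched_ops_dummy_init(void)
    {
            sctp_sched_ops_register(SCTP_SS_DUMMY, &sctp_sched_dummy);
    }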
int sctp_sched_set_sched(struct sctp_association *asoc,
enum sctp_sched_type sched)
sctp_sched_prio_unsched(soute);
}
-struct sctp_sched_ops sctp_sched_prio = {
+static struct sctp_sched_ops sctp_sched_prio = {
.set = sctp_sched_prio_set,
.get = sctp_sched_prio_get,
.init = sctp_sched_prio_init,
.sched_all = sctp_sched_prio_sched_all,
.unsched_all = sctp_sched_prio_unsched_all,
};
+
+void sctp_sched_ops_prio_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_PRIO, &sctp_sched_prio);
+}
sctp_sched_rr_unsched(stream, soute);
}
-struct sctp_sched_ops sctp_sched_rr = {
+static struct sctp_sched_ops sctp_sched_rr = {
.set = sctp_sched_rr_set,
.get = sctp_sched_rr_get,
.init = sctp_sched_rr_init,
.sched_all = sctp_sched_rr_sched_all,
.unsched_all = sctp_sched_rr_unsched_all,
};
+
+void sctp_sched_ops_rr_init(void)
+{
+ sctp_sched_ops_register(SCTP_SS_RR, &sctp_sched_rr);
+}
{
struct smc_connection *conn = &smc->conn;
struct smc_link_group *lgr = conn->lgr;
- struct smc_buf_desc *buf_desc = NULL;
+ struct smc_buf_desc *buf_desc = ERR_PTR(-ENOMEM);
struct list_head *buf_list;
int bufsize, bufsize_short;
int sk_buf_size;
/* use socket send buffer size (w/o overhead) as start value */
sk_buf_size = smc->sk.sk_sndbuf / 2;
- for (bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
+ for (bufsize_short = smc_compress_bufsize(sk_buf_size);
bufsize_short >= 0; bufsize_short--) {
if (is_rmb) {
return status;
}
-static struct cache_detail rsi_cache_template = {
+static const struct cache_detail rsi_cache_template = {
.owner = THIS_MODULE,
.hash_size = RSI_HASHMAX,
.name = "auth.rpcsec.init",
return status;
}
-static struct cache_detail rsc_cache_template = {
+static const struct cache_detail rsc_cache_template = {
.owner = THIS_MODULE,
.hash_size = RSC_HASHMAX,
.name = "auth.rpcsec.context",
}
EXPORT_SYMBOL_GPL(cache_unregister_net);
-struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
+struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
struct cache_detail *cd;
int i;
serv->sv_tmpcnt++;
if (serv->sv_temptimer.function == NULL) {
/* setup timer to age temp transports */
- serv->sv_temptimer.function = (TIMER_FUNC_TYPE)svc_age_temp_xprts;
+ serv->sv_temptimer.function = svc_age_temp_xprts;
mod_timer(&serv->sv_temptimer,
jiffies + svc_conn_age_period * HZ);
}
return 0;
}
-static struct cache_detail unix_gid_cache_template = {
+static const struct cache_detail unix_gid_cache_template = {
.owner = THIS_MODULE,
.hash_size = GID_HASHMAX,
.name = "auth.unix.gid",
.set_client = svcauth_unix_set_client,
};
-static struct cache_detail ip_map_cache_template = {
+static const struct cache_detail ip_map_cache_template = {
.owner = THIS_MODULE,
.hash_size = IP_HASHMAX,
.name = "auth.unix.ip",
while ((skb = skb_peek(defq))) {
hdr = buf_msg(skb);
mtyp = msg_type(hdr);
+ blks = msg_blocks(hdr);
deliver = true;
ack = false;
update = false;
if (!update)
continue;
- blks = msg_blocks(hdr);
tipc_group_update_rcv_win(grp, blks, node, port, xmitq);
}
return;
/* We should not be sending anymore since the peer won't be
* there to receive, but we can still receive if there is data
- * left in our consume queue.
+ * left in our consume queue. If the local endpoint is a host,
+ * we can't call vsock_stream_has_data, since that may block,
+ * but a host endpoint can't read data once the VM has
+ * detached, so there is no available data in that case.
*/
- if (vsock_stream_has_data(vsk) <= 0) {
- sk->sk_state = TCP_CLOSE;
-
+ if (vsk->local_addr.svm_cid == VMADDR_CID_HOST ||
+ vsock_stream_has_data(vsk) <= 0) {
if (sk->sk_state == TCP_SYN_SENT) {
/* The peer may detach from a queue pair while
* we are still in the connecting state, i.e.,
* event like a reset.
*/
+ sk->sk_state = TCP_CLOSE;
sk->sk_err = ECONNRESET;
sk->sk_error_report(sk);
return;
}
+ sk->sk_state = TCP_CLOSE;
}
sk->sk_state_change(sk);
}
MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMCI transport for Virtual Sockets");
-MODULE_VERSION("1.0.4.0-k");
+MODULE_VERSION("1.0.5.0-k");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("vmware_vsock");
MODULE_ALIAS_NETPROTO(PF_VSOCK);
tristate "cfg80211 - wireless configuration API"
depends on RFKILL || !RFKILL
select FW_LOADER
+ # may need to update this when certificates are changed and are
+ # using a different algorithm, though right now they shouldn't
+ # (this is here rather than below to allow it to be a module)
+ select CRYPTO_SHA256 if CFG80211_USE_KERNEL_REGDB_KEYS
---help---
cfg80211 is the Linux wireless LAN (802.11) configuration API.
Enable this if you have a wireless device.
certificates like in the kernel sources (net/wireless/certs/)
that shall be accepted for a signed regulatory database.
+ Note that you need to also select the correct CRYPTO_<hash> modules
+ for your certificates, and if cfg80211 is built-in they also must be.
+
config CFG80211_REG_CELLULAR_HINTS
bool "cfg80211 regulatory support for cellular base station hints"
depends on CFG80211_CERTIFICATION_ONUS
static void lib80211_crypt_deinit_entries(struct lib80211_crypt_info *info,
int force);
static void lib80211_crypt_quiescing(struct lib80211_crypt_info *info);
-static void lib80211_crypt_deinit_handler(unsigned long data);
+static void lib80211_crypt_deinit_handler(struct timer_list *t);
int lib80211_crypt_info_init(struct lib80211_crypt_info *info, char *name,
spinlock_t *lock)
info->lock = lock;
INIT_LIST_HEAD(&info->crypt_deinit_list);
- setup_timer(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
- (unsigned long)info);
+ timer_setup(&info->crypt_deinit_timer, lib80211_crypt_deinit_handler,
+ 0);
return 0;
}
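This is the same timer-API conversion applied in many hunks below (x25, xfrm, keys): the callback's unsigned long data argument is replaced by a struct timer_list pointer, and the owning object is recovered with from_timer(). The generic pattern, reduced to a minimal sketch:

    struct foo {
            struct timer_list timer;       /* must be embedded, not a pointer */
    };

    static void foo_timeout(struct timer_list *t)
    {
            struct foo *foo = from_timer(foo, t, timer);  /* container_of() wrapper */
            /* ... act on foo ... */
    }

    static void foo_init(struct foo *foo)
    {
            timer_setup(&foo->timer, foo_timeout, 0);     /* no cast to ulong */
    }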
spin_unlock_irqrestore(info->lock, flags);
}
-static void lib80211_crypt_deinit_handler(unsigned long data)
+static void lib80211_crypt_deinit_handler(struct timer_list *t)
{
- struct lib80211_crypt_info *info = (struct lib80211_crypt_info *)data;
+ struct lib80211_crypt_info *info = from_timer(info, t,
+ crypt_deinit_timer);
unsigned long flags;
lib80211_crypt_deinit_entries(info, 0);
if (sk_has_allocations(sk)) {
/* Defer: outstanding buffers */
sk->sk_timer.expires = jiffies + 10 * HZ;
- sk->sk_timer.function = (TIMER_FUNC_TYPE)x25_destroy_timer;
+ sk->sk_timer.function = x25_destroy_timer;
add_timer(&sk->sk_timer);
} else {
/* drop last reference so sock_put will free */
LIST_HEAD(x25_neigh_list);
DEFINE_RWLOCK(x25_neigh_list_lock);
-static void x25_t20timer_expiry(unsigned long);
+static void x25_t20timer_expiry(struct timer_list *);
static void x25_transmit_restart_confirmation(struct x25_neigh *nb);
static void x25_transmit_restart_request(struct x25_neigh *nb);
mod_timer(&nb->t20timer, jiffies + nb->t20);
}
-static void x25_t20timer_expiry(unsigned long param)
+static void x25_t20timer_expiry(struct timer_list *t)
{
- struct x25_neigh *nb = (struct x25_neigh *)param;
+ struct x25_neigh *nb = from_timer(nb, t, t20timer);
x25_transmit_restart_request(nb);
return;
skb_queue_head_init(&nb->queue);
- setup_timer(&nb->t20timer, x25_t20timer_expiry, (unsigned long)nb);
+ timer_setup(&nb->t20timer, x25_t20timer_expiry, 0);
dev_hold(dev);
nb->dev = dev;
timer_setup(&x25->timer, x25_timer_expiry, 0);
/* initialized by sock_init_data */
- sk->sk_timer.function = (TIMER_FUNC_TYPE)x25_heartbeat_expiry;
+ sk->sk_timer.function = x25_heartbeat_expiry;
}
void x25_start_heartbeat(struct sock *sk)
return HRTIMER_NORESTART;
}
-static void xfrm_replay_timer_handler(unsigned long data);
+static void xfrm_replay_timer_handler(struct timer_list *t);
struct xfrm_state *xfrm_state_alloc(struct net *net)
{
INIT_HLIST_NODE(&x->byspi);
tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
- setup_timer(&x->rtimer, xfrm_replay_timer_handler,
- (unsigned long)x);
+ timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0);
x->curlft.add_time = get_seconds();
x->lft.soft_byte_limit = XFRM_INF;
x->lft.soft_packet_limit = XFRM_INF;
}
EXPORT_SYMBOL(xfrm_state_walk_done);
-static void xfrm_replay_timer_handler(unsigned long data)
+static void xfrm_replay_timer_handler(struct timer_list *t)
{
- struct xfrm_state *x = (struct xfrm_state *)data;
+ struct xfrm_state *x = from_timer(x, t, rtimer);
spin_lock(&x->lock);
# SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-y := test_lru_dist
hostprogs-y += sock_example
# SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-y := hid-example
# SPDX-License-Identifier: GPL-2.0
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
hostprogs-$(CONFIG_SAMPLE_SECCOMP) := bpf-fancy dropper bpf-direct
HOSTCFLAGS_bpf-fancy.o += -I$(objtree)/usr/include
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-y := sockmap
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-$(CONFIG_SAMPLE_STATX) := test-statx
-# kbuild trick to avoid linker error. Can be omitted if a module is built.
-obj- := dummy.o
-
# List of programs to build
hostprogs-y := uhid-example
obj-y += $(obj)/lib-ksyms.o
endif
-ifneq ($(strip $(obj-y) $(obj-m) $(obj-) $(subdir-m) $(lib-target)),)
+ifneq ($(strip $(obj-y) $(need-builtin)),)
builtin-target := $(obj)/built-in.o
endif
endif
endif
+ifneq ($(KBUILD_ENABLE_EXTRA_GCC_CHECKS),)
+ cmd_checkdoc = $(srctree)/scripts/kernel-doc -none $< ;
+endif
+
# Do section mismatch analysis for each module/built-in.o
ifdef CONFIG_DEBUG_SECTION_MISMATCH
cmd_secanalysis = ; scripts/mod/modpost $@
$(call echo-cmd,checksrc) $(cmd_checksrc) \
$(call cmd_and_fixdep,cc_o_c) \
$(cmd_modversions_c) \
+ $(cmd_checkdoc) \
$(call echo-cmd,objtool) $(cmd_objtool) \
$(call echo-cmd,record_mcount) $(cmd_record_mcount)
endef
PHONY += $(subdir-ym)
$(subdir-ym):
- $(Q)$(MAKE) $(build)=$@
+ $(Q)$(MAKE) $(build)=$@ need-builtin=$(if $(findstring $@,$(subdir-obj-y)),1)
# Add FORCE to the prequisites of a target to force it to be always rebuilt.
# ---------------------------------------------------------------------------
subdir-obj-y := $(filter %/built-in.o, $(obj-y))
# Replace multi-part objects by their individual parts, look at local dir only
-real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m))) $(extra-y)
+real-objs-y := $(foreach m, $(filter-out $(subdir-obj-y), $(obj-y)), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y))),$($(m:.o=-objs)) $($(m:.o=-y)),$(m)))
real-objs-m := $(foreach m, $(obj-m), $(if $(strip $($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m))),$($(m:.o=-objs)) $($(m:.o=-y)) $($(m:.o=-m)),$(m)))
# DTB
for d, n in delta:
if d: print("%-40s %7s %7s %+7d" % (n, old.get(n,"-"), new.get(n,"-"), d))
- print("Total: Before=%d, After=%d, chg %+.2f%%" % \
- (otot, ntot, (ntot - otot)*100.0/otot))
+ if otot:
+ percent = (ntot - otot) * 100.0 / otot
+ else:
+ percent = 0
+ print("Total: Before=%d, After=%d, chg %+.2f%%" % (otot, ntot, percent))
if sys.argv[1] == "-c":
print_result("Function", "tT", 3)
for (my $count = $linenr; $count <= $lc; $count++) {
my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
$fmt =~ s/%%//g;
- if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNO]).)/) {
+ if ($fmt =~ /(\%[\*\d\.]*p(?![\WFfSsBKRraEhMmIiUDdgVCbGNOx]).)/) {
$bad_extension = $1;
last;
}
VERBOSE=0
fi
-if [ -z "$J" ]; then
- NPROC=$(getconf _NPROCESSORS_ONLN)
-else
- NPROC="$J"
-fi
-
FLAGS="--very-quiet"
# You can use SPFLAGS to append extra arguments to coccicheck or override any
# Take only the last argument, which is the C file to test
shift $(( $# - 1 ))
OPTIONS="$COCCIINCLUDE $1"
+
+ # No need to parallelize Coccinelle since this mode takes one input file.
+ NPROC=1
else
ONLINE=0
if [ "$KBUILD_EXTMOD" = "" ] ; then
else
OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
fi
+
+ if [ -z "$J" ]; then
+ NPROC=$(getconf _NPROCESSORS_ONLN)
+ else
+ NPROC="$J"
+ fi
fi
if [ "$KBUILD_EXTMOD" != "" ] ; then
+++ /dev/null
-/// Use setup_timer function instead of initializing timer with the function
-/// and data fields
-// Confidence: High
-// Copyright: (C) 2016 Vaishali Thakkar, Oracle. GPLv2
-// Copyright: (C) 2017 Kees Cook, Google. GPLv2
-// Options: --no-includes --include-headers
-// Keywords: init_timer, setup_timer
-
-virtual patch
-virtual context
-virtual org
-virtual report
-
-// Match the common cases first to avoid Coccinelle parsing loops with
-// "... when" clauses.
-
-@match_immediate_function_data_after_init_timer
-depends on patch && !context && !org && !report@
-expression e, func, da;
-@@
-
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-(
--\(e.function\|e->function\) = func;
--\(e.data\|e->data\) = da;
-|
--\(e.data\|e->data\) = da;
--\(e.function\|e->function\) = func;
-)
-
-@match_immediate_function_data_before_init_timer
-depends on patch && !context && !org && !report@
-expression e, func, da;
-@@
-
-(
--\(e.function\|e->function\) = func;
--\(e.data\|e->data\) = da;
-|
--\(e.data\|e->data\) = da;
--\(e.function\|e->function\) = func;
-)
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-
-@match_function_and_data_after_init_timer
-depends on patch && !context && !org && !report@
-expression e, e2, e3, e4, e5, func, da;
-@@
-
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
- ... when != func = e2
- when != da = e3
-(
--e.function = func;
-... when != da = e4
--e.data = da;
-|
--e->function = func;
-... when != da = e4
--e->data = da;
-|
--e.data = da;
-... when != func = e5
--e.function = func;
-|
--e->data = da;
-... when != func = e5
--e->function = func;
-)
-
-@match_function_and_data_before_init_timer
-depends on patch && !context && !org && !report@
-expression e, e2, e3, e4, e5, func, da;
-@@
-(
--e.function = func;
-... when != da = e4
--e.data = da;
-|
--e->function = func;
-... when != da = e4
--e->data = da;
-|
--e.data = da;
-... when != func = e5
--e.function = func;
-|
--e->data = da;
-... when != func = e5
--e->function = func;
-)
-... when != func = e2
- when != da = e3
--init_timer
-+setup_timer
- ( \(&e\|e\)
-+, func, da
- );
-
-@r1 exists@
-expression t;
-identifier f;
-position p;
-@@
-
-f(...) { ... when any
- init_timer@p(\(&t\|t\))
- ... when any
-}
-
-@r2 exists@
-expression r1.t;
-identifier g != r1.f;
-expression e8;
-@@
-
-g(...) { ... when any
- \(t.data\|t->data\) = e8
- ... when any
-}
-
-// It is dangerous to use setup_timer if data field is initialized
-// in another function.
-
-@script:python depends on r2@
-p << r1.p;
-@@
-
-cocci.include_match(False)
-
-@r3 depends on patch && !context && !org && !report@
-expression r1.t, func, e7;
-position r1.p;
-@@
-
-(
--init_timer@p(&t);
-+setup_timer(&t, func, 0UL);
-... when != func = e7
--t.function = func;
-|
--t.function = func;
-... when != func = e7
--init_timer@p(&t);
-+setup_timer(&t, func, 0UL);
-|
--init_timer@p(t);
-+setup_timer(t, func, 0UL);
-... when != func = e7
--t->function = func;
-|
--t->function = func;
-... when != func = e7
--init_timer@p(t);
-+setup_timer(t, func, 0UL);
-)
-
-// ----------------------------------------------------------------------------
-
-@match_immediate_function_data_after_init_timer_context
-depends on !patch && (context || org || report)@
-expression da, e, func;
-position j0, j1, j2;
-@@
-
-* init_timer@j0 (&e);
-(
-* e@j1.function = func;
-* e@j2.data = da;
-|
-* e@j1.data = da;
-* e@j2.function = func;
-)
-
-@match_function_and_data_after_init_timer_context
-depends on !patch && (context || org || report)@
-expression a, b, e1, e2, e3, e4, e5;
-position j0 != match_immediate_function_data_after_init_timer_context.j0,j1,j2;
-@@
-
-* init_timer@j0 (&e1);
-... when != a = e2
- when != b = e3
-(
-* e1@j1.function = a;
-... when != b = e4
-* e1@j2.data = b;
-|
-* e1@j1.data = b;
-... when != a = e5
-* e1@j2.function = a;
-)
-
-@r3_context depends on !patch && (context || org || report)@
-expression c, e6, e7;
-position r1.p;
-position j0 !=
- {match_immediate_function_data_after_init_timer_context.j0,
- match_function_and_data_after_init_timer_context.j0}, j1;
-@@
-
-* init_timer@j0@p (&e6);
-... when != c = e7
-* e6@j1.function = c;
-
-// ----------------------------------------------------------------------------
-
-@script:python match_immediate_function_data_after_init_timer_org
-depends on org@
-j0 << match_immediate_function_data_after_init_timer_context.j0;
-j1 << match_immediate_function_data_after_init_timer_context.j1;
-j2 << match_immediate_function_data_after_init_timer_context.j2;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-coccilib.org.print_link(j2[0], "")
-
-@script:python match_function_and_data_after_init_timer_org depends on org@
-j0 << match_function_and_data_after_init_timer_context.j0;
-j1 << match_function_and_data_after_init_timer_context.j1;
-j2 << match_function_and_data_after_init_timer_context.j2;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-coccilib.org.print_link(j2[0], "")
-
-@script:python r3_org depends on org@
-j0 << r3_context.j0;
-j1 << r3_context.j1;
-@@
-
-msg = "Use setup_timer function."
-coccilib.org.print_todo(j0[0], msg)
-coccilib.org.print_link(j1[0], "")
-
-// ----------------------------------------------------------------------------
-
-@script:python match_immediate_function_data_after_init_timer_report
-depends on report@
-j0 << match_immediate_function_data_after_init_timer_context.j0;
-j1 << match_immediate_function_data_after_init_timer_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
-
-@script:python match_function_and_data_after_init_timer_report depends on report@
-j0 << match_function_and_data_after_init_timer_context.j0;
-j1 << match_function_and_data_after_init_timer_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
-
-@script:python r3_report depends on report@
-j0 << r3_context.j0;
-j1 << r3_context.j1;
-@@
-
-msg = "Use setup_timer function for function on line %s." % (j1[0].line)
-coccilib.report.print_report(j0[0], msg)
set -o errexit
set -o nounset
+READELF="${CROSS_COMPILE}readelf"
+ADDR2LINE="${CROSS_COMPILE}addr2line"
+SIZE="${CROSS_COMPILE}size"
+NM="${CROSS_COMPILE}nm"
+
command -v awk >/dev/null 2>&1 || die "awk isn't installed"
-command -v readelf >/dev/null 2>&1 || die "readelf isn't installed"
-command -v addr2line >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${READELF} >/dev/null 2>&1 || die "readelf isn't installed"
+command -v ${ADDR2LINE} >/dev/null 2>&1 || die "addr2line isn't installed"
+command -v ${SIZE} >/dev/null 2>&1 || die "size isn't installed"
+command -v ${NM} >/dev/null 2>&1 || die "nm isn't installed"
usage() {
echo "usage: faddr2line <object file> <func+offset> <func+offset>..." >&2
find_dir_prefix() {
local objfile=$1
- local start_kernel_addr=$(readelf -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
+ local start_kernel_addr=$(${READELF} -sW $objfile | awk '$8 == "start_kernel" {printf "0x%s", $2}')
[[ -z $start_kernel_addr ]] && return
- local file_line=$(addr2line -e $objfile $start_kernel_addr)
+ local file_line=$(${ADDR2LINE} -e $objfile $start_kernel_addr)
[[ -z $file_line ]] && return
local prefix=${file_line%init/main.c:*}
# Go through each of the object's symbols which match the func name.
# In rare cases there might be duplicates.
- file_end=$(size -Ax $objfile | awk '$1 == ".text" {print $2}')
+ file_end=$(${SIZE} -Ax $objfile | awk '$1 == ".text" {print $2}')
while read symbol; do
local fields=($symbol)
local sym_base=0x${fields[0]}
# pass real address to addr2line
echo "$func+$offset/$sym_size:"
- addr2line -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
+ ${ADDR2LINE} -fpie $objfile $addr | sed "s; $dir_prefix\(\./\)*; ;"
DONE=1
- done < <(nm -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
+ done < <(${NM} -n $objfile | awk -v fn=$func -v end=$file_end '$3 == fn { found=1; line=$0; start=$1; next } found == 1 { found=0; print line, "0x"$1 } END {if (found == 1) print line, end; }')
}
[[ $# -lt 2 ]] && usage
}
if (sym_match_arr) {
qsort(sym_match_arr, cnt, sizeof(struct sym_match), sym_rel_comp);
- sym_arr = malloc((cnt+1) * sizeof(struct symbol));
+ sym_arr = malloc((cnt+1) * sizeof(struct symbol *));
if (!sym_arr)
goto sym_re_search_free;
for (i = 0; i < cnt; i++)
-man Output troff manual page format. This is the default.
-rst Output reStructuredText format.
-text Output plain text format.
+ -none Do not output documentation, only warnings.
Output selection (mutually exclusive):
-export Only output documentation for symbols that have been
$output_mode = "gnome";
@highlights = @highlights_gnome;
$blankline = $blankline_gnome;
+ } elsif ($cmd eq "-none") {
+ $output_mode = "none";
} elsif ($cmd eq "-module") { # not needed for XML, inherits from calling document
$modulename = shift @ARGV;
} elsif ($cmd eq "-function") { # to only output specific functions
}
}
+
+## none mode output functions
+
+sub output_function_none(%) {
+}
+
+sub output_enum_none(%) {
+}
+
+sub output_typedef_none(%) {
+}
+
+sub output_struct_none(%) {
+}
+
+sub output_blockhead_none(%) {
+}
+
##
# generic output function for all types (function, struct/union, typedef, enum);
# calls the generated, variable output_ function name based on
}
}
if ($initial_section_counter == $section_counter) {
- print STDERR "${file}:1: warning: no structured comments found\n";
+ if ($output_mode ne "none") {
+ print STDERR "${file}:1: warning: no structured comments found\n";
+ }
if (($output_selection == OUTPUT_INCLUDE) && ($show_not_found == 1)) {
print STDERR " Was looking for '$_'.\n" for keys %function_table;
}
false; \
fi ; \
$(srctree)/scripts/setlocalversion --save-scmversion; \
-ln -sf $(srctree) $(2); \
tar -cz $(RCS_TAR_IGNORE) -f $(2).tar.gz \
- $(addprefix $(2)/,$(TAR_CONTENT) $(3)); \
-rm -f $(2) $(objtree)/.scmversion
+ --transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3); \
+rm -f $(objtree)/.scmversion
# rpm-pkg
# ---------------------------------------------------------------------------
-rpm-pkg rpm: FORCE
+rpm-pkg: FORCE
$(MAKE) clean
$(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec
$(call cmd,src_tar,$(KERNELPATH),kernel.spec)
long last_read;
int avail;
- mutex_lock(&rev->ns->lock);
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
last_read = rev->last_read;
if (last_read == rev->ns->revision) {
mutex_unlock(&rev->ns->lock);
last_read !=
READ_ONCE(rev->ns->revision)))
return -ERESTARTSYS;
- mutex_lock(&rev->ns->lock);
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
}
avail = sprintf(buffer, "%ld\n", rev->ns->revision);
unsigned int mask = 0;
if (rev) {
- mutex_lock(&rev->ns->lock);
+ mutex_lock_nested(&rev->ns->lock, rev->ns->level);
poll_wait(file, &rev->ns->wait, pt);
if (rev->last_read < rev->ns->revision)
mask |= POLLIN | POLLRDNORM;
*/
inode_unlock(dir);
error = simple_pin_fs(&aafs_ops, &aafs_mnt, &aafs_count);
- mutex_lock(&parent->lock);
+ mutex_lock_nested(&parent->lock, parent->level);
inode_lock_nested(dir, I_MUTEX_PARENT);
if (error)
goto out;
inode_unlock(dir);
inode_unlock(dentry->d_inode);
- mutex_lock(&parent->lock);
+ mutex_lock_nested(&parent->lock, parent->level);
ns = aa_get_ns(__aa_findn_ns(&parent->sub_ns, dentry->d_name.name,
dentry->d_name.len));
if (!ns) {
__aafs_profile_rmdir(child);
list_for_each_entry(sub, &ns->sub_ns, base.list) {
- mutex_lock(&sub->lock);
+ mutex_lock_nested(&sub->lock, sub->level);
__aafs_ns_rmdir(sub);
mutex_unlock(&sub->lock);
}
/* subnamespaces */
list_for_each_entry(sub, &ns->sub_ns, base.list) {
- mutex_lock(&sub->lock);
+ mutex_lock_nested(&sub->lock, sub->level);
error = __aafs_ns_mkdir(sub, ns_subns_dir(ns), NULL, NULL);
mutex_unlock(&sub->lock);
if (error)
/* is next namespace a child */
if (!list_empty(&ns->sub_ns)) {
next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
- mutex_lock(&next->lock);
+ mutex_lock_nested(&next->lock, next->level);
return next;
}
mutex_unlock(&ns->lock);
next = list_next_entry(ns, base.list);
if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
- mutex_lock(&next->lock);
+ mutex_lock_nested(&next->lock, next->level);
return next;
}
ns = parent;
f->private = root;
/* find the first profile */
- mutex_lock(&root->lock);
+ mutex_lock_nested(&root->lock, root->level);
profile = __first_profile(root, root);
/* skip to position */
aafs_mnt = kern_mount(&aafs_ops);
if (IS_ERR(aafs_mnt))
panic("can't set apparmorfs up\n");
- aafs_mnt->mnt_sb->s_flags &= ~MS_NOUSER;
+ aafs_mnt->mnt_sb->s_flags &= ~SB_NOUSER;
/* Populate fs tree. */
error = entry_create_dir(&aa_sfs_entry, NULL);
ns_subrevision(root_ns) = dent;
/* policy tree referenced by magic policy symlink */
- mutex_lock(&root_ns->lock);
+ mutex_lock_nested(&root_ns->lock, root_ns->level);
error = __aafs_ns_mkdir(root_ns, aafs_mnt->mnt_root, ".policy",
aafs_mnt->mnt_root);
mutex_unlock(&root_ns->lock);
* __attach_match_ - find an attachment match
* @name - to match against (NOT NULL)
* @head - profile list to walk (NOT NULL)
+ * @info - info message if there was an error (NOT NULL)
*
* Do a linear search on the profiles in the list. There is a matching
* preference where an exact match is preferred over a name which uses
* Returns: profile or NULL if no match found
*/
static struct aa_profile *__attach_match(const char *name,
- struct list_head *head)
+ struct list_head *head,
+ const char **info)
{
int len = 0;
+ bool conflict = false;
struct aa_profile *profile, *candidate = NULL;
list_for_each_entry_rcu(profile, head, base.list) {
- if (profile->label.flags & FLAG_NULL)
+ if (profile->label.flags & FLAG_NULL &&
+ &profile->label == ns_unconfined(profile->ns))
continue;
- if (profile->xmatch && profile->xmatch_len > len) {
- unsigned int state = aa_dfa_match(profile->xmatch,
- DFA_START, name);
- u32 perm = dfa_user_allow(profile->xmatch, state);
- /* any accepting state means a valid match. */
- if (perm & MAY_EXEC) {
- candidate = profile;
- len = profile->xmatch_len;
+
+ if (profile->xmatch) {
+ if (profile->xmatch_len == len) {
+ conflict = true;
+ continue;
+ } else if (profile->xmatch_len > len) {
+ unsigned int state;
+ u32 perm;
+
+ state = aa_dfa_match(profile->xmatch,
+ DFA_START, name);
+ perm = dfa_user_allow(profile->xmatch, state);
+ /* any accepting state means a valid match. */
+ if (perm & MAY_EXEC) {
+ candidate = profile;
+ len = profile->xmatch_len;
+ conflict = false;
+ }
}
} else if (!strcmp(profile->base.name, name))
/* exact non-re match, no more searching required */
return profile;
}
+ if (conflict) {
+ *info = "conflicting profile attachments";
+ return NULL;
+ }
+
return candidate;
}
* @ns: the current namespace (NOT NULL)
* @list: list to search (NOT NULL)
* @name: the executable name to match against (NOT NULL)
+ * @info: info message if there was an error
*
* Returns: label or NULL if no match found
*/
static struct aa_label *find_attach(struct aa_ns *ns, struct list_head *list,
- const char *name)
+ const char *name, const char **info)
{
struct aa_profile *profile;
rcu_read_lock();
- profile = aa_get_profile(__attach_match(name, list));
+ profile = aa_get_profile(__attach_match(name, list, info));
rcu_read_unlock();
return profile ? &profile->label : NULL;
if (xindex & AA_X_CHILD)
/* released by caller */
new = find_attach(ns, &profile->base.profiles,
- name);
+ name, info);
else
/* released by caller */
new = find_attach(ns, &ns->base.profiles,
- name);
+ name, info);
*lookupname = name;
break;
}
if (profile_unconfined(profile)) {
new = find_attach(profile->ns, &profile->ns->base.profiles,
- name);
+ name, &info);
if (new) {
AA_DEBUG("unconfined attached to new label");
return new;
}
} else if (COMPLAIN_MODE(profile)) {
/* no exec permission - learning mode */
- struct aa_profile *new_profile = aa_new_null_profile(profile,
- false, name,
- GFP_ATOMIC);
+ struct aa_profile *new_profile = NULL;
+ char *n = kstrdup(name, GFP_ATOMIC);
+
+ if (n) {
+ /* name is ptr into buffer */
+ long pos = name - buffer;
+ /* break per cpu buffer hold */
+ put_buffers(buffer);
+ new_profile = aa_new_null_profile(profile, false, n,
+ GFP_KERNEL);
+ get_buffers(buffer);
+ name = buffer + pos;
+ strcpy((char *)name, n);
+ kfree(n);
+ }
if (!new_profile) {
error = -ENOMEM;
info = "could not create null profile";
struct aa_perms aa_compute_fperms(struct aa_dfa *dfa, unsigned int state,
struct path_cond *cond)
{
- struct aa_perms perms;
-
/* FIXME: change over to new dfa format
* currently file perms are encoded in the dfa, new format
* splits the permissions from the dfa. This mapping can be
* done at profile load
*/
- perms.deny = 0;
- perms.kill = perms.stop = 0;
- perms.complain = perms.cond = 0;
- perms.hide = 0;
- perms.prompt = 0;
+ struct aa_perms perms = { };
if (uid_eq(current_fsuid(), cond->uid)) {
perms.allow = map_old_perms(dfa_user_allow(dfa, state));
/* these entries require a custom callback fn */
struct {
struct aa_label *peer;
- struct {
- const char *target;
- kuid_t ouid;
- } fs;
+ union {
+ struct {
+ const char *target;
+ kuid_t ouid;
+ } fs;
+ int signal;
+ };
};
struct {
struct aa_profile *profile;
const char *ns;
long pos;
} iface;
- int signal;
struct {
int rlim;
unsigned long max;
static inline bool path_mediated_fs(struct dentry *dentry)
{
- return !(dentry->d_sb->s_flags & MS_NOUSER);
+ return !(dentry->d_sb->s_flags & SB_NOUSER);
}
__labelset_update(ns);
list_for_each_entry(child, &ns->sub_ns, base.list) {
- mutex_lock(&child->lock);
+ mutex_lock_nested(&child->lock, child->level);
__aa_labelset_update_subtree(child);
mutex_unlock(&child->lock);
}
void aa_compute_perms(struct aa_dfa *dfa, unsigned int state,
struct aa_perms *perms)
{
- perms->deny = 0;
- perms->kill = perms->stop = 0;
- perms->complain = perms->cond = 0;
- perms->hide = 0;
- perms->prompt = 0;
- perms->allow = dfa_user_allow(dfa, state);
- perms->audit = dfa_user_audit(dfa, state);
- perms->quiet = dfa_user_quiet(dfa, state);
+ *perms = (struct aa_perms) {
+ .allow = dfa_user_allow(dfa, state),
+ .audit = dfa_user_audit(dfa, state),
+ .quiet = dfa_user_quiet(dfa, state),
+ };
/* for v5 perm mapping in the policydb, the other set is used
* to extend the general perm set
void (*cb)(struct audit_buffer *, void *))
{
int type, error;
- bool stop = false;
u32 denied = request & (~perms->allow | perms->deny);
if (likely(!denied)) {
else
type = AUDIT_APPARMOR_DENIED;
- if (denied & perms->stop)
- stop = true;
if (denied == (denied & perms->hide))
error = -ENOENT;
/* Determines if audit header is included in audited messages. This
* provides more context if the audit daemon is not running
*/
-bool aa_g_audit_header = 1;
+bool aa_g_audit_header = true;
module_param_named(audit_header, aa_g_audit_header, aabool,
S_IRUSR | S_IWUSR);
* DEPRECATED: read only as strict checking of load is always done now
* that none root users (user namespaces) can load policy.
*/
-bool aa_g_paranoid_load = 1;
+bool aa_g_paranoid_load = true;
module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUGO);
/* Boot time disable flag */
if (!apparmor_enabled || !security_module_enable("apparmor")) {
aa_info_message("AppArmor disabled by boot time parameter");
- apparmor_enabled = 0;
+ apparmor_enabled = false;
return 0;
}
aa_destroy_aafs();
aa_teardown_dfa_engine();
- apparmor_enabled = 0;
+ apparmor_enabled = false;
return error;
}
static struct aa_perms compute_mnt_perms(struct aa_dfa *dfa,
unsigned int state)
{
- struct aa_perms perms;
-
- perms.kill = 0;
- perms.allow = dfa_user_allow(dfa, state);
- perms.audit = dfa_user_audit(dfa, state);
- perms.quiet = dfa_user_quiet(dfa, state);
- perms.xindex = dfa_user_xindex(dfa, state);
+ struct aa_perms perms = {
+ .allow = dfa_user_allow(dfa, state),
+ .audit = dfa_user_audit(dfa, state),
+ .quiet = dfa_user_quiet(dfa, state),
+ .xindex = dfa_user_xindex(dfa, state),
+ };
return perms;
}
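Both this hunk and the aa_compute_fperms/aa_compute_perms changes above rely on the same C99 rule: a designated initializer zero-fills every member that is not named, which is what makes the manual perms.deny = 0; ... chains redundant. Minimal illustration:

    struct aa_perms p = { .allow = MAY_EXEC };  /* all other members are zeroed */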
{
struct aa_profile *p, *profile;
const char *bname;
- char *name;
+ char *name = NULL;
AA_BUG(!parent);
profile->file.dfa = aa_get_dfa(nulldfa);
profile->policy.dfa = aa_get_dfa(nulldfa);
- mutex_lock(&profile->ns->lock);
+ mutex_lock_nested(&profile->ns->lock, profile->ns->level);
p = __find_child(&parent->base.profiles, bname);
if (p) {
aa_free_profile(profile);
return profile;
fail:
+ kfree(name);
aa_free_profile(profile);
return NULL;
}
} else
ns = aa_get_ns(policy_ns ? policy_ns : labels_ns(label));
- mutex_lock(&ns->lock);
+ mutex_lock_nested(&ns->lock, ns->level);
/* check for duplicate rawdata blobs: space and file dedup */
list_for_each_entry(rawdata_ent, &ns->rawdata_list, list) {
if (aa_rawdata_eq(rawdata_ent, udata)) {
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
- mutex_lock(&ns->parent->lock);
+ mutex_lock_nested(&ns->parent->lock, ns->level);
__aa_remove_ns(ns);
__aa_bump_ns_revision(ns);
mutex_unlock(&ns->parent->lock);
} else {
/* remove profile */
- mutex_lock(&ns->lock);
+ mutex_lock_nested(&ns->lock, ns->level);
profile = aa_get_profile(__lookup_profile(&ns->base, name));
if (!profile) {
error = -ENOENT;
ns = alloc_ns(parent->base.hname, name);
if (!ns)
return NULL;
- mutex_lock(&ns->lock);
+ ns->level = parent->level + 1;
+ mutex_lock_nested(&ns->lock, ns->level);
error = __aafs_ns_mkdir(ns, ns_subns_dir(parent), name, dir);
if (error) {
AA_ERROR("Failed to create interface for ns %s\n",
return ERR_PTR(error);
}
ns->parent = aa_get_ns(parent);
- ns->level = parent->level + 1;
list_add_rcu(&ns->base.list, &parent->sub_ns);
/* add list ref */
aa_get_ns(ns);
{
struct aa_ns *ns;
- mutex_lock(&parent->lock);
+ mutex_lock_nested(&parent->lock, parent->level);
/* try and find the specified ns and if it doesn't exist create it */
/* released by caller */
ns = aa_get_ns(__aa_find_ns(&parent->sub_ns, name));
if (!ns)
return;
- mutex_lock(&ns->lock);
+ mutex_lock_nested(&ns->lock, ns->level);
/* release all profiles in this namespace */
__aa_profile_list_release(&ns->base.profiles);
struct aa_ns *ns = aa_get_ns(d->ns);
if (ns) {
- mutex_lock(&ns->lock);
+ mutex_lock_nested(&ns->lock, ns->level);
__aa_fs_remove_rawdata(d);
mutex_unlock(&ns->lock);
aa_put_ns(ns);
/**
* audit_resource - audit setting resource limit
* @profile: profile being enforced (NOT NULL)
- * @resoure: rlimit being auditing
+ * @resource: rlimit being audited
* @value: value being set
* @error: error value
*
error = fn_for_each(label, profile,
audit_resource(profile, resource,
new_rlim->rlim_max, peer,
- "cap_sys_resoure", -EACCES));
+ "cap_sys_resource", -EACCES));
else
error = fn_for_each_confined(label, profile,
profile_setrlimit(profile, resource, new_rlim));
/*
* Reaper for links from keyrings to dead keys.
*/
-static void key_gc_timer_func(unsigned long);
+static void key_gc_timer_func(struct timer_list *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func);
-static time_t key_gc_next_run = LONG_MAX;
+static time64_t key_gc_next_run = TIME64_MAX;
static struct key_type *key_gc_dead_keytype;
static unsigned long key_gc_flags;
* Schedule a garbage collection run.
* - time precision isn't particularly important
*/
-void key_schedule_gc(time_t gc_at)
+void key_schedule_gc(time64_t gc_at)
{
unsigned long expires;
- time_t now = current_kernel_time().tv_sec;
+ time64_t now = ktime_get_real_seconds();
- kenter("%ld", gc_at - now);
+ kenter("%lld", gc_at - now);
if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
kdebug("IMMEDIATE");
* Some key's cleanup time was met after it expired, so we need to get the
* reaper to go through a cycle finding expired keys.
*/
-static void key_gc_timer_func(unsigned long data)
+static void key_gc_timer_func(struct timer_list *unused)
{
kenter("");
- key_gc_next_run = LONG_MAX;
+ key_gc_next_run = TIME64_MAX;
key_schedule_gc_links();
}
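
This is the new timer API in miniature: the callback receives the timer_list pointer itself rather than an opaque unsigned long, and DEFINE_TIMER() has dropped its data argument. A minimal sketch of the new-style setup (names illustrative); when the timer is embedded in a larger object, from_timer() recovers the container::

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static struct timer_list demo_timer;

    static void demo_timer_fn(struct timer_list *t)
    {
            /* t == &demo_timer here; re-arm one second out */
            mod_timer(&demo_timer, jiffies + HZ);
    }

    static void demo_start(void)
    {
            timer_setup(&demo_timer, demo_timer_fn, 0);
            mod_timer(&demo_timer, jiffies + HZ);
    }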
struct rb_node *cursor;
struct key *key;
- time_t new_timer, limit;
+ time64_t new_timer, limit;
kenter("[%lx,%x]", key_gc_flags, gc_state);
- limit = current_kernel_time().tv_sec;
+ limit = ktime_get_real_seconds();
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
gc_state |= KEY_GC_REAPING_DEAD_1;
kdebug("new pass %x", gc_state);
- new_timer = LONG_MAX;
+ new_timer = TIME64_MAX;
/* As only this function is permitted to remove things from the key
* serial tree, if cursor is non-NULL then it will always point to a
if (gc_state & KEY_GC_SET_TIMER) {
if (key->expiry > limit && key->expiry < new_timer) {
- kdebug("will expire %x in %ld",
+ kdebug("will expire %x in %lld",
key_serial(key), key->expiry - limit);
new_timer = key->expiry;
}
*/
kdebug("pass complete");
- if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
+ if (gc_state & KEY_GC_SET_TIMER && new_timer != (time64_t)TIME64_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
int skipped_ret;
bool possessed;
key_ref_t result;
- struct timespec now;
+ time64_t now;
};
extern bool key_default_cmp(const struct key *key,
extern struct work_struct key_gc_work;
extern unsigned key_gc_delay;
-extern void keyring_gc(struct key *keyring, time_t limit);
+extern void keyring_gc(struct key *keyring, time64_t limit);
extern void keyring_restriction_gc(struct key *keyring,
struct key_type *dead_type);
-extern void key_schedule_gc(time_t gc_at);
+extern void key_schedule_gc(time64_t gc_at);
extern void key_schedule_gc_links(void);
extern void key_gc_keytype(struct key_type *ktype);
/*
* Determine whether a key is dead.
*/
-static inline bool key_is_dead(const struct key *key, time_t limit)
+static inline bool key_is_dead(const struct key *key, time64_t limit)
{
return
key->flags & ((1 << KEY_FLAG_DEAD) |
if (authkey)
key_revoke(authkey);
- if (prep->expiry != TIME_T_MAX) {
+ if (prep->expiry != TIME64_MAX) {
key->expiry = prep->expiry;
key_schedule_gc(prep->expiry + key_gc_delay);
}
prep.data = data;
prep.datalen = datalen;
prep.quotalen = key->type->def_datalen;
- prep.expiry = TIME_T_MAX;
+ prep.expiry = TIME64_MAX;
if (key->type->preparse) {
ret = key->type->preparse(&prep);
if (ret < 0)
struct key *authkey)
{
struct assoc_array_edit *edit;
- struct timespec now;
int ret, awaken, link_ret = 0;
key_check(key);
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
mark_key_instantiated(key, -error);
- now = current_kernel_time();
- key->expiry = now.tv_sec + timeout;
+ key->expiry = ktime_get_real_seconds() + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
void key_set_timeout(struct key *key, unsigned timeout)
{
- struct timespec now;
- time_t expiry = 0;
+ time64_t expiry = 0;
/* make the changes with the locks held to prevent races */
down_write(&key->sem);
- if (timeout > 0) {
- now = current_kernel_time();
- expiry = now.tv_sec + timeout;
- }
+ if (timeout > 0)
+ expiry = ktime_get_real_seconds() + timeout;
key->expiry = expiry;
key_schedule_gc(key->expiry + key_gc_delay);
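
Each of these hunks swaps current_kernel_time().tv_sec, a time_t that is 32 bits wide on 32-bit architectures, for ktime_get_real_seconds(), which returns time64_t everywhere, so the expiry arithmetic survives 2038. The conversion reduces to this sketch::

    #include <linux/time64.h>
    #include <linux/timekeeping.h>

    /* time64_t is 64-bit even on 32-bit kernels, so no y2038 wrap */
    static time64_t expiry_from_now(unsigned int timeout_secs)
    {
            return ktime_get_real_seconds() + timeout_secs;
    }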
prep.data = payload;
prep.datalen = plen;
prep.quotalen = index_key.type->def_datalen;
- prep.expiry = TIME_T_MAX;
+ prep.expiry = TIME64_MAX;
if (index_key.type->preparse) {
ret = index_key.type->preparse(&prep);
if (ret < 0) {
prep.data = payload;
prep.datalen = plen;
prep.quotalen = key->type->def_datalen;
- prep.expiry = TIME_T_MAX;
+ prep.expiry = TIME64_MAX;
if (key->type->preparse) {
ret = key->type->preparse(&prep);
if (ret < 0)
*/
void key_revoke(struct key *key)
{
- struct timespec now;
- time_t time;
+ time64_t time;
key_check(key);
key->type->revoke(key);
/* set the death time to no more than the expiry time */
- now = current_kernel_time();
- time = now.tv_sec;
+ time = ktime_get_real_seconds();
if (key->revoked_at == 0 || key->revoked_at > time) {
key->revoked_at = time;
key_schedule_gc(key->revoked_at + key_gc_delay);
/* skip invalidated, revoked and expired keys */
if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
- time_t expiry = READ_ONCE(key->expiry);
+ time64_t expiry = READ_ONCE(key->expiry);
if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
(1 << KEY_FLAG_REVOKED))) {
goto skipped;
}
- if (expiry && ctx->now.tv_sec >= expiry) {
+ if (expiry && ctx->now >= expiry) {
if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
ctx->result = ERR_PTR(-EKEYEXPIRED);
kleave(" = %d [expire]", ctx->skipped_ret);
key = key_ref_to_ptr(ctx->result);
key_check(key);
if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
- key->last_used_at = ctx->now.tv_sec;
- keyring->last_used_at = ctx->now.tv_sec;
+ key->last_used_at = ctx->now;
+ keyring->last_used_at = ctx->now;
while (sp > 0)
- stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
+ stack[--sp].keyring->last_used_at = ctx->now;
}
kleave(" = true");
return true;
}
rcu_read_lock();
- ctx->now = current_kernel_time();
+ ctx->now = ktime_get_real_seconds();
if (search_nested_keyrings(keyring, ctx))
__key_get(key_ref_to_ptr(ctx->result));
rcu_read_unlock();
* (ie. it has a zero usage count) */
if (!refcount_inc_not_zero(&keyring->usage))
continue;
- keyring->last_used_at = current_kernel_time().tv_sec;
+ keyring->last_used_at = ktime_get_real_seconds();
goto out;
}
}
static bool keyring_gc_select_iterator(void *object, void *iterator_data)
{
struct key *key = keyring_ptr_to_key(object);
- time_t *limit = iterator_data;
+ time64_t *limit = iterator_data;
if (key_is_dead(key, *limit))
return false;
static int keyring_gc_check_iterator(const void *object, void *iterator_data)
{
const struct key *key = keyring_ptr_to_key(object);
- time_t *limit = iterator_data;
+ time64_t *limit = iterator_data;
key_check(key);
return key_is_dead(key, *limit);
* Not called with any locks held. The keyring's key struct will not be
* deallocated under us as only our caller may deallocate it.
*/
-void keyring_gc(struct key *keyring, time_t limit)
+void keyring_gc(struct key *keyring, time64_t limit)
{
int result;
int key_validate(const struct key *key)
{
unsigned long flags = READ_ONCE(key->flags);
- time_t expiry = READ_ONCE(key->expiry);
+ time64_t expiry = READ_ONCE(key->expiry);
if (flags & (1 << KEY_FLAG_INVALIDATED))
return -ENOKEY;
/* check it hasn't expired */
if (expiry) {
- struct timespec now = current_kernel_time();
- if (now.tv_sec >= expiry)
+ if (ktime_get_real_seconds() >= expiry)
return -EKEYEXPIRED;
}
{
struct rb_node *_p = v;
struct key *key = rb_entry(_p, struct key, serial_node);
- struct timespec now;
- time_t expiry;
- unsigned long timo;
unsigned long flags;
key_ref_t key_ref, skey_ref;
+ time64_t now, expiry;
char xbuf[16];
short state;
+ u64 timo;
int rc;
struct keyring_search_context ctx = {
if (rc < 0)
return 0;
- now = current_kernel_time();
+ now = ktime_get_real_seconds();
rcu_read_lock();
expiry = READ_ONCE(key->expiry);
if (expiry == 0) {
memcpy(xbuf, "perm", 5);
- } else if (now.tv_sec >= expiry) {
+ } else if (now >= expiry) {
memcpy(xbuf, "expd", 5);
} else {
- timo = expiry - now.tv_sec;
+ timo = expiry - now;
if (timo < 60)
- sprintf(xbuf, "%lus", timo);
+ sprintf(xbuf, "%llus", timo);
else if (timo < 60*60)
- sprintf(xbuf, "%lum", timo / 60);
+ sprintf(xbuf, "%llum", div_u64(timo, 60));
else if (timo < 60*60*24)
- sprintf(xbuf, "%luh", timo / (60*60));
+ sprintf(xbuf, "%lluh", div_u64(timo, 60 * 60));
else if (timo < 60*60*24*7)
- sprintf(xbuf, "%lud", timo / (60*60*24));
+ sprintf(xbuf, "%llud", div_u64(timo, 60 * 60 * 24));
else
- sprintf(xbuf, "%luw", timo / (60*60*24*7));
+ sprintf(xbuf, "%lluw", div_u64(timo, 60 * 60 * 24 * 7));
}
state = key_read_state(key);
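
The switch from %lu to %llu with div_u64() matters on 32-bit builds: a plain `/` on a u64 would make the compiler emit a call to the libgcc helper __udivdi3, which the kernel does not provide. div_u64() from <linux/math64.h> is the portable spelling::

    #include <linux/math64.h>

    static u64 seconds_to_minutes(u64 secs)
    {
            return div_u64(secs, 60);       /* u64 dividend, u32 divisor */
    }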
if (ret < 0)
goto invalid_key;
- key->last_used_at = current_kernel_time().tv_sec;
+ key->last_used_at = ktime_get_real_seconds();
error:
put_cred(ctx.cred);
runtime->rate);
*audio_tstamp = ns_to_timespec(audio_nsecs);
}
- runtime->status->audio_tstamp = *audio_tstamp;
- runtime->status->tstamp = *curr_tstamp;
+ if (!timespec_equal(&runtime->status->audio_tstamp, audio_tstamp)) {
+ runtime->status->audio_tstamp = *audio_tstamp;
+ runtime->status->tstamp = *curr_tstamp;
+ }
/*
* re-take a driver timestamp to let apps detect if the reference tstamp
struct snd_timer *t;
tu = file->private_data;
- if (snd_BUG_ON(!tu->timeri))
- return -ENXIO;
+ if (!tu->timeri)
+ return -EBADFD;
t = tu->timeri->timer;
- if (snd_BUG_ON(!t))
- return -ENXIO;
+ if (!t)
+ return -EBADFD;
memset(&info, 0, sizeof(info));
info.card = t->card ? t->card->number : -1;
if (t->hw.flags & SNDRV_TIMER_HW_SLAVE)
struct snd_timer_status32 status;
tu = file->private_data;
- if (snd_BUG_ON(!tu->timeri))
- return -ENXIO;
+ if (!tu->timeri)
+ return -EBADFD;
memset(&status, 0, sizeof(status));
status.tstamp.tv_sec = tu->tstamp.tv_sec;
status.tstamp.tv_nsec = tu->tstamp.tv_nsec;
* Returns 0 if successful, or a negative error code.
*/
int snd_ctl_apply_vmaster_slaves(struct snd_kcontrol *kctl,
- int (*func)(struct snd_kcontrol *, void *),
+ int (*func)(struct snd_kcontrol *vslave,
+ struct snd_kcontrol *slave,
+ void *arg),
void *arg)
{
struct link_master *master;
if (err < 0)
return err;
list_for_each_entry(slave, &master->slaves, list) {
- err = func(&slave->slave, arg);
+ err = func(slave->kctl, &slave->slave, arg);
if (err < 0)
return err;
}
memset(pcm_chmap, 0, sizeof(pcm_chmap));
chmap->ops.get_chmap(chmap->hdac, pcm_idx, pcm_chmap);
- for (i = 0; i < sizeof(chmap); i++)
+ for (i = 0; i < ARRAY_SIZE(pcm_chmap); i++)
ucontrol->value.integer.value[i] = pcm_chmap[i];
return 0;
};
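
The loop-bound fix above is the classic sizeof-a-pointer bug: `sizeof(chmap)` measured the pointer variable (4 or 8 bytes) rather than the pcm_chmap array it was meant to walk. A runnable userspace demonstration of the difference::

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    int main(void)
    {
            unsigned char pcm_chmap[64];
            unsigned char *chmap = pcm_chmap;

            /* sizeof(chmap) is the pointer size, not the element count */
            printf("sizeof(chmap)=%zu ARRAY_SIZE(pcm_chmap)=%zu\n",
                   sizeof(chmap), ARRAY_SIZE(pcm_chmap));
            return 0;
    }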
/* initialize the slave volume with 0dB via snd_ctl_apply_vmaster_slaves() */
-static int init_slave_0dB(struct snd_kcontrol *kctl, void *_arg)
+static int init_slave_0dB(struct snd_kcontrol *slave,
+ struct snd_kcontrol *kctl,
+ void *_arg)
{
struct slave_init_arg *arg = _arg;
int _tlv[4];
arg->step = step;
val = -tlv[2] / step;
if (val > 0) {
- put_kctl_with_value(kctl, val);
+ put_kctl_with_value(slave, val);
return val;
}
}
/* unmute the slave via snd_ctl_apply_vmaster_slaves() */
-static int init_slave_unmute(struct snd_kcontrol *slave, void *_arg)
+static int init_slave_unmute(struct snd_kcontrol *slave,
+ struct snd_kcontrol *kctl,
+ void *_arg)
{
return put_kctl_with_value(slave, 1);
}
/* AMD Hudson */
{ PCI_DEVICE(0x1022, 0x780d),
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ /* AMD Raven */
+ { PCI_DEVICE(0x1022, 0x15e3),
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
case 0x10ec0299:
alc_update_coef_idx(codec, 0x10, 1<<9, 0);
break;
+ case 0x10ec0275:
+ alc_update_coef_idx(codec, 0xe, 0, 1<<0);
+ break;
case 0x10ec0293:
alc_update_coef_idx(codec, 0xa, 1<<13, 0);
break;
ALC225_STANDARD_PINS,
{0x12, 0xb7a60130},
{0x1b, 0x90170110}),
+ SND_HDA_PIN_QUIRK(0x10ec0233, 0x8086, "Intel NUC Skull Canyon", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ {0x1b, 0x01111010},
+ {0x1e, 0x01451130},
+ {0x21, 0x02211020}),
SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60140},
{0x14, 0x90170110},
case 0x10ec0703:
spec->codec_variant = ALC269_TYPE_ALC700;
spec->gen.mixer_nid = 0; /* ALC700 does not have any loopback mixer path */
- alc_update_coef_idx(codec, 0x4a, 0, 1 << 15); /* Combo jack auto trigger control */
+ alc_update_coef_idx(codec, 0x4a, 1 << 15, 0); /* Combo jack auto trigger control */
break;
}
depends on X86 || COMPILE_TEST
select SND_SOC_INTEL_MACH
select SND_SOC_INTEL_COMMON
+ help
+ Intel ASoC Audio Drivers. If you have an Intel machine that
+ has an audio controller with a DSP and an I2S or DMIC port, then
+ enable this option by saying Y or M.
+ If unsure, select "N".
config SND_SOC_INTEL_HASWELL
tristate "Intel ASoC SST driver for Haswell/Broadwell"
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_SOURCE))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
return cs;
}
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_SELECTOR))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id) {
+ if (cs->bLength < 5 + cs->bNrInPins)
+ return NULL;
return cs;
+ }
}
return NULL;
while ((cs = snd_usb_find_csint_desc(ctrl_iface->extra,
ctrl_iface->extralen,
cs, UAC2_CLOCK_MULTIPLIER))) {
- if (cs->bClockID == clock_id)
+ if (cs->bLength >= sizeof(*cs) && cs->bClockID == clock_id)
return cs;
}
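
All three clock-entity hunks add the same hardening: a descriptor parsed from device-supplied data must have its advertised bLength checked against both the fixed header and any variable-length tail before any field is dereferenced. A simplified stand-in (this struct layout is illustrative, not the exact UAC2 definition)::

    #include <stddef.h>
    #include <stdint.h>

    struct clock_selector {                 /* simplified stand-in */
            uint8_t bLength;
            uint8_t bDescriptorType;
            uint8_t bDescriptorSubtype;
            uint8_t bClockID;
            uint8_t bNrInPins;
            uint8_t baCSourceID[];          /* bNrInPins entries follow */
    };

    /* reject a descriptor whose claimed length cannot hold its contents */
    static int selector_valid(const struct clock_selector *cs)
    {
            if (cs->bLength < offsetof(struct clock_selector, baCSourceID))
                    return 0;
            return cs->bLength >= 5 + cs->bNrInPins;
    }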
void line6_start_timer(struct timer_list *timer, unsigned long msecs,
void (*function)(struct timer_list *t))
{
- timer->function = (TIMER_FUNC_TYPE)function;
+ timer->function = function;
mod_timer(timer, jiffies + msecs_to_jiffies(msecs));
}
EXPORT_SYMBOL_GPL(line6_start_timer);
__u8 *bmaControls;
if (state->mixer->protocol == UAC_VERSION_1) {
+ if (hdr->bLength < 7) {
+ usb_audio_err(state->chip,
+ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+ unitid);
+ return -EINVAL;
+ }
csize = hdr->bControlSize;
if (!csize) {
usb_audio_dbg(state->chip,
}
} else {
struct uac2_feature_unit_descriptor *ftr = _ftr;
+ if (hdr->bLength < 6) {
+ usb_audio_err(state->chip,
+ "unit %u: invalid UAC_FEATURE_UNIT descriptor\n",
+ unitid);
+ return -EINVAL;
+ }
csize = 4;
channels = (hdr->bLength - 6) / 4 - 1;
bmaControls = ftr->bmaControls;
const struct usbmix_name_map *map;
char **namelist;
- if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
+ if (desc->bLength < 5 || !desc->bNrInPins ||
+ desc->bLength < 5 + desc->bNrInPins) {
usb_audio_err(state->chip,
"invalid SELECTOR UNIT descriptor %d\n", unitid);
return -EINVAL;
{
struct usb_mixer_elem_list *list;
- for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem)
+ for (list = mixer->id_elems[unitid]; list; list = list->next_id_elem) {
+ struct usb_mixer_elem_info *info =
+ (struct usb_mixer_elem_info *)list;
+ /* invalidate cache, so the value is read from the device */
+ info->cached = 0;
snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
&list->kctl->id);
+ }
}
static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
#include <string.h>
#include <time.h>
#include <unistd.h>
-#include <net/if.h>
#include <sys/types.h>
#include <sys/stat.h>
info->tag[0], info->tag[1], info->tag[2], info->tag[3],
info->tag[4], info->tag[5], info->tag[6], info->tag[7]);
- if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
- jsonw_name(json_wtr, "dev");
- if (info->ifindex) {
- char name[IF_NAMESIZE];
-
- if (!if_indextoname(info->ifindex, name))
- jsonw_printf(json_wtr, "\"ifindex:%d\"",
- info->ifindex);
- else
- jsonw_printf(json_wtr, "\"%s\"", name);
- } else {
- jsonw_printf(json_wtr, "\"unknown\"");
- }
- }
-
if (info->load_time) {
char buf[32];
printf("tag ");
fprint_hex(stdout, info->tag, BPF_TAG_SIZE, "");
- printf(" ");
-
- if (info->status & BPF_PROG_STATUS_DEV_BOUND) {
- printf("dev ");
- if (info->ifindex) {
- char name[IF_NAMESIZE];
-
- if (!if_indextoname(info->ifindex, name))
- printf("ifindex:%d ", info->ifindex);
- else
- printf("%s ", name);
- } else {
- printf("unknown ");
- }
- }
printf("\n");
if (info->load_time) {
__u32 kern_version; /* checked when prog_type=kprobe */
__u32 prog_flags;
char prog_name[BPF_OBJ_NAME_LEN];
- __u32 prog_target_ifindex; /* ifindex of netdev to prep for */
+ __u32 prog_ifindex; /* ifindex of netdev to prep for */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
#define BPF_TAG_SIZE 8
-enum bpf_prog_status {
- BPF_PROG_STATUS_DEV_BOUND = (1 << 0),
-};
-
struct bpf_prog_info {
__u32 type;
__u32 id;
__u32 nr_map_ids;
__aligned_u64 map_ids;
char name[BPF_OBJ_NAME_LEN];
- __u32 ifindex;
- __u32 status;
} __attribute__((aligned(8)));
struct bpf_map_info {
-arch/x86/insn/inat-tables.c
+arch/x86/lib/inat-tables.c
objtool
fixdep
all: $(OBJTOOL)
-INCLUDES := -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi
+INCLUDES := -I$(srctree)/tools/include \
+ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \
+ -I$(srctree)/tools/objtool/arch/$(ARCH)/include
WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed
CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES)
LDFLAGS += -lelf $(LIBSUBCMD)
$(OBJTOOL_IN): fixdep FORCE
@$(MAKE) $(build)=objtool
-# Busybox's diff doesn't have -I, avoid warning in that case
-#
$(OBJTOOL): $(LIBSUBCMD) $(OBJTOOL_IN)
- @(diff -I 2>&1 | grep -q 'option requires an argument' && \
- test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
- diff -I'^#include' arch/x86/insn/insn.c ../../arch/x86/lib/insn.c >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat.c ../../arch/x86/lib/inat.c >/dev/null && \
- diff arch/x86/insn/x86-opcode-map.txt ../../arch/x86/lib/x86-opcode-map.txt >/dev/null && \
- diff arch/x86/insn/gen-insn-attr-x86.awk ../../arch/x86/tools/gen-insn-attr-x86.awk >/dev/null && \
- diff -I'^#include' arch/x86/insn/insn.h ../../arch/x86/include/asm/insn.h >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat.h ../../arch/x86/include/asm/inat.h >/dev/null && \
- diff -I'^#include' arch/x86/insn/inat_types.h ../../arch/x86/include/asm/inat_types.h >/dev/null) \
- || echo "warning: objtool: x86 instruction decoder differs from kernel" >&2 )) || true
- @(test -d ../../kernel -a -d ../../tools -a -d ../objtool && (( \
- diff ../../arch/x86/include/asm/orc_types.h orc_types.h >/dev/null) \
- || echo "warning: objtool: orc_types.h differs from kernel" >&2 )) || true
+ @./sync-check.sh
$(QUIET_LINK)$(CC) $(OBJTOOL_IN) $(LDFLAGS) -o $@
clean:
$(call QUIET_CLEAN, objtool) $(RM) $(OBJTOOL)
$(Q)find $(OUTPUT) -name '*.o' -delete -o -name '\.*.cmd' -delete -o -name '\.*.d' -delete
- $(Q)$(RM) $(OUTPUT)arch/x86/insn/inat-tables.c $(OUTPUT)fixdep
+ $(Q)$(RM) $(OUTPUT)arch/x86/lib/inat-tables.c $(OUTPUT)fixdep
FORCE:
objtool-y += decode.o
-inat_tables_script = arch/x86/insn/gen-insn-attr-x86.awk
-inat_tables_maps = arch/x86/insn/x86-opcode-map.txt
+inat_tables_script = arch/x86/tools/gen-insn-attr-x86.awk
+inat_tables_maps = arch/x86/lib/x86-opcode-map.txt
-$(OUTPUT)arch/x86/insn/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
+$(OUTPUT)arch/x86/lib/inat-tables.c: $(inat_tables_script) $(inat_tables_maps)
$(call rule_mkdir)
$(Q)$(call echo-cmd,gen)$(AWK) -f $(inat_tables_script) $(inat_tables_maps) > $@
-$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/insn/inat-tables.c
+$(OUTPUT)arch/x86/decode.o: $(OUTPUT)arch/x86/lib/inat-tables.c
-CFLAGS_decode.o += -I$(OUTPUT)arch/x86/insn
+CFLAGS_decode.o += -I$(OUTPUT)arch/x86/lib
#include <stdlib.h>
#define unlikely(cond) (cond)
-#include "insn/insn.h"
-#include "insn/inat.c"
-#include "insn/insn.c"
+#include <asm/insn.h>
+#include "lib/inat.c"
+#include "lib/insn.c"
#include "../../elf.h"
#include "../../arch.h"
--- /dev/null
+#ifndef _ASM_X86_INAT_H
+#define _ASM_X86_INAT_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <asm/inat_types.h>
+
+/*
+ * Internal bits. Don't use bitmasks directly, because these bits are
+ * unstable. You should use checking functions.
+ */
+
+#define INAT_OPCODE_TABLE_SIZE 256
+#define INAT_GROUP_TABLE_SIZE 8
+
+/* Legacy last prefixes */
+#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */
+#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */
+#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */
+/* Other Legacy prefixes */
+#define INAT_PFX_LOCK 4 /* 0xF0 */
+#define INAT_PFX_CS 5 /* 0x2E */
+#define INAT_PFX_DS 6 /* 0x3E */
+#define INAT_PFX_ES 7 /* 0x26 */
+#define INAT_PFX_FS 8 /* 0x64 */
+#define INAT_PFX_GS 9 /* 0x65 */
+#define INAT_PFX_SS 10 /* 0x36 */
+#define INAT_PFX_ADDRSZ 11 /* 0x67 */
+/* x86-64 REX prefix */
+#define INAT_PFX_REX 12 /* 0x4X */
+/* AVX VEX prefixes */
+#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
+#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
+#define INAT_PFX_EVEX 15 /* EVEX prefix */
+
+#define INAT_LSTPFX_MAX 3
+#define INAT_LGCPFX_MAX 11
+
+/* Immediate size */
+#define INAT_IMM_BYTE 1
+#define INAT_IMM_WORD 2
+#define INAT_IMM_DWORD 3
+#define INAT_IMM_QWORD 4
+#define INAT_IMM_PTR 5
+#define INAT_IMM_VWORD32 6
+#define INAT_IMM_VWORD 7
+
+/* Legacy prefix */
+#define INAT_PFX_OFFS 0
+#define INAT_PFX_BITS 4
+#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
+#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
+/* Escape opcodes */
+#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS)
+#define INAT_ESC_BITS 2
+#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1)
+#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS)
+/* Group opcodes (1-16) */
+#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS)
+#define INAT_GRP_BITS 5
+#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1)
+#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS)
+/* Immediates */
+#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS)
+#define INAT_IMM_BITS 3
+#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
+/* Flags */
+#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
+#define INAT_MODRM (1 << (INAT_FLAG_OFFS))
+#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1))
+#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2))
+#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
+#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
+#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
+#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
+#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
+/* Attribute making macros for attribute tables */
+#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
+#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
+#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
+#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
+
+/* Identifiers for segment registers */
+#define INAT_SEG_REG_IGNORE 0
+#define INAT_SEG_REG_DEFAULT 1
+#define INAT_SEG_REG_CS 2
+#define INAT_SEG_REG_SS 3
+#define INAT_SEG_REG_DS 4
+#define INAT_SEG_REG_ES 5
+#define INAT_SEG_REG_FS 6
+#define INAT_SEG_REG_GS 7
+
+/* Attribute search APIs */
+extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
+extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
+extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
+ int lpfx_id,
+ insn_attr_t esc_attr);
+extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
+ int lpfx_id,
+ insn_attr_t esc_attr);
+extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
+ insn_byte_t vex_m,
+ insn_byte_t vex_pp);
+
+/* Attribute checking functions */
+static inline int inat_is_legacy_prefix(insn_attr_t attr)
+{
+ attr &= INAT_PFX_MASK;
+ return attr && attr <= INAT_LGCPFX_MAX;
+}
+
+static inline int inat_is_address_size_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
+}
+
+static inline int inat_is_operand_size_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
+}
+
+static inline int inat_is_rex_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
+}
+
+static inline int inat_last_prefix_id(insn_attr_t attr)
+{
+ if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
+ return 0;
+ else
+ return attr & INAT_PFX_MASK;
+}
+
+static inline int inat_is_vex_prefix(insn_attr_t attr)
+{
+ attr &= INAT_PFX_MASK;
+ return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
+ attr == INAT_PFX_EVEX;
+}
+
+static inline int inat_is_evex_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
+}
+
+static inline int inat_is_vex3_prefix(insn_attr_t attr)
+{
+ return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
+}
+
+static inline int inat_is_escape(insn_attr_t attr)
+{
+ return attr & INAT_ESC_MASK;
+}
+
+static inline int inat_escape_id(insn_attr_t attr)
+{
+ return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
+}
+
+static inline int inat_is_group(insn_attr_t attr)
+{
+ return attr & INAT_GRP_MASK;
+}
+
+static inline int inat_group_id(insn_attr_t attr)
+{
+ return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
+}
+
+static inline int inat_group_common_attribute(insn_attr_t attr)
+{
+ return attr & ~INAT_GRP_MASK;
+}
+
+static inline int inat_has_immediate(insn_attr_t attr)
+{
+ return attr & INAT_IMM_MASK;
+}
+
+static inline int inat_immediate_size(insn_attr_t attr)
+{
+ return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
+}
+
+static inline int inat_has_modrm(insn_attr_t attr)
+{
+ return attr & INAT_MODRM;
+}
+
+static inline int inat_is_force64(insn_attr_t attr)
+{
+ return attr & INAT_FORCE64;
+}
+
+static inline int inat_has_second_immediate(insn_attr_t attr)
+{
+ return attr & INAT_SCNDIMM;
+}
+
+static inline int inat_has_moffset(insn_attr_t attr)
+{
+ return attr & INAT_MOFFSET;
+}
+
+static inline int inat_has_variant(insn_attr_t attr)
+{
+ return attr & INAT_VARIANT;
+}
+
+static inline int inat_accept_vex(insn_attr_t attr)
+{
+ return attr & INAT_VEXOK;
+}
+
+static inline int inat_must_vex(insn_attr_t attr)
+{
+ return attr & (INAT_VEXONLY | INAT_EVEXONLY);
+}
+
+static inline int inat_must_evex(insn_attr_t attr)
+{
+ return attr & INAT_EVEXONLY;
+}
+#endif
--- /dev/null
+#ifndef _ASM_X86_INAT_TYPES_H
+#define _ASM_X86_INAT_TYPES_H
+/*
+ * x86 instruction attributes
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+
+/* Instruction attributes */
+typedef unsigned int insn_attr_t;
+typedef unsigned char insn_byte_t;
+typedef signed int insn_value_t;
+
+#endif
--- /dev/null
+#ifndef _ASM_X86_INSN_H
+#define _ASM_X86_INSN_H
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2009
+ */
+
+/* insn_attr_t is defined in inat.h */
+#include <asm/inat.h>
+
+struct insn_field {
+ union {
+ insn_value_t value;
+ insn_byte_t bytes[4];
+ };
+ /* !0 if we've run insn_get_xxx() for this field */
+ unsigned char got;
+ unsigned char nbytes;
+};
+
+struct insn {
+ struct insn_field prefixes; /*
+ * Prefixes
+ * prefixes.bytes[3]: last prefix
+ */
+ struct insn_field rex_prefix; /* REX prefix */
+ struct insn_field vex_prefix; /* VEX prefix */
+ struct insn_field opcode; /*
+ * opcode.bytes[0]: opcode1
+ * opcode.bytes[1]: opcode2
+ * opcode.bytes[2]: opcode3
+ */
+ struct insn_field modrm;
+ struct insn_field sib;
+ struct insn_field displacement;
+ union {
+ struct insn_field immediate;
+ struct insn_field moffset1; /* for 64bit MOV */
+ struct insn_field immediate1; /* for 64bit imm or off16/32 */
+ };
+ union {
+ struct insn_field moffset2; /* for 64bit MOV */
+ struct insn_field immediate2; /* for 64bit imm or seg16 */
+ };
+
+ insn_attr_t attr;
+ unsigned char opnd_bytes;
+ unsigned char addr_bytes;
+ unsigned char length;
+ unsigned char x86_64;
+
+ const insn_byte_t *kaddr; /* kernel address of insn to analyze */
+ const insn_byte_t *end_kaddr; /* kernel address of last insn in buffer */
+ const insn_byte_t *next_byte;
+};
+
+#define MAX_INSN_SIZE 15
+
+#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
+#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
+#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
+
+#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
+#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
+#define X86_SIB_BASE(sib) ((sib) & 0x07)
+
+#define X86_REX_W(rex) ((rex) & 8)
+#define X86_REX_R(rex) ((rex) & 4)
+#define X86_REX_X(rex) ((rex) & 2)
+#define X86_REX_B(rex) ((rex) & 1)
+
+/* VEX bit flags */
+#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */
+#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */
+#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */
+#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
+#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
+/* VEX bit fields */
+#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
+#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
+#define X86_VEX2_M 1 /* VEX2.M always 1 */
+#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
+#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
+
+extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
+extern void insn_get_prefixes(struct insn *insn);
+extern void insn_get_opcode(struct insn *insn);
+extern void insn_get_modrm(struct insn *insn);
+extern void insn_get_sib(struct insn *insn);
+extern void insn_get_displacement(struct insn *insn);
+extern void insn_get_immediate(struct insn *insn);
+extern void insn_get_length(struct insn *insn);
+
+/* Attribute will be determined after getting ModRM (for opcode groups) */
+static inline void insn_get_attribute(struct insn *insn)
+{
+ insn_get_modrm(insn);
+}
+
+/* Instruction uses RIP-relative addressing */
+extern int insn_rip_relative(struct insn *insn);
+
+/* Init insn for kernel text */
+static inline void kernel_insn_init(struct insn *insn,
+ const void *kaddr, int buf_len)
+{
+#ifdef CONFIG_X86_64
+ insn_init(insn, kaddr, buf_len, 1);
+#else /* CONFIG_X86_32 */
+ insn_init(insn, kaddr, buf_len, 0);
+#endif
+}
+
+static inline int insn_is_avx(struct insn *insn)
+{
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+ return (insn->vex_prefix.value != 0);
+}
+
+static inline int insn_is_evex(struct insn *insn)
+{
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+ return (insn->vex_prefix.nbytes == 4);
+}
+
+/* Ensure this instruction is decoded completely */
+static inline int insn_complete(struct insn *insn)
+{
+ return insn->opcode.got && insn->modrm.got && insn->sib.got &&
+ insn->displacement.got && insn->immediate.got;
+}
+
+static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
+{
+ if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
+ return X86_VEX2_M;
+ else if (insn->vex_prefix.nbytes == 3) /* 3 bytes VEX */
+ return X86_VEX3_M(insn->vex_prefix.bytes[1]);
+ else /* EVEX */
+ return X86_EVEX_M(insn->vex_prefix.bytes[1]);
+}
+
+static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
+{
+ if (insn->vex_prefix.nbytes == 2) /* 2 bytes VEX */
+ return X86_VEX_P(insn->vex_prefix.bytes[1]);
+ else
+ return X86_VEX_P(insn->vex_prefix.bytes[2]);
+}
+
+/* Get the last prefix id from last prefix or VEX prefix */
+static inline int insn_last_prefix_id(struct insn *insn)
+{
+ if (insn_is_avx(insn))
+ return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
+
+ if (insn->prefixes.bytes[3])
+ return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
+
+ return 0;
+}
+
+/* Offset of each field from kaddr */
+static inline int insn_offset_rex_prefix(struct insn *insn)
+{
+ return insn->prefixes.nbytes;
+}
+static inline int insn_offset_vex_prefix(struct insn *insn)
+{
+ return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
+}
+static inline int insn_offset_opcode(struct insn *insn)
+{
+ return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
+}
+static inline int insn_offset_modrm(struct insn *insn)
+{
+ return insn_offset_opcode(insn) + insn->opcode.nbytes;
+}
+static inline int insn_offset_sib(struct insn *insn)
+{
+ return insn_offset_modrm(insn) + insn->modrm.nbytes;
+}
+static inline int insn_offset_displacement(struct insn *insn)
+{
+ return insn_offset_sib(insn) + insn->sib.nbytes;
+}
+static inline int insn_offset_immediate(struct insn *insn)
+{
+ return insn_offset_displacement(insn) + insn->displacement.nbytes;
+}
+
+#endif /* _ASM_X86_INSN_H */
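
A hedged usage sketch for the decoder interface above, written as a userspace program built against the tools/ copy of these headers (the include path is an assumption): decode one x86-64 instruction from a buffer and report its length::

    #include <asm/insn.h>
    #include <stdio.h>

    static int decoded_length(const unsigned char *buf, int len)
    {
            struct insn insn;

            insn_init(&insn, buf, len, /* x86_64 = */ 1);
            insn_get_length(&insn);         /* decodes all earlier fields too */
            return insn_complete(&insn) ? insn.length : -1;
    }

    int main(void)
    {
            /* mov rax, rdi : 48 89 f8 */
            const unsigned char code[] = { 0x48, 0x89, 0xf8 };

            printf("length = %d\n", decoded_length(code, sizeof(code)));
            return 0;
    }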
--- /dev/null
+/*
+ * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ORC_TYPES_H
+#define _ORC_TYPES_H
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/*
+ * The ORC_REG_* registers are base registers which are used to find other
+ * registers on the stack.
+ *
+ * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
+ * address of the previous frame: the caller's SP before it called the current
+ * function.
+ *
+ * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
+ * the current frame.
+ *
+ * The most commonly used base registers are SP and BP -- which the previous SP
+ * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
+ * usually based on.
+ *
+ * The rest of the base registers are needed for special cases like entry code
+ * and GCC realigned stacks.
+ */
+#define ORC_REG_UNDEFINED 0
+#define ORC_REG_PREV_SP 1
+#define ORC_REG_DX 2
+#define ORC_REG_DI 3
+#define ORC_REG_BP 4
+#define ORC_REG_SP 5
+#define ORC_REG_R10 6
+#define ORC_REG_R13 7
+#define ORC_REG_BP_INDIRECT 8
+#define ORC_REG_SP_INDIRECT 9
+#define ORC_REG_MAX 15
+
+/*
+ * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
+ * caller's SP right before it made the call). Used for all callable
+ * functions, i.e. all C code and all callable asm functions.
+ *
+ * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
+ * to a fully populated pt_regs from a syscall, interrupt, or exception.
+ *
+ * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
+ * points to the iret return frame.
+ *
+ * The UNWIND_HINT macros are used only for the unwind_hint struct. They
+ * aren't used in struct orc_entry due to size and complexity constraints.
+ * Objtool converts them to real types when it converts the hints to orc
+ * entries.
+ */
+#define ORC_TYPE_CALL 0
+#define ORC_TYPE_REGS 1
+#define ORC_TYPE_REGS_IRET 2
+#define UNWIND_HINT_TYPE_SAVE 3
+#define UNWIND_HINT_TYPE_RESTORE 4
+
+#ifndef __ASSEMBLY__
+/*
+ * This struct is more or less a vastly simplified version of the DWARF Call
+ * Frame Information standard. It contains only the necessary parts of DWARF
+ * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
+ * unwinder how to find the previous SP and BP (and sometimes entry regs) on
+ * the stack for a given code address. Each instance of the struct corresponds
+ * to one or more code locations.
+ */
+struct orc_entry {
+ s16 sp_offset;
+ s16 bp_offset;
+ unsigned sp_reg:4;
+ unsigned bp_reg:4;
+ unsigned type:2;
+} __packed;
+
+/*
+ * This struct is used by asm and inline asm code to manually annotate the
+ * location of registers on the stack for the ORC unwinder.
+ *
+ * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
+ */
+struct unwind_hint {
+ u32 ip;
+ s16 sp_offset;
+ u8 sp_reg;
+ u8 type;
+};
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ORC_TYPES_H */
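
To make struct orc_entry concrete, here is a hedged sketch of the central unwind step an ORC consumer performs: select the base register named by sp_reg and add sp_offset to recover the caller's stack pointer. Only the two common bases are handled, and the struct is re-declared locally so the snippet stands alone::

    #include <stdint.h>

    struct orc_entry_demo {                 /* mirrors struct orc_entry above */
            int16_t sp_offset;
            int16_t bp_offset;
            unsigned sp_reg:4;
            unsigned bp_reg:4;
            unsigned type:2;
    } __attribute__((packed));

    /* compute PREV_SP for an ORC_TYPE_CALL entry; register numbers
     * follow the ORC_REG_* constants defined above */
    static uint64_t prev_sp(const struct orc_entry_demo *orc,
                            uint64_t sp, uint64_t bp)
    {
            switch (orc->sp_reg) {
            case 5: /* ORC_REG_SP */
                    return sp + orc->sp_offset;
            case 4: /* ORC_REG_BP */
                    return bp + orc->sp_offset;
            default:
                    return 0;               /* other bases omitted here */
            }
    }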
+++ /dev/null
-#!/bin/awk -f
-# SPDX-License-Identifier: GPL-2.0
-# gen-insn-attr-x86.awk: Instruction attribute table generator
-# Written by Masami Hiramatsu <mhiramat@redhat.com>
-#
-# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
-
-# Awk implementation sanity check
-function check_awk_implement() {
- if (sprintf("%x", 0) != "0")
- return "Your awk has a printf-format problem."
- return ""
-}
-
-# Clear working vars
-function clear_vars() {
- delete table
- delete lptable2
- delete lptable1
- delete lptable3
- eid = -1 # escape id
- gid = -1 # group id
- aid = -1 # AVX id
- tname = ""
-}
-
-BEGIN {
- # Implementation error checking
- awkchecked = check_awk_implement()
- if (awkchecked != "") {
- print "Error: " awkchecked > "/dev/stderr"
- print "Please try to use gawk." > "/dev/stderr"
- exit 1
- }
-
- # Setup generating tables
- print "/* x86 opcode map generated from x86-opcode-map.txt */"
- print "/* Do not change this code. */\n"
- ggid = 1
- geid = 1
- gaid = 0
- delete etable
- delete gtable
- delete atable
-
- opnd_expr = "^[A-Za-z/]"
- ext_expr = "^\\("
- sep_expr = "^\\|$"
- group_expr = "^Grp[0-9A-Za-z]+"
-
- imm_expr = "^[IJAOL][a-z]"
- imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
- imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
- imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
- imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
- imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
- imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
- imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
- imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
- imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
- imm_flag["Ob"] = "INAT_MOFFSET"
- imm_flag["Ov"] = "INAT_MOFFSET"
- imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
-
- modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
- force64_expr = "\\([df]64\\)"
- rex_expr = "^REX(\\.[XRWB]+)*"
- fpu_expr = "^ESC" # TODO
-
- lprefix1_expr = "\\((66|!F3)\\)"
- lprefix2_expr = "\\(F3\\)"
- lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
- lprefix_expr = "\\((66|F2|F3)\\)"
- max_lprefix = 4
-
- # All opcodes starting with lower-case 'v', 'k' or with (v1) superscript
- # accepts VEX prefix
- vexok_opcode_expr = "^[vk].*"
- vexok_expr = "\\(v1\\)"
- # All opcodes with (v) superscript supports *only* VEX prefix
- vexonly_expr = "\\(v\\)"
- # All opcodes with (ev) superscript supports *only* EVEX prefix
- evexonly_expr = "\\(ev\\)"
-
- prefix_expr = "\\(Prefix\\)"
- prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
- prefix_num["REPNE"] = "INAT_PFX_REPNE"
- prefix_num["REP/REPE"] = "INAT_PFX_REPE"
- prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
- prefix_num["XRELEASE"] = "INAT_PFX_REPE"
- prefix_num["LOCK"] = "INAT_PFX_LOCK"
- prefix_num["SEG=CS"] = "INAT_PFX_CS"
- prefix_num["SEG=DS"] = "INAT_PFX_DS"
- prefix_num["SEG=ES"] = "INAT_PFX_ES"
- prefix_num["SEG=FS"] = "INAT_PFX_FS"
- prefix_num["SEG=GS"] = "INAT_PFX_GS"
- prefix_num["SEG=SS"] = "INAT_PFX_SS"
- prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
- prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
- prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
- prefix_num["EVEX"] = "INAT_PFX_EVEX"
-
- clear_vars()
-}
-
-function semantic_error(msg) {
- print "Semantic error at " NR ": " msg > "/dev/stderr"
- exit 1
-}
-
-function debug(msg) {
- print "DEBUG: " msg
-}
-
-function array_size(arr, i,c) {
- c = 0
- for (i in arr)
- c++
- return c
-}
-
-/^Table:/ {
- print "/* " $0 " */"
- if (tname != "")
- semantic_error("Hit Table: before EndTable:.");
-}
-
-/^Referrer:/ {
- if (NF != 1) {
- # escape opcode table
- ref = ""
- for (i = 2; i <= NF; i++)
- ref = ref $i
- eid = escape[ref]
- tname = sprintf("inat_escape_table_%d", eid)
- }
-}
-
-/^AVXcode:/ {
- if (NF != 1) {
- # AVX/escape opcode table
- aid = $2
- if (gaid <= aid)
- gaid = aid + 1
- if (tname == "") # AVX only opcode table
- tname = sprintf("inat_avx_table_%d", $2)
- }
- if (aid == -1 && eid == -1) # primary opcode table
- tname = "inat_primary_table"
-}
-
-/^GrpTable:/ {
- print "/* " $0 " */"
- if (!($2 in group))
- semantic_error("No group: " $2 )
- gid = group[$2]
- tname = "inat_group_table_" gid
-}
-
-function print_table(tbl,name,fmt,n)
-{
- print "const insn_attr_t " name " = {"
- for (i = 0; i < n; i++) {
- id = sprintf(fmt, i)
- if (tbl[id])
- print " [" id "] = " tbl[id] ","
- }
- print "};"
-}
-
-/^EndTable/ {
- if (gid != -1) {
- # print group tables
- if (array_size(table) != 0) {
- print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
- "0x%x", 8)
- gtable[gid,0] = tname
- }
- if (array_size(lptable1) != 0) {
- print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
- "0x%x", 8)
- gtable[gid,1] = tname "_1"
- }
- if (array_size(lptable2) != 0) {
- print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
- "0x%x", 8)
- gtable[gid,2] = tname "_2"
- }
- if (array_size(lptable3) != 0) {
- print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
- "0x%x", 8)
- gtable[gid,3] = tname "_3"
- }
- } else {
- # print primary/escaped tables
- if (array_size(table) != 0) {
- print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
- "0x%02x", 256)
- etable[eid,0] = tname
- if (aid >= 0)
- atable[aid,0] = tname
- }
- if (array_size(lptable1) != 0) {
- print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
- "0x%02x", 256)
- etable[eid,1] = tname "_1"
- if (aid >= 0)
- atable[aid,1] = tname "_1"
- }
- if (array_size(lptable2) != 0) {
- print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
- "0x%02x", 256)
- etable[eid,2] = tname "_2"
- if (aid >= 0)
- atable[aid,2] = tname "_2"
- }
- if (array_size(lptable3) != 0) {
- print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
- "0x%02x", 256)
- etable[eid,3] = tname "_3"
- if (aid >= 0)
- atable[aid,3] = tname "_3"
- }
- }
- print ""
- clear_vars()
-}
-
-function add_flags(old,new) {
- if (old && new)
- return old " | " new
- else if (old)
- return old
- else
- return new
-}
-
-# convert operands to flags.
-function convert_operands(count,opnd, i,j,imm,mod)
-{
- imm = null
- mod = null
- for (j = 1; j <= count; j++) {
- i = opnd[j]
- if (match(i, imm_expr) == 1) {
- if (!imm_flag[i])
- semantic_error("Unknown imm opnd: " i)
- if (imm) {
- if (i != "Ib")
- semantic_error("Second IMM error")
- imm = add_flags(imm, "INAT_SCNDIMM")
- } else
- imm = imm_flag[i]
- } else if (match(i, modrm_expr))
- mod = "INAT_MODRM"
- }
- return add_flags(imm, mod)
-}
-
-/^[0-9a-f]+\:/ {
- if (NR == 1)
- next
- # get index
- idx = "0x" substr($1, 1, index($1,":") - 1)
- if (idx in table)
- semantic_error("Redefine " idx " in " tname)
-
- # check if escaped opcode
- if ("escape" == $2) {
- if ($3 != "#")
- semantic_error("No escaped name")
- ref = ""
- for (i = 4; i <= NF; i++)
- ref = ref $i
- if (ref in escape)
- semantic_error("Redefine escape (" ref ")")
- escape[ref] = geid
- geid++
- table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
- next
- }
-
- variant = null
- # converts
- i = 2
- while (i <= NF) {
- opcode = $(i++)
- delete opnds
- ext = null
- flags = null
- opnd = null
- # parse one opcode
- if (match($i, opnd_expr)) {
- opnd = $i
- count = split($(i++), opnds, ",")
- flags = convert_operands(count, opnds)
- }
- if (match($i, ext_expr))
- ext = $(i++)
- if (match($i, sep_expr))
- i++
- else if (i < NF)
- semantic_error($i " is not a separator")
-
- # check if group opcode
- if (match(opcode, group_expr)) {
- if (!(opcode in group)) {
- group[opcode] = ggid
- ggid++
- }
- flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
- }
- # check force(or default) 64bit
- if (match(ext, force64_expr))
- flags = add_flags(flags, "INAT_FORCE64")
-
- # check REX prefix
- if (match(opcode, rex_expr))
- flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
-
- # check coprocessor escape : TODO
- if (match(opcode, fpu_expr))
- flags = add_flags(flags, "INAT_MODRM")
-
- # check VEX codes
- if (match(ext, evexonly_expr))
- flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
- else if (match(ext, vexonly_expr))
- flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
- else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
- flags = add_flags(flags, "INAT_VEXOK")
-
- # check prefixes
- if (match(ext, prefix_expr)) {
- if (!prefix_num[opcode])
- semantic_error("Unknown prefix: " opcode)
- flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
- }
- if (length(flags) == 0)
- continue
- # check if last prefix
- if (match(ext, lprefix1_expr)) {
- lptable1[idx] = add_flags(lptable1[idx],flags)
- variant = "INAT_VARIANT"
- }
- if (match(ext, lprefix2_expr)) {
- lptable2[idx] = add_flags(lptable2[idx],flags)
- variant = "INAT_VARIANT"
- }
- if (match(ext, lprefix3_expr)) {
- lptable3[idx] = add_flags(lptable3[idx],flags)
- variant = "INAT_VARIANT"
- }
- if (!match(ext, lprefix_expr)){
- table[idx] = add_flags(table[idx],flags)
- }
- }
- if (variant)
- table[idx] = add_flags(table[idx],variant)
-}
-
-END {
- if (awkchecked != "")
- exit 1
- # print escape opcode map's array
- print "/* Escape opcode map array */"
- print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
- "[INAT_LSTPFX_MAX + 1] = {"
- for (i = 0; i < geid; i++)
- for (j = 0; j < max_lprefix; j++)
- if (etable[i,j])
- print " ["i"]["j"] = "etable[i,j]","
- print "};\n"
- # print group opcode map's array
- print "/* Group opcode map array */"
- print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
- "[INAT_LSTPFX_MAX + 1] = {"
- for (i = 0; i < ggid; i++)
- for (j = 0; j < max_lprefix; j++)
- if (gtable[i,j])
- print " ["i"]["j"] = "gtable[i,j]","
- print "};\n"
- # print AVX opcode map's array
- print "/* AVX opcode map array */"
- print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
- "[INAT_LSTPFX_MAX + 1] = {"
- for (i = 0; i < gaid; i++)
- for (j = 0; j < max_lprefix; j++)
- if (atable[i,j])
- print " ["i"]["j"] = "atable[i,j]","
- print "};"
-}
-
+++ /dev/null
-/*
- * x86 instruction attribute tables
- *
- * Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#include "insn.h"
-
-/* Attribute tables are generated from opcode map */
-#include "inat-tables.c"
-
-/* Attribute search APIs */
-insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
-{
- return inat_primary_table[opcode];
-}
-
-int inat_get_last_prefix_id(insn_byte_t last_pfx)
-{
- insn_attr_t lpfx_attr;
-
- lpfx_attr = inat_get_opcode_attribute(last_pfx);
- return inat_last_prefix_id(lpfx_attr);
-}
-
-insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
- insn_attr_t esc_attr)
-{
- const insn_attr_t *table;
- int n;
-
- n = inat_escape_id(esc_attr);
-
- table = inat_escape_tables[n][0];
- if (!table)
- return 0;
- if (inat_has_variant(table[opcode]) && lpfx_id) {
- table = inat_escape_tables[n][lpfx_id];
- if (!table)
- return 0;
- }
- return table[opcode];
-}
-
-insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
- insn_attr_t grp_attr)
-{
- const insn_attr_t *table;
- int n;
-
- n = inat_group_id(grp_attr);
-
- table = inat_group_tables[n][0];
- if (!table)
- return inat_group_common_attribute(grp_attr);
- if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
- table = inat_group_tables[n][lpfx_id];
- if (!table)
- return inat_group_common_attribute(grp_attr);
- }
- return table[X86_MODRM_REG(modrm)] |
- inat_group_common_attribute(grp_attr);
-}
-
-insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
- insn_byte_t vex_p)
-{
- const insn_attr_t *table;
- if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
- return 0;
- /* At first, this checks the master table */
- table = inat_avx_tables[vex_m][0];
- if (!table)
- return 0;
- if (!inat_is_group(table[opcode]) && vex_p) {
- /* If this is not a group, get attribute directly */
- table = inat_avx_tables[vex_m][vex_p];
- if (!table)
- return 0;
- }
- return table[opcode];
-}
-
+++ /dev/null
-#ifndef _ASM_X86_INAT_H
-#define _ASM_X86_INAT_H
-/*
- * x86 instruction attributes
- *
- * Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-#include "inat_types.h"
-
-/*
- * Internal bits. Don't use bitmasks directly, because these bits are
- * unstable. You should use checking functions.
- */
-
-#define INAT_OPCODE_TABLE_SIZE 256
-#define INAT_GROUP_TABLE_SIZE 8
-
-/* Legacy last prefixes */
-#define INAT_PFX_OPNDSZ 1 /* 0x66 */ /* LPFX1 */
-#define INAT_PFX_REPE 2 /* 0xF3 */ /* LPFX2 */
-#define INAT_PFX_REPNE 3 /* 0xF2 */ /* LPFX3 */
-/* Other Legacy prefixes */
-#define INAT_PFX_LOCK 4 /* 0xF0 */
-#define INAT_PFX_CS 5 /* 0x2E */
-#define INAT_PFX_DS 6 /* 0x3E */
-#define INAT_PFX_ES 7 /* 0x26 */
-#define INAT_PFX_FS 8 /* 0x64 */
-#define INAT_PFX_GS 9 /* 0x65 */
-#define INAT_PFX_SS 10 /* 0x36 */
-#define INAT_PFX_ADDRSZ 11 /* 0x67 */
-/* x86-64 REX prefix */
-#define INAT_PFX_REX 12 /* 0x4X */
-/* AVX VEX prefixes */
-#define INAT_PFX_VEX2 13 /* 2-bytes VEX prefix */
-#define INAT_PFX_VEX3 14 /* 3-bytes VEX prefix */
-#define INAT_PFX_EVEX 15 /* EVEX prefix */
-
-#define INAT_LSTPFX_MAX 3
-#define INAT_LGCPFX_MAX 11
-
-/* Immediate size */
-#define INAT_IMM_BYTE 1
-#define INAT_IMM_WORD 2
-#define INAT_IMM_DWORD 3
-#define INAT_IMM_QWORD 4
-#define INAT_IMM_PTR 5
-#define INAT_IMM_VWORD32 6
-#define INAT_IMM_VWORD 7
-
-/* Legacy prefix */
-#define INAT_PFX_OFFS 0
-#define INAT_PFX_BITS 4
-#define INAT_PFX_MAX ((1 << INAT_PFX_BITS) - 1)
-#define INAT_PFX_MASK (INAT_PFX_MAX << INAT_PFX_OFFS)
-/* Escape opcodes */
-#define INAT_ESC_OFFS (INAT_PFX_OFFS + INAT_PFX_BITS)
-#define INAT_ESC_BITS 2
-#define INAT_ESC_MAX ((1 << INAT_ESC_BITS) - 1)
-#define INAT_ESC_MASK (INAT_ESC_MAX << INAT_ESC_OFFS)
-/* Group opcodes (1-16) */
-#define INAT_GRP_OFFS (INAT_ESC_OFFS + INAT_ESC_BITS)
-#define INAT_GRP_BITS 5
-#define INAT_GRP_MAX ((1 << INAT_GRP_BITS) - 1)
-#define INAT_GRP_MASK (INAT_GRP_MAX << INAT_GRP_OFFS)
-/* Immediates */
-#define INAT_IMM_OFFS (INAT_GRP_OFFS + INAT_GRP_BITS)
-#define INAT_IMM_BITS 3
-#define INAT_IMM_MASK (((1 << INAT_IMM_BITS) - 1) << INAT_IMM_OFFS)
-/* Flags */
-#define INAT_FLAG_OFFS (INAT_IMM_OFFS + INAT_IMM_BITS)
-#define INAT_MODRM (1 << (INAT_FLAG_OFFS))
-#define INAT_FORCE64 (1 << (INAT_FLAG_OFFS + 1))
-#define INAT_SCNDIMM (1 << (INAT_FLAG_OFFS + 2))
-#define INAT_MOFFSET (1 << (INAT_FLAG_OFFS + 3))
-#define INAT_VARIANT (1 << (INAT_FLAG_OFFS + 4))
-#define INAT_VEXOK (1 << (INAT_FLAG_OFFS + 5))
-#define INAT_VEXONLY (1 << (INAT_FLAG_OFFS + 6))
-#define INAT_EVEXONLY (1 << (INAT_FLAG_OFFS + 7))
-/* Attribute making macros for attribute tables */
-#define INAT_MAKE_PREFIX(pfx) (pfx << INAT_PFX_OFFS)
-#define INAT_MAKE_ESCAPE(esc) (esc << INAT_ESC_OFFS)
-#define INAT_MAKE_GROUP(grp) ((grp << INAT_GRP_OFFS) | INAT_MODRM)
-#define INAT_MAKE_IMM(imm) (imm << INAT_IMM_OFFS)
-
-/* Attribute search APIs */
-extern insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode);
-extern int inat_get_last_prefix_id(insn_byte_t last_pfx);
-extern insn_attr_t inat_get_escape_attribute(insn_byte_t opcode,
- int lpfx_id,
- insn_attr_t esc_attr);
-extern insn_attr_t inat_get_group_attribute(insn_byte_t modrm,
- int lpfx_id,
- insn_attr_t esc_attr);
-extern insn_attr_t inat_get_avx_attribute(insn_byte_t opcode,
- insn_byte_t vex_m,
- insn_byte_t vex_pp);
-
-/* Attribute checking functions */
-static inline int inat_is_legacy_prefix(insn_attr_t attr)
-{
- attr &= INAT_PFX_MASK;
- return attr && attr <= INAT_LGCPFX_MAX;
-}
-
-static inline int inat_is_address_size_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_ADDRSZ;
-}
-
-static inline int inat_is_operand_size_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_OPNDSZ;
-}
-
-static inline int inat_is_rex_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_REX;
-}
-
-static inline int inat_last_prefix_id(insn_attr_t attr)
-{
- if ((attr & INAT_PFX_MASK) > INAT_LSTPFX_MAX)
- return 0;
- else
- return attr & INAT_PFX_MASK;
-}
-
-static inline int inat_is_vex_prefix(insn_attr_t attr)
-{
- attr &= INAT_PFX_MASK;
- return attr == INAT_PFX_VEX2 || attr == INAT_PFX_VEX3 ||
- attr == INAT_PFX_EVEX;
-}
-
-static inline int inat_is_evex_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_EVEX;
-}
-
-static inline int inat_is_vex3_prefix(insn_attr_t attr)
-{
- return (attr & INAT_PFX_MASK) == INAT_PFX_VEX3;
-}
-
-static inline int inat_is_escape(insn_attr_t attr)
-{
- return attr & INAT_ESC_MASK;
-}
-
-static inline int inat_escape_id(insn_attr_t attr)
-{
- return (attr & INAT_ESC_MASK) >> INAT_ESC_OFFS;
-}
-
-static inline int inat_is_group(insn_attr_t attr)
-{
- return attr & INAT_GRP_MASK;
-}
-
-static inline int inat_group_id(insn_attr_t attr)
-{
- return (attr & INAT_GRP_MASK) >> INAT_GRP_OFFS;
-}
-
-static inline int inat_group_common_attribute(insn_attr_t attr)
-{
- return attr & ~INAT_GRP_MASK;
-}
-
-static inline int inat_has_immediate(insn_attr_t attr)
-{
- return attr & INAT_IMM_MASK;
-}
-
-static inline int inat_immediate_size(insn_attr_t attr)
-{
- return (attr & INAT_IMM_MASK) >> INAT_IMM_OFFS;
-}
-
-static inline int inat_has_modrm(insn_attr_t attr)
-{
- return attr & INAT_MODRM;
-}
-
-static inline int inat_is_force64(insn_attr_t attr)
-{
- return attr & INAT_FORCE64;
-}
-
-static inline int inat_has_second_immediate(insn_attr_t attr)
-{
- return attr & INAT_SCNDIMM;
-}
-
-static inline int inat_has_moffset(insn_attr_t attr)
-{
- return attr & INAT_MOFFSET;
-}
-
-static inline int inat_has_variant(insn_attr_t attr)
-{
- return attr & INAT_VARIANT;
-}
-
-static inline int inat_accept_vex(insn_attr_t attr)
-{
- return attr & INAT_VEXOK;
-}
-
-static inline int inat_must_vex(insn_attr_t attr)
-{
- return attr & (INAT_VEXONLY | INAT_EVEXONLY);
-}
-
-static inline int inat_must_evex(insn_attr_t attr)
-{
- return attr & INAT_EVEXONLY;
-}
-#endif
+++ /dev/null
-#ifndef _ASM_X86_INAT_TYPES_H
-#define _ASM_X86_INAT_TYPES_H
-/*
- * x86 instruction attributes
- *
- * Written by Masami Hiramatsu <mhiramat@redhat.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- */
-
-/* Instruction attributes */
-typedef unsigned int insn_attr_t;
-typedef unsigned char insn_byte_t;
-typedef signed int insn_value_t;
-
-#endif
+++ /dev/null
-/*
- * x86 instruction analysis
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2002, 2004, 2009
- */
-
-#ifdef __KERNEL__
-#include <linux/string.h>
-#else
-#include <string.h>
-#endif
-#include "inat.h"
-#include "insn.h"
-
-/*
- * Verify that the next sizeof(t) bytes lie within the same instruction
- * buffer. The get_next()/peek_*() helpers below jump to a local err_out
- * label on overrun, so every caller must provide one.
- */
-#define validate_next(t, insn, n)	\
-	((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
-
-#define __get_next(t, insn) \
- ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
-
-#define __peek_nbyte_next(t, insn, n) \
- ({ t r = *(t*)((insn)->next_byte + n); r; })
-
-#define get_next(t, insn) \
- ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
-
-#define peek_nbyte_next(t, insn, n) \
- ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
-
-#define peek_next(t, insn) peek_nbyte_next(t, insn, 0)
-
-/**
- * insn_init() - initialize struct insn
- * @insn: &struct insn to be initialized
- * @kaddr: address (in kernel memory) of instruction (or copy thereof)
- * @buf_len: length in bytes of the buffer at @kaddr
- * @x86_64: !0 for 64-bit kernel or 64-bit app
- */
-void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
-{
- /*
- * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
- * even if the input buffer is long enough to hold them.
- */
- if (buf_len > MAX_INSN_SIZE)
- buf_len = MAX_INSN_SIZE;
-
- memset(insn, 0, sizeof(*insn));
- insn->kaddr = kaddr;
- insn->end_kaddr = kaddr + buf_len;
- insn->next_byte = kaddr;
- insn->x86_64 = x86_64 ? 1 : 0;
- insn->opnd_bytes = 4;
- if (x86_64)
- insn->addr_bytes = 8;
- else
- insn->addr_bytes = 4;
-}
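
A minimal usage sketch of the entry points in this file; the byte sequence
is an arbitrary example ("mov $0x1,%eax" assembles to b8 01 00 00 00):

	struct insn insn;
	const unsigned char buf[] = { 0xb8, 0x01, 0x00, 0x00, 0x00 };

	insn_init(&insn, buf, sizeof(buf), 1);	/* decode as 64-bit code */
	insn_get_length(&insn);			/* pulls in all earlier stages */
	/* insn.length == 5, insn.opcode.bytes[0] == 0xb8,
	 * insn.immediate.value == 1 */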
-
-/**
- * insn_get_prefixes - scan x86 instruction prefix bytes
- * @insn: &struct insn containing instruction
- *
- * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
- * to point to the (first) opcode. No effect if @insn->prefixes.got
- * is already set.
- */
-void insn_get_prefixes(struct insn *insn)
-{
- struct insn_field *prefixes = &insn->prefixes;
- insn_attr_t attr;
- insn_byte_t b, lb;
- int i, nb;
-
- if (prefixes->got)
- return;
-
- nb = 0;
- lb = 0;
- b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- while (inat_is_legacy_prefix(attr)) {
- /* Skip if same prefix */
- for (i = 0; i < nb; i++)
- if (prefixes->bytes[i] == b)
- goto found;
- if (nb == 4)
- /* Invalid instruction */
- break;
- prefixes->bytes[nb++] = b;
- if (inat_is_address_size_prefix(attr)) {
- /* address size switches 2/4 or 4/8 */
- if (insn->x86_64)
- insn->addr_bytes ^= 12;
- else
- insn->addr_bytes ^= 6;
- } else if (inat_is_operand_size_prefix(attr)) {
-			/* operand size switches 2/4 */
- insn->opnd_bytes ^= 6;
- }
-found:
- prefixes->nbytes++;
- insn->next_byte++;
- lb = b;
- b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- }
- /* Set the last prefix */
- if (lb && lb != insn->prefixes.bytes[3]) {
- if (unlikely(insn->prefixes.bytes[3])) {
- /* Swap the last prefix */
- b = insn->prefixes.bytes[3];
- for (i = 0; i < nb; i++)
- if (prefixes->bytes[i] == lb)
- prefixes->bytes[i] = b;
- }
- insn->prefixes.bytes[3] = lb;
- }
-
- /* Decode REX prefix */
- if (insn->x86_64) {
- b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- if (inat_is_rex_prefix(attr)) {
- insn->rex_prefix.value = b;
- insn->rex_prefix.nbytes = 1;
- insn->next_byte++;
- if (X86_REX_W(b))
- /* REX.W overrides opnd_size */
- insn->opnd_bytes = 8;
- }
- }
- insn->rex_prefix.got = 1;
-
- /* Decode VEX prefix */
- b = peek_next(insn_byte_t, insn);
- attr = inat_get_opcode_attribute(b);
- if (inat_is_vex_prefix(attr)) {
- insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
- if (!insn->x86_64) {
-			/*
-			 * In 32-bit mode, if the [7:6] bits (the mod bits of
-			 * ModRM) of the second byte are not 11b, this is LDS,
-			 * LES or BOUND rather than a VEX prefix.
-			 */
- if (X86_MODRM_MOD(b2) != 3)
- goto vex_end;
- }
- insn->vex_prefix.bytes[0] = b;
- insn->vex_prefix.bytes[1] = b2;
- if (inat_is_evex_prefix(attr)) {
- b2 = peek_nbyte_next(insn_byte_t, insn, 2);
- insn->vex_prefix.bytes[2] = b2;
- b2 = peek_nbyte_next(insn_byte_t, insn, 3);
- insn->vex_prefix.bytes[3] = b2;
- insn->vex_prefix.nbytes = 4;
- insn->next_byte += 4;
- if (insn->x86_64 && X86_VEX_W(b2))
- /* VEX.W overrides opnd_size */
- insn->opnd_bytes = 8;
- } else if (inat_is_vex3_prefix(attr)) {
- b2 = peek_nbyte_next(insn_byte_t, insn, 2);
- insn->vex_prefix.bytes[2] = b2;
- insn->vex_prefix.nbytes = 3;
- insn->next_byte += 3;
- if (insn->x86_64 && X86_VEX_W(b2))
- /* VEX.W overrides opnd_size */
- insn->opnd_bytes = 8;
- } else {
- /*
- * For VEX2, fake VEX3-like byte#2.
- * Makes it easier to decode vex.W, vex.vvvv,
- * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
- */
- insn->vex_prefix.bytes[2] = b2 & 0x7f;
- insn->vex_prefix.nbytes = 2;
- insn->next_byte += 2;
- }
- }
-vex_end:
- insn->vex_prefix.got = 1;
-
- prefixes->got = 1;
-
-err_out:
- return;
-}
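
The "^= 12" and "^= 6" toggles above work because each pair of legal sizes
XORs to the same constant, so one prefix byte flips between them; a quick
standalone check:

	#include <assert.h>

	int main(void)
	{
		/* 64-bit code: 0x67 toggles the address size between 8 and 4 */
		assert((8 ^ 12) == 4 && (4 ^ 12) == 8);
		/* 32-bit code: 0x67 toggles 4/2; 0x66 toggles operand size 4/2 */
		assert((4 ^ 6) == 2 && (2 ^ 6) == 4);
		return 0;
	}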
-
-/**
- * insn_get_opcode - collect opcode(s)
- * @insn: &struct insn containing instruction
- *
- * Populates @insn->opcode, updates @insn->next_byte to point past the
- * opcode byte(s), and sets @insn->attr (except for groups).
- * If necessary, first collects any preceding (prefix) bytes.
- * Sets @insn->opcode.value = opcode1. No effect if @insn->opcode.got
- * is already 1.
- */
-void insn_get_opcode(struct insn *insn)
-{
- struct insn_field *opcode = &insn->opcode;
- insn_byte_t op;
- int pfx_id;
- if (opcode->got)
- return;
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
-
- /* Get first opcode */
- op = get_next(insn_byte_t, insn);
- opcode->bytes[0] = op;
- opcode->nbytes = 1;
-
- /* Check if there is VEX prefix or not */
- if (insn_is_avx(insn)) {
- insn_byte_t m, p;
- m = insn_vex_m_bits(insn);
- p = insn_vex_p_bits(insn);
- insn->attr = inat_get_avx_attribute(op, m, p);
- if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
- (!inat_accept_vex(insn->attr) &&
- !inat_is_group(insn->attr)))
- insn->attr = 0; /* This instruction is bad */
- goto end; /* VEX has only 1 byte for opcode */
- }
-
- insn->attr = inat_get_opcode_attribute(op);
- while (inat_is_escape(insn->attr)) {
- /* Get escaped opcode */
- op = get_next(insn_byte_t, insn);
- opcode->bytes[opcode->nbytes++] = op;
- pfx_id = insn_last_prefix_id(insn);
- insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
- }
- if (inat_must_vex(insn->attr))
- insn->attr = 0; /* This instruction is bad */
-end:
- opcode->got = 1;
-
-err_out:
- return;
-}
-
-/**
- * insn_get_modrm - collect ModRM byte, if any
- * @insn: &struct insn containing instruction
- *
- * Populates @insn->modrm and updates @insn->next_byte to point past the
- * ModRM byte, if any. If necessary, first collects the preceding bytes
- * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
- */
-void insn_get_modrm(struct insn *insn)
-{
- struct insn_field *modrm = &insn->modrm;
- insn_byte_t pfx_id, mod;
- if (modrm->got)
- return;
- if (!insn->opcode.got)
- insn_get_opcode(insn);
-
- if (inat_has_modrm(insn->attr)) {
- mod = get_next(insn_byte_t, insn);
- modrm->value = mod;
- modrm->nbytes = 1;
- if (inat_is_group(insn->attr)) {
- pfx_id = insn_last_prefix_id(insn);
- insn->attr = inat_get_group_attribute(mod, pfx_id,
- insn->attr);
- if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
- insn->attr = 0; /* This is bad */
- }
- }
-
- if (insn->x86_64 && inat_is_force64(insn->attr))
- insn->opnd_bytes = 8;
- modrm->got = 1;
-
-err_out:
- return;
-}
-
-
-/**
- * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * ModRM byte. Returns 0 immediately if @insn->x86_64 is 0.
- */
-int insn_rip_relative(struct insn *insn)
-{
- struct insn_field *modrm = &insn->modrm;
-
- if (!insn->x86_64)
- return 0;
- if (!modrm->got)
- insn_get_modrm(insn);
- /*
- * For rip-relative instructions, the mod field (top 2 bits)
- * is zero and the r/m field (bottom 3 bits) is 0x5.
- */
- return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
-}
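
For instance (an illustrative encoding), "lea 0x0(%rip),%rax" assembles to
48 8d 05 00 00 00 00; its ModRM byte 0x05 has mod == 0 and r/m == 5, so the
mask check above classifies it as RIP-relative:

	/* modrm 0x05: mod == 0, r/m == 5 */
	assert((0x05 & 0xc7) == 0x5);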
-
-/**
- * insn_get_sib() - Get the SIB byte of instruction
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * ModRM byte.
- */
-void insn_get_sib(struct insn *insn)
-{
- insn_byte_t modrm;
-
- if (insn->sib.got)
- return;
- if (!insn->modrm.got)
- insn_get_modrm(insn);
- if (insn->modrm.nbytes) {
- modrm = (insn_byte_t)insn->modrm.value;
- if (insn->addr_bytes != 2 &&
- X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
- insn->sib.value = get_next(insn_byte_t, insn);
- insn->sib.nbytes = 1;
- }
- }
- insn->sib.got = 1;
-
-err_out:
- return;
-}
-
-
-/**
- * insn_get_displacement() - Get the displacement of instruction
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * SIB byte.
- * The displacement value is sign-extended.
- */
-void insn_get_displacement(struct insn *insn)
-{
- insn_byte_t mod, rm, base;
-
- if (insn->displacement.got)
- return;
- if (!insn->sib.got)
- insn_get_sib(insn);
- if (insn->modrm.nbytes) {
- /*
- * Interpreting the modrm byte:
- * mod = 00 - no displacement fields (exceptions below)
- * mod = 01 - 1-byte displacement field
- * mod = 10 - displacement field is 4 bytes, or 2 bytes if
- * address size = 2 (0x67 prefix in 32-bit mode)
- * mod = 11 - no memory operand
- *
- * If address size = 2...
- * mod = 00, r/m = 110 - displacement field is 2 bytes
- *
- * If address size != 2...
- * mod != 11, r/m = 100 - SIB byte exists
- * mod = 00, SIB base = 101 - displacement field is 4 bytes
- * mod = 00, r/m = 101 - rip-relative addressing, displacement
- * field is 4 bytes
- */
- mod = X86_MODRM_MOD(insn->modrm.value);
- rm = X86_MODRM_RM(insn->modrm.value);
- base = X86_SIB_BASE(insn->sib.value);
- if (mod == 3)
- goto out;
- if (mod == 1) {
- insn->displacement.value = get_next(signed char, insn);
- insn->displacement.nbytes = 1;
- } else if (insn->addr_bytes == 2) {
- if ((mod == 0 && rm == 6) || mod == 2) {
- insn->displacement.value =
- get_next(short, insn);
- insn->displacement.nbytes = 2;
- }
- } else {
- if ((mod == 0 && rm == 5) || mod == 2 ||
- (mod == 0 && base == 5)) {
- insn->displacement.value = get_next(int, insn);
- insn->displacement.nbytes = 4;
- }
- }
- }
-out:
- insn->displacement.got = 1;
-
-err_out:
- return;
-}
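
A worked example (illustrative bytes): "mov 0x10(%rax),%ebx" assembles to
8b 58 10; ModRM 0x58 has mod == 1, so a single displacement byte follows
and is decoded, sign-extended, as 0x10:

	unsigned char modrm = 0x58;		/* 01 011 000 in mod/reg/rm order */
	assert(X86_MODRM_MOD(modrm) == 1);	/* the disp8 case above */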
-
-/* Decode moffset16/32/64. Return 0 if failed */
-static int __get_moffset(struct insn *insn)
-{
- switch (insn->addr_bytes) {
- case 2:
- insn->moffset1.value = get_next(short, insn);
- insn->moffset1.nbytes = 2;
- break;
- case 4:
- insn->moffset1.value = get_next(int, insn);
- insn->moffset1.nbytes = 4;
- break;
- case 8:
- insn->moffset1.value = get_next(int, insn);
- insn->moffset1.nbytes = 4;
- insn->moffset2.value = get_next(int, insn);
- insn->moffset2.nbytes = 4;
- break;
- default: /* opnd_bytes must be modified manually */
- goto err_out;
- }
- insn->moffset1.got = insn->moffset2.got = 1;
-
- return 1;
-
-err_out:
- return 0;
-}
-
-/* Decode imm v32(Iz). Return 0 if failed */
-static int __get_immv32(struct insn *insn)
-{
- switch (insn->opnd_bytes) {
- case 2:
- insn->immediate.value = get_next(short, insn);
- insn->immediate.nbytes = 2;
- break;
- case 4:
- case 8:
- insn->immediate.value = get_next(int, insn);
- insn->immediate.nbytes = 4;
- break;
- default: /* opnd_bytes must be modified manually */
- goto err_out;
- }
-
- return 1;
-
-err_out:
- return 0;
-}
-
-/* Decode imm v64(Iv/Ov). Return 0 if failed */
-static int __get_immv(struct insn *insn)
-{
- switch (insn->opnd_bytes) {
- case 2:
- insn->immediate1.value = get_next(short, insn);
- insn->immediate1.nbytes = 2;
- break;
- case 4:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- break;
- case 8:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- insn->immediate2.value = get_next(int, insn);
- insn->immediate2.nbytes = 4;
- break;
- default: /* opnd_bytes must be modified manually */
- goto err_out;
- }
- insn->immediate1.got = insn->immediate2.got = 1;
-
- return 1;
-err_out:
- return 0;
-}
-
-/* Decode ptr16:16/32(Ap) */
-static int __get_immptr(struct insn *insn)
-{
- switch (insn->opnd_bytes) {
- case 2:
- insn->immediate1.value = get_next(short, insn);
- insn->immediate1.nbytes = 2;
- break;
- case 4:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- break;
- case 8:
-		/* ptr16:64 does not exist (there is no 64-bit far-pointer form) */
- return 0;
- default: /* opnd_bytes must be modified manually */
- goto err_out;
- }
- insn->immediate2.value = get_next(unsigned short, insn);
- insn->immediate2.nbytes = 2;
- insn->immediate1.got = insn->immediate2.got = 1;
-
- return 1;
-err_out:
- return 0;
-}
-
-/**
- * insn_get_immediate() - Get the immediates of instruction
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * displacement bytes.
- * Most immediates are sign-extended. The unsigned value can be
- * recovered by masking with ((1 << (nbytes * 8)) - 1).
- */
-void insn_get_immediate(struct insn *insn)
-{
- if (insn->immediate.got)
- return;
- if (!insn->displacement.got)
- insn_get_displacement(insn);
-
- if (inat_has_moffset(insn->attr)) {
- if (!__get_moffset(insn))
- goto err_out;
- goto done;
- }
-
- if (!inat_has_immediate(insn->attr))
- /* no immediates */
- goto done;
-
- switch (inat_immediate_size(insn->attr)) {
- case INAT_IMM_BYTE:
- insn->immediate.value = get_next(signed char, insn);
- insn->immediate.nbytes = 1;
- break;
- case INAT_IMM_WORD:
- insn->immediate.value = get_next(short, insn);
- insn->immediate.nbytes = 2;
- break;
- case INAT_IMM_DWORD:
- insn->immediate.value = get_next(int, insn);
- insn->immediate.nbytes = 4;
- break;
- case INAT_IMM_QWORD:
- insn->immediate1.value = get_next(int, insn);
- insn->immediate1.nbytes = 4;
- insn->immediate2.value = get_next(int, insn);
- insn->immediate2.nbytes = 4;
- break;
- case INAT_IMM_PTR:
- if (!__get_immptr(insn))
- goto err_out;
- break;
- case INAT_IMM_VWORD32:
- if (!__get_immv32(insn))
- goto err_out;
- break;
- case INAT_IMM_VWORD:
- if (!__get_immv(insn))
- goto err_out;
- break;
- default:
-		/* Here, insn must have an immediate, but decoding failed */
- goto err_out;
- }
- if (inat_has_second_immediate(insn->attr)) {
- insn->immediate2.value = get_next(signed char, insn);
- insn->immediate2.nbytes = 1;
- }
-done:
- insn->immediate.got = 1;
-
-err_out:
- return;
-}
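
Following the comment above, a small sketch of recovering the raw unsigned
byte from a sign-extended imm8 (0xf0 is an arbitrary example value):

	int val = (signed char)0xf0;			/* stored as -16 */
	unsigned int raw = val & ((1 << (1 * 8)) - 1);	/* back to 0xf0 */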
-
-/**
- * insn_get_length() - Get the length of instruction
- * @insn: &struct insn containing instruction
- *
- * If necessary, first collects the instruction up to and including the
- * immediates bytes.
- */
-void insn_get_length(struct insn *insn)
-{
- if (insn->length)
- return;
- if (!insn->immediate.got)
- insn_get_immediate(insn);
- insn->length = (unsigned char)((unsigned long)insn->next_byte
- - (unsigned long)insn->kaddr);
-}
+++ /dev/null
-#ifndef _ASM_X86_INSN_H
-#define _ASM_X86_INSN_H
-/*
- * x86 instruction analysis
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2009
- */
-
-/* insn_attr_t is defined in inat.h */
-#include "inat.h"
-
-struct insn_field {
- union {
- insn_value_t value;
- insn_byte_t bytes[4];
- };
- /* !0 if we've run insn_get_xxx() for this field */
- unsigned char got;
- unsigned char nbytes;
-};
-
-struct insn {
- struct insn_field prefixes; /*
- * Prefixes
- * prefixes.bytes[3]: last prefix
- */
- struct insn_field rex_prefix; /* REX prefix */
- struct insn_field vex_prefix; /* VEX prefix */
- struct insn_field opcode; /*
- * opcode.bytes[0]: opcode1
- * opcode.bytes[1]: opcode2
- * opcode.bytes[2]: opcode3
- */
- struct insn_field modrm;
- struct insn_field sib;
- struct insn_field displacement;
- union {
- struct insn_field immediate;
- struct insn_field moffset1; /* for 64bit MOV */
- struct insn_field immediate1; /* for 64bit imm or off16/32 */
- };
- union {
- struct insn_field moffset2; /* for 64bit MOV */
- struct insn_field immediate2; /* for 64bit imm or seg16 */
- };
-
- insn_attr_t attr;
- unsigned char opnd_bytes;
- unsigned char addr_bytes;
- unsigned char length;
- unsigned char x86_64;
-
- const insn_byte_t *kaddr; /* kernel address of insn to analyze */
-	const insn_byte_t *end_kaddr; /* kernel address just past the end of the buffer */
- const insn_byte_t *next_byte;
-};
-
-#define MAX_INSN_SIZE 15
-
-#define X86_MODRM_MOD(modrm) (((modrm) & 0xc0) >> 6)
-#define X86_MODRM_REG(modrm) (((modrm) & 0x38) >> 3)
-#define X86_MODRM_RM(modrm) ((modrm) & 0x07)
-
-#define X86_SIB_SCALE(sib) (((sib) & 0xc0) >> 6)
-#define X86_SIB_INDEX(sib) (((sib) & 0x38) >> 3)
-#define X86_SIB_BASE(sib) ((sib) & 0x07)
-
-#define X86_REX_W(rex) ((rex) & 8)
-#define X86_REX_R(rex) ((rex) & 4)
-#define X86_REX_X(rex) ((rex) & 2)
-#define X86_REX_B(rex) ((rex) & 1)
-
-/* VEX bit flags */
-#define X86_VEX_W(vex) ((vex) & 0x80) /* VEX3 Byte2 */
-#define X86_VEX_R(vex) ((vex) & 0x80) /* VEX2/3 Byte1 */
-#define X86_VEX_X(vex) ((vex) & 0x40) /* VEX3 Byte1 */
-#define X86_VEX_B(vex) ((vex) & 0x20) /* VEX3 Byte1 */
-#define X86_VEX_L(vex) ((vex) & 0x04) /* VEX3 Byte2, VEX2 Byte1 */
-/* VEX bit fields */
-#define X86_EVEX_M(vex) ((vex) & 0x03) /* EVEX Byte1 */
-#define X86_VEX3_M(vex) ((vex) & 0x1f) /* VEX3 Byte1 */
-#define X86_VEX2_M 1 /* VEX2.M always 1 */
-#define X86_VEX_V(vex) (((vex) & 0x78) >> 3) /* VEX3 Byte2, VEX2 Byte1 */
-#define X86_VEX_P(vex) ((vex) & 0x03) /* VEX3 Byte2, VEX2 Byte1 */
-#define X86_VEX_M_MAX 0x1f /* VEX3.M Maximum value */
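
A small sketch of the field-extraction macros above, applied to an
arbitrary example byte (0x9c is 10 011 100 in mod/reg/rm order):

	unsigned char modrm = 0x9c;
	int mod = X86_MODRM_MOD(modrm);	/* == 2 */
	int reg = X86_MODRM_REG(modrm);	/* == 3 */
	int rm  = X86_MODRM_RM(modrm);	/* == 4: a SIB byte follows */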
-
-extern void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64);
-extern void insn_get_prefixes(struct insn *insn);
-extern void insn_get_opcode(struct insn *insn);
-extern void insn_get_modrm(struct insn *insn);
-extern void insn_get_sib(struct insn *insn);
-extern void insn_get_displacement(struct insn *insn);
-extern void insn_get_immediate(struct insn *insn);
-extern void insn_get_length(struct insn *insn);
-
-/* Attribute will be determined after getting ModRM (for opcode groups) */
-static inline void insn_get_attribute(struct insn *insn)
-{
- insn_get_modrm(insn);
-}
-
-/* Instruction uses RIP-relative addressing */
-extern int insn_rip_relative(struct insn *insn);
-
-/* Init insn for kernel text */
-static inline void kernel_insn_init(struct insn *insn,
- const void *kaddr, int buf_len)
-{
-#ifdef CONFIG_X86_64
- insn_init(insn, kaddr, buf_len, 1);
-#else /* CONFIG_X86_32 */
- insn_init(insn, kaddr, buf_len, 0);
-#endif
-}
-
-static inline int insn_is_avx(struct insn *insn)
-{
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
- return (insn->vex_prefix.value != 0);
-}
-
-static inline int insn_is_evex(struct insn *insn)
-{
- if (!insn->prefixes.got)
- insn_get_prefixes(insn);
- return (insn->vex_prefix.nbytes == 4);
-}
-
-/* Ensure this instruction is decoded completely */
-static inline int insn_complete(struct insn *insn)
-{
- return insn->opcode.got && insn->modrm.got && insn->sib.got &&
- insn->displacement.got && insn->immediate.got;
-}
-
-static inline insn_byte_t insn_vex_m_bits(struct insn *insn)
-{
-	if (insn->vex_prefix.nbytes == 2)	/* 2-byte VEX */
-		return X86_VEX2_M;
-	else if (insn->vex_prefix.nbytes == 3)	/* 3-byte VEX */
-		return X86_VEX3_M(insn->vex_prefix.bytes[1]);
-	else					/* EVEX */
-		return X86_EVEX_M(insn->vex_prefix.bytes[1]);
-}
-
-static inline insn_byte_t insn_vex_p_bits(struct insn *insn)
-{
-	if (insn->vex_prefix.nbytes == 2)	/* 2-byte VEX */
- return X86_VEX_P(insn->vex_prefix.bytes[1]);
- else
- return X86_VEX_P(insn->vex_prefix.bytes[2]);
-}
-
-/* Get the last prefix id from last prefix or VEX prefix */
-static inline int insn_last_prefix_id(struct insn *insn)
-{
- if (insn_is_avx(insn))
- return insn_vex_p_bits(insn); /* VEX_p is a SIMD prefix id */
-
- if (insn->prefixes.bytes[3])
- return inat_get_last_prefix_id(insn->prefixes.bytes[3]);
-
- return 0;
-}
-
-/* Offset of each field from kaddr */
-static inline int insn_offset_rex_prefix(struct insn *insn)
-{
- return insn->prefixes.nbytes;
-}
-static inline int insn_offset_vex_prefix(struct insn *insn)
-{
- return insn_offset_rex_prefix(insn) + insn->rex_prefix.nbytes;
-}
-static inline int insn_offset_opcode(struct insn *insn)
-{
- return insn_offset_vex_prefix(insn) + insn->vex_prefix.nbytes;
-}
-static inline int insn_offset_modrm(struct insn *insn)
-{
- return insn_offset_opcode(insn) + insn->opcode.nbytes;
-}
-static inline int insn_offset_sib(struct insn *insn)
-{
- return insn_offset_modrm(insn) + insn->modrm.nbytes;
-}
-static inline int insn_offset_displacement(struct insn *insn)
-{
- return insn_offset_sib(insn) + insn->sib.nbytes;
-}
-static inline int insn_offset_immediate(struct insn *insn)
-{
- return insn_offset_displacement(insn) + insn->displacement.nbytes;
-}
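
The offset helpers above simply accumulate field sizes; a summary of the
layout they traverse (any field may be empty):

	/* kaddr: [legacy pfx][REX][VEX][opcode][ModRM][SIB][disp][imm];
	 * for a single immediate, insn_offset_immediate() plus
	 * insn->immediate.nbytes equals insn->length. */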
-
-#endif /* _ASM_X86_INSN_H */
+++ /dev/null
-# x86 Opcode Maps
-#
-# This is (mostly) based on following documentations.
-# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
-# (#326018-047US, June 2013)
-#
-#<Opcode maps>
-# Table: table-name
-# Referrer: escaped-name
-# AVXcode: avx-code
-# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
-# (or)
-# opcode: escape # escaped-name
-# EndTable
-#
-# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
-# mnemonics that begin with lowercase 'k' accept a VEX prefix
-#
-#<group maps>
-# GrpTable: GrpXXX
-# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...] [| 2nd-mnemonic ...]
-# EndTable
-#
-# AVX Superscripts
-# (ev): this opcode requires EVEX prefix.
-# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
-# (v): this opcode requires VEX prefix.
-# (v1): this opcode only supports 128bit VEX.
-#
-# Last Prefix Superscripts
-# - (66): the last prefix is 0x66
-# - (F3): the last prefix is 0xF3
-# - (F2): the last prefix is 0xF2
-# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
-# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
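
As an illustration of the format just described, two representative entries
from the tables below:

	0f: escape # 2-byte escape    - opcode 0x0f escapes to the 2-byte table
	80: Grp1 Eb,Ib (1A)           - decoded via group Grp1 (ModRM.reg picks
	                                the mnemonic), byte operand plus imm8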
-
-Table: one byte opcode
-Referrer:
-AVXcode:
-# 0x00 - 0x0f
-00: ADD Eb,Gb
-01: ADD Ev,Gv
-02: ADD Gb,Eb
-03: ADD Gv,Ev
-04: ADD AL,Ib
-05: ADD rAX,Iz
-06: PUSH ES (i64)
-07: POP ES (i64)
-08: OR Eb,Gb
-09: OR Ev,Gv
-0a: OR Gb,Eb
-0b: OR Gv,Ev
-0c: OR AL,Ib
-0d: OR rAX,Iz
-0e: PUSH CS (i64)
-0f: escape # 2-byte escape
-# 0x10 - 0x1f
-10: ADC Eb,Gb
-11: ADC Ev,Gv
-12: ADC Gb,Eb
-13: ADC Gv,Ev
-14: ADC AL,Ib
-15: ADC rAX,Iz
-16: PUSH SS (i64)
-17: POP SS (i64)
-18: SBB Eb,Gb
-19: SBB Ev,Gv
-1a: SBB Gb,Eb
-1b: SBB Gv,Ev
-1c: SBB AL,Ib
-1d: SBB rAX,Iz
-1e: PUSH DS (i64)
-1f: POP DS (i64)
-# 0x20 - 0x2f
-20: AND Eb,Gb
-21: AND Ev,Gv
-22: AND Gb,Eb
-23: AND Gv,Ev
-24: AND AL,Ib
-25: AND rAx,Iz
-26: SEG=ES (Prefix)
-27: DAA (i64)
-28: SUB Eb,Gb
-29: SUB Ev,Gv
-2a: SUB Gb,Eb
-2b: SUB Gv,Ev
-2c: SUB AL,Ib
-2d: SUB rAX,Iz
-2e: SEG=CS (Prefix)
-2f: DAS (i64)
-# 0x30 - 0x3f
-30: XOR Eb,Gb
-31: XOR Ev,Gv
-32: XOR Gb,Eb
-33: XOR Gv,Ev
-34: XOR AL,Ib
-35: XOR rAX,Iz
-36: SEG=SS (Prefix)
-37: AAA (i64)
-38: CMP Eb,Gb
-39: CMP Ev,Gv
-3a: CMP Gb,Eb
-3b: CMP Gv,Ev
-3c: CMP AL,Ib
-3d: CMP rAX,Iz
-3e: SEG=DS (Prefix)
-3f: AAS (i64)
-# 0x40 - 0x4f
-40: INC eAX (i64) | REX (o64)
-41: INC eCX (i64) | REX.B (o64)
-42: INC eDX (i64) | REX.X (o64)
-43: INC eBX (i64) | REX.XB (o64)
-44: INC eSP (i64) | REX.R (o64)
-45: INC eBP (i64) | REX.RB (o64)
-46: INC eSI (i64) | REX.RX (o64)
-47: INC eDI (i64) | REX.RXB (o64)
-48: DEC eAX (i64) | REX.W (o64)
-49: DEC eCX (i64) | REX.WB (o64)
-4a: DEC eDX (i64) | REX.WX (o64)
-4b: DEC eBX (i64) | REX.WXB (o64)
-4c: DEC eSP (i64) | REX.WR (o64)
-4d: DEC eBP (i64) | REX.WRB (o64)
-4e: DEC eSI (i64) | REX.WRX (o64)
-4f: DEC eDI (i64) | REX.WRXB (o64)
-# 0x50 - 0x5f
-50: PUSH rAX/r8 (d64)
-51: PUSH rCX/r9 (d64)
-52: PUSH rDX/r10 (d64)
-53: PUSH rBX/r11 (d64)
-54: PUSH rSP/r12 (d64)
-55: PUSH rBP/r13 (d64)
-56: PUSH rSI/r14 (d64)
-57: PUSH rDI/r15 (d64)
-58: POP rAX/r8 (d64)
-59: POP rCX/r9 (d64)
-5a: POP rDX/r10 (d64)
-5b: POP rBX/r11 (d64)
-5c: POP rSP/r12 (d64)
-5d: POP rBP/r13 (d64)
-5e: POP rSI/r14 (d64)
-5f: POP rDI/r15 (d64)
-# 0x60 - 0x6f
-60: PUSHA/PUSHAD (i64)
-61: POPA/POPAD (i64)
-62: BOUND Gv,Ma (i64) | EVEX (Prefix)
-63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
-64: SEG=FS (Prefix)
-65: SEG=GS (Prefix)
-66: Operand-Size (Prefix)
-67: Address-Size (Prefix)
-68: PUSH Iz (d64)
-69: IMUL Gv,Ev,Iz
-6a: PUSH Ib (d64)
-6b: IMUL Gv,Ev,Ib
-6c: INS/INSB Yb,DX
-6d: INS/INSW/INSD Yz,DX
-6e: OUTS/OUTSB DX,Xb
-6f: OUTS/OUTSW/OUTSD DX,Xz
-# 0x70 - 0x7f
-70: JO Jb
-71: JNO Jb
-72: JB/JNAE/JC Jb
-73: JNB/JAE/JNC Jb
-74: JZ/JE Jb
-75: JNZ/JNE Jb
-76: JBE/JNA Jb
-77: JNBE/JA Jb
-78: JS Jb
-79: JNS Jb
-7a: JP/JPE Jb
-7b: JNP/JPO Jb
-7c: JL/JNGE Jb
-7d: JNL/JGE Jb
-7e: JLE/JNG Jb
-7f: JNLE/JG Jb
-# 0x80 - 0x8f
-80: Grp1 Eb,Ib (1A)
-81: Grp1 Ev,Iz (1A)
-82: Grp1 Eb,Ib (1A),(i64)
-83: Grp1 Ev,Ib (1A)
-84: TEST Eb,Gb
-85: TEST Ev,Gv
-86: XCHG Eb,Gb
-87: XCHG Ev,Gv
-88: MOV Eb,Gb
-89: MOV Ev,Gv
-8a: MOV Gb,Eb
-8b: MOV Gv,Ev
-8c: MOV Ev,Sw
-8d: LEA Gv,M
-8e: MOV Sw,Ew
-8f: Grp1A (1A) | POP Ev (d64)
-# 0x90 - 0x9f
-90: NOP | PAUSE (F3) | XCHG r8,rAX
-91: XCHG rCX/r9,rAX
-92: XCHG rDX/r10,rAX
-93: XCHG rBX/r11,rAX
-94: XCHG rSP/r12,rAX
-95: XCHG rBP/r13,rAX
-96: XCHG rSI/r14,rAX
-97: XCHG rDI/r15,rAX
-98: CBW/CWDE/CDQE
-99: CWD/CDQ/CQO
-9a: CALLF Ap (i64)
-9b: FWAIT/WAIT
-9c: PUSHF/D/Q Fv (d64)
-9d: POPF/D/Q Fv (d64)
-9e: SAHF
-9f: LAHF
-# 0xa0 - 0xaf
-a0: MOV AL,Ob
-a1: MOV rAX,Ov
-a2: MOV Ob,AL
-a3: MOV Ov,rAX
-a4: MOVS/B Yb,Xb
-a5: MOVS/W/D/Q Yv,Xv
-a6: CMPS/B Xb,Yb
-a7: CMPS/W/D Xv,Yv
-a8: TEST AL,Ib
-a9: TEST rAX,Iz
-aa: STOS/B Yb,AL
-ab: STOS/W/D/Q Yv,rAX
-ac: LODS/B AL,Xb
-ad: LODS/W/D/Q rAX,Xv
-ae: SCAS/B AL,Yb
-# Note: The May 2011 Intel manual shows Xv for the second parameter of the
-# next instruction but Yv is correct
-af: SCAS/W/D/Q rAX,Yv
-# 0xb0 - 0xbf
-b0: MOV AL/R8L,Ib
-b1: MOV CL/R9L,Ib
-b2: MOV DL/R10L,Ib
-b3: MOV BL/R11L,Ib
-b4: MOV AH/R12L,Ib
-b5: MOV CH/R13L,Ib
-b6: MOV DH/R14L,Ib
-b7: MOV BH/R15L,Ib
-b8: MOV rAX/r8,Iv
-b9: MOV rCX/r9,Iv
-ba: MOV rDX/r10,Iv
-bb: MOV rBX/r11,Iv
-bc: MOV rSP/r12,Iv
-bd: MOV rBP/r13,Iv
-be: MOV rSI/r14,Iv
-bf: MOV rDI/r15,Iv
-# 0xc0 - 0xcf
-c0: Grp2 Eb,Ib (1A)
-c1: Grp2 Ev,Ib (1A)
-c2: RETN Iw (f64)
-c3: RETN
-c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
-c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
-c6: Grp11A Eb,Ib (1A)
-c7: Grp11B Ev,Iz (1A)
-c8: ENTER Iw,Ib
-c9: LEAVE (d64)
-ca: RETF Iw
-cb: RETF
-cc: INT3
-cd: INT Ib
-ce: INTO (i64)
-cf: IRET/D/Q
-# 0xd0 - 0xdf
-d0: Grp2 Eb,1 (1A)
-d1: Grp2 Ev,1 (1A)
-d2: Grp2 Eb,CL (1A)
-d3: Grp2 Ev,CL (1A)
-d4: AAM Ib (i64)
-d5: AAD Ib (i64)
-d6:
-d7: XLAT/XLATB
-d8: ESC
-d9: ESC
-da: ESC
-db: ESC
-dc: ESC
-dd: ESC
-de: ESC
-df: ESC
-# 0xe0 - 0xef
-# Note: "forced64" is Intel CPU behavior: they ignore 0x66 prefix
-# in 64-bit mode. AMD CPUs accept 0x66 prefix, it causes RIP truncation
-# to 16 bits. In 32-bit mode, 0x66 is accepted by both Intel and AMD.
-e0: LOOPNE/LOOPNZ Jb (f64)
-e1: LOOPE/LOOPZ Jb (f64)
-e2: LOOP Jb (f64)
-e3: JrCXZ Jb (f64)
-e4: IN AL,Ib
-e5: IN eAX,Ib
-e6: OUT Ib,AL
-e7: OUT Ib,eAX
-# With a 0x66 prefix in 64-bit mode, on AMD CPUs the immediate offset in
-# "near" jumps and calls is 16-bit. For CALL, the pushed return address is
-# 16 bits wide and RSP is decremented by 2, but RSP, unlike RIP, is not
-# truncated to 16 bits.
-e8: CALL Jz (f64)
-e9: JMP-near Jz (f64)
-ea: JMP-far Ap (i64)
-eb: JMP-short Jb (f64)
-ec: IN AL,DX
-ed: IN eAX,DX
-ee: OUT DX,AL
-ef: OUT DX,eAX
-# 0xf0 - 0xff
-f0: LOCK (Prefix)
-f1:
-f2: REPNE (Prefix) | XACQUIRE (Prefix)
-f3: REP/REPE (Prefix) | XRELEASE (Prefix)
-f4: HLT
-f5: CMC
-f6: Grp3_1 Eb (1A)
-f7: Grp3_2 Ev (1A)
-f8: CLC
-f9: STC
-fa: CLI
-fb: STI
-fc: CLD
-fd: STD
-fe: Grp4 (1A)
-ff: Grp5 (1A)
-EndTable
-
-Table: 2-byte opcode (0x0f)
-Referrer: 2-byte escape
-AVXcode: 1
-# 0x0f 0x00-0x0f
-00: Grp6 (1A)
-01: Grp7 (1A)
-02: LAR Gv,Ew
-03: LSL Gv,Ew
-04:
-05: SYSCALL (o64)
-06: CLTS
-07: SYSRET (o64)
-08: INVD
-09: WBINVD
-0a:
-0b: UD2 (1B)
-0c:
-# AMD's prefetch group. Intel supports prefetchw(/1) only.
-0d: GrpP
-0e: FEMMS
-# 3DNow! uses the last imm byte as an opcode extension.
-0f: 3DNow! Pq,Qq,Ib
-# 0x0f 0x10-0x1f
-# NOTE: According to the Intel SDM opcode map, vmovups and vmovupd have no
-# operands, but they actually do. Also, vmovss and vmovsd only accept 128-bit
-# operands. MOVSS/MOVSD has three forms in the SDM; this map shows just a
-# typical one. Many AVX instructions lack the v1 superscript, according to
-# the Intel AVX Programming Reference A.1
-10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
-11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
-12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
-13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
-14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
-15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
-16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
-17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
-18: Grp16 (1A)
-19:
-# The Intel SDM opcode map does not list MPX instructions. For now, using Gv
-# for bnd registers and Ev for everything else is OK because the instruction
-# decoder does not use this information except as an indication that there
-# is a ModR/M byte.
-1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
-1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
-1c:
-1d:
-1e:
-1f: NOP Ev
-# 0x0f 0x20-0x2f
-20: MOV Rd,Cd
-21: MOV Rd,Dd
-22: MOV Cd,Rd
-23: MOV Dd,Rd
-24:
-25:
-26:
-27:
-28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
-29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
-2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
-2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
-2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
-2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
-2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
-2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
-# 0x0f 0x30-0x3f
-30: WRMSR
-31: RDTSC
-32: RDMSR
-33: RDPMC
-34: SYSENTER
-35: SYSEXIT
-36:
-37: GETSEC
-38: escape # 3-byte escape 1
-39:
-3a: escape # 3-byte escape 2
-3b:
-3c:
-3d:
-3e:
-3f:
-# 0x0f 0x40-0x4f
-40: CMOVO Gv,Ev
-41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
-42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
-43: CMOVAE/NB/NC Gv,Ev
-44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
-45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
-46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
-47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
-48: CMOVS Gv,Ev
-49: CMOVNS Gv,Ev
-4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
-4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
-4c: CMOVL/NGE Gv,Ev
-4d: CMOVNL/GE Gv,Ev
-4e: CMOVLE/NG Gv,Ev
-4f: CMOVNLE/G Gv,Ev
-# 0x0f 0x50-0x5f
-50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
-51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
-52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
-53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
-54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
-55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
-56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
-57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
-58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
-59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
-5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
-5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
-5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
-5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
-5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
-5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
-# 0x0f 0x60-0x6f
-60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
-61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
-62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
-63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
-64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
-65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
-66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
-67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
-68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
-69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
-6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
-6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
-6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
-6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
-6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
-6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
-# 0x0f 0x70-0x7f
-70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
-71: Grp12 (1A)
-72: Grp13 (1A)
-73: Grp14 (1A)
-74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
-75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
-76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
-# Note: Remove (v), because vzeroall and vzeroupper become emms without VEX.
-77: emms | vzeroupper | vzeroall
-78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
-79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
-7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
-7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
-7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
-7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
-7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
-7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
-# 0x0f 0x80-0x8f
-# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-80: JO Jz (f64)
-81: JNO Jz (f64)
-82: JB/JC/JNAE Jz (f64)
-83: JAE/JNB/JNC Jz (f64)
-84: JE/JZ Jz (f64)
-85: JNE/JNZ Jz (f64)
-86: JBE/JNA Jz (f64)
-87: JA/JNBE Jz (f64)
-88: JS Jz (f64)
-89: JNS Jz (f64)
-8a: JP/JPE Jz (f64)
-8b: JNP/JPO Jz (f64)
-8c: JL/JNGE Jz (f64)
-8d: JNL/JGE Jz (f64)
-8e: JLE/JNG Jz (f64)
-8f: JNLE/JG Jz (f64)
-# 0x0f 0x90-0x9f
-90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
-91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
-92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
-93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
-94: SETE/Z Eb
-95: SETNE/NZ Eb
-96: SETBE/NA Eb
-97: SETA/NBE Eb
-98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
-99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
-9a: SETP/PE Eb
-9b: SETNP/PO Eb
-9c: SETL/NGE Eb
-9d: SETNL/GE Eb
-9e: SETLE/NG Eb
-9f: SETNLE/G Eb
-# 0x0f 0xa0-0xaf
-a0: PUSH FS (d64)
-a1: POP FS (d64)
-a2: CPUID
-a3: BT Ev,Gv
-a4: SHLD Ev,Gv,Ib
-a5: SHLD Ev,Gv,CL
-a6: GrpPDLK
-a7: GrpRNG
-a8: PUSH GS (d64)
-a9: POP GS (d64)
-aa: RSM
-ab: BTS Ev,Gv
-ac: SHRD Ev,Gv,Ib
-ad: SHRD Ev,Gv,CL
-ae: Grp15 (1A),(1C)
-af: IMUL Gv,Ev
-# 0x0f 0xb0-0xbf
-b0: CMPXCHG Eb,Gb
-b1: CMPXCHG Ev,Gv
-b2: LSS Gv,Mp
-b3: BTR Ev,Gv
-b4: LFS Gv,Mp
-b5: LGS Gv,Mp
-b6: MOVZX Gv,Eb
-b7: MOVZX Gv,Ew
-b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
-b9: Grp10 (1A)
-ba: Grp8 Ev,Ib (1A)
-bb: BTC Ev,Gv
-bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
-bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
-be: MOVSX Gv,Eb
-bf: MOVSX Gv,Ew
-# 0x0f 0xc0-0xcf
-c0: XADD Eb,Gb
-c1: XADD Ev,Gv
-c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
-c3: movnti My,Gy
-c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
-c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
-c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
-c7: Grp9 (1A)
-c8: BSWAP RAX/EAX/R8/R8D
-c9: BSWAP RCX/ECX/R9/R9D
-ca: BSWAP RDX/EDX/R10/R10D
-cb: BSWAP RBX/EBX/R11/R11D
-cc: BSWAP RSP/ESP/R12/R12D
-cd: BSWAP RBP/EBP/R13/R13D
-ce: BSWAP RSI/ESI/R14/R14D
-cf: BSWAP RDI/EDI/R15/R15D
-# 0x0f 0xd0-0xdf
-d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
-d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
-d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
-d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
-d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
-d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
-d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
-d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
-d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
-d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
-da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
-db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
-dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
-dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
-de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
-df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0xe0-0xef
-e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
-e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
-e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
-e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
-e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
-e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
-e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
-e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
-e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
-e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
-ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
-eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
-ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
-ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
-ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
-ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0xf0-0xff
-f0: vlddqu Vx,Mx (F2)
-f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
-f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
-f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
-f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
-f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
-f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
-f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
-f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
-f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
-fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
-fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
-fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
-fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
-fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
-ff:
-EndTable
-
-Table: 3-byte opcode 1 (0x0f 0x38)
-Referrer: 3-byte escape 1
-AVXcode: 2
-# 0x0f 0x38 0x00-0x0f
-00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
-01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
-02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
-03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
-04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
-05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
-06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
-07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
-08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
-09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
-0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
-0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
-0c: vpermilps Vx,Hx,Wx (66),(v)
-0d: vpermilpd Vx,Hx,Wx (66),(v)
-0e: vtestps Vx,Wx (66),(v)
-0f: vtestpd Vx,Wx (66),(v)
-# 0x0f 0x38 0x10-0x1f
-10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
-11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
-12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
-13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
-14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
-15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
-16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
-17: vptest Vx,Wx (66)
-18: vbroadcastss Vx,Wd (66),(v)
-19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
-1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
-1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
-1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
-1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
-1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
-1f: vpabsq Vx,Wx (66),(ev)
-# 0x0f 0x38 0x20-0x2f
-20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
-21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
-22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
-23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
-24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
-25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
-26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
-27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
-28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
-29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
-2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
-2b: vpackusdw Vx,Hx,Wx (66),(v1)
-2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
-2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
-2e: vmaskmovps Mx,Hx,Vx (66),(v)
-2f: vmaskmovpd Mx,Hx,Vx (66),(v)
-# 0x0f 0x38 0x30-0x3f
-30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
-31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
-32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
-33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
-34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
-35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
-36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
-37: vpcmpgtq Vx,Hx,Wx (66),(v1)
-38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
-39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
-3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
-3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
-3c: vpmaxsb Vx,Hx,Wx (66),(v1)
-3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
-3e: vpmaxuw Vx,Hx,Wx (66),(v1)
-3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
-# 0x0f 0x38 0x40-0x8f
-40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
-41: vphminposuw Vdq,Wdq (66),(v1)
-42: vgetexpps/d Vx,Wx (66),(ev)
-43: vgetexpss/d Vx,Hx,Wx (66),(ev)
-44: vplzcntd/q Vx,Wx (66),(ev)
-45: vpsrlvd/q Vx,Hx,Wx (66),(v)
-46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
-47: vpsllvd/q Vx,Hx,Wx (66),(v)
-# Skip 0x48-0x4b
-4c: vrcp14ps/d Vpd,Wpd (66),(ev)
-4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
-4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
-4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
-# Skip 0x50-0x57
-58: vpbroadcastd Vx,Wx (66),(v)
-59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
-5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
-5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
-# Skip 0x5c-0x63
-64: vpblendmd/q Vx,Hx,Wx (66),(ev)
-65: vblendmps/d Vx,Hx,Wx (66),(ev)
-66: vpblendmb/w Vx,Hx,Wx (66),(ev)
-# Skip 0x67-0x74
-75: vpermi2b/w Vx,Hx,Wx (66),(ev)
-76: vpermi2d/q Vx,Hx,Wx (66),(ev)
-77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
-78: vpbroadcastb Vx,Wx (66),(v)
-79: vpbroadcastw Vx,Wx (66),(v)
-7a: vpbroadcastb Vx,Rv (66),(ev)
-7b: vpbroadcastw Vx,Rv (66),(ev)
-7c: vpbroadcastd/q Vx,Rv (66),(ev)
-7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
-7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
-7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
-80: INVEPT Gy,Mdq (66)
-81: INVVPID Gy,Mdq (66)
-82: INVPCID Gy,Mdq (66)
-83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
-88: vexpandps/d Vpd,Wpd (66),(ev)
-89: vpexpandd/q Vx,Wx (66),(ev)
-8a: vcompressps/d Wx,Vx (66),(ev)
-8b: vpcompressd/q Wx,Vx (66),(ev)
-8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
-8d: vpermb/w Vx,Hx,Wx (66),(ev)
-8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
-# 0x0f 0x38 0x90-0xbf (FMA)
-90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
-91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
-92: vgatherdps/d Vx,Hx,Wx (66),(v)
-93: vgatherqps/d Vx,Hx,Wx (66),(v)
-94:
-95:
-96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
-97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
-98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
-99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
-9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
-9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
-9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
-9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
-9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
-9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
-a0: vpscatterdd/q Wx,Vx (66),(ev)
-a1: vpscatterqd/q Wx,Vx (66),(ev)
-a2: vscatterdps/d Wx,Vx (66),(ev)
-a3: vscatterqps/d Wx,Vx (66),(ev)
-a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
-a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
-a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
-a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
-aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
-ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
-ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
-ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
-ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
-af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
-b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
-b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
-b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
-b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
-b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
-b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
-ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
-bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
-bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
-bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
-be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
-bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
-# 0x0f 0x38 0xc0-0xff
-c4: vpconflictd/q Vx,Wx (66),(ev)
-c6: Grp18 (1A)
-c7: Grp19 (1A)
-c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
-c9: sha1msg1 Vdq,Wdq
-ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
-cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
-cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
-cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
-db: VAESIMC Vdq,Wdq (66),(v1)
-dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
-dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
-de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
-df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
-f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
-f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
-f2: ANDN Gy,By,Ey (v)
-f3: Grp17 (1A)
-f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
-f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
-f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
-EndTable
-
-Table: 3-byte opcode 2 (0x0f 0x3a)
-Referrer: 3-byte escape 2
-AVXcode: 3
-# 0x0f 0x3a 0x00-0xff
-00: vpermq Vqq,Wqq,Ib (66),(v)
-01: vpermpd Vqq,Wqq,Ib (66),(v)
-02: vpblendd Vx,Hx,Wx,Ib (66),(v)
-03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
-04: vpermilps Vx,Wx,Ib (66),(v)
-05: vpermilpd Vx,Wx,Ib (66),(v)
-06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
-07:
-08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
-09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
-0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
-0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
-0c: vblendps Vx,Hx,Wx,Ib (66)
-0d: vblendpd Vx,Hx,Wx,Ib (66)
-0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
-0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
-14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
-15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
-16: vpextrd/q Ey,Vdq,Ib (66),(v1)
-17: vextractps Ed,Vdq,Ib (66),(v1)
-18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
-19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
-1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
-1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
-1d: vcvtps2ph Wx,Vx,Ib (66),(v)
-1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
-1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
-20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
-21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
-22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
-23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
-25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
-26: vgetmantps/d Vx,Wx,Ib (66),(ev)
-27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
-30: kshiftrb/w Vk,Uk,Ib (66),(v)
-31: kshiftrd/q Vk,Uk,Ib (66),(v)
-32: kshiftlb/w Vk,Uk,Ib (66),(v)
-33: kshiftld/q Vk,Uk,Ib (66),(v)
-38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
-39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
-3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
-3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
-3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
-3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
-40: vdpps Vx,Hx,Wx,Ib (66)
-41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
-42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
-43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
-44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
-46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
-4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
-4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
-4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
-50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
-51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
-54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
-55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
-56: vreduceps/d Vx,Wx,Ib (66),(ev)
-57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
-60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
-61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
-62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
-63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
-66: vfpclassps/d Vk,Wx,Ib (66),(ev)
-67: vfpclassss/d Vk,Wx,Ib (66),(ev)
-cc: sha1rnds4 Vdq,Wdq,Ib
-df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
-f0: RORX Gy,Ey,Ib (F2),(v)
-EndTable
-
-GrpTable: Grp1
-0: ADD
-1: OR
-2: ADC
-3: SBB
-4: AND
-5: SUB
-6: XOR
-7: CMP
-EndTable
-
-GrpTable: Grp1A
-0: POP
-EndTable
-
-GrpTable: Grp2
-0: ROL
-1: ROR
-2: RCL
-3: RCR
-4: SHL/SAL
-5: SHR
-6:
-7: SAR
-EndTable
-
-GrpTable: Grp3_1
-0: TEST Eb,Ib
-1:
-2: NOT Eb
-3: NEG Eb
-4: MUL AL,Eb
-5: IMUL AL,Eb
-6: DIV AL,Eb
-7: IDIV AL,Eb
-EndTable
-
-GrpTable: Grp3_2
-0: TEST Ev,Iz
-1:
-2: NOT Ev
-3: NEG Ev
-4: MUL rAX,Ev
-5: IMUL rAX,Ev
-6: DIV rAX,Ev
-7: IDIV rAX,Ev
-EndTable
-
-GrpTable: Grp4
-0: INC Eb
-1: DEC Eb
-EndTable
-
-GrpTable: Grp5
-0: INC Ev
-1: DEC Ev
-# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
-2: CALLN Ev (f64)
-3: CALLF Ep
-4: JMPN Ev (f64)
-5: JMPF Mp
-6: PUSH Ev (d64)
-7:
-EndTable
-
-GrpTable: Grp6
-0: SLDT Rv/Mw
-1: STR Rv/Mw
-2: LLDT Ew
-3: LTR Ew
-4: VERR Ew
-5: VERW Ew
-EndTable
-
-GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
-2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
-3: LIDT Ms
-4: SMSW Mw/Rv
-5: rdpkru (110),(11B) | wrpkru (111),(11B)
-6: LMSW Ew
-7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
-EndTable
-
-GrpTable: Grp8
-4: BT
-5: BTS
-6: BTR
-7: BTC
-EndTable
-
-GrpTable: Grp9
-1: CMPXCHG8B/16B Mq/Mdq
-3: xrstors
-4: xsavec
-5: xsaves
-6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
-7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
-EndTable
-
-GrpTable: Grp10
-EndTable
-
-# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
-GrpTable: Grp11A
-0: MOV Eb,Ib
-7: XABORT Ib (000),(11B)
-EndTable
-
-GrpTable: Grp11B
-0: MOV Eb,Iz
-7: XBEGIN Jz (000),(11B)
-EndTable
-
-GrpTable: Grp12
-2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
-4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
-6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp13
-0: vprord/q Hx,Wx,Ib (66),(ev)
-1: vprold/q Hx,Wx,Ib (66),(ev)
-2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
-4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
-6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp14
-2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
-3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
-6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
-7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
-EndTable
-
-GrpTable: Grp15
-0: fxsave | RDFSBASE Ry (F3),(11B)
-1: fxstor | RDGSBASE Ry (F3),(11B)
-2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
-3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
-4: XSAVE | ptwrite Ey (F3),(11B)
-5: XRSTOR | lfence (11B)
-6: XSAVEOPT | clwb (66) | mfence (11B)
-7: clflush | clflushopt (66) | sfence (11B)
-EndTable
-
-GrpTable: Grp16
-0: prefetch NTA
-1: prefetch T0
-2: prefetch T1
-3: prefetch T2
-EndTable
-
-GrpTable: Grp17
-1: BLSR By,Ey (v)
-2: BLSMSK By,Ey (v)
-3: BLSI By,Ey (v)
-EndTable
-
-GrpTable: Grp18
-1: vgatherpf0dps/d Wx (66),(ev)
-2: vgatherpf1dps/d Wx (66),(ev)
-5: vscatterpf0dps/d Wx (66),(ev)
-6: vscatterpf1dps/d Wx (66),(ev)
-EndTable
-
-GrpTable: Grp19
-1: vgatherpf0qps/d Wx (66),(ev)
-2: vgatherpf1qps/d Wx (66),(ev)
-5: vscatterpf0qps/d Wx (66),(ev)
-6: vscatterpf1qps/d Wx (66),(ev)
-EndTable
-
-# AMD's Prefetch Group
-GrpTable: GrpP
-0: PREFETCH
-1: PREFETCHW
-EndTable
-
-GrpTable: GrpPDLK
-0: MONTMUL
-1: XSHA1
-2: XSHA2
-EndTable
-
-GrpTable: GrpRNG
-0: xstore-rng
-1: xcrypt-ecb
-2: xcrypt-cbc
-4: xcrypt-cfb
-5: xcrypt-ofb
-EndTable
--- /dev/null
+/*
+ * x86 instruction attribute tables
+ *
+ * Written by Masami Hiramatsu <mhiramat@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ */
+#include <asm/insn.h>
+
+/* Attribute tables are generated from opcode map */
+#include "inat-tables.c"
+
+/* Attribute search APIs */
+insn_attr_t inat_get_opcode_attribute(insn_byte_t opcode)
+{
+ return inat_primary_table[opcode];
+}
+
+int inat_get_last_prefix_id(insn_byte_t last_pfx)
+{
+ insn_attr_t lpfx_attr;
+
+ lpfx_attr = inat_get_opcode_attribute(last_pfx);
+ return inat_last_prefix_id(lpfx_attr);
+}
+
+insn_attr_t inat_get_escape_attribute(insn_byte_t opcode, int lpfx_id,
+ insn_attr_t esc_attr)
+{
+ const insn_attr_t *table;
+ int n;
+
+ n = inat_escape_id(esc_attr);
+
+ table = inat_escape_tables[n][0];
+ if (!table)
+ return 0;
+ if (inat_has_variant(table[opcode]) && lpfx_id) {
+ table = inat_escape_tables[n][lpfx_id];
+ if (!table)
+ return 0;
+ }
+ return table[opcode];
+}
+
+insn_attr_t inat_get_group_attribute(insn_byte_t modrm, int lpfx_id,
+ insn_attr_t grp_attr)
+{
+ const insn_attr_t *table;
+ int n;
+
+ n = inat_group_id(grp_attr);
+
+ table = inat_group_tables[n][0];
+ if (!table)
+ return inat_group_common_attribute(grp_attr);
+ if (inat_has_variant(table[X86_MODRM_REG(modrm)]) && lpfx_id) {
+ table = inat_group_tables[n][lpfx_id];
+ if (!table)
+ return inat_group_common_attribute(grp_attr);
+ }
+ return table[X86_MODRM_REG(modrm)] |
+ inat_group_common_attribute(grp_attr);
+}
+
+insn_attr_t inat_get_avx_attribute(insn_byte_t opcode, insn_byte_t vex_m,
+ insn_byte_t vex_p)
+{
+ const insn_attr_t *table;
+ if (vex_m > X86_VEX_M_MAX || vex_p > INAT_LSTPFX_MAX)
+ return 0;
+	/* First, check the master table */
+ table = inat_avx_tables[vex_m][0];
+ if (!table)
+ return 0;
+ if (!inat_is_group(table[opcode]) && vex_p) {
+ /* If this is not a group, get attribute directly */
+ table = inat_avx_tables[vex_m][vex_p];
+ if (!table)
+ return 0;
+ }
+ return table[opcode];
+}
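+
+/*
+ * Example lookup chain (a sketch, not an API contract): for the two-byte
+ * instruction 0x0f 0x58, the decoder first calls
+ * inat_get_opcode_attribute(0x0f), whose attribute marks an escape, and
+ * then inat_get_escape_attribute(0x58, lpfx_id, esc_attr), where lpfx_id
+ * selects a last-prefix (0x66/0xF3/0xF2) variant table when one exists.
+ */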
+
--- /dev/null
+/*
+ * x86 instruction analysis
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004, 2009
+ */
+
+#ifdef __KERNEL__
+#include <linux/string.h>
+#else
+#include <string.h>
+#endif
+#include <asm/inat.h>
+#include <asm/insn.h>
+
+/* Verify that the next sizeof(t) + n bytes lie within the instruction buffer */
+#define validate_next(t, insn, n) \
+ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
+
+#define __get_next(t, insn) \
+ ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
+
+#define __peek_nbyte_next(t, insn, n) \
+ ({ t r = *(t*)((insn)->next_byte + n); r; })
+
+#define get_next(t, insn) \
+ ({ if (unlikely(!validate_next(t, insn, 0))) goto err_out; __get_next(t, insn); })
+
+#define peek_nbyte_next(t, insn, n) \
+ ({ if (unlikely(!validate_next(t, insn, n))) goto err_out; __peek_nbyte_next(t, insn, n); })
+
+#define peek_next(t, insn) peek_nbyte_next(t, insn, 0)
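+
+/*
+ * Usage sketch (illustration only): callers of get_next()/peek_next()
+ * must provide an err_out label, since both macros jump there when a
+ * read would run past insn->end_kaddr:
+ *
+ *	insn_byte_t b;
+ *
+ *	b = get_next(insn_byte_t, insn);	(consumes one byte)
+ *	...
+ * err_out:
+ *	return;
+ */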
+
+/**
+ * insn_init() - initialize struct insn
+ * @insn: &struct insn to be initialized
+ * @kaddr: address (in kernel memory) of instruction (or copy thereof)
+ * @buf_len:	length of the buffer at @kaddr, in bytes
+ * @x86_64:	!0 for 64-bit kernel or 64-bit app
+ */
+void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
+{
+ /*
+ * Instructions longer than MAX_INSN_SIZE (15 bytes) are invalid
+ * even if the input buffer is long enough to hold them.
+ */
+ if (buf_len > MAX_INSN_SIZE)
+ buf_len = MAX_INSN_SIZE;
+
+ memset(insn, 0, sizeof(*insn));
+ insn->kaddr = kaddr;
+ insn->end_kaddr = kaddr + buf_len;
+ insn->next_byte = kaddr;
+ insn->x86_64 = x86_64 ? 1 : 0;
+ insn->opnd_bytes = 4;
+ if (x86_64)
+ insn->addr_bytes = 8;
+ else
+ insn->addr_bytes = 4;
+}
+
+/**
+ * insn_get_prefixes - scan x86 instruction prefix bytes
+ * @insn: &struct insn containing instruction
+ *
+ * Populates the @insn->prefixes bitmap, and updates @insn->next_byte
+ * to point to the (first) opcode. No effect if @insn->prefixes.got
+ * is already set.
+ */
+void insn_get_prefixes(struct insn *insn)
+{
+ struct insn_field *prefixes = &insn->prefixes;
+ insn_attr_t attr;
+ insn_byte_t b, lb;
+ int i, nb;
+
+ if (prefixes->got)
+ return;
+
+ nb = 0;
+ lb = 0;
+ b = peek_next(insn_byte_t, insn);
+ attr = inat_get_opcode_attribute(b);
+ while (inat_is_legacy_prefix(attr)) {
+ /* Skip if same prefix */
+ for (i = 0; i < nb; i++)
+ if (prefixes->bytes[i] == b)
+ goto found;
+ if (nb == 4)
+ /* Invalid instruction */
+ break;
+ prefixes->bytes[nb++] = b;
+ if (inat_is_address_size_prefix(attr)) {
+ /* address size switches 2/4 or 4/8 */
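+			/* (XOR toggle: 4^12 == 8, 8^12 == 4; 4^6 == 2, 2^6 == 4) */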
+ if (insn->x86_64)
+ insn->addr_bytes ^= 12;
+ else
+ insn->addr_bytes ^= 6;
+ } else if (inat_is_operand_size_prefix(attr)) {
+			/* operand size switches 2/4 */
+ insn->opnd_bytes ^= 6;
+ }
+found:
+ prefixes->nbytes++;
+ insn->next_byte++;
+ lb = b;
+ b = peek_next(insn_byte_t, insn);
+ attr = inat_get_opcode_attribute(b);
+ }
+ /* Set the last prefix */
+ if (lb && lb != insn->prefixes.bytes[3]) {
+ if (unlikely(insn->prefixes.bytes[3])) {
+ /* Swap the last prefix */
+ b = insn->prefixes.bytes[3];
+ for (i = 0; i < nb; i++)
+ if (prefixes->bytes[i] == lb)
+ prefixes->bytes[i] = b;
+ }
+ insn->prefixes.bytes[3] = lb;
+ }
+
+ /* Decode REX prefix */
+ if (insn->x86_64) {
+ b = peek_next(insn_byte_t, insn);
+ attr = inat_get_opcode_attribute(b);
+ if (inat_is_rex_prefix(attr)) {
+ insn->rex_prefix.value = b;
+ insn->rex_prefix.nbytes = 1;
+ insn->next_byte++;
+ if (X86_REX_W(b))
+ /* REX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ }
+ }
+ insn->rex_prefix.got = 1;
+
+ /* Decode VEX prefix */
+ b = peek_next(insn_byte_t, insn);
+ attr = inat_get_opcode_attribute(b);
+ if (inat_is_vex_prefix(attr)) {
+ insn_byte_t b2 = peek_nbyte_next(insn_byte_t, insn, 1);
+ if (!insn->x86_64) {
+ /*
+			 * In 32-bit mode, if the [7:6] bits (mod bits of
+ * ModRM) on the second byte are not 11b, it is
+ * LDS or LES or BOUND.
+ */
+ if (X86_MODRM_MOD(b2) != 3)
+ goto vex_end;
+ }
+ insn->vex_prefix.bytes[0] = b;
+ insn->vex_prefix.bytes[1] = b2;
+ if (inat_is_evex_prefix(attr)) {
+ b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+ insn->vex_prefix.bytes[2] = b2;
+ b2 = peek_nbyte_next(insn_byte_t, insn, 3);
+ insn->vex_prefix.bytes[3] = b2;
+ insn->vex_prefix.nbytes = 4;
+ insn->next_byte += 4;
+ if (insn->x86_64 && X86_VEX_W(b2))
+ /* VEX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ } else if (inat_is_vex3_prefix(attr)) {
+ b2 = peek_nbyte_next(insn_byte_t, insn, 2);
+ insn->vex_prefix.bytes[2] = b2;
+ insn->vex_prefix.nbytes = 3;
+ insn->next_byte += 3;
+ if (insn->x86_64 && X86_VEX_W(b2))
+ /* VEX.W overrides opnd_size */
+ insn->opnd_bytes = 8;
+ } else {
+ /*
+ * For VEX2, fake VEX3-like byte#2.
+ * Makes it easier to decode vex.W, vex.vvvv,
+ * vex.L and vex.pp. Masking with 0x7f sets vex.W == 0.
+ */
+ insn->vex_prefix.bytes[2] = b2 & 0x7f;
+ insn->vex_prefix.nbytes = 2;
+ insn->next_byte += 2;
+ }
+ }
+vex_end:
+ insn->vex_prefix.got = 1;
+
+ prefixes->got = 1;
+
+err_out:
+ return;
+}
+
+/**
+ * insn_get_opcode - collect opcode(s)
+ * @insn: &struct insn containing instruction
+ *
+ * Populates @insn->opcode, updates @insn->next_byte to point past the
+ * opcode byte(s), and sets @insn->attr (except for groups).
+ * If necessary, first collects any preceding (prefix) bytes.
+ * Sets @insn->opcode.value to the first opcode byte. No effect if
+ * @insn->opcode.got is already 1.
+ */
+void insn_get_opcode(struct insn *insn)
+{
+ struct insn_field *opcode = &insn->opcode;
+ insn_byte_t op;
+ int pfx_id;
+ if (opcode->got)
+ return;
+ if (!insn->prefixes.got)
+ insn_get_prefixes(insn);
+
+ /* Get first opcode */
+ op = get_next(insn_byte_t, insn);
+ opcode->bytes[0] = op;
+ opcode->nbytes = 1;
+
+ /* Check if there is VEX prefix or not */
+ if (insn_is_avx(insn)) {
+ insn_byte_t m, p;
+ m = insn_vex_m_bits(insn);
+ p = insn_vex_p_bits(insn);
+ insn->attr = inat_get_avx_attribute(op, m, p);
+ if ((inat_must_evex(insn->attr) && !insn_is_evex(insn)) ||
+ (!inat_accept_vex(insn->attr) &&
+ !inat_is_group(insn->attr)))
+ insn->attr = 0; /* This instruction is bad */
+ goto end; /* VEX has only 1 byte for opcode */
+ }
+
+ insn->attr = inat_get_opcode_attribute(op);
+ while (inat_is_escape(insn->attr)) {
+ /* Get escaped opcode */
+ op = get_next(insn_byte_t, insn);
+ opcode->bytes[opcode->nbytes++] = op;
+ pfx_id = insn_last_prefix_id(insn);
+ insn->attr = inat_get_escape_attribute(op, pfx_id, insn->attr);
+ }
+ if (inat_must_vex(insn->attr))
+ insn->attr = 0; /* This instruction is bad */
+end:
+ opcode->got = 1;
+
+err_out:
+ return;
+}
+
+/**
+ * insn_get_modrm - collect ModRM byte, if any
+ * @insn: &struct insn containing instruction
+ *
+ * Populates @insn->modrm and updates @insn->next_byte to point past the
+ * ModRM byte, if any. If necessary, first collects the preceding bytes
+ * (prefixes and opcode(s)). No effect if @insn->modrm.got is already 1.
+ */
+void insn_get_modrm(struct insn *insn)
+{
+ struct insn_field *modrm = &insn->modrm;
+ insn_byte_t pfx_id, mod;
+ if (modrm->got)
+ return;
+ if (!insn->opcode.got)
+ insn_get_opcode(insn);
+
+ if (inat_has_modrm(insn->attr)) {
+ mod = get_next(insn_byte_t, insn);
+ modrm->value = mod;
+ modrm->nbytes = 1;
+ if (inat_is_group(insn->attr)) {
+ pfx_id = insn_last_prefix_id(insn);
+ insn->attr = inat_get_group_attribute(mod, pfx_id,
+ insn->attr);
+ if (insn_is_avx(insn) && !inat_accept_vex(insn->attr))
+ insn->attr = 0; /* This is bad */
+ }
+ }
+
+ if (insn->x86_64 && inat_is_force64(insn->attr))
+ insn->opnd_bytes = 8;
+ modrm->got = 1;
+
+err_out:
+ return;
+}
+
+
+/**
+ * insn_rip_relative() - Does instruction use RIP-relative addressing mode?
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * ModRM byte. No effect if @insn->x86_64 is 0.
+ */
+int insn_rip_relative(struct insn *insn)
+{
+ struct insn_field *modrm = &insn->modrm;
+
+ if (!insn->x86_64)
+ return 0;
+ if (!modrm->got)
+ insn_get_modrm(insn);
+ /*
+ * For rip-relative instructions, the mod field (top 2 bits)
+ * is zero and the r/m field (bottom 3 bits) is 0x5.
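+	 * For example, ModRM byte 0x05 (mod == 00b, r/m == 101b) matches,
+	 * as in "lea rax, [rip + disp32]" (an illustrative encoding).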
+ */
+ return (modrm->nbytes && (modrm->value & 0xc7) == 0x5);
+}
+
+/**
+ * insn_get_sib() - Get the SIB byte of instruction
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * ModRM byte.
+ */
+void insn_get_sib(struct insn *insn)
+{
+ insn_byte_t modrm;
+
+ if (insn->sib.got)
+ return;
+ if (!insn->modrm.got)
+ insn_get_modrm(insn);
+ if (insn->modrm.nbytes) {
+ modrm = (insn_byte_t)insn->modrm.value;
+ if (insn->addr_bytes != 2 &&
+ X86_MODRM_MOD(modrm) != 3 && X86_MODRM_RM(modrm) == 4) {
+ insn->sib.value = get_next(insn_byte_t, insn);
+ insn->sib.nbytes = 1;
+ }
+ }
+ insn->sib.got = 1;
+
+err_out:
+ return;
+}
+
+
+/**
+ * insn_get_displacement() - Get the displacement of instruction
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * SIB byte.
+ * The displacement value is sign-extended.
+ */
+void insn_get_displacement(struct insn *insn)
+{
+ insn_byte_t mod, rm, base;
+
+ if (insn->displacement.got)
+ return;
+ if (!insn->sib.got)
+ insn_get_sib(insn);
+ if (insn->modrm.nbytes) {
+ /*
+ * Interpreting the modrm byte:
+ * mod = 00 - no displacement fields (exceptions below)
+ * mod = 01 - 1-byte displacement field
+ * mod = 10 - displacement field is 4 bytes, or 2 bytes if
+ * address size = 2 (0x67 prefix in 32-bit mode)
+ * mod = 11 - no memory operand
+ *
+ * If address size = 2...
+ * mod = 00, r/m = 110 - displacement field is 2 bytes
+ *
+ * If address size != 2...
+ * mod != 11, r/m = 100 - SIB byte exists
+ * mod = 00, SIB base = 101 - displacement field is 4 bytes
+ * mod = 00, r/m = 101 - rip-relative addressing, displacement
+ * field is 4 bytes
+ */
+ mod = X86_MODRM_MOD(insn->modrm.value);
+ rm = X86_MODRM_RM(insn->modrm.value);
+ base = X86_SIB_BASE(insn->sib.value);
+ if (mod == 3)
+ goto out;
+ if (mod == 1) {
+ insn->displacement.value = get_next(signed char, insn);
+ insn->displacement.nbytes = 1;
+ } else if (insn->addr_bytes == 2) {
+ if ((mod == 0 && rm == 6) || mod == 2) {
+ insn->displacement.value =
+ get_next(short, insn);
+ insn->displacement.nbytes = 2;
+ }
+ } else {
+ if ((mod == 0 && rm == 5) || mod == 2 ||
+ (mod == 0 && base == 5)) {
+ insn->displacement.value = get_next(int, insn);
+ insn->displacement.nbytes = 4;
+ }
+ }
+ }
+out:
+ insn->displacement.got = 1;
+
+err_out:
+ return;
+}
+
+/* Decode moffset16/32/64. Return 0 if failed */
+static int __get_moffset(struct insn *insn)
+{
+ switch (insn->addr_bytes) {
+ case 2:
+ insn->moffset1.value = get_next(short, insn);
+ insn->moffset1.nbytes = 2;
+ break;
+ case 4:
+ insn->moffset1.value = get_next(int, insn);
+ insn->moffset1.nbytes = 4;
+ break;
+ case 8:
+ insn->moffset1.value = get_next(int, insn);
+ insn->moffset1.nbytes = 4;
+ insn->moffset2.value = get_next(int, insn);
+ insn->moffset2.nbytes = 4;
+ break;
+	default: /* addr_bytes must be modified manually */
+ goto err_out;
+ }
+ insn->moffset1.got = insn->moffset2.got = 1;
+
+ return 1;
+
+err_out:
+ return 0;
+}
+
+/* Decode imm v32(Iz). Return 0 if failed */
+static int __get_immv32(struct insn *insn)
+{
+ switch (insn->opnd_bytes) {
+ case 2:
+ insn->immediate.value = get_next(short, insn);
+ insn->immediate.nbytes = 2;
+ break;
+ case 4:
+ case 8:
+ insn->immediate.value = get_next(int, insn);
+ insn->immediate.nbytes = 4;
+ break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
+ }
+
+ return 1;
+
+err_out:
+ return 0;
+}
+
+/* Decode imm v64(Iv/Ov). Return 0 if failed */
+static int __get_immv(struct insn *insn)
+{
+ switch (insn->opnd_bytes) {
+ case 2:
+ insn->immediate1.value = get_next(short, insn);
+ insn->immediate1.nbytes = 2;
+ break;
+ case 4:
+ insn->immediate1.value = get_next(int, insn);
+ insn->immediate1.nbytes = 4;
+ break;
+ case 8:
+ insn->immediate1.value = get_next(int, insn);
+ insn->immediate1.nbytes = 4;
+ insn->immediate2.value = get_next(int, insn);
+ insn->immediate2.nbytes = 4;
+ break;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
+ }
+ insn->immediate1.got = insn->immediate2.got = 1;
+
+ return 1;
+err_out:
+ return 0;
+}
+
+/* Decode ptr16:16/32(Ap) */
+static int __get_immptr(struct insn *insn)
+{
+ switch (insn->opnd_bytes) {
+ case 2:
+ insn->immediate1.value = get_next(short, insn);
+ insn->immediate1.nbytes = 2;
+ break;
+ case 4:
+ insn->immediate1.value = get_next(int, insn);
+ insn->immediate1.nbytes = 4;
+ break;
+ case 8:
+		/* ptr16:64 does not exist (there is no segment form) */
+ return 0;
+ default: /* opnd_bytes must be modified manually */
+ goto err_out;
+ }
+ insn->immediate2.value = get_next(unsigned short, insn);
+ insn->immediate2.nbytes = 2;
+ insn->immediate1.got = insn->immediate2.got = 1;
+
+ return 1;
+err_out:
+ return 0;
+}
+
+/**
+ * insn_get_immediate() - Get the immediates of instruction
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * displacement bytes.
+ * Most immediates are sign-extended; the unsigned value can be
+ * recovered by masking with ((1 << (nbytes * 8)) - 1).
+ */
+void insn_get_immediate(struct insn *insn)
+{
+ if (insn->immediate.got)
+ return;
+ if (!insn->displacement.got)
+ insn_get_displacement(insn);
+
+ if (inat_has_moffset(insn->attr)) {
+ if (!__get_moffset(insn))
+ goto err_out;
+ goto done;
+ }
+
+ if (!inat_has_immediate(insn->attr))
+ /* no immediates */
+ goto done;
+
+ switch (inat_immediate_size(insn->attr)) {
+ case INAT_IMM_BYTE:
+ insn->immediate.value = get_next(signed char, insn);
+ insn->immediate.nbytes = 1;
+ break;
+ case INAT_IMM_WORD:
+ insn->immediate.value = get_next(short, insn);
+ insn->immediate.nbytes = 2;
+ break;
+ case INAT_IMM_DWORD:
+ insn->immediate.value = get_next(int, insn);
+ insn->immediate.nbytes = 4;
+ break;
+ case INAT_IMM_QWORD:
+ insn->immediate1.value = get_next(int, insn);
+ insn->immediate1.nbytes = 4;
+ insn->immediate2.value = get_next(int, insn);
+ insn->immediate2.nbytes = 4;
+ break;
+ case INAT_IMM_PTR:
+ if (!__get_immptr(insn))
+ goto err_out;
+ break;
+ case INAT_IMM_VWORD32:
+ if (!__get_immv32(insn))
+ goto err_out;
+ break;
+ case INAT_IMM_VWORD:
+ if (!__get_immv(insn))
+ goto err_out;
+ break;
+ default:
+ /* Here, insn must have an immediate, but failed */
+ goto err_out;
+ }
+ if (inat_has_second_immediate(insn->attr)) {
+ insn->immediate2.value = get_next(signed char, insn);
+ insn->immediate2.nbytes = 1;
+ }
+done:
+ insn->immediate.got = 1;
+
+err_out:
+ return;
+}
+
+/**
+ * insn_get_length() - Get the length of instruction
+ * @insn: &struct insn containing instruction
+ *
+ * If necessary, first collects the instruction up to and including the
+ * immediates bytes.
+ */
+void insn_get_length(struct insn *insn)
+{
+ if (insn->length)
+ return;
+ if (!insn->immediate.got)
+ insn_get_immediate(insn);
+ insn->length = (unsigned char)((unsigned long)insn->next_byte
+ - (unsigned long)insn->kaddr);
+}
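+
+/*
+ * Example usage (a minimal sketch; buffer handling is up to the caller):
+ *
+ *	struct insn insn;
+ *	unsigned char buf[MAX_INSN_SIZE];
+ *
+ *	... copy up to MAX_INSN_SIZE code bytes into buf ...
+ *	insn_init(&insn, buf, sizeof(buf), 1);	(1 => 64-bit mode)
+ *	insn_get_length(&insn);
+ *
+ * insn.length now holds the decoded length, and insn.opcode, insn.modrm,
+ * insn.immediate etc. are populated; each insn_get_*() helper first
+ * collects whatever earlier fields it needs.
+ */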
--- /dev/null
+# x86 Opcode Maps
+#
+# This is (mostly) based on the following documentation:
+# - Intel(R) 64 and IA-32 Architectures Software Developer's Manual Vol.2C
+# (#326018-047US, June 2013)
+#
+#<Opcode maps>
+# Table: table-name
+# Referrer: escaped-name
+# AVXcode: avx-code
+# opcode: mnemonic|GrpXXX [operand1[,operand2...]] [(extra1)[,(extra2)...]] [| 2nd-mnemonic ...]
+# (or)
+# opcode: escape # escaped-name
+# EndTable
+#
+# mnemonics that begin with lowercase 'v' accept a VEX or EVEX prefix
+# mnemonics that begin with lowercase 'k' accept a VEX prefix
+#
+#<group maps>
+# GrpTable: GrpXXX
+# reg: mnemonic [operand1[,operand2...]] [(extra1)[,(extra2)...]] [| 2nd-mnemonic ...]
+# EndTable
+#
+# AVX Superscripts
+# (ev): this opcode requires EVEX prefix.
+# (evo): this opcode is changed by EVEX prefix (EVEX opcode)
+# (v): this opcode requires VEX prefix.
+# (v1): this opcode only supports 128-bit VEX.
+#
+# Last Prefix Superscripts
+# - (66): the last prefix is 0x66
+# - (F3): the last prefix is 0xF3
+# - (F2): the last prefix is 0xF2
+# - (!F3): the last prefix is not 0xF3 (including the case with no last prefix)
+# - (66&F2): Both 0x66 and 0xF2 prefixes are specified.
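+#
+# Worked example (illustration only; the line quoted here appears verbatim
+# in the 2-byte table below):
+#   58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+# Opcode 0x58 decodes as vaddps when there is no mandatory last prefix, as
+# vaddpd when the last prefix is 0x66, and as vaddss/vaddsd under 0xF3/0xF2;
+# the capital-letter operand codes (Vps, Hps, Wps, ...) follow the Intel SDM
+# addressing-method/operand-type notation.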
+
+Table: one byte opcode
+Referrer:
+AVXcode:
+# 0x00 - 0x0f
+00: ADD Eb,Gb
+01: ADD Ev,Gv
+02: ADD Gb,Eb
+03: ADD Gv,Ev
+04: ADD AL,Ib
+05: ADD rAX,Iz
+06: PUSH ES (i64)
+07: POP ES (i64)
+08: OR Eb,Gb
+09: OR Ev,Gv
+0a: OR Gb,Eb
+0b: OR Gv,Ev
+0c: OR AL,Ib
+0d: OR rAX,Iz
+0e: PUSH CS (i64)
+0f: escape # 2-byte escape
+# 0x10 - 0x1f
+10: ADC Eb,Gb
+11: ADC Ev,Gv
+12: ADC Gb,Eb
+13: ADC Gv,Ev
+14: ADC AL,Ib
+15: ADC rAX,Iz
+16: PUSH SS (i64)
+17: POP SS (i64)
+18: SBB Eb,Gb
+19: SBB Ev,Gv
+1a: SBB Gb,Eb
+1b: SBB Gv,Ev
+1c: SBB AL,Ib
+1d: SBB rAX,Iz
+1e: PUSH DS (i64)
+1f: POP DS (i64)
+# 0x20 - 0x2f
+20: AND Eb,Gb
+21: AND Ev,Gv
+22: AND Gb,Eb
+23: AND Gv,Ev
+24: AND AL,Ib
+25: AND rAX,Iz
+26: SEG=ES (Prefix)
+27: DAA (i64)
+28: SUB Eb,Gb
+29: SUB Ev,Gv
+2a: SUB Gb,Eb
+2b: SUB Gv,Ev
+2c: SUB AL,Ib
+2d: SUB rAX,Iz
+2e: SEG=CS (Prefix)
+2f: DAS (i64)
+# 0x30 - 0x3f
+30: XOR Eb,Gb
+31: XOR Ev,Gv
+32: XOR Gb,Eb
+33: XOR Gv,Ev
+34: XOR AL,Ib
+35: XOR rAX,Iz
+36: SEG=SS (Prefix)
+37: AAA (i64)
+38: CMP Eb,Gb
+39: CMP Ev,Gv
+3a: CMP Gb,Eb
+3b: CMP Gv,Ev
+3c: CMP AL,Ib
+3d: CMP rAX,Iz
+3e: SEG=DS (Prefix)
+3f: AAS (i64)
+# 0x40 - 0x4f
+40: INC eAX (i64) | REX (o64)
+41: INC eCX (i64) | REX.B (o64)
+42: INC eDX (i64) | REX.X (o64)
+43: INC eBX (i64) | REX.XB (o64)
+44: INC eSP (i64) | REX.R (o64)
+45: INC eBP (i64) | REX.RB (o64)
+46: INC eSI (i64) | REX.RX (o64)
+47: INC eDI (i64) | REX.RXB (o64)
+48: DEC eAX (i64) | REX.W (o64)
+49: DEC eCX (i64) | REX.WB (o64)
+4a: DEC eDX (i64) | REX.WX (o64)
+4b: DEC eBX (i64) | REX.WXB (o64)
+4c: DEC eSP (i64) | REX.WR (o64)
+4d: DEC eBP (i64) | REX.WRB (o64)
+4e: DEC eSI (i64) | REX.WRX (o64)
+4f: DEC eDI (i64) | REX.WRXB (o64)
+# 0x50 - 0x5f
+50: PUSH rAX/r8 (d64)
+51: PUSH rCX/r9 (d64)
+52: PUSH rDX/r10 (d64)
+53: PUSH rBX/r11 (d64)
+54: PUSH rSP/r12 (d64)
+55: PUSH rBP/r13 (d64)
+56: PUSH rSI/r14 (d64)
+57: PUSH rDI/r15 (d64)
+58: POP rAX/r8 (d64)
+59: POP rCX/r9 (d64)
+5a: POP rDX/r10 (d64)
+5b: POP rBX/r11 (d64)
+5c: POP rSP/r12 (d64)
+5d: POP rBP/r13 (d64)
+5e: POP rSI/r14 (d64)
+5f: POP rDI/r15 (d64)
+# 0x60 - 0x6f
+60: PUSHA/PUSHAD (i64)
+61: POPA/POPAD (i64)
+62: BOUND Gv,Ma (i64) | EVEX (Prefix)
+63: ARPL Ew,Gw (i64) | MOVSXD Gv,Ev (o64)
+64: SEG=FS (Prefix)
+65: SEG=GS (Prefix)
+66: Operand-Size (Prefix)
+67: Address-Size (Prefix)
+68: PUSH Iz (d64)
+69: IMUL Gv,Ev,Iz
+6a: PUSH Ib (d64)
+6b: IMUL Gv,Ev,Ib
+6c: INS/INSB Yb,DX
+6d: INS/INSW/INSD Yz,DX
+6e: OUTS/OUTSB DX,Xb
+6f: OUTS/OUTSW/OUTSD DX,Xz
+# 0x70 - 0x7f
+70: JO Jb
+71: JNO Jb
+72: JB/JNAE/JC Jb
+73: JNB/JAE/JNC Jb
+74: JZ/JE Jb
+75: JNZ/JNE Jb
+76: JBE/JNA Jb
+77: JNBE/JA Jb
+78: JS Jb
+79: JNS Jb
+7a: JP/JPE Jb
+7b: JNP/JPO Jb
+7c: JL/JNGE Jb
+7d: JNL/JGE Jb
+7e: JLE/JNG Jb
+7f: JNLE/JG Jb
+# 0x80 - 0x8f
+80: Grp1 Eb,Ib (1A)
+81: Grp1 Ev,Iz (1A)
+82: Grp1 Eb,Ib (1A),(i64)
+83: Grp1 Ev,Ib (1A)
+84: TEST Eb,Gb
+85: TEST Ev,Gv
+86: XCHG Eb,Gb
+87: XCHG Ev,Gv
+88: MOV Eb,Gb
+89: MOV Ev,Gv
+8a: MOV Gb,Eb
+8b: MOV Gv,Ev
+8c: MOV Ev,Sw
+8d: LEA Gv,M
+8e: MOV Sw,Ew
+8f: Grp1A (1A) | POP Ev (d64)
+# 0x90 - 0x9f
+90: NOP | PAUSE (F3) | XCHG r8,rAX
+91: XCHG rCX/r9,rAX
+92: XCHG rDX/r10,rAX
+93: XCHG rBX/r11,rAX
+94: XCHG rSP/r12,rAX
+95: XCHG rBP/r13,rAX
+96: XCHG rSI/r14,rAX
+97: XCHG rDI/r15,rAX
+98: CBW/CWDE/CDQE
+99: CWD/CDQ/CQO
+9a: CALLF Ap (i64)
+9b: FWAIT/WAIT
+9c: PUSHF/D/Q Fv (d64)
+9d: POPF/D/Q Fv (d64)
+9e: SAHF
+9f: LAHF
+# 0xa0 - 0xaf
+a0: MOV AL,Ob
+a1: MOV rAX,Ov
+a2: MOV Ob,AL
+a3: MOV Ov,rAX
+a4: MOVS/B Yb,Xb
+a5: MOVS/W/D/Q Yv,Xv
+a6: CMPS/B Xb,Yb
+a7: CMPS/W/D Xv,Yv
+a8: TEST AL,Ib
+a9: TEST rAX,Iz
+aa: STOS/B Yb,AL
+ab: STOS/W/D/Q Yv,rAX
+ac: LODS/B AL,Xb
+ad: LODS/W/D/Q rAX,Xv
+ae: SCAS/B AL,Yb
+# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+# next instruction but Yv is correct
+af: SCAS/W/D/Q rAX,Yv
+# 0xb0 - 0xbf
+b0: MOV AL/R8L,Ib
+b1: MOV CL/R9L,Ib
+b2: MOV DL/R10L,Ib
+b3: MOV BL/R11L,Ib
+b4: MOV AH/R12L,Ib
+b5: MOV CH/R13L,Ib
+b6: MOV DH/R14L,Ib
+b7: MOV BH/R15L,Ib
+b8: MOV rAX/r8,Iv
+b9: MOV rCX/r9,Iv
+ba: MOV rDX/r10,Iv
+bb: MOV rBX/r11,Iv
+bc: MOV rSP/r12,Iv
+bd: MOV rBP/r13,Iv
+be: MOV rSI/r14,Iv
+bf: MOV rDI/r15,Iv
+# 0xc0 - 0xcf
+c0: Grp2 Eb,Ib (1A)
+c1: Grp2 Ev,Ib (1A)
+c2: RETN Iw (f64)
+c3: RETN
+c4: LES Gz,Mp (i64) | VEX+2byte (Prefix)
+c5: LDS Gz,Mp (i64) | VEX+1byte (Prefix)
+c6: Grp11A Eb,Ib (1A)
+c7: Grp11B Ev,Iz (1A)
+c8: ENTER Iw,Ib
+c9: LEAVE (d64)
+ca: RETF Iw
+cb: RETF
+cc: INT3
+cd: INT Ib
+ce: INTO (i64)
+cf: IRET/D/Q
+# 0xd0 - 0xdf
+d0: Grp2 Eb,1 (1A)
+d1: Grp2 Ev,1 (1A)
+d2: Grp2 Eb,CL (1A)
+d3: Grp2 Ev,CL (1A)
+d4: AAM Ib (i64)
+d5: AAD Ib (i64)
+d6:
+d7: XLAT/XLATB
+d8: ESC
+d9: ESC
+da: ESC
+db: ESC
+dc: ESC
+dd: ESC
+de: ESC
+df: ESC
+# 0xe0 - 0xef
+# Note: "forced64" is Intel CPU behavior: they ignore the 0x66 prefix
+# in 64-bit mode. AMD CPUs accept the 0x66 prefix, which causes RIP
+# truncation to 16 bits. In 32-bit mode, 0x66 is accepted by both
+# Intel and AMD.
+e0: LOOPNE/LOOPNZ Jb (f64)
+e1: LOOPE/LOOPZ Jb (f64)
+e2: LOOP Jb (f64)
+e3: JrCXZ Jb (f64)
+e4: IN AL,Ib
+e5: IN eAX,Ib
+e6: OUT Ib,AL
+e7: OUT Ib,eAX
+# With a 0x66 prefix in 64-bit mode, AMD CPUs treat the immediate offset
+# in "near" jumps and calls as 16-bit. For CALL, the pushed return
+# address is 16-bit wide and RSP is decremented by 2, but RSP, unlike
+# RIP, is not truncated to 16 bits.
+e8: CALL Jz (f64)
+e9: JMP-near Jz (f64)
+ea: JMP-far Ap (i64)
+eb: JMP-short Jb (f64)
+ec: IN AL,DX
+ed: IN eAX,DX
+ee: OUT DX,AL
+ef: OUT DX,eAX
+# 0xf0 - 0xff
+f0: LOCK (Prefix)
+f1:
+f2: REPNE (Prefix) | XACQUIRE (Prefix)
+f3: REP/REPE (Prefix) | XRELEASE (Prefix)
+f4: HLT
+f5: CMC
+f6: Grp3_1 Eb (1A)
+f7: Grp3_2 Ev (1A)
+f8: CLC
+f9: STC
+fa: CLI
+fb: STI
+fc: CLD
+fd: STD
+fe: Grp4 (1A)
+ff: Grp5 (1A)
+EndTable
+
+Table: 2-byte opcode (0x0f)
+Referrer: 2-byte escape
+AVXcode: 1
+# 0x0f 0x00-0x0f
+00: Grp6 (1A)
+01: Grp7 (1A)
+02: LAR Gv,Ew
+03: LSL Gv,Ew
+04:
+05: SYSCALL (o64)
+06: CLTS
+07: SYSRET (o64)
+08: INVD
+09: WBINVD
+0a:
+0b: UD2 (1B)
+0c:
+# AMD's prefetch group. Intel supports prefetchw(/1) only.
+0d: GrpP
+0e: FEMMS
+# 3DNow! uses the last imm byte as opcode extension.
+0f: 3DNow! Pq,Qq,Ib
+# 0x0f 0x10-0x1f
+# NOTE: The Intel SDM opcode map lists vmovups and vmovupd with no operands,
+# but they do take operands. Also, vmovss and vmovsd only accept 128-bit
+# forms. MOVSS/MOVSD has three forms in the SDM; this map shows just a
+# typical one. Many AVX instructions lack the (v1) superscript, per the
+# Intel AVX Programming Reference, section A.1.
+10: vmovups Vps,Wps | vmovupd Vpd,Wpd (66) | vmovss Vx,Hx,Wss (F3),(v1) | vmovsd Vx,Hx,Wsd (F2),(v1)
+11: vmovups Wps,Vps | vmovupd Wpd,Vpd (66) | vmovss Wss,Hx,Vss (F3),(v1) | vmovsd Wsd,Hx,Vsd (F2),(v1)
+12: vmovlps Vq,Hq,Mq (v1) | vmovhlps Vq,Hq,Uq (v1) | vmovlpd Vq,Hq,Mq (66),(v1) | vmovsldup Vx,Wx (F3) | vmovddup Vx,Wx (F2)
+13: vmovlps Mq,Vq (v1) | vmovlpd Mq,Vq (66),(v1)
+14: vunpcklps Vx,Hx,Wx | vunpcklpd Vx,Hx,Wx (66)
+15: vunpckhps Vx,Hx,Wx | vunpckhpd Vx,Hx,Wx (66)
+16: vmovhps Vdq,Hq,Mq (v1) | vmovlhps Vdq,Hq,Uq (v1) | vmovhpd Vdq,Hq,Mq (66),(v1) | vmovshdup Vx,Wx (F3)
+17: vmovhps Mq,Vq (v1) | vmovhpd Mq,Vq (66),(v1)
+18: Grp16 (1A)
+19:
+# Intel SDM opcode map does not list MPX instructions. For now using Gv for
+# bnd registers and Ev for everything else is OK because the instruction
+# decoder does not use the information except as an indication that there is
+# a ModR/M byte.
+1a: BNDCL Gv,Ev (F3) | BNDCU Gv,Ev (F2) | BNDMOV Gv,Ev (66) | BNDLDX Gv,Ev
+1b: BNDCN Gv,Ev (F2) | BNDMOV Ev,Gv (66) | BNDMK Gv,Ev (F3) | BNDSTX Ev,Gv
+1c:
+1d:
+1e:
+1f: NOP Ev
+# 0x0f 0x20-0x2f
+20: MOV Rd,Cd
+21: MOV Rd,Dd
+22: MOV Cd,Rd
+23: MOV Dd,Rd
+24:
+25:
+26:
+27:
+28: vmovaps Vps,Wps | vmovapd Vpd,Wpd (66)
+29: vmovaps Wps,Vps | vmovapd Wpd,Vpd (66)
+2a: cvtpi2ps Vps,Qpi | cvtpi2pd Vpd,Qpi (66) | vcvtsi2ss Vss,Hss,Ey (F3),(v1) | vcvtsi2sd Vsd,Hsd,Ey (F2),(v1)
+2b: vmovntps Mps,Vps | vmovntpd Mpd,Vpd (66)
+2c: cvttps2pi Ppi,Wps | cvttpd2pi Ppi,Wpd (66) | vcvttss2si Gy,Wss (F3),(v1) | vcvttsd2si Gy,Wsd (F2),(v1)
+2d: cvtps2pi Ppi,Wps | cvtpd2pi Qpi,Wpd (66) | vcvtss2si Gy,Wss (F3),(v1) | vcvtsd2si Gy,Wsd (F2),(v1)
+2e: vucomiss Vss,Wss (v1) | vucomisd Vsd,Wsd (66),(v1)
+2f: vcomiss Vss,Wss (v1) | vcomisd Vsd,Wsd (66),(v1)
+# 0x0f 0x30-0x3f
+30: WRMSR
+31: RDTSC
+32: RDMSR
+33: RDPMC
+34: SYSENTER
+35: SYSEXIT
+36:
+37: GETSEC
+38: escape # 3-byte escape 1
+39:
+3a: escape # 3-byte escape 2
+3b:
+3c:
+3d:
+3e:
+3f:
+# 0x0f 0x40-0x4f
+40: CMOVO Gv,Ev
+41: CMOVNO Gv,Ev | kandw/q Vk,Hk,Uk | kandb/d Vk,Hk,Uk (66)
+42: CMOVB/C/NAE Gv,Ev | kandnw/q Vk,Hk,Uk | kandnb/d Vk,Hk,Uk (66)
+43: CMOVAE/NB/NC Gv,Ev
+44: CMOVE/Z Gv,Ev | knotw/q Vk,Uk | knotb/d Vk,Uk (66)
+45: CMOVNE/NZ Gv,Ev | korw/q Vk,Hk,Uk | korb/d Vk,Hk,Uk (66)
+46: CMOVBE/NA Gv,Ev | kxnorw/q Vk,Hk,Uk | kxnorb/d Vk,Hk,Uk (66)
+47: CMOVA/NBE Gv,Ev | kxorw/q Vk,Hk,Uk | kxorb/d Vk,Hk,Uk (66)
+48: CMOVS Gv,Ev
+49: CMOVNS Gv,Ev
+4a: CMOVP/PE Gv,Ev | kaddw/q Vk,Hk,Uk | kaddb/d Vk,Hk,Uk (66)
+4b: CMOVNP/PO Gv,Ev | kunpckbw Vk,Hk,Uk (66) | kunpckwd/dq Vk,Hk,Uk
+4c: CMOVL/NGE Gv,Ev
+4d: CMOVNL/GE Gv,Ev
+4e: CMOVLE/NG Gv,Ev
+4f: CMOVNLE/G Gv,Ev
+# 0x0f 0x50-0x5f
+50: vmovmskps Gy,Ups | vmovmskpd Gy,Upd (66)
+51: vsqrtps Vps,Wps | vsqrtpd Vpd,Wpd (66) | vsqrtss Vss,Hss,Wss (F3),(v1) | vsqrtsd Vsd,Hsd,Wsd (F2),(v1)
+52: vrsqrtps Vps,Wps | vrsqrtss Vss,Hss,Wss (F3),(v1)
+53: vrcpps Vps,Wps | vrcpss Vss,Hss,Wss (F3),(v1)
+54: vandps Vps,Hps,Wps | vandpd Vpd,Hpd,Wpd (66)
+55: vandnps Vps,Hps,Wps | vandnpd Vpd,Hpd,Wpd (66)
+56: vorps Vps,Hps,Wps | vorpd Vpd,Hpd,Wpd (66)
+57: vxorps Vps,Hps,Wps | vxorpd Vpd,Hpd,Wpd (66)
+58: vaddps Vps,Hps,Wps | vaddpd Vpd,Hpd,Wpd (66) | vaddss Vss,Hss,Wss (F3),(v1) | vaddsd Vsd,Hsd,Wsd (F2),(v1)
+59: vmulps Vps,Hps,Wps | vmulpd Vpd,Hpd,Wpd (66) | vmulss Vss,Hss,Wss (F3),(v1) | vmulsd Vsd,Hsd,Wsd (F2),(v1)
+5a: vcvtps2pd Vpd,Wps | vcvtpd2ps Vps,Wpd (66) | vcvtss2sd Vsd,Hx,Wss (F3),(v1) | vcvtsd2ss Vss,Hx,Wsd (F2),(v1)
+5b: vcvtdq2ps Vps,Wdq | vcvtqq2ps Vps,Wqq (evo) | vcvtps2dq Vdq,Wps (66) | vcvttps2dq Vdq,Wps (F3)
+5c: vsubps Vps,Hps,Wps | vsubpd Vpd,Hpd,Wpd (66) | vsubss Vss,Hss,Wss (F3),(v1) | vsubsd Vsd,Hsd,Wsd (F2),(v1)
+5d: vminps Vps,Hps,Wps | vminpd Vpd,Hpd,Wpd (66) | vminss Vss,Hss,Wss (F3),(v1) | vminsd Vsd,Hsd,Wsd (F2),(v1)
+5e: vdivps Vps,Hps,Wps | vdivpd Vpd,Hpd,Wpd (66) | vdivss Vss,Hss,Wss (F3),(v1) | vdivsd Vsd,Hsd,Wsd (F2),(v1)
+5f: vmaxps Vps,Hps,Wps | vmaxpd Vpd,Hpd,Wpd (66) | vmaxss Vss,Hss,Wss (F3),(v1) | vmaxsd Vsd,Hsd,Wsd (F2),(v1)
+# 0x0f 0x60-0x6f
+60: punpcklbw Pq,Qd | vpunpcklbw Vx,Hx,Wx (66),(v1)
+61: punpcklwd Pq,Qd | vpunpcklwd Vx,Hx,Wx (66),(v1)
+62: punpckldq Pq,Qd | vpunpckldq Vx,Hx,Wx (66),(v1)
+63: packsswb Pq,Qq | vpacksswb Vx,Hx,Wx (66),(v1)
+64: pcmpgtb Pq,Qq | vpcmpgtb Vx,Hx,Wx (66),(v1)
+65: pcmpgtw Pq,Qq | vpcmpgtw Vx,Hx,Wx (66),(v1)
+66: pcmpgtd Pq,Qq | vpcmpgtd Vx,Hx,Wx (66),(v1)
+67: packuswb Pq,Qq | vpackuswb Vx,Hx,Wx (66),(v1)
+68: punpckhbw Pq,Qd | vpunpckhbw Vx,Hx,Wx (66),(v1)
+69: punpckhwd Pq,Qd | vpunpckhwd Vx,Hx,Wx (66),(v1)
+6a: punpckhdq Pq,Qd | vpunpckhdq Vx,Hx,Wx (66),(v1)
+6b: packssdw Pq,Qd | vpackssdw Vx,Hx,Wx (66),(v1)
+6c: vpunpcklqdq Vx,Hx,Wx (66),(v1)
+6d: vpunpckhqdq Vx,Hx,Wx (66),(v1)
+6e: movd/q Pd,Ey | vmovd/q Vy,Ey (66),(v1)
+6f: movq Pq,Qq | vmovdqa Vx,Wx (66) | vmovdqa32/64 Vx,Wx (66),(evo) | vmovdqu Vx,Wx (F3) | vmovdqu32/64 Vx,Wx (F3),(evo) | vmovdqu8/16 Vx,Wx (F2),(ev)
+# 0x0f 0x70-0x7f
+70: pshufw Pq,Qq,Ib | vpshufd Vx,Wx,Ib (66),(v1) | vpshufhw Vx,Wx,Ib (F3),(v1) | vpshuflw Vx,Wx,Ib (F2),(v1)
+71: Grp12 (1A)
+72: Grp13 (1A)
+73: Grp14 (1A)
+74: pcmpeqb Pq,Qq | vpcmpeqb Vx,Hx,Wx (66),(v1)
+75: pcmpeqw Pq,Qq | vpcmpeqw Vx,Hx,Wx (66),(v1)
+76: pcmpeqd Pq,Qq | vpcmpeqd Vx,Hx,Wx (66),(v1)
+# Note: (v) is omitted because vzeroall and vzeroupper become emms without a VEX prefix.
+77: emms | vzeroupper | vzeroall
+78: VMREAD Ey,Gy | vcvttps2udq/pd2udq Vx,Wpd (evo) | vcvttsd2usi Gv,Wx (F2),(ev) | vcvttss2usi Gv,Wx (F3),(ev) | vcvttps2uqq/pd2uqq Vx,Wx (66),(ev)
+79: VMWRITE Gy,Ey | vcvtps2udq/pd2udq Vx,Wpd (evo) | vcvtsd2usi Gv,Wx (F2),(ev) | vcvtss2usi Gv,Wx (F3),(ev) | vcvtps2uqq/pd2uqq Vx,Wx (66),(ev)
+7a: vcvtudq2pd/uqq2pd Vpd,Wx (F3),(ev) | vcvtudq2ps/uqq2ps Vpd,Wx (F2),(ev) | vcvttps2qq/pd2qq Vx,Wx (66),(ev)
+7b: vcvtusi2sd Vpd,Hpd,Ev (F2),(ev) | vcvtusi2ss Vps,Hps,Ev (F3),(ev) | vcvtps2qq/pd2qq Vx,Wx (66),(ev)
+7c: vhaddpd Vpd,Hpd,Wpd (66) | vhaddps Vps,Hps,Wps (F2)
+7d: vhsubpd Vpd,Hpd,Wpd (66) | vhsubps Vps,Hps,Wps (F2)
+7e: movd/q Ey,Pd | vmovd/q Ey,Vy (66),(v1) | vmovq Vq,Wq (F3),(v1)
+7f: movq Qq,Pq | vmovdqa Wx,Vx (66) | vmovdqa32/64 Wx,Vx (66),(evo) | vmovdqu Wx,Vx (F3) | vmovdqu32/64 Wx,Vx (F3),(evo) | vmovdqu8/16 Wx,Vx (F2),(ev)
+# 0x0f 0x80-0x8f
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+80: JO Jz (f64)
+81: JNO Jz (f64)
+82: JB/JC/JNAE Jz (f64)
+83: JAE/JNB/JNC Jz (f64)
+84: JE/JZ Jz (f64)
+85: JNE/JNZ Jz (f64)
+86: JBE/JNA Jz (f64)
+87: JA/JNBE Jz (f64)
+88: JS Jz (f64)
+89: JNS Jz (f64)
+8a: JP/JPE Jz (f64)
+8b: JNP/JPO Jz (f64)
+8c: JL/JNGE Jz (f64)
+8d: JNL/JGE Jz (f64)
+8e: JLE/JNG Jz (f64)
+8f: JNLE/JG Jz (f64)
+# 0x0f 0x90-0x9f
+90: SETO Eb | kmovw/q Vk,Wk | kmovb/d Vk,Wk (66)
+91: SETNO Eb | kmovw/q Mv,Vk | kmovb/d Mv,Vk (66)
+92: SETB/C/NAE Eb | kmovw Vk,Rv | kmovb Vk,Rv (66) | kmovq/d Vk,Rv (F2)
+93: SETAE/NB/NC Eb | kmovw Gv,Uk | kmovb Gv,Uk (66) | kmovq/d Gv,Uk (F2)
+94: SETE/Z Eb
+95: SETNE/NZ Eb
+96: SETBE/NA Eb
+97: SETA/NBE Eb
+98: SETS Eb | kortestw/q Vk,Uk | kortestb/d Vk,Uk (66)
+99: SETNS Eb | ktestw/q Vk,Uk | ktestb/d Vk,Uk (66)
+9a: SETP/PE Eb
+9b: SETNP/PO Eb
+9c: SETL/NGE Eb
+9d: SETNL/GE Eb
+9e: SETLE/NG Eb
+9f: SETNLE/G Eb
+# 0x0f 0xa0-0xaf
+a0: PUSH FS (d64)
+a1: POP FS (d64)
+a2: CPUID
+a3: BT Ev,Gv
+a4: SHLD Ev,Gv,Ib
+a5: SHLD Ev,Gv,CL
+a6: GrpPDLK
+a7: GrpRNG
+a8: PUSH GS (d64)
+a9: POP GS (d64)
+aa: RSM
+ab: BTS Ev,Gv
+ac: SHRD Ev,Gv,Ib
+ad: SHRD Ev,Gv,CL
+ae: Grp15 (1A),(1C)
+af: IMUL Gv,Ev
+# 0x0f 0xb0-0xbf
+b0: CMPXCHG Eb,Gb
+b1: CMPXCHG Ev,Gv
+b2: LSS Gv,Mp
+b3: BTR Ev,Gv
+b4: LFS Gv,Mp
+b5: LGS Gv,Mp
+b6: MOVZX Gv,Eb
+b7: MOVZX Gv,Ew
+b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
+b9: Grp10 (1A)
+ba: Grp8 Ev,Ib (1A)
+bb: BTC Ev,Gv
+bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
+bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
+be: MOVSX Gv,Eb
+bf: MOVSX Gv,Ew
+# 0x0f 0xc0-0xcf
+c0: XADD Eb,Gb
+c1: XADD Ev,Gv
+c2: vcmpps Vps,Hps,Wps,Ib | vcmppd Vpd,Hpd,Wpd,Ib (66) | vcmpss Vss,Hss,Wss,Ib (F3),(v1) | vcmpsd Vsd,Hsd,Wsd,Ib (F2),(v1)
+c3: movnti My,Gy
+c4: pinsrw Pq,Ry/Mw,Ib | vpinsrw Vdq,Hdq,Ry/Mw,Ib (66),(v1)
+c5: pextrw Gd,Nq,Ib | vpextrw Gd,Udq,Ib (66),(v1)
+c6: vshufps Vps,Hps,Wps,Ib | vshufpd Vpd,Hpd,Wpd,Ib (66)
+c7: Grp9 (1A)
+c8: BSWAP RAX/EAX/R8/R8D
+c9: BSWAP RCX/ECX/R9/R9D
+ca: BSWAP RDX/EDX/R10/R10D
+cb: BSWAP RBX/EBX/R11/R11D
+cc: BSWAP RSP/ESP/R12/R12D
+cd: BSWAP RBP/EBP/R13/R13D
+ce: BSWAP RSI/ESI/R14/R14D
+cf: BSWAP RDI/EDI/R15/R15D
+# 0x0f 0xd0-0xdf
+d0: vaddsubpd Vpd,Hpd,Wpd (66) | vaddsubps Vps,Hps,Wps (F2)
+d1: psrlw Pq,Qq | vpsrlw Vx,Hx,Wx (66),(v1)
+d2: psrld Pq,Qq | vpsrld Vx,Hx,Wx (66),(v1)
+d3: psrlq Pq,Qq | vpsrlq Vx,Hx,Wx (66),(v1)
+d4: paddq Pq,Qq | vpaddq Vx,Hx,Wx (66),(v1)
+d5: pmullw Pq,Qq | vpmullw Vx,Hx,Wx (66),(v1)
+d6: vmovq Wq,Vq (66),(v1) | movq2dq Vdq,Nq (F3) | movdq2q Pq,Uq (F2)
+d7: pmovmskb Gd,Nq | vpmovmskb Gd,Ux (66),(v1)
+d8: psubusb Pq,Qq | vpsubusb Vx,Hx,Wx (66),(v1)
+d9: psubusw Pq,Qq | vpsubusw Vx,Hx,Wx (66),(v1)
+da: pminub Pq,Qq | vpminub Vx,Hx,Wx (66),(v1)
+db: pand Pq,Qq | vpand Vx,Hx,Wx (66),(v1) | vpandd/q Vx,Hx,Wx (66),(evo)
+dc: paddusb Pq,Qq | vpaddusb Vx,Hx,Wx (66),(v1)
+dd: paddusw Pq,Qq | vpaddusw Vx,Hx,Wx (66),(v1)
+de: pmaxub Pq,Qq | vpmaxub Vx,Hx,Wx (66),(v1)
+df: pandn Pq,Qq | vpandn Vx,Hx,Wx (66),(v1) | vpandnd/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0xe0-0xef
+e0: pavgb Pq,Qq | vpavgb Vx,Hx,Wx (66),(v1)
+e1: psraw Pq,Qq | vpsraw Vx,Hx,Wx (66),(v1)
+e2: psrad Pq,Qq | vpsrad Vx,Hx,Wx (66),(v1)
+e3: pavgw Pq,Qq | vpavgw Vx,Hx,Wx (66),(v1)
+e4: pmulhuw Pq,Qq | vpmulhuw Vx,Hx,Wx (66),(v1)
+e5: pmulhw Pq,Qq | vpmulhw Vx,Hx,Wx (66),(v1)
+e6: vcvttpd2dq Vx,Wpd (66) | vcvtdq2pd Vx,Wdq (F3) | vcvtdq2pd/qq2pd Vx,Wdq (F3),(evo) | vcvtpd2dq Vx,Wpd (F2)
+e7: movntq Mq,Pq | vmovntdq Mx,Vx (66)
+e8: psubsb Pq,Qq | vpsubsb Vx,Hx,Wx (66),(v1)
+e9: psubsw Pq,Qq | vpsubsw Vx,Hx,Wx (66),(v1)
+ea: pminsw Pq,Qq | vpminsw Vx,Hx,Wx (66),(v1)
+eb: por Pq,Qq | vpor Vx,Hx,Wx (66),(v1) | vpord/q Vx,Hx,Wx (66),(evo)
+ec: paddsb Pq,Qq | vpaddsb Vx,Hx,Wx (66),(v1)
+ed: paddsw Pq,Qq | vpaddsw Vx,Hx,Wx (66),(v1)
+ee: pmaxsw Pq,Qq | vpmaxsw Vx,Hx,Wx (66),(v1)
+ef: pxor Pq,Qq | vpxor Vx,Hx,Wx (66),(v1) | vpxord/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0xf0-0xff
+f0: vlddqu Vx,Mx (F2)
+f1: psllw Pq,Qq | vpsllw Vx,Hx,Wx (66),(v1)
+f2: pslld Pq,Qq | vpslld Vx,Hx,Wx (66),(v1)
+f3: psllq Pq,Qq | vpsllq Vx,Hx,Wx (66),(v1)
+f4: pmuludq Pq,Qq | vpmuludq Vx,Hx,Wx (66),(v1)
+f5: pmaddwd Pq,Qq | vpmaddwd Vx,Hx,Wx (66),(v1)
+f6: psadbw Pq,Qq | vpsadbw Vx,Hx,Wx (66),(v1)
+f7: maskmovq Pq,Nq | vmaskmovdqu Vx,Ux (66),(v1)
+f8: psubb Pq,Qq | vpsubb Vx,Hx,Wx (66),(v1)
+f9: psubw Pq,Qq | vpsubw Vx,Hx,Wx (66),(v1)
+fa: psubd Pq,Qq | vpsubd Vx,Hx,Wx (66),(v1)
+fb: psubq Pq,Qq | vpsubq Vx,Hx,Wx (66),(v1)
+fc: paddb Pq,Qq | vpaddb Vx,Hx,Wx (66),(v1)
+fd: paddw Pq,Qq | vpaddw Vx,Hx,Wx (66),(v1)
+fe: paddd Pq,Qq | vpaddd Vx,Hx,Wx (66),(v1)
+ff:
+EndTable
+
+Table: 3-byte opcode 1 (0x0f 0x38)
+Referrer: 3-byte escape 1
+AVXcode: 2
+# 0x0f 0x38 0x00-0x0f
+00: pshufb Pq,Qq | vpshufb Vx,Hx,Wx (66),(v1)
+01: phaddw Pq,Qq | vphaddw Vx,Hx,Wx (66),(v1)
+02: phaddd Pq,Qq | vphaddd Vx,Hx,Wx (66),(v1)
+03: phaddsw Pq,Qq | vphaddsw Vx,Hx,Wx (66),(v1)
+04: pmaddubsw Pq,Qq | vpmaddubsw Vx,Hx,Wx (66),(v1)
+05: phsubw Pq,Qq | vphsubw Vx,Hx,Wx (66),(v1)
+06: phsubd Pq,Qq | vphsubd Vx,Hx,Wx (66),(v1)
+07: phsubsw Pq,Qq | vphsubsw Vx,Hx,Wx (66),(v1)
+08: psignb Pq,Qq | vpsignb Vx,Hx,Wx (66),(v1)
+09: psignw Pq,Qq | vpsignw Vx,Hx,Wx (66),(v1)
+0a: psignd Pq,Qq | vpsignd Vx,Hx,Wx (66),(v1)
+0b: pmulhrsw Pq,Qq | vpmulhrsw Vx,Hx,Wx (66),(v1)
+0c: vpermilps Vx,Hx,Wx (66),(v)
+0d: vpermilpd Vx,Hx,Wx (66),(v)
+0e: vtestps Vx,Wx (66),(v)
+0f: vtestpd Vx,Wx (66),(v)
+# 0x0f 0x38 0x10-0x1f
+10: pblendvb Vdq,Wdq (66) | vpsrlvw Vx,Hx,Wx (66),(evo) | vpmovuswb Wx,Vx (F3),(ev)
+11: vpmovusdb Wx,Vd (F3),(ev) | vpsravw Vx,Hx,Wx (66),(ev)
+12: vpmovusqb Wx,Vq (F3),(ev) | vpsllvw Vx,Hx,Wx (66),(ev)
+13: vcvtph2ps Vx,Wx (66),(v) | vpmovusdw Wx,Vd (F3),(ev)
+14: blendvps Vdq,Wdq (66) | vpmovusqw Wx,Vq (F3),(ev) | vprorvd/q Vx,Hx,Wx (66),(evo)
+15: blendvpd Vdq,Wdq (66) | vpmovusqd Wx,Vq (F3),(ev) | vprolvd/q Vx,Hx,Wx (66),(evo)
+16: vpermps Vqq,Hqq,Wqq (66),(v) | vpermps/d Vqq,Hqq,Wqq (66),(evo)
+17: vptest Vx,Wx (66)
+18: vbroadcastss Vx,Wd (66),(v)
+19: vbroadcastsd Vqq,Wq (66),(v) | vbroadcastf32x2 Vqq,Wq (66),(evo)
+1a: vbroadcastf128 Vqq,Mdq (66),(v) | vbroadcastf32x4/64x2 Vqq,Wq (66),(evo)
+1b: vbroadcastf32x8/64x4 Vqq,Mdq (66),(ev)
+1c: pabsb Pq,Qq | vpabsb Vx,Wx (66),(v1)
+1d: pabsw Pq,Qq | vpabsw Vx,Wx (66),(v1)
+1e: pabsd Pq,Qq | vpabsd Vx,Wx (66),(v1)
+1f: vpabsq Vx,Wx (66),(ev)
+# 0x0f 0x38 0x20-0x2f
+20: vpmovsxbw Vx,Ux/Mq (66),(v1) | vpmovswb Wx,Vx (F3),(ev)
+21: vpmovsxbd Vx,Ux/Md (66),(v1) | vpmovsdb Wx,Vd (F3),(ev)
+22: vpmovsxbq Vx,Ux/Mw (66),(v1) | vpmovsqb Wx,Vq (F3),(ev)
+23: vpmovsxwd Vx,Ux/Mq (66),(v1) | vpmovsdw Wx,Vd (F3),(ev)
+24: vpmovsxwq Vx,Ux/Md (66),(v1) | vpmovsqw Wx,Vq (F3),(ev)
+25: vpmovsxdq Vx,Ux/Mq (66),(v1) | vpmovsqd Wx,Vq (F3),(ev)
+26: vptestmb/w Vk,Hx,Wx (66),(ev) | vptestnmb/w Vk,Hx,Wx (F3),(ev)
+27: vptestmd/q Vk,Hx,Wx (66),(ev) | vptestnmd/q Vk,Hx,Wx (F3),(ev)
+28: vpmuldq Vx,Hx,Wx (66),(v1) | vpmovm2b/w Vx,Uk (F3),(ev)
+29: vpcmpeqq Vx,Hx,Wx (66),(v1) | vpmovb2m/w2m Vk,Ux (F3),(ev)
+2a: vmovntdqa Vx,Mx (66),(v1) | vpbroadcastmb2q Vx,Uk (F3),(ev)
+2b: vpackusdw Vx,Hx,Wx (66),(v1)
+2c: vmaskmovps Vx,Hx,Mx (66),(v) | vscalefps/d Vx,Hx,Wx (66),(evo)
+2d: vmaskmovpd Vx,Hx,Mx (66),(v) | vscalefss/d Vx,Hx,Wx (66),(evo)
+2e: vmaskmovps Mx,Hx,Vx (66),(v)
+2f: vmaskmovpd Mx,Hx,Vx (66),(v)
+# 0x0f 0x38 0x30-0x3f
+30: vpmovzxbw Vx,Ux/Mq (66),(v1) | vpmovwb Wx,Vx (F3),(ev)
+31: vpmovzxbd Vx,Ux/Md (66),(v1) | vpmovdb Wx,Vd (F3),(ev)
+32: vpmovzxbq Vx,Ux/Mw (66),(v1) | vpmovqb Wx,Vq (F3),(ev)
+33: vpmovzxwd Vx,Ux/Mq (66),(v1) | vpmovdw Wx,Vd (F3),(ev)
+34: vpmovzxwq Vx,Ux/Md (66),(v1) | vpmovqw Wx,Vq (F3),(ev)
+35: vpmovzxdq Vx,Ux/Mq (66),(v1) | vpmovqd Wx,Vq (F3),(ev)
+36: vpermd Vqq,Hqq,Wqq (66),(v) | vpermd/q Vqq,Hqq,Wqq (66),(evo)
+37: vpcmpgtq Vx,Hx,Wx (66),(v1)
+38: vpminsb Vx,Hx,Wx (66),(v1) | vpmovm2d/q Vx,Uk (F3),(ev)
+39: vpminsd Vx,Hx,Wx (66),(v1) | vpminsd/q Vx,Hx,Wx (66),(evo) | vpmovd2m/q2m Vk,Ux (F3),(ev)
+3a: vpminuw Vx,Hx,Wx (66),(v1) | vpbroadcastmw2d Vx,Uk (F3),(ev)
+3b: vpminud Vx,Hx,Wx (66),(v1) | vpminud/q Vx,Hx,Wx (66),(evo)
+3c: vpmaxsb Vx,Hx,Wx (66),(v1)
+3d: vpmaxsd Vx,Hx,Wx (66),(v1) | vpmaxsd/q Vx,Hx,Wx (66),(evo)
+3e: vpmaxuw Vx,Hx,Wx (66),(v1)
+3f: vpmaxud Vx,Hx,Wx (66),(v1) | vpmaxud/q Vx,Hx,Wx (66),(evo)
+# 0x0f 0x38 0x40-0x8f
+40: vpmulld Vx,Hx,Wx (66),(v1) | vpmulld/q Vx,Hx,Wx (66),(evo)
+41: vphminposuw Vdq,Wdq (66),(v1)
+42: vgetexpps/d Vx,Wx (66),(ev)
+43: vgetexpss/d Vx,Hx,Wx (66),(ev)
+44: vplzcntd/q Vx,Wx (66),(ev)
+45: vpsrlvd/q Vx,Hx,Wx (66),(v)
+46: vpsravd Vx,Hx,Wx (66),(v) | vpsravd/q Vx,Hx,Wx (66),(evo)
+47: vpsllvd/q Vx,Hx,Wx (66),(v)
+# Skip 0x48-0x4b
+4c: vrcp14ps/d Vpd,Wpd (66),(ev)
+4d: vrcp14ss/d Vsd,Hpd,Wsd (66),(ev)
+4e: vrsqrt14ps/d Vpd,Wpd (66),(ev)
+4f: vrsqrt14ss/d Vsd,Hsd,Wsd (66),(ev)
+# Skip 0x50-0x57
+58: vpbroadcastd Vx,Wx (66),(v)
+59: vpbroadcastq Vx,Wx (66),(v) | vbroadcasti32x2 Vx,Wx (66),(evo)
+5a: vbroadcasti128 Vqq,Mdq (66),(v) | vbroadcasti32x4/64x2 Vx,Wx (66),(evo)
+5b: vbroadcasti32x8/64x4 Vqq,Mdq (66),(ev)
+# Skip 0x5c-0x63
+64: vpblendmd/q Vx,Hx,Wx (66),(ev)
+65: vblendmps/d Vx,Hx,Wx (66),(ev)
+66: vpblendmb/w Vx,Hx,Wx (66),(ev)
+# Skip 0x67-0x74
+75: vpermi2b/w Vx,Hx,Wx (66),(ev)
+76: vpermi2d/q Vx,Hx,Wx (66),(ev)
+77: vpermi2ps/d Vx,Hx,Wx (66),(ev)
+78: vpbroadcastb Vx,Wx (66),(v)
+79: vpbroadcastw Vx,Wx (66),(v)
+7a: vpbroadcastb Vx,Rv (66),(ev)
+7b: vpbroadcastw Vx,Rv (66),(ev)
+7c: vpbroadcastd/q Vx,Rv (66),(ev)
+7d: vpermt2b/w Vx,Hx,Wx (66),(ev)
+7e: vpermt2d/q Vx,Hx,Wx (66),(ev)
+7f: vpermt2ps/d Vx,Hx,Wx (66),(ev)
+80: INVEPT Gy,Mdq (66)
+81: INVVPID Gy,Mdq (66)
+82: INVPCID Gy,Mdq (66)
+83: vpmultishiftqb Vx,Hx,Wx (66),(ev)
+88: vexpandps/d Vpd,Wpd (66),(ev)
+89: vpexpandd/q Vx,Wx (66),(ev)
+8a: vcompressps/d Wx,Vx (66),(ev)
+8b: vpcompressd/q Wx,Vx (66),(ev)
+8c: vpmaskmovd/q Vx,Hx,Mx (66),(v)
+8d: vpermb/w Vx,Hx,Wx (66),(ev)
+8e: vpmaskmovd/q Mx,Vx,Hx (66),(v)
+# 0x0f 0x38 0x90-0xbf (FMA)
+90: vgatherdd/q Vx,Hx,Wx (66),(v) | vpgatherdd/q Vx,Wx (66),(evo)
+91: vgatherqd/q Vx,Hx,Wx (66),(v) | vpgatherqd/q Vx,Wx (66),(evo)
+92: vgatherdps/d Vx,Hx,Wx (66),(v)
+93: vgatherqps/d Vx,Hx,Wx (66),(v)
+94:
+95:
+96: vfmaddsub132ps/d Vx,Hx,Wx (66),(v)
+97: vfmsubadd132ps/d Vx,Hx,Wx (66),(v)
+98: vfmadd132ps/d Vx,Hx,Wx (66),(v)
+99: vfmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9a: vfmsub132ps/d Vx,Hx,Wx (66),(v)
+9b: vfmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+9c: vfnmadd132ps/d Vx,Hx,Wx (66),(v)
+9d: vfnmadd132ss/d Vx,Hx,Wx (66),(v),(v1)
+9e: vfnmsub132ps/d Vx,Hx,Wx (66),(v)
+9f: vfnmsub132ss/d Vx,Hx,Wx (66),(v),(v1)
+a0: vpscatterdd/q Wx,Vx (66),(ev)
+a1: vpscatterqd/q Wx,Vx (66),(ev)
+a2: vscatterdps/d Wx,Vx (66),(ev)
+a3: vscatterqps/d Wx,Vx (66),(ev)
+a6: vfmaddsub213ps/d Vx,Hx,Wx (66),(v)
+a7: vfmsubadd213ps/d Vx,Hx,Wx (66),(v)
+a8: vfmadd213ps/d Vx,Hx,Wx (66),(v)
+a9: vfmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+aa: vfmsub213ps/d Vx,Hx,Wx (66),(v)
+ab: vfmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+ac: vfnmadd213ps/d Vx,Hx,Wx (66),(v)
+ad: vfnmadd213ss/d Vx,Hx,Wx (66),(v),(v1)
+ae: vfnmsub213ps/d Vx,Hx,Wx (66),(v)
+af: vfnmsub213ss/d Vx,Hx,Wx (66),(v),(v1)
+b4: vpmadd52luq Vx,Hx,Wx (66),(ev)
+b5: vpmadd52huq Vx,Hx,Wx (66),(ev)
+b6: vfmaddsub231ps/d Vx,Hx,Wx (66),(v)
+b7: vfmsubadd231ps/d Vx,Hx,Wx (66),(v)
+b8: vfmadd231ps/d Vx,Hx,Wx (66),(v)
+b9: vfmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+ba: vfmsub231ps/d Vx,Hx,Wx (66),(v)
+bb: vfmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+bc: vfnmadd231ps/d Vx,Hx,Wx (66),(v)
+bd: vfnmadd231ss/d Vx,Hx,Wx (66),(v),(v1)
+be: vfnmsub231ps/d Vx,Hx,Wx (66),(v)
+bf: vfnmsub231ss/d Vx,Hx,Wx (66),(v),(v1)
+# 0x0f 0x38 0xc0-0xff
+c4: vpconflictd/q Vx,Wx (66),(ev)
+c6: Grp18 (1A)
+c7: Grp19 (1A)
+c8: sha1nexte Vdq,Wdq | vexp2ps/d Vx,Wx (66),(ev)
+c9: sha1msg1 Vdq,Wdq
+ca: sha1msg2 Vdq,Wdq | vrcp28ps/d Vx,Wx (66),(ev)
+cb: sha256rnds2 Vdq,Wdq | vrcp28ss/d Vx,Hx,Wx (66),(ev)
+cc: sha256msg1 Vdq,Wdq | vrsqrt28ps/d Vx,Wx (66),(ev)
+cd: sha256msg2 Vdq,Wdq | vrsqrt28ss/d Vx,Hx,Wx (66),(ev)
+db: VAESIMC Vdq,Wdq (66),(v1)
+dc: VAESENC Vdq,Hdq,Wdq (66),(v1)
+dd: VAESENCLAST Vdq,Hdq,Wdq (66),(v1)
+de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
+df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
+f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2) | CRC32 Gd,Eb (66&F2)
+f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2) | CRC32 Gd,Ew (66&F2)
+f2: ANDN Gy,By,Ey (v)
+f3: Grp17 (1A)
+f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
+f6: ADCX Gy,Ey (66) | ADOX Gy,Ey (F3) | MULX By,Gy,rDX,Ey (F2),(v)
+f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
+EndTable
+
+Table: 3-byte opcode 2 (0x0f 0x3a)
+Referrer: 3-byte escape 2
+AVXcode: 3
+# 0x0f 0x3a 0x00-0xff
+00: vpermq Vqq,Wqq,Ib (66),(v)
+01: vpermpd Vqq,Wqq,Ib (66),(v)
+02: vpblendd Vx,Hx,Wx,Ib (66),(v)
+03: valignd/q Vx,Hx,Wx,Ib (66),(ev)
+04: vpermilps Vx,Wx,Ib (66),(v)
+05: vpermilpd Vx,Wx,Ib (66),(v)
+06: vperm2f128 Vqq,Hqq,Wqq,Ib (66),(v)
+07:
+08: vroundps Vx,Wx,Ib (66) | vrndscaleps Vx,Wx,Ib (66),(evo)
+09: vroundpd Vx,Wx,Ib (66) | vrndscalepd Vx,Wx,Ib (66),(evo)
+0a: vroundss Vss,Wss,Ib (66),(v1) | vrndscaless Vx,Hx,Wx,Ib (66),(evo)
+0b: vroundsd Vsd,Wsd,Ib (66),(v1) | vrndscalesd Vx,Hx,Wx,Ib (66),(evo)
+0c: vblendps Vx,Hx,Wx,Ib (66)
+0d: vblendpd Vx,Hx,Wx,Ib (66)
+0e: vpblendw Vx,Hx,Wx,Ib (66),(v1)
+0f: palignr Pq,Qq,Ib | vpalignr Vx,Hx,Wx,Ib (66),(v1)
+14: vpextrb Rd/Mb,Vdq,Ib (66),(v1)
+15: vpextrw Rd/Mw,Vdq,Ib (66),(v1)
+16: vpextrd/q Ey,Vdq,Ib (66),(v1)
+17: vextractps Ed,Vdq,Ib (66),(v1)
+18: vinsertf128 Vqq,Hqq,Wqq,Ib (66),(v) | vinsertf32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+19: vextractf128 Wdq,Vqq,Ib (66),(v) | vextractf32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+1a: vinsertf32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+1b: vextractf32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+1d: vcvtps2ph Wx,Vx,Ib (66),(v)
+1e: vpcmpud/q Vk,Hd,Wd,Ib (66),(ev)
+1f: vpcmpd/q Vk,Hd,Wd,Ib (66),(ev)
+20: vpinsrb Vdq,Hdq,Ry/Mb,Ib (66),(v1)
+21: vinsertps Vdq,Hdq,Udq/Md,Ib (66),(v1)
+22: vpinsrd/q Vdq,Hdq,Ey,Ib (66),(v1)
+23: vshuff32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+25: vpternlogd/q Vx,Hx,Wx,Ib (66),(ev)
+26: vgetmantps/d Vx,Wx,Ib (66),(ev)
+27: vgetmantss/d Vx,Hx,Wx,Ib (66),(ev)
+30: kshiftrb/w Vk,Uk,Ib (66),(v)
+31: kshiftrd/q Vk,Uk,Ib (66),(v)
+32: kshiftlb/w Vk,Uk,Ib (66),(v)
+33: kshiftld/q Vk,Uk,Ib (66),(v)
+38: vinserti128 Vqq,Hqq,Wqq,Ib (66),(v) | vinserti32x4/64x2 Vqq,Hqq,Wqq,Ib (66),(evo)
+39: vextracti128 Wdq,Vqq,Ib (66),(v) | vextracti32x4/64x2 Wdq,Vqq,Ib (66),(evo)
+3a: vinserti32x8/64x4 Vqq,Hqq,Wqq,Ib (66),(ev)
+3b: vextracti32x8/64x4 Wdq,Vqq,Ib (66),(ev)
+3e: vpcmpub/w Vk,Hk,Wx,Ib (66),(ev)
+3f: vpcmpb/w Vk,Hk,Wx,Ib (66),(ev)
+40: vdpps Vx,Hx,Wx,Ib (66)
+41: vdppd Vdq,Hdq,Wdq,Ib (66),(v1)
+42: vmpsadbw Vx,Hx,Wx,Ib (66),(v1) | vdbpsadbw Vx,Hx,Wx,Ib (66),(evo)
+43: vshufi32x4/64x2 Vx,Hx,Wx,Ib (66),(ev)
+44: vpclmulqdq Vdq,Hdq,Wdq,Ib (66),(v1)
+46: vperm2i128 Vqq,Hqq,Wqq,Ib (66),(v)
+4a: vblendvps Vx,Hx,Wx,Lx (66),(v)
+4b: vblendvpd Vx,Hx,Wx,Lx (66),(v)
+4c: vpblendvb Vx,Hx,Wx,Lx (66),(v1)
+50: vrangeps/d Vx,Hx,Wx,Ib (66),(ev)
+51: vrangess/d Vx,Hx,Wx,Ib (66),(ev)
+54: vfixupimmps/d Vx,Hx,Wx,Ib (66),(ev)
+55: vfixupimmss/d Vx,Hx,Wx,Ib (66),(ev)
+56: vreduceps/d Vx,Wx,Ib (66),(ev)
+57: vreducess/d Vx,Hx,Wx,Ib (66),(ev)
+60: vpcmpestrm Vdq,Wdq,Ib (66),(v1)
+61: vpcmpestri Vdq,Wdq,Ib (66),(v1)
+62: vpcmpistrm Vdq,Wdq,Ib (66),(v1)
+63: vpcmpistri Vdq,Wdq,Ib (66),(v1)
+66: vfpclassps/d Vk,Wx,Ib (66),(ev)
+67: vfpclassss/d Vk,Wx,Ib (66),(ev)
+cc: sha1rnds4 Vdq,Wdq,Ib
+df: VAESKEYGEN Vdq,Wdq,Ib (66),(v1)
+f0: RORX Gy,Ey,Ib (F2),(v)
+EndTable
+
+GrpTable: Grp1
+0: ADD
+1: OR
+2: ADC
+3: SBB
+4: AND
+5: SUB
+6: XOR
+7: CMP
+EndTable
+
+GrpTable: Grp1A
+0: POP
+EndTable
+
+GrpTable: Grp2
+0: ROL
+1: ROR
+2: RCL
+3: RCR
+4: SHL/SAL
+5: SHR
+6:
+7: SAR
+EndTable
+
+GrpTable: Grp3_1
+0: TEST Eb,Ib
+1:
+2: NOT Eb
+3: NEG Eb
+4: MUL AL,Eb
+5: IMUL AL,Eb
+6: DIV AL,Eb
+7: IDIV AL,Eb
+EndTable
+
+GrpTable: Grp3_2
+0: TEST Ev,Iz
+1:
+2: NOT Ev
+3: NEG Ev
+4: MUL rAX,Ev
+5: IMUL rAX,Ev
+6: DIV rAX,Ev
+7: IDIV rAX,Ev
+EndTable
+
+GrpTable: Grp4
+0: INC Eb
+1: DEC Eb
+EndTable
+
+GrpTable: Grp5
+0: INC Ev
+1: DEC Ev
+# Note: "forced64" is Intel CPU behavior (see comment about CALL insn).
+2: CALLN Ev (f64)
+3: CALLF Ep
+4: JMPN Ev (f64)
+5: JMPF Mp
+6: PUSH Ev (d64)
+7:
+EndTable
+
+GrpTable: Grp6
+0: SLDT Rv/Mw
+1: STR Rv/Mw
+2: LLDT Ew
+3: LTR Ew
+4: VERR Ew
+5: VERW Ew
+EndTable
+
+GrpTable: Grp7
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B)
+2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B)
+3: LIDT Ms
+4: SMSW Mw/Rv
+5: rdpkru (110),(11B) | wrpkru (111),(11B)
+6: LMSW Ew
+7: INVLPG Mb | SWAPGS (o64),(000),(11B) | RDTSCP (001),(11B)
+EndTable
+
+GrpTable: Grp8
+4: BT
+5: BTS
+6: BTR
+7: BTC
+EndTable
+
+GrpTable: Grp9
+1: CMPXCHG8B/16B Mq/Mdq
+3: xrstors
+4: xsavec
+5: xsaves
+6: VMPTRLD Mq | VMCLEAR Mq (66) | VMXON Mq (F3) | RDRAND Rv (11B)
+7: VMPTRST Mq | VMPTRST Mq (F3) | RDSEED Rv (11B)
+EndTable
+
+GrpTable: Grp10
+EndTable
+
+# Grp11A and Grp11B are expressed as Grp11 in Intel SDM
+GrpTable: Grp11A
+0: MOV Eb,Ib
+7: XABORT Ib (000),(11B)
+EndTable
+
+GrpTable: Grp11B
+0: MOV Eb,Iz
+7: XBEGIN Jz (000),(11B)
+EndTable
+
+GrpTable: Grp12
+2: psrlw Nq,Ib (11B) | vpsrlw Hx,Ux,Ib (66),(11B),(v1)
+4: psraw Nq,Ib (11B) | vpsraw Hx,Ux,Ib (66),(11B),(v1)
+6: psllw Nq,Ib (11B) | vpsllw Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp13
+0: vprord/q Hx,Wx,Ib (66),(ev)
+1: vprold/q Hx,Wx,Ib (66),(ev)
+2: psrld Nq,Ib (11B) | vpsrld Hx,Ux,Ib (66),(11B),(v1)
+4: psrad Nq,Ib (11B) | vpsrad Hx,Ux,Ib (66),(11B),(v1) | vpsrad/q Hx,Ux,Ib (66),(evo)
+6: pslld Nq,Ib (11B) | vpslld Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp14
+2: psrlq Nq,Ib (11B) | vpsrlq Hx,Ux,Ib (66),(11B),(v1)
+3: vpsrldq Hx,Ux,Ib (66),(11B),(v1)
+6: psllq Nq,Ib (11B) | vpsllq Hx,Ux,Ib (66),(11B),(v1)
+7: vpslldq Hx,Ux,Ib (66),(11B),(v1)
+EndTable
+
+GrpTable: Grp15
+0: fxsave | RDFSBASE Ry (F3),(11B)
+1: fxrstor | RDGSBASE Ry (F3),(11B)
+2: vldmxcsr Md (v1) | WRFSBASE Ry (F3),(11B)
+3: vstmxcsr Md (v1) | WRGSBASE Ry (F3),(11B)
+4: XSAVE | ptwrite Ey (F3),(11B)
+5: XRSTOR | lfence (11B)
+6: XSAVEOPT | clwb (66) | mfence (11B)
+7: clflush | clflushopt (66) | sfence (11B)
+EndTable
+
+GrpTable: Grp16
+0: prefetch NTA
+1: prefetch T0
+2: prefetch T1
+3: prefetch T2
+EndTable
+
+GrpTable: Grp17
+1: BLSR By,Ey (v)
+2: BLSMSK By,Ey (v)
+3: BLSI By,Ey (v)
+EndTable
+
+GrpTable: Grp18
+1: vgatherpf0dps/d Wx (66),(ev)
+2: vgatherpf1dps/d Wx (66),(ev)
+5: vscatterpf0dps/d Wx (66),(ev)
+6: vscatterpf1dps/d Wx (66),(ev)
+EndTable
+
+GrpTable: Grp19
+1: vgatherpf0qps/d Wx (66),(ev)
+2: vgatherpf1qps/d Wx (66),(ev)
+5: vscatterpf0qps/d Wx (66),(ev)
+6: vscatterpf1qps/d Wx (66),(ev)
+EndTable
+
+# AMD's Prefetch Group
+GrpTable: GrpP
+0: PREFETCH
+1: PREFETCHW
+EndTable
+
+GrpTable: GrpPDLK
+0: MONTMUL
+1: XSHA1
+2: XSHA2
+EndTable
+
+GrpTable: GrpRNG
+0: xstore-rng
+1: xcrypt-ecb
+2: xcrypt-cbc
+4: xcrypt-cfb
+5: xcrypt-ofb
+EndTable
--- /dev/null
+#!/bin/awk -f
+# SPDX-License-Identifier: GPL-2.0
+# gen-insn-attr-x86.awk: Instruction attribute table generator
+# Written by Masami Hiramatsu <mhiramat@redhat.com>
+#
+# Usage: awk -f gen-insn-attr-x86.awk x86-opcode-map.txt > inat-tables.c
+
+# Awk implementation sanity check
+function check_awk_implement() {
+ if (sprintf("%x", 0) != "0")
+ return "Your awk has a printf-format problem."
+ return ""
+}
+
+# Clear working vars
+function clear_vars() {
+ delete table
+ delete lptable2
+ delete lptable1
+ delete lptable3
+ eid = -1 # escape id
+ gid = -1 # group id
+ aid = -1 # AVX id
+ tname = ""
+}
+
+BEGIN {
+ # Implementation error checking
+ awkchecked = check_awk_implement()
+ if (awkchecked != "") {
+ print "Error: " awkchecked > "/dev/stderr"
+ print "Please try to use gawk." > "/dev/stderr"
+ exit 1
+ }
+
+ # Setup generating tables
+ print "/* x86 opcode map generated from x86-opcode-map.txt */"
+ print "/* Do not change this code. */\n"
+ ggid = 1
+ geid = 1
+ gaid = 0
+ delete etable
+ delete gtable
+ delete atable
+
+ opnd_expr = "^[A-Za-z/]"
+ ext_expr = "^\\("
+ sep_expr = "^\\|$"
+ group_expr = "^Grp[0-9A-Za-z]+"
+
+ imm_expr = "^[IJAOL][a-z]"
+ imm_flag["Ib"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+ imm_flag["Jb"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+ imm_flag["Iw"] = "INAT_MAKE_IMM(INAT_IMM_WORD)"
+ imm_flag["Id"] = "INAT_MAKE_IMM(INAT_IMM_DWORD)"
+ imm_flag["Iq"] = "INAT_MAKE_IMM(INAT_IMM_QWORD)"
+ imm_flag["Ap"] = "INAT_MAKE_IMM(INAT_IMM_PTR)"
+ imm_flag["Iz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+ imm_flag["Jz"] = "INAT_MAKE_IMM(INAT_IMM_VWORD32)"
+ imm_flag["Iv"] = "INAT_MAKE_IMM(INAT_IMM_VWORD)"
+ imm_flag["Ob"] = "INAT_MOFFSET"
+ imm_flag["Ov"] = "INAT_MOFFSET"
+ imm_flag["Lx"] = "INAT_MAKE_IMM(INAT_IMM_BYTE)"
+
+ modrm_expr = "^([CDEGMNPQRSUVW/][a-z]+|NTA|T[012])"
+ force64_expr = "\\([df]64\\)"
+ rex_expr = "^REX(\\.[XRWB]+)*"
+ fpu_expr = "^ESC" # TODO
+
+ lprefix1_expr = "\\((66|!F3)\\)"
+ lprefix2_expr = "\\(F3\\)"
+ lprefix3_expr = "\\((F2|!F3|66\\&F2)\\)"
+ lprefix_expr = "\\((66|F2|F3)\\)"
+ max_lprefix = 4
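+	# Note (illustration): the _1/_2/_3 variant tables generated below
+	# from lprefix1/2/3_expr correspond to last-prefix ids 1 (0x66),
+	# 2 (0xF3) and 3 (0xF2) as looked up by inat.c; this mapping is
+	# inferred from the patterns above, not a normative statement.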
+
+	# All opcodes starting with lower-case 'v', 'k' or with (v1) superscript
+	# accept a VEX prefix
+ vexok_opcode_expr = "^[vk].*"
+ vexok_expr = "\\(v1\\)"
+	# All opcodes with (v) superscript support *only* a VEX prefix
+ vexonly_expr = "\\(v\\)"
+	# All opcodes with (ev) superscript support *only* an EVEX prefix
+ evexonly_expr = "\\(ev\\)"
+
+ prefix_expr = "\\(Prefix\\)"
+ prefix_num["Operand-Size"] = "INAT_PFX_OPNDSZ"
+ prefix_num["REPNE"] = "INAT_PFX_REPNE"
+ prefix_num["REP/REPE"] = "INAT_PFX_REPE"
+ prefix_num["XACQUIRE"] = "INAT_PFX_REPNE"
+ prefix_num["XRELEASE"] = "INAT_PFX_REPE"
+ prefix_num["LOCK"] = "INAT_PFX_LOCK"
+ prefix_num["SEG=CS"] = "INAT_PFX_CS"
+ prefix_num["SEG=DS"] = "INAT_PFX_DS"
+ prefix_num["SEG=ES"] = "INAT_PFX_ES"
+ prefix_num["SEG=FS"] = "INAT_PFX_FS"
+ prefix_num["SEG=GS"] = "INAT_PFX_GS"
+ prefix_num["SEG=SS"] = "INAT_PFX_SS"
+ prefix_num["Address-Size"] = "INAT_PFX_ADDRSZ"
+ prefix_num["VEX+1byte"] = "INAT_PFX_VEX2"
+ prefix_num["VEX+2byte"] = "INAT_PFX_VEX3"
+ prefix_num["EVEX"] = "INAT_PFX_EVEX"
+
+ clear_vars()
+}
+
+function semantic_error(msg) {
+ print "Semantic error at " NR ": " msg > "/dev/stderr"
+ exit 1
+}
+
+function debug(msg) {
+ print "DEBUG: " msg
+}
+
+function array_size(arr, i,c) {
+ c = 0
+ for (i in arr)
+ c++
+ return c
+}
+
+/^Table:/ {
+ print "/* " $0 " */"
+ if (tname != "")
+ semantic_error("Hit Table: before EndTable:.");
+}
+
+/^Referrer:/ {
+ if (NF != 1) {
+ # escape opcode table
+ ref = ""
+ for (i = 2; i <= NF; i++)
+ ref = ref $i
+ eid = escape[ref]
+ tname = sprintf("inat_escape_table_%d", eid)
+ }
+}
+
+/^AVXcode:/ {
+ if (NF != 1) {
+ # AVX/escape opcode table
+ aid = $2
+ if (gaid <= aid)
+ gaid = aid + 1
+ if (tname == "") # AVX only opcode table
+ tname = sprintf("inat_avx_table_%d", $2)
+ }
+ if (aid == -1 && eid == -1) # primary opcode table
+ tname = "inat_primary_table"
+}
+
+/^GrpTable:/ {
+ print "/* " $0 " */"
+ if (!($2 in group))
+ semantic_error("No group: " $2 )
+ gid = group[$2]
+ tname = "inat_group_table_" gid
+}
+
+function print_table(tbl,name,fmt,n)
+{
+ print "const insn_attr_t " name " = {"
+ for (i = 0; i < n; i++) {
+ id = sprintf(fmt, i)
+ if (tbl[id])
+ print " [" id "] = " tbl[id] ","
+ }
+ print "};"
+}
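+# (print_table() emits a sparse C initializer, e.g.:
+#	const insn_attr_t inat_group_table_2[INAT_GROUP_TABLE_SIZE] = {
+#		[0x1] = INAT_MODRM,
+#	};
+# with only the populated slots listed.)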
+
+/^EndTable/ {
+ if (gid != -1) {
+ # print group tables
+ if (array_size(table) != 0) {
+ print_table(table, tname "[INAT_GROUP_TABLE_SIZE]",
+ "0x%x", 8)
+ gtable[gid,0] = tname
+ }
+ if (array_size(lptable1) != 0) {
+ print_table(lptable1, tname "_1[INAT_GROUP_TABLE_SIZE]",
+ "0x%x", 8)
+ gtable[gid,1] = tname "_1"
+ }
+ if (array_size(lptable2) != 0) {
+ print_table(lptable2, tname "_2[INAT_GROUP_TABLE_SIZE]",
+ "0x%x", 8)
+ gtable[gid,2] = tname "_2"
+ }
+ if (array_size(lptable3) != 0) {
+ print_table(lptable3, tname "_3[INAT_GROUP_TABLE_SIZE]",
+ "0x%x", 8)
+ gtable[gid,3] = tname "_3"
+ }
+ } else {
+ # print primary/escaped tables
+ if (array_size(table) != 0) {
+ print_table(table, tname "[INAT_OPCODE_TABLE_SIZE]",
+ "0x%02x", 256)
+ etable[eid,0] = tname
+ if (aid >= 0)
+ atable[aid,0] = tname
+ }
+ if (array_size(lptable1) != 0) {
+ print_table(lptable1,tname "_1[INAT_OPCODE_TABLE_SIZE]",
+ "0x%02x", 256)
+ etable[eid,1] = tname "_1"
+ if (aid >= 0)
+ atable[aid,1] = tname "_1"
+ }
+ if (array_size(lptable2) != 0) {
+ print_table(lptable2,tname "_2[INAT_OPCODE_TABLE_SIZE]",
+ "0x%02x", 256)
+ etable[eid,2] = tname "_2"
+ if (aid >= 0)
+ atable[aid,2] = tname "_2"
+ }
+ if (array_size(lptable3) != 0) {
+ print_table(lptable3,tname "_3[INAT_OPCODE_TABLE_SIZE]",
+ "0x%02x", 256)
+ etable[eid,3] = tname "_3"
+ if (aid >= 0)
+ atable[aid,3] = tname "_3"
+ }
+ }
+ print ""
+ clear_vars()
+}
+
+function add_flags(old,new) {
+ if (old && new)
+ return old " | " new
+ else if (old)
+ return old
+ else
+ return new
+}
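+# (e.g. add_flags("INAT_MODRM", "INAT_VEXOK") returns "INAT_MODRM | INAT_VEXOK",
+# and add_flags("", "INAT_VEXOK") just returns "INAT_VEXOK")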
+
+# convert operands to flags.
+function convert_operands(count,opnd, i,j,imm,mod)
+{
+ imm = null
+ mod = null
+ for (j = 1; j <= count; j++) {
+ i = opnd[j]
+ if (match(i, imm_expr) == 1) {
+ if (!imm_flag[i])
+ semantic_error("Unknown imm opnd: " i)
+ if (imm) {
+ if (i != "Ib")
+ semantic_error("Second IMM error")
+ imm = add_flags(imm, "INAT_SCNDIMM")
+ } else
+ imm = imm_flag[i]
+ } else if (match(i, modrm_expr))
+ mod = "INAT_MODRM"
+ }
+ return add_flags(imm, mod)
+}
+
+/^[0-9a-f]+\:/ {
+ if (NR == 1)
+ next
+ # get index
+ idx = "0x" substr($1, 1, index($1,":") - 1)
+ if (idx in table)
+ semantic_error("Redefine " idx " in " tname)
+
+ # check if escaped opcode
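+	# (a map line such as "0f: escape # 2-byte escape" allocates a new
+	# escape table id and points this opcode slot at it)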
+ if ("escape" == $2) {
+ if ($3 != "#")
+ semantic_error("No escaped name")
+ ref = ""
+ for (i = 4; i <= NF; i++)
+ ref = ref $i
+ if (ref in escape)
+ semantic_error("Redefine escape (" ref ")")
+ escape[ref] = geid
+ geid++
+ table[idx] = "INAT_MAKE_ESCAPE(" escape[ref] ")"
+ next
+ }
+
+ variant = null
+	# convert each opcode and its attributes on this line
+ i = 2
+ while (i <= NF) {
+ opcode = $(i++)
+ delete opnds
+ ext = null
+ flags = null
+ opnd = null
+ # parse one opcode
+ if (match($i, opnd_expr)) {
+ opnd = $i
+ count = split($(i++), opnds, ",")
+ flags = convert_operands(count, opnds)
+ }
+ if (match($i, ext_expr))
+ ext = $(i++)
+ if (match($i, sep_expr))
+ i++
+ else if (i < NF)
+ semantic_error($i " is not a separator")
+
+ # check if group opcode
+ if (match(opcode, group_expr)) {
+ if (!(opcode in group)) {
+ group[opcode] = ggid
+ ggid++
+ }
+ flags = add_flags(flags, "INAT_MAKE_GROUP(" group[opcode] ")")
+ }
+		# check force (or default) 64-bit
+ if (match(ext, force64_expr))
+ flags = add_flags(flags, "INAT_FORCE64")
+
+ # check REX prefix
+ if (match(opcode, rex_expr))
+ flags = add_flags(flags, "INAT_MAKE_PREFIX(INAT_PFX_REX)")
+
+ # check coprocessor escape : TODO
+ if (match(opcode, fpu_expr))
+ flags = add_flags(flags, "INAT_MODRM")
+
+ # check VEX codes
+ if (match(ext, evexonly_expr))
+ flags = add_flags(flags, "INAT_VEXOK | INAT_EVEXONLY")
+ else if (match(ext, vexonly_expr))
+ flags = add_flags(flags, "INAT_VEXOK | INAT_VEXONLY")
+ else if (match(ext, vexok_expr) || match(opcode, vexok_opcode_expr))
+ flags = add_flags(flags, "INAT_VEXOK")
+
+ # check prefixes
+ if (match(ext, prefix_expr)) {
+ if (!prefix_num[opcode])
+ semantic_error("Unknown prefix: " opcode)
+ flags = add_flags(flags, "INAT_MAKE_PREFIX(" prefix_num[opcode] ")")
+ }
+ if (length(flags) == 0)
+ continue
+ # check if last prefix
+ if (match(ext, lprefix1_expr)) {
+ lptable1[idx] = add_flags(lptable1[idx],flags)
+ variant = "INAT_VARIANT"
+ }
+ if (match(ext, lprefix2_expr)) {
+ lptable2[idx] = add_flags(lptable2[idx],flags)
+ variant = "INAT_VARIANT"
+ }
+ if (match(ext, lprefix3_expr)) {
+ lptable3[idx] = add_flags(lptable3[idx],flags)
+ variant = "INAT_VARIANT"
+ }
+		if (!match(ext, lprefix_expr)) {
+ table[idx] = add_flags(table[idx],flags)
+ }
+ }
+ if (variant)
+ table[idx] = add_flags(table[idx],variant)
+}
+
+END {
+ if (awkchecked != "")
+ exit 1
+ # print escape opcode map's array
+ print "/* Escape opcode map array */"
+ print "const insn_attr_t * const inat_escape_tables[INAT_ESC_MAX + 1]" \
+ "[INAT_LSTPFX_MAX + 1] = {"
+ for (i = 0; i < geid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (etable[i,j])
+ print " ["i"]["j"] = "etable[i,j]","
+ print "};\n"
+ # print group opcode map's array
+ print "/* Group opcode map array */"
+ print "const insn_attr_t * const inat_group_tables[INAT_GRP_MAX + 1]"\
+ "[INAT_LSTPFX_MAX + 1] = {"
+ for (i = 0; i < ggid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (gtable[i,j])
+ print " ["i"]["j"] = "gtable[i,j]","
+ print "};\n"
+ # print AVX opcode map's array
+ print "/* AVX opcode map array */"
+ print "const insn_attr_t * const inat_avx_tables[X86_VEX_M_MAX + 1]"\
+ "[INAT_LSTPFX_MAX + 1] = {"
+ for (i = 0; i < gaid; i++)
+ for (j = 0; j < max_lprefix; j++)
+ if (atable[i,j])
+ print " ["i"]["j"] = "atable[i,j]","
+ print "};"
+}
+
#ifndef _ORC_H
#define _ORC_H
-#include "orc_types.h"
+#include <asm/orc_types.h>
struct objtool_file;
+++ /dev/null
-/*
- * Copyright (C) 2017 Josh Poimboeuf <jpoimboe@redhat.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version 2
- * of the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ORC_TYPES_H
-#define _ORC_TYPES_H
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-
-/*
- * The ORC_REG_* registers are base registers which are used to find other
- * registers on the stack.
- *
- * ORC_REG_PREV_SP, also known as DWARF Call Frame Address (CFA), is the
- * address of the previous frame: the caller's SP before it called the current
- * function.
- *
- * ORC_REG_UNDEFINED means the corresponding register's value didn't change in
- * the current frame.
- *
- * The most commonly used base registers are SP and BP -- which the previous SP
- * is usually based on -- and PREV_SP and UNDEFINED -- which the previous BP is
- * usually based on.
- *
- * The rest of the base registers are needed for special cases like entry code
- * and GCC realigned stacks.
- */
-#define ORC_REG_UNDEFINED 0
-#define ORC_REG_PREV_SP 1
-#define ORC_REG_DX 2
-#define ORC_REG_DI 3
-#define ORC_REG_BP 4
-#define ORC_REG_SP 5
-#define ORC_REG_R10 6
-#define ORC_REG_R13 7
-#define ORC_REG_BP_INDIRECT 8
-#define ORC_REG_SP_INDIRECT 9
-#define ORC_REG_MAX 15
-
-/*
- * ORC_TYPE_CALL: Indicates that sp_reg+sp_offset resolves to PREV_SP (the
- * caller's SP right before it made the call). Used for all callable
- * functions, i.e. all C code and all callable asm functions.
- *
- * ORC_TYPE_REGS: Used in entry code to indicate that sp_reg+sp_offset points
- * to a fully populated pt_regs from a syscall, interrupt, or exception.
- *
- * ORC_TYPE_REGS_IRET: Used in entry code to indicate that sp_reg+sp_offset
- * points to the iret return frame.
- *
- * The UNWIND_HINT macros are used only for the unwind_hint struct. They
- * aren't used in struct orc_entry due to size and complexity constraints.
- * Objtool converts them to real types when it converts the hints to orc
- * entries.
- */
-#define ORC_TYPE_CALL 0
-#define ORC_TYPE_REGS 1
-#define ORC_TYPE_REGS_IRET 2
-#define UNWIND_HINT_TYPE_SAVE 3
-#define UNWIND_HINT_TYPE_RESTORE 4
-
-#ifndef __ASSEMBLY__
-/*
- * This struct is more or less a vastly simplified version of the DWARF Call
- * Frame Information standard. It contains only the necessary parts of DWARF
- * CFI, simplified for ease of access by the in-kernel unwinder. It tells the
- * unwinder how to find the previous SP and BP (and sometimes entry regs) on
- * the stack for a given code address. Each instance of the struct corresponds
- * to one or more code locations.
- */
-struct orc_entry {
- s16 sp_offset;
- s16 bp_offset;
- unsigned sp_reg:4;
- unsigned bp_reg:4;
- unsigned type:2;
-} __packed;
-
-/*
- * This struct is used by asm and inline asm code to manually annotate the
- * location of registers on the stack for the ORC unwinder.
- *
- * Type can be either ORC_TYPE_* or UNWIND_HINT_TYPE_*.
- */
-struct unwind_hint {
- u32 ip;
- s16 sp_offset;
- u8 sp_reg;
- u8 type;
-};
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ORC_TYPES_H */
--- /dev/null
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+FILES='
+arch/x86/lib/insn.c
+arch/x86/lib/inat.c
+arch/x86/lib/x86-opcode-map.txt
+arch/x86/tools/gen-insn-attr-x86.awk
+arch/x86/include/asm/insn.h
+arch/x86/include/asm/inat.h
+arch/x86/include/asm/inat_types.h
+arch/x86/include/asm/orc_types.h
+'
+
+check()
+{
+	local file="$1"
+
+	diff "$file" "../../$file" > /dev/null ||
+ echo "Warning: synced file at 'tools/objtool/$file' differs from latest kernel version at '$file'"
+}
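+# e.g. "check arch/x86/lib/insn.c" compares the copy synced under
+# tools/objtool/ against the kernel's master copy and warns on any drift.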
+
+if [ ! -d ../../kernel ] || [ ! -d ../../tools ] || [ ! -d ../objtool ]; then
+ exit 0
+fi
+
+for i in $FILES; do
+ check $i
+done
ifneq ($(OUTPUT),)
# check that the output directory actually exists
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+OUTDIR := $(shell cd $(OUTPUT) && pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
dprintf("set %s as cpufreq governor\n", governor);
- if (cpupower_is_cpu_online(cpu) != 0) {
+ if (cpupower_is_cpu_online(cpu) != 1) {
perror("cpufreq_cpu_exists");
fprintf(stderr, "error: cpu %u does not exist\n", cpu);
return -1;
{
int num;
char *tmp;
+ int this_cpu;
+
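+	/* query the CPU this thread currently runs on, not a fixed CPU 0 */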
+ this_cpu = sched_getcpu();
/* Assume idle state count is the same for all CPUs */
- cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(0);
+ cpuidle_sysfs_monitor.hw_states_num = cpuidle_state_count(this_cpu);
if (cpuidle_sysfs_monitor.hw_states_num <= 0)
return NULL;
for (num = 0; num < cpuidle_sysfs_monitor.hw_states_num; num++) {
- tmp = cpuidle_state_name(0, num);
+ tmp = cpuidle_state_name(this_cpu, num);
if (tmp == NULL)
continue;
strncpy(cpuidle_cstates[num].name, tmp, CSTATE_NAME_LEN - 1);
free(tmp);
- tmp = cpuidle_state_desc(0, num);
+ tmp = cpuidle_state_desc(this_cpu, num);
if (tmp == NULL)
continue;
strncpy(cpuidle_cstates[num].desc, tmp, CSTATE_DESC_LEN - 1);
# check that the output directory actually exists
ifneq ($(OUTPUT),)
-OUTDIR := $(shell cd $(OUTPUT) && /bin/pwd)
+OUTDIR := $(shell cd $(OUTPUT) && pwd)
$(if $(OUTDIR),, $(error output directory "$(OUTPUT)" does not exist))
endif
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct test_val, foo)),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr = "R2 min value is outside of the array range",
+ .errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
- BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGT, BPF_REG_3,
- offsetof(struct test_val, foo), 4),
+ offsetof(struct test_val, foo), 3),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
- BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
- BPF_MOV64_IMM(BPF_REG_1, 0),
- BPF_MOV64_IMM(BPF_REG_3, 0),
- BPF_EMIT_CALL(BPF_FUNC_probe_write_user),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_EMIT_CALL(BPF_FUNC_trace_printk),
BPF_EXIT_INSN(),
},
.fixup_map2 = { 3 },
- .errstr = "R2 min value is outside of the array range",
+ .errstr = "R1 min value is outside of the array range",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
- "helper access to variable memory: size = 0 allowed on NULL",
+ "helper access to variable memory: size = 0 allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size > 0 not allowed on NULL",
+ "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size = 0 allowed on != NULL stack pointer",
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size = 0 allowed on != NULL map pointer",
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer",
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size possible = 0 allowed on != NULL map pointer",
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
- "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer",
+ "helper access to variable memory: size possible = 0 allowed on != NULL packet pointer (ARG_PTR_TO_MEM_OR_NULL)",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, data)),
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
+ {
+ "helper access to variable memory: size = 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size > 0 not allowed on NULL (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_1, 0),
+ BPF_MOV64_IMM(BPF_REG_2, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "R1 type=inv expected=fp",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size possible = 0 allowed on != NULL stack pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 4),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
+ {
+ "helper access to variable memory: size possible = 0 allowed on != NULL map pointer (!ARG_PTR_TO_MEM_OR_NULL)",
+ .insns = {
+ BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+ BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+ BPF_LD_MAP_FD(BPF_REG_1, 0),
+ BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+ BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0),
+ BPF_JMP_IMM(BPF_JGT, BPF_REG_2, 8, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_probe_read),
+ BPF_EXIT_INSN(),
+ },
+ .fixup_map1 = { 3 },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_TRACEPOINT,
+ },
{
"helper access to variable memory: 8 bytes leak",
.insns = {
--- /dev/null
+#include <stdio.h>
+#include <sys/mman.h>
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+
+#define PAGE_SIZE 4096
+#define LOW_ADDR ((void *) (1UL << 30))
+#define HIGH_ADDR ((void *) (1UL << 50))
+
+struct testcase {
+ void *addr;
+ unsigned long size;
+ unsigned long flags;
+ const char *msg;
+ unsigned int low_addr_required:1;
+ unsigned int keep_mapped:1;
+};
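+
+/*
+ * low_addr_required: the returned mapping must land below the 47-bit
+ * boundary, even on kernels built with 5-level page tables.
+ * keep_mapped: leave the area mapped so the following "again" testcase
+ * runs against an already-populated range.
+ */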
+
+static struct testcase testcases[] = {
+ {
+ .addr = NULL,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = LOW_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(LOW_ADDR)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(HIGH_ADDR, MAP_FIXED)",
+ },
+ {
+ .addr = (void*) -1,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void*) -1,
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1) again",
+ },
+ {
+ .addr = (void *)((1UL << 47) - PAGE_SIZE),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+		.msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)((1UL << 47) - PAGE_SIZE / 2),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS,
+		.msg = "mmap((1UL << 47) - PAGE_SIZE / 2, 2 * PAGE_SIZE)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)((1UL << 47) - PAGE_SIZE),
+ .size = 2 * PAGE_SIZE,
+ .flags = MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap((1UL << 47) - PAGE_SIZE, 2 * PAGE_SIZE, MAP_FIXED)",
+ },
+ {
+ .addr = NULL,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(NULL, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = LOW_ADDR,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(LOW_ADDR, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(HIGH_ADDR, MAP_HUGETLB) again",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = HIGH_ADDR,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap(HIGH_ADDR, MAP_FIXED | MAP_HUGETLB)",
+ },
+ {
+ .addr = (void*) -1,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB)",
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void*) -1,
+ .size = 2UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+ .msg = "mmap(-1, MAP_HUGETLB) again",
+ },
+ {
+ .addr = (void *)((1UL << 47) - PAGE_SIZE),
+ .size = 4UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS,
+		.msg = "mmap((1UL << 47) - PAGE_SIZE, 4UL << 20, MAP_HUGETLB)",
+ .low_addr_required = 1,
+ .keep_mapped = 1,
+ },
+ {
+ .addr = (void *)((1UL << 47) - (2UL << 20)),
+ .size = 4UL << 20,
+ .flags = MAP_HUGETLB | MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+ .msg = "mmap((1UL << 47) - (2UL << 20), 4UL << 20, MAP_FIXED | MAP_HUGETLB)",
+ },
+};
+
+int main(int argc, char **argv)
+{
+ int i;
+ void *p;
+
+ for (i = 0; i < ARRAY_SIZE(testcases); i++) {
+ struct testcase *t = testcases + i;
+
+ p = mmap(t->addr, t->size, PROT_NONE, t->flags, -1, 0);
+
+ printf("%s: %p - ", t->msg, p);
+
+ if (p == MAP_FAILED) {
+ printf("FAILED\n");
+ continue;
+ }
+
+ if (t->low_addr_required && p >= (void *)(1UL << 47))
+ printf("FAILED\n");
+ else
+ printf("OK\n");
+ if (!t->keep_mapped)
+ munmap(p, t->size);
+ }
+ return 0;
+}
TARGETS_C_32BIT_ONLY := entry_from_vm86 syscall_arg_fault test_syscall_vdso unwind_vdso \
test_FCMOV test_FCOMI test_FISTTP \
vdso_restorer
-TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip
+TARGETS_C_64BIT_ONLY := fsgsbase sysret_rip 5lvl
TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
TARGETS_C_64BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_64BIT_ONLY)
struct mpx_bd_entry {
union {
char x[MPX_BOUNDS_DIR_ENTRY_SIZE_BYTES];
- void *contents[1];
+ void *contents[0];
};
} __attribute__((packed));
struct mpx_bt_entry {
union {
char x[MPX_BOUNDS_TABLE_ENTRY_SIZE_BYTES];
- unsigned long contents[1];
+ unsigned long contents[0];
};
} __attribute__((packed));
if (!dprint_in_signal) {
vprintf(format, ap);
} else {
+ int ret;
int len = vsnprintf(dprint_in_signal_buffer,
DPRINT_IN_SIGNAL_BUF_SIZE,
format, ap);
*/
if (len > DPRINT_IN_SIGNAL_BUF_SIZE)
len = DPRINT_IN_SIGNAL_BUF_SIZE;
- write(1, dprint_in_signal_buffer, len);
+ ret = write(1, dprint_in_signal_buffer, len);
+ if (ret < 0)
+ abort();
}
va_end(ap);
}
unsigned long ip;
char *fpregs;
u32 *pkru_ptr;
- u64 si_pkey;
+ u64 siginfo_pkey;
u32 *si_pkey_ptr;
int pkru_offset;
fpregset_t fpregset;
si_pkey_ptr = (u32 *)(((u8 *)si) + si_pkey_offset);
dprintf1("si_pkey_ptr: %p\n", si_pkey_ptr);
dump_mem(si_pkey_ptr - 8, 24);
- si_pkey = *si_pkey_ptr;
- pkey_assert(si_pkey < NR_PKEYS);
- last_si_pkey = si_pkey;
+ siginfo_pkey = *si_pkey_ptr;
+ pkey_assert(siginfo_pkey < NR_PKEYS);
+ last_si_pkey = siginfo_pkey;
if ((si->si_code == SEGV_MAPERR) ||
(si->si_code == SEGV_ACCERR) ||
dprintf1("signal pkru from xsave: %08x\n", *pkru_ptr);
/* need __rdpkru() version so we do not do shadow_pkru checking */
dprintf1("signal pkru from pkru: %08x\n", __rdpkru());
- dprintf1("si_pkey from siginfo: %jx\n", si_pkey);
+ dprintf1("pkey from siginfo: %jx\n", siginfo_pkey);
*(u64 *)pkru_ptr = 0x00000000;
dprintf1("WARNING: set PKRU=0 to allow faulting instruction to continue\n");
pkru_faults++;
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);
- struct irq_desc *desc;
- struct irq_data *data;
- int phys_irq;
int ret;
if (timer->enabled)
return -EINVAL;
}
- /*
- * Find the physical IRQ number corresponding to the host_vtimer_irq
- */
- desc = irq_to_desc(host_vtimer_irq);
- if (!desc) {
- kvm_err("%s: no interrupt descriptor\n", __func__);
- return -EINVAL;
- }
-
- data = irq_desc_get_irq_data(desc);
- while (data->parent_data)
- data = data->parent_data;
-
- phys_irq = data->hwirq;
-
- /*
- * Tell the VGIC that the virtual interrupt is tied to a
- * physical interrupt. We do that once per VCPU.
- */
- ret = kvm_vgic_map_phys_irq(vcpu, vtimer->irq.irq, phys_irq);
+ ret = kvm_vgic_map_phys_irq(vcpu, host_vtimer_irq, vtimer->irq.irq);
if (ret)
return ret;
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
#include <trace/events/kvm.h>
#include <kvm/arm_pmu.h>
{
int i;
+ kvm_vgic_destroy(kvm);
+
free_percpu(kvm->arch.last_vcpu_ran);
kvm->arch.last_vcpu_ran = NULL;
kvm->vcpus[i] = NULL;
}
}
-
- kvm_vgic_destroy(kvm);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
kvm_timer_schedule(vcpu);
+ kvm_vgic_v4_enable_doorbell(vcpu);
}
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
kvm_timer_unschedule(vcpu);
+ kvm_vgic_v4_disable_doorbell(vcpu);
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int ret;
- sigset_t sigsaved;
if (unlikely(!kvm_vcpu_initialized(vcpu)))
return -ENOEXEC;
if (run->immediate_exit)
return -EINTR;
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
+ kvm_sigset_activate(vcpu);
ret = 1;
run->exit_reason = KVM_EXIT_UNKNOWN;
kvm_pmu_update_run(vcpu);
}
- if (vcpu->sigset_active)
- sigprocmask(SIG_SETMASK, &sigsaved, NULL);
+ kvm_sigset_deactivate(vcpu);
+
return ret;
}
return NULL;
}
+bool kvm_arch_has_irq_bypass(void)
+{
+ return true;
+}
+
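+/*
+ * Called by the irqbypass framework when an irqfd consumer is paired
+ * with an IRQ producer (e.g. a VFIO device): forward the producer's
+ * host IRQ to the guest as a VLPI, using the MSI routing entry cached
+ * in the irqfd.
+ */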
+int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq,
+ &irqfd->irq_entry);
+}
+void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
+ struct irq_bypass_producer *prod)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq,
+ &irqfd->irq_entry);
+}
+
+void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ kvm_arm_halt_guest(irqfd->kvm);
+}
+
+void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons)
+{
+ struct kvm_kernel_irqfd *irqfd =
+ container_of(cons, struct kvm_kernel_irqfd, consumer);
+
+ kvm_arm_resume_guest(irqfd->kvm);
+}
+
/**
* Initialize Hyp-mode and memory mappings on all CPUs.
*/
cpu_if->vgic_ap1r[0] = __vgic_v3_read_ap1rn(0);
}
} else {
- if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+ cpu_if->its_vpe.its_vm)
write_gicreg(0, ICH_HCR_EL2);
cpu_if->vgic_elrsr = 0xffff;
/*
* If we need to trap system registers, we must write
* ICH_HCR_EL2 anyway, even if no interrupts are being
- * injected,
+ * injected. Same thing if GICv4 is used, as VLPI
+ * delivery is gated by ICH_HCR_EL2.En.
*/
- if (static_branch_unlikely(&vgic_v3_cpuif_trap))
+ if (static_branch_unlikely(&vgic_v3_cpuif_trap) ||
+ cpu_if->its_vpe.its_vm)
write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
}
if (ret)
goto out;
+ ret = vgic_v4_init(kvm);
+ if (ret)
+ goto out;
+
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_vgic_vcpu_enable(vcpu);
kfree(dist->spis);
dist->nr_spis = 0;
+
+ if (vgic_supports_direct_msis(kvm))
+ vgic_v4_teardown(kvm);
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
static int vgic_its_restore_tables_v0(struct vgic_its *its);
static int vgic_its_commit_v0(struct vgic_its *its);
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
- struct kvm_vcpu *filter_vcpu);
+ struct kvm_vcpu *filter_vcpu, bool needs_inv);
/*
* Creates a new (reference to a) struct vgic_irq for a given LPI.
* However we only have those structs for mapped IRQs, so we read in
* the respective config data from memory here upon mapping the LPI.
*/
- ret = update_lpi_config(kvm, irq, NULL);
+ ret = update_lpi_config(kvm, irq, NULL, false);
if (ret)
return ERR_PTR(ret);
* VCPU. Unconditionally applies if filter_vcpu is NULL.
*/
static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq,
- struct kvm_vcpu *filter_vcpu)
+ struct kvm_vcpu *filter_vcpu, bool needs_inv)
{
u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser);
u8 prop;
irq->priority = LPI_PROP_PRIORITY(prop);
irq->enabled = LPI_PROP_ENABLE_BIT(prop);
- vgic_queue_irq_unlock(kvm, irq, flags);
- } else {
- spin_unlock_irqrestore(&irq->irq_lock, flags);
+ if (!irq->hw) {
+ vgic_queue_irq_unlock(kvm, irq, flags);
+ return 0;
+ }
}
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
+
+ if (irq->hw)
+ return its_prop_update_vlpi(irq->host_irq, prop, needs_inv);
+
return 0;
}
return i;
}
+static int update_affinity(struct vgic_irq *irq, struct kvm_vcpu *vcpu)
+{
+ int ret = 0;
+
+ spin_lock(&irq->irq_lock);
+ irq->target_vcpu = vcpu;
+ spin_unlock(&irq->irq_lock);
+
+ if (irq->hw) {
+ struct its_vlpi_map map;
+
+ ret = its_get_vlpi(irq->host_irq, &map);
+ if (ret)
+ return ret;
+
+ map.vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+ ret = its_map_vlpi(irq->host_irq, &map);
+ }
+
+ return ret;
+}
+
/*
* Promotes the ITS view of affinity of an ITTE (which redistributor this LPI
* is targeting) to the VGIC's view, which deals with target VCPUs.
return;
vcpu = kvm_get_vcpu(kvm, ite->collection->target_addr);
-
- spin_lock(&ite->irq->irq_lock);
- ite->irq->target_vcpu = vcpu;
- spin_unlock(&ite->irq->irq_lock);
+ update_affinity(ite->irq, vcpu);
}
/*
return 0;
}
-/*
- * Find the target VCPU and the LPI number for a given devid/eventid pair
- * and make this IRQ pending, possibly injecting it.
- * Must be called with the its_lock mutex held.
- * Returns 0 on success, a positive error value for any ITS mapping
- * related errors and negative error values for generic errors.
- */
-static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
- u32 devid, u32 eventid)
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+ u32 devid, u32 eventid, struct vgic_irq **irq)
{
struct kvm_vcpu *vcpu;
struct its_ite *ite;
- unsigned long flags;
if (!its->enabled)
return -EBUSY;
if (!vcpu->arch.vgic_cpu.lpis_enabled)
return -EBUSY;
- spin_lock_irqsave(&ite->irq->irq_lock, flags);
- ite->irq->pending_latch = true;
- vgic_queue_irq_unlock(kvm, ite->irq, flags);
-
+ *irq = ite->irq;
return 0;
}
-static struct vgic_io_device *vgic_get_its_iodev(struct kvm_io_device *dev)
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi)
{
+ u64 address;
+ struct kvm_io_device *kvm_io_dev;
struct vgic_io_device *iodev;
- if (dev->ops != &kvm_io_gic_ops)
- return NULL;
+ if (!vgic_has_its(kvm))
+ return ERR_PTR(-ENODEV);
- iodev = container_of(dev, struct vgic_io_device, dev);
+ if (!(msi->flags & KVM_MSI_VALID_DEVID))
+ return ERR_PTR(-EINVAL);
+ address = (u64)msi->address_hi << 32 | msi->address_lo;
+
+ kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
+ if (!kvm_io_dev)
+ return ERR_PTR(-EINVAL);
+
+ if (kvm_io_dev->ops != &kvm_io_gic_ops)
+ return ERR_PTR(-EINVAL);
+
+ iodev = container_of(kvm_io_dev, struct vgic_io_device, dev);
if (iodev->iodev_type != IODEV_ITS)
- return NULL;
+ return ERR_PTR(-EINVAL);
+
+ return iodev->its;
+}
+
+/*
+ * Find the target VCPU and the LPI number for a given devid/eventid pair
+ * and make this IRQ pending, possibly injecting it.
+ * Must be called with the its_lock mutex held.
+ * Returns 0 on success, a positive error value for any ITS mapping
+ * related errors and negative error values for generic errors.
+ */
+static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its,
+ u32 devid, u32 eventid)
+{
+ struct vgic_irq *irq = NULL;
+ unsigned long flags;
+ int err;
+
+ err = vgic_its_resolve_lpi(kvm, its, devid, eventid, &irq);
+ if (err)
+ return err;
+
+ if (irq->hw)
+ return irq_set_irqchip_state(irq->host_irq,
+ IRQCHIP_STATE_PENDING, true);
+
+ spin_lock_irqsave(&irq->irq_lock, flags);
+ irq->pending_latch = true;
+ vgic_queue_irq_unlock(kvm, irq, flags);
- return iodev;
+ return 0;
}
/*
*/
int vgic_its_inject_msi(struct kvm *kvm, struct kvm_msi *msi)
{
- u64 address;
- struct kvm_io_device *kvm_io_dev;
- struct vgic_io_device *iodev;
+ struct vgic_its *its;
int ret;
- if (!vgic_has_its(kvm))
- return -ENODEV;
-
- if (!(msi->flags & KVM_MSI_VALID_DEVID))
- return -EINVAL;
+ its = vgic_msi_to_its(kvm, msi);
+ if (IS_ERR(its))
+ return PTR_ERR(its);
- address = (u64)msi->address_hi << 32 | msi->address_lo;
-
- kvm_io_dev = kvm_io_bus_get_dev(kvm, KVM_MMIO_BUS, address);
- if (!kvm_io_dev)
- return -EINVAL;
-
- iodev = vgic_get_its_iodev(kvm_io_dev);
- if (!iodev)
- return -EINVAL;
-
- mutex_lock(&iodev->its->its_lock);
- ret = vgic_its_trigger_msi(kvm, iodev->its, msi->devid, msi->data);
- mutex_unlock(&iodev->its->its_lock);
+ mutex_lock(&its->its_lock);
+ ret = vgic_its_trigger_msi(kvm, its, msi->devid, msi->data);
+ mutex_unlock(&its->its_lock);
if (ret < 0)
return ret;
list_del(&ite->ite_list);
/* This put matches the get in vgic_add_lpi. */
- if (ite->irq)
+ if (ite->irq) {
+ if (ite->irq->hw)
+ WARN_ON(its_unmap_vlpi(ite->irq->host_irq));
+
vgic_put_irq(kvm, ite->irq);
+ }
kfree(ite);
}
ite->collection = collection;
vcpu = kvm_get_vcpu(kvm, collection->target_addr);
- spin_lock(&ite->irq->irq_lock);
- ite->irq->target_vcpu = vcpu;
- spin_unlock(&ite->irq->irq_lock);
-
- return 0;
+ return update_affinity(ite->irq, vcpu);
}
/*
ite->irq->pending_latch = false;
+ if (ite->irq->hw)
+ return irq_set_irqchip_state(ite->irq->host_irq,
+ IRQCHIP_STATE_PENDING, false);
+
return 0;
}
if (!ite)
return E_ITS_INV_UNMAPPED_INTERRUPT;
- return update_lpi_config(kvm, ite->irq, NULL);
+ return update_lpi_config(kvm, ite->irq, NULL, true);
}
/*
irq = vgic_get_irq(kvm, NULL, intids[i]);
if (!irq)
continue;
- update_lpi_config(kvm, irq, vcpu);
+ update_lpi_config(kvm, irq, vcpu, false);
vgic_put_irq(kvm, irq);
}
kfree(intids);
+ if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.its_vm)
+ its_invall_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe);
+
return 0;
}
static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
u64 *its_cmd)
{
- struct vgic_dist *dist = &kvm->arch.vgic;
u32 target1_addr = its_cmd_get_target_addr(its_cmd);
u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
struct kvm_vcpu *vcpu1, *vcpu2;
struct vgic_irq *irq;
+ u32 *intids;
+ int irq_count, i;
if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
target2_addr >= atomic_read(&kvm->online_vcpus))
vcpu1 = kvm_get_vcpu(kvm, target1_addr);
vcpu2 = kvm_get_vcpu(kvm, target2_addr);
- spin_lock(&dist->lpi_list_lock);
+ irq_count = vgic_copy_lpi_list(vcpu1, &intids);
+ if (irq_count < 0)
+ return irq_count;
- list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) {
- spin_lock(&irq->irq_lock);
+ for (i = 0; i < irq_count; i++) {
+ irq = vgic_get_irq(kvm, NULL, intids[i]);
- if (irq->target_vcpu == vcpu1)
- irq->target_vcpu = vcpu2;
+ update_affinity(irq, vcpu2);
- spin_unlock(&irq->irq_lock);
+ vgic_put_irq(kvm, irq);
}
- spin_unlock(&dist->lpi_list_lock);
-
+ kfree(intids);
return 0;
}
if (!its)
return -ENOMEM;
+ if (vgic_initialized(dev->kvm)) {
+ int ret = vgic_v4_init(dev->kvm);
+ if (ret < 0) {
+ kfree(its);
+ return ret;
+ }
+ }
+
mutex_init(&its->its_lock);
mutex_init(&its->cmd_lock);
list_for_each_entry(ite, &device->itt_head, ite_list) {
gpa_t gpa = base + ite->event_id * ite_esz;
+ /*
+ * If an LPI carries the HW bit, this means that this
+ * interrupt is controlled by GICv4, and we do not
+ * have direct access to that state. Let's simply fail
+ * the save operation...
+ */
+ if (ite->irq->hw)
+ return -EACCES;
+
ret = vgic_its_save_ite(its, device, ite, gpa, ite_esz);
if (ret)
return ret;
return dist->has_its;
}
+bool vgic_supports_direct_msis(struct kvm *kvm)
+{
+ return kvm_vgic_global_state.has_gicv4 && vgic_has_its(kvm);
+}
+
static unsigned long vgic_mmio_read_v3_misc(struct kvm_vcpu *vcpu,
gpa_t addr, unsigned int len)
{
static bool group0_trap;
static bool group1_trap;
static bool common_trap;
+static bool gicv4_enable;
void vgic_v3_set_underflow(struct kvm_vcpu *vcpu)
{
}
early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
+static int __init early_gicv4_enable(char *buf)
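+/*
+ * GICv4 support is off by default (gicv4_enable above defaults to
+ * false) and is opted into with the "kvm-arm.vgic_v4_enable=1" boot
+ * parameter.
+ */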
+{
+ return strtobool(buf, &gicv4_enable);
+}
+early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
+
/**
* vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
* @node: pointer to the DT node
kvm_vgic_global_state.can_emulate_gicv2 = false;
kvm_vgic_global_state.ich_vtr_el2 = ich_vtr_el2;
+ /* GICv4 support? */
+ if (info->has_v4) {
+ kvm_vgic_global_state.has_gicv4 = gicv4_enable;
+ kvm_info("GICv4 support %sabled\n",
+ gicv4_enable ? "en" : "dis");
+ }
+
if (!info->vcpu.start) {
kvm_info("GICv3: no GICV resource entry\n");
kvm_vgic_global_state.vcpu_base = 0;
--- /dev/null
+/*
+ * Copyright (C) 2017 ARM Ltd.
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kvm_host.h>
+#include <linux/irqchip/arm-gic-v3.h>
+
+#include "vgic.h"
+
+/*
+ * How KVM uses GICv4 (insert rude comments here):
+ *
+ * The vgic-v4 layer acts as a bridge between several entities:
+ * - The GICv4 ITS representation offered by the ITS driver
+ * - VFIO, which is in charge of the PCI endpoint
+ * - The virtual ITS, which is the only thing the guest sees
+ *
+ * The configuration of VLPIs is triggered by a callback from VFIO,
+ * instructing KVM that a PCI device has been configured to deliver
+ * MSIs to a vITS.
+ *
+ * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
+ * and this is used to find the corresponding vITS data structures
+ * (ITS instance, device, event and irq) using a process that is
+ * extremely similar to the injection of an MSI.
+ *
+ * At this stage, we can link the guest's view of an LPI (uniquely
+ * identified by the routing entry) and the host irq, using the GICv4
+ * driver mapping operation. Should the mapping succeed, we've then
+ * successfully upgraded the guest's LPI to a VLPI. We can then start
+ * with updating GICv4's view of the property table and generating an
+ * INValidation in order to kickstart the delivery of this VLPI to the
+ * guest directly, without software intervention. Well, almost.
+ *
+ * When the PCI endpoint is deconfigured, this operation is reversed
+ * with VFIO calling kvm_vgic_v4_unset_forwarding().
+ *
+ * Once the VLPI has been mapped, it needs to follow any change the
+ * guest performs on its LPI through the vITS. For that, a number of
+ * command handlers have hooks to communicate these changes to the HW:
+ * - Any invalidation triggers a call to its_prop_update_vlpi()
+ * - The INT command results in an irq_set_irqchip_state(), which
+ *   generates an INT on the corresponding VLPI.
+ * - The CLEAR command results in an irq_set_irqchip_state(), which
+ *   generates a CLEAR on the corresponding VLPI.
+ * - DISCARD translates into an unmap, similar to a call to
+ *   kvm_vgic_v4_unset_forwarding().
+ * - MOVI is translated into an update of the existing mapping, changing
+ *   the target vcpu, resulting in a VMOVI being generated.
+ * - MOVALL is translated into a string of mapping updates (similar to
+ *   the handling of MOVI). MOVALL is horrible.
+ *
+ * Note that a DISCARD/MAPTI sequence emitted from the guest without
+ * reprogramming the PCI endpoint after MAPTI does not result in a
+ * VLPI being mapped, as there is no callback from VFIO (the guest
+ * will get the interrupt via the normal SW injection). Fixing this is
+ * not trivial, and requires some horrible messing with the VFIO
+ * internals. Not fun. Don't do that.
+ *
+ * Then there is the scheduling. Each time a vcpu is about to run on a
+ * physical CPU, KVM must tell the corresponding redistributor about
+ * it. And if we've migrated our vcpu from one CPU to another, we must
+ * tell the ITS (so that the messages reach the right redistributor).
+ * This is done in two steps: first issue an irq_set_affinity() on the
+ * irq corresponding to the vcpu, then call its_schedule_vpe(). You
+ * must be in a non-preemptible context. On exit, another call to
+ * its_schedule_vpe() tells the redistributor that we're done with the
+ * vcpu.
+ *
+ * Finally, the doorbell handling: Each vcpu is allocated an interrupt
+ * which will fire each time a VLPI is made pending whilst the vcpu is
+ * not running. Each time the vcpu gets blocked, the doorbell
+ * interrupt gets enabled. When the vcpu is unblocked (for whatever
+ * reason), the doorbell interrupt is disabled.
+ */
+
+#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)
+
+static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
+{
+ struct kvm_vcpu *vcpu = info;
+
+ vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
+ kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+ kvm_vcpu_kick(vcpu);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * vgic_v4_init - Initialize the GICv4 data structures
+ * @kvm: Pointer to the VM being initialized
+ *
+ * We may be called each time a vITS is created, or when the
+ * vgic is initialized. This relies on kvm->lock to be
+ * held. In both cases, the number of vcpus should now be
+ * fixed.
+ */
+int vgic_v4_init(struct kvm *kvm)
+{
+ struct vgic_dist *dist = &kvm->arch.vgic;
+ struct kvm_vcpu *vcpu;
+ int i, nr_vcpus, ret;
+
+ if (!vgic_supports_direct_msis(kvm))
+ return 0; /* Nothing to see here... move along. */
+
+ if (dist->its_vm.vpes)
+ return 0;
+
+ nr_vcpus = atomic_read(&kvm->online_vcpus);
+
+ dist->its_vm.vpes = kzalloc(sizeof(*dist->its_vm.vpes) * nr_vcpus,
+ GFP_KERNEL);
+ if (!dist->its_vm.vpes)
+ return -ENOMEM;
+
+ dist->its_vm.nr_vpes = nr_vcpus;
+
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
+
+ ret = its_alloc_vcpu_irqs(&dist->its_vm);
+ if (ret < 0) {
+ kvm_err("VPE IRQ allocation failure\n");
+ kfree(dist->its_vm.vpes);
+ dist->its_vm.nr_vpes = 0;
+ dist->its_vm.vpes = NULL;
+ return ret;
+ }
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ int irq = dist->its_vm.vpes[i]->irq;
+
+ /*
+ * Don't automatically enable the doorbell, as we're
+ * flipping it back and forth when the vcpu gets
+ * blocked. Also disable the lazy disabling, as the
+ * doorbell could kick us out of the guest too
+ * early...
+ */
+ irq_set_status_flags(irq, DB_IRQ_FLAGS);
+ ret = request_irq(irq, vgic_v4_doorbell_handler,
+ 0, "vcpu", vcpu);
+ if (ret) {
+ kvm_err("failed to allocate vcpu IRQ%d\n", irq);
+ /*
+ * Trick: adjust the number of vpes so we know
+ * how many to nuke on teardown...
+ */
+ dist->its_vm.nr_vpes = i;
+ break;
+ }
+ }
+
+ if (ret)
+ vgic_v4_teardown(kvm);
+
+ return ret;
+}
+
+/**
+ * vgic_v4_teardown - Free the GICv4 data structures
+ * @kvm: Pointer to the VM being destroyed
+ *
+ * Relies on kvm->lock to be held.
+ */
+void vgic_v4_teardown(struct kvm *kvm)
+{
+ struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
+ int i;
+
+ if (!its_vm->vpes)
+ return;
+
+ for (i = 0; i < its_vm->nr_vpes; i++) {
+ struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
+ int irq = its_vm->vpes[i]->irq;
+
+ irq_clear_status_flags(irq, DB_IRQ_FLAGS);
+ free_irq(irq, vcpu);
+ }
+
+ its_free_vcpu_irqs(its_vm);
+ kfree(its_vm->vpes);
+ its_vm->nr_vpes = 0;
+ its_vm->vpes = NULL;
+}
+
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+ if (!vgic_supports_direct_msis(vcpu->kvm))
+ return 0;
+
+ return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
+}
+
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+ int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+ int err;
+
+ if (!vgic_supports_direct_msis(vcpu->kvm))
+ return 0;
+
+ /*
+ * Before making the VPE resident, make sure the redistributor
+ * corresponding to our current CPU expects us here. See the
+ * doc in drivers/irqchip/irq-gic-v4.c to understand how this
+ * turns into a VMOVP command at the ITS level.
+ */
+ err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
+ if (err)
+ return err;
+
+ err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
+ if (err)
+ return err;
+
+ /*
+ * Now that the VPE is resident, let's get rid of a potential
+ * doorbell interrupt that would still be pending.
+ */
+ err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);
+
+ return err;
+}
+
+static struct vgic_its *vgic_get_its(struct kvm *kvm,
+ struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+ struct kvm_msi msi = (struct kvm_msi) {
+ .address_lo = irq_entry->msi.address_lo,
+ .address_hi = irq_entry->msi.address_hi,
+ .data = irq_entry->msi.data,
+ .flags = irq_entry->msi.flags,
+ .devid = irq_entry->msi.devid,
+ };
+
+ return vgic_msi_to_its(kvm, &msi);
+}
+
+int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
+ struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+ struct vgic_its *its;
+ struct vgic_irq *irq;
+ struct its_vlpi_map map;
+ int ret;
+
+ if (!vgic_supports_direct_msis(kvm))
+ return 0;
+
+ /*
+ * Get the ITS, and escape early on error (not a valid
+ * doorbell for any of our vITSs).
+ */
+ its = vgic_get_its(kvm, irq_entry);
+ if (IS_ERR(its))
+ return 0;
+
+ mutex_lock(&its->its_lock);
+
+	/* Perform the actual DevID/EventID -> LPI translation. */
+ ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+ irq_entry->msi.data, &irq);
+ if (ret)
+ goto out;
+
+ /*
+ * Emit the mapping request. If it fails, the ITS probably
+ * isn't v4 compatible, so let's silently bail out. Holding
+ * the ITS lock should ensure that nothing can modify the
+ * target vcpu.
+ */
+ map = (struct its_vlpi_map) {
+ .vm = &kvm->arch.vgic.its_vm,
+ .vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
+ .vintid = irq->intid,
+ .properties = ((irq->priority & 0xfc) |
+ (irq->enabled ? LPI_PROP_ENABLED : 0) |
+ LPI_PROP_GROUP1),
+ .db_enabled = true,
+ };
+
+ ret = its_map_vlpi(virq, &map);
+ if (ret)
+ goto out;
+
+ irq->hw = true;
+ irq->host_irq = virq;
+
+out:
+ mutex_unlock(&its->its_lock);
+ return ret;
+}
+
+int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
+ struct kvm_kernel_irq_routing_entry *irq_entry)
+{
+ struct vgic_its *its;
+ struct vgic_irq *irq;
+ int ret;
+
+ if (!vgic_supports_direct_msis(kvm))
+ return 0;
+
+ /*
+ * Get the ITS, and escape early on error (not a valid
+ * doorbell for any of our vITSs).
+ */
+ its = vgic_get_its(kvm, irq_entry);
+ if (IS_ERR(its))
+ return 0;
+
+ mutex_lock(&its->its_lock);
+
+ ret = vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
+ irq_entry->msi.data, &irq);
+ if (ret)
+ goto out;
+
+ WARN_ON(!(irq->hw && irq->host_irq == virq));
+ irq->hw = false;
+ ret = its_unmap_vlpi(virq);
+
+out:
+ mutex_unlock(&its->its_lock);
+ return ret;
+}
+
+void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
+{
+ if (vgic_supports_direct_msis(vcpu->kvm)) {
+ int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+ if (irq)
+ enable_irq(irq);
+ }
+}
+
+void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
+{
+ if (vgic_supports_direct_msis(vcpu->kvm)) {
+ int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
+ if (irq)
+ disable_irq(irq);
+ }
+}
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
#include "vgic.h"
return 0;
}
-int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq)
+/* @irq->irq_lock must be held */
+static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
+ unsigned int host_irq)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ struct irq_desc *desc;
+ struct irq_data *data;
+
+ /*
+ * Find the physical IRQ number corresponding to @host_irq
+ */
+ desc = irq_to_desc(host_irq);
+ if (!desc) {
+ kvm_err("%s: no interrupt descriptor\n", __func__);
+ return -EINVAL;
+ }
+ data = irq_desc_get_irq_data(desc);
+ while (data->parent_data)
+ data = data->parent_data;
+
+ irq->hw = true;
+ irq->host_irq = host_irq;
+ irq->hwintid = data->hwirq;
+ return 0;
+}
+
+/* @irq->irq_lock must be held */
+static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
+{
+ irq->hw = false;
+ irq->hwintid = 0;
+}
+
+int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
+ u32 vintid)
+{
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
unsigned long flags;
+ int ret;
BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags);
-
- irq->hw = true;
- irq->hwintid = phys_irq;
-
+ ret = kvm_vgic_map_irq(vcpu, irq, host_irq);
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
- return 0;
+ return ret;
}
-int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
struct vgic_irq *irq;
unsigned long flags;
if (!vgic_initialized(vcpu->kvm))
return -EAGAIN;
- irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
BUG_ON(!irq);
spin_lock_irqsave(&irq->irq_lock, flags);
-
- irq->hw = false;
- irq->hwintid = 0;
-
+ kvm_vgic_unmap_irq(irq);
spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
+ WARN_ON(vgic_v4_sync_hwstate(vcpu));
+
/* An empty ap_list_head implies used_lrs == 0 */
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
+ WARN_ON(vgic_v4_flush_hwstate(vcpu));
+
/*
* If there are no virtual interrupts active or pending for this
* VCPU, then there is no work to do and we can bail out without
if (!vcpu->kvm->arch.vgic.enabled)
return false;
+ if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
+ return true;
+
spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
}
}
-bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq)
+bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
- struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq);
+ struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid);
bool map_is_active;
unsigned long flags;
}
}
+int vgic_its_resolve_lpi(struct kvm *kvm, struct vgic_its *its,
+ u32 devid, u32 eventid, struct vgic_irq **irq);
+struct vgic_its *vgic_msi_to_its(struct kvm *kvm, struct kvm_msi *msi);
+
+bool vgic_supports_direct_msis(struct kvm *kvm);
+int vgic_v4_init(struct kvm *kvm);
+void vgic_v4_teardown(struct kvm *kvm);
+int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
+int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);
+
#endif
}
EXPORT_SYMBOL_GPL(kvm_vcpu_mark_page_dirty);
+void kvm_sigset_activate(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->sigset_active)
+ return;
+
+ /*
+	 * This does a lockless modification of ->real_blocked, which is fine
+	 * because only current can change ->real_blocked and all readers of
+	 * ->real_blocked don't care as long as ->real_blocked is always a
+	 * subset of ->blocked.
+ */
+	sigprocmask(SIG_SETMASK, &vcpu->sigset, &current->real_blocked);
+}
+
+void kvm_sigset_deactivate(struct kvm_vcpu *vcpu)
+{
+ if (!vcpu->sigset_active)
+ return;
+
+	sigprocmask(SIG_SETMASK, &current->real_blocked, NULL);
+	sigemptyset(&current->real_blocked);
+}
+
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
unsigned int old, val, grow;