/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 * Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <asm/asm.h>
#include <asm/cpufeature.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/user.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern void math_state_restore(void);
extern void __math_state_restore(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
                          xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
                          xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
struct _fpstate_ia32;
struct _xstate_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

#define X87_FSW_ES (1 << 7) /* Exception Summary */

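/*
 * Feature checks for the save/restore paths below. static_cpu_has()
 * resolves the test via the boot-time alternatives patching machinery
 * (when the compiler supports it), so these helpers are essentially
 * free at runtime.
 */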
static __always_inline __pure bool use_xsaveopt(void)
{
        return static_cpu_has(X86_FEATURE_XSAVEOPT);
}

static __always_inline __pure bool use_xsave(void)
{
        return static_cpu_has(X86_FEATURE_XSAVE);
}

static __always_inline __pure bool use_fxsr(void)
{
        return static_cpu_has(X86_FEATURE_FXSR);
}

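/*
 * With xsaveopt the hardware may skip writing state components that
 * are in their init state, leaving stale data in the save area.
 * __sanitize_i387_state() rewrites those areas with legal init values
 * before the buffer is exposed, e.g. to ptrace or a core dump.
 */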
extern void __sanitize_i387_state(struct task_struct *);

static inline void sanitize_i387_state(struct task_struct *tsk)
{
        if (!use_xsaveopt())
                return;
        __sanitize_i387_state(tsk);
}

#ifdef CONFIG_X86_64
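/*
 * Restore the FXSAVE image at *fx. Returns 0 on success, or -1 if
 * fxrstor faulted and the exception fixup path ran.
 */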
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        int err;

        /* See comment in fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
        asm volatile("1: fxrstorq %[fx]\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
                     : [fx] "m" (*fx), "0" (0));
#else
        asm volatile("1: rex64/fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
                     : [fx] "R" (fx), "m" (*fx), "0" (0));
#endif
        return err;
}

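/*
 * Save FXSAVE state directly into a user-space buffer. The sw_reserved
 * bytes are cleared first because fxsave does not write them. Returns
 * nonzero if accessing the user buffer faulted.
 */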
static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
        int err;

        /*
         * Clear the bytes that fxsave does not touch and that are
         * reserved for SW usage.
         */
        err = __clear_user(&fx->sw_reserved,
                           sizeof(struct _fpx_sw_bytes));
        if (unlikely(err))
                return -EFAULT;

        /* See comment in fxsave() below. */
#ifdef CONFIG_AS_FXSAVEQ
        asm volatile("1: fxsaveq %[fx]\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), [fx] "=m" (*fx)
                     : "0" (0));
#else
        asm volatile("1: rex64/fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
                     : [fx] "R" (fx), "0" (0));
#endif
        if (unlikely(err) &&
            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                err = -EFAULT;
        /* No need to clear here because the caller clears USED_MATH */
        return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
        /* Using "rex64; fxsave %0" is broken because, if the memory operand
           uses any extended registers for addressing, a second REX prefix
           will be generated (to the assembler, rex64 followed by semicolon
           is a separate instruction), and hence the 64-bitness is lost. */

#ifdef CONFIG_AS_FXSAVEQ
        /* Using "fxsaveq %0" would be the ideal choice, but is only supported
           starting with gas 2.16. */
        __asm__ __volatile__("fxsaveq %0"
                             : "=m" (fpu->state->fxsave));
#else
        /* Using, as a workaround, the properly prefixed form below isn't
           accepted by any binutils version so far released, complaining that
           the same type of prefix is used twice if an extended register is
           needed for addressing (fix submitted to mainline 2005-11-21).
                asm volatile("rex64/fxsave %0"
                             : "=m" (fpu->state->fxsave));
           This, however, we can work around by forcing the compiler to select
           an addressing mode that doesn't require extended registers. */
        asm volatile("rex64/fxsave (%[fx])"
                     : "=m" (fpu->state->fxsave)
                     : [fx] "R" (&fpu->state->fxsave));
#endif
}

#else /* CONFIG_X86_32 */

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        /*
         * The "nop" is needed to make the instructions the same
         * length.
         */
        alternative_input(
                "nop ; frstor %1",
                "fxrstor %1",
                X86_FEATURE_FXSR,
                "m" (*fx));

        return 0;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
        asm volatile("fxsave %[fx]"
                     : [fx] "=m" (fpu->state->fxsave));
}

#endif /* CONFIG_X86_64 */

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (__get_cpu_var(kernel_cpustat).cpustat[CPUTIME_USER])
#endif

/*
 * These must be called with preempt disabled
 */
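/*
 * Note that fnsave, unlike fxsave, also reinitializes the FPU after
 * saving, which is why the fnsave path below returns early: the
 * register state is already back in its init state.
 */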
static inline void fpu_save_init(struct fpu *fpu)
{
        if (use_xsave()) {
                fpu_xsave(fpu);

                /*
                 * xsave header may indicate the init state of the FP.
                 */
                if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
                        return;
        } else if (use_fxsr()) {
                fpu_fxsave(fpu);
        } else {
                asm volatile("fnsave %[fx]; fwait"
                             : [fx] "=m" (fpu->state->fsave));
                return;
        }

        if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES))
                asm volatile("fnclex");

        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending. Clear the x87 state here by setting it to fixed
           values. safe_address is a random variable that should be in L1 */
        alternative_input(
                ASM_NOP8 ASM_NOP2,
                "emms\n\t"        /* clear stack tags */
                "fildl %P[addr]", /* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (safe_address));
}

static inline void __save_init_fpu(struct task_struct *tsk)
{
        fpu_save_init(&tsk->thread.fpu);
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
        return fxrstor_checking(&fpu->state->fxsave);
}

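/*
 * Restore from the xsave area when the CPU supports XSAVE, otherwise
 * from the fxsave image. Returns nonzero if the restore faulted.
 */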
static inline int fpu_restore_checking(struct fpu *fpu)
{
        if (use_xsave())
                return fpu_xrstor_checking(fpu);
        else
                return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
        return fpu_restore_checking(&tsk->thread.fpu);
}

/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

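/*
 * If the task used the FPU in this slice, save its state and set TS so
 * the next FP instruction traps; otherwise reset fpu_counter, the
 * counter that drives the lazy/eager FPU preload heuristic at context
 * switch.
 */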
static inline void __unlazy_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                __save_init_fpu(tsk);
                stts();
        } else
                tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                /* Ignore delayed exceptions from user space */
                asm volatile("1: fwait\n"
                             "2:\n"
                             _ASM_EXTABLE(1b, 2b));
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                stts();
        }
}

/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * We can do a kernel_fpu_begin/end() pair *ONLY* if that
 * pair does nothing at all: TS_USEDFPU must be clear (so
 * that we don't try to save the FPU state), and TS must
 * be set (so that the clts/stts pair does nothing that is
 * visible in the interrupted kernel thread).
 */
static inline bool interrupted_kernel_fpu_idle(void)
{
        return !(current_thread_info()->status & TS_USEDFPU) &&
               (read_cr0() & X86_CR0_TS);
}

/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static inline bool interrupted_user_mode(void)
{
        struct pt_regs *regs = get_irq_regs();
        return regs && user_mode_vm(regs);
}

/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
static inline bool irq_fpu_usable(void)
{
        return !in_interrupt() ||
               interrupted_user_mode() ||
               interrupted_kernel_fpu_idle();
}

static inline void kernel_fpu_begin(void)
{
        struct thread_info *me = current_thread_info();

        WARN_ON_ONCE(!irq_fpu_usable());
        preempt_disable();
        if (me->status & TS_USEDFPU)
                __save_init_fpu(me->task);
        else
                clts();
}

static inline void kernel_fpu_end(void)
{
        stts();
        preempt_enable();
}

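/*
 * Typical usage (sketch) for code that wants FP/SSE in kernel mode:
 *
 *      if (irq_fpu_usable()) {
 *              kernel_fpu_begin();
 *              ... FP/MMX/SSE instructions ...
 *              kernel_fpu_end();
 *      }
 */
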
/*
 * Some instructions, such as VIA's padlock instructions, generate a
 * spurious DNA fault but don't modify SSE registers. These instructions
 * get used from interrupt context as well. To prevent them from
 * interacting wrongly with other user/kernel FPU usage in interrupt
 * context, they should be used only within irq_ts_save()/irq_ts_restore().
 */
static inline int irq_ts_save(void)
{
        /*
         * If we are in process context and not atomic, we can simply
         * take the spurious DNA fault. Doing clts() in process context
         * would otherwise require disabling preemption or some heavy
         * lifting like kernel_fpu_begin().
         */
        if (!in_atomic())
                return 0;

        if (read_cr0() & X86_CR0_TS) {
                clts();
                return 1;
        }

        return 0;
}

static inline void irq_ts_restore(int TS_state)
{
        if (TS_state)
                stts();
}

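/*
 * Usage sketch, as in the VIA padlock crypto drivers:
 *
 *      int ts_state = irq_ts_save();
 *      ... issue the padlock instruction ...
 *      irq_ts_restore(ts_state);
 */
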
/*
 * The question "does this thread have fpu access?"
 * is slightly racy, since preemption could come in
 * and revoke it immediately after the test.
 *
 * However, even in that very unlikely scenario,
 * we can just assume we have FPU access - typically
 * to save the FP state - we'll just take a #NM
 * fault and get the FPU access back.
 *
 * The actual user_fpu_begin/end() functions
 * need to be preemption-safe, though.
 *
 * NOTE! user_fpu_end() must be used only after you
 * have saved the FP state, and user_fpu_begin() must
 * be used only immediately before restoring it.
 * These functions do not do any save/restore on
 * their own.
 */
static inline int user_has_fpu(void)
{
        return current_thread_info()->status & TS_USEDFPU;
}

static inline void user_fpu_end(void)
{
        preempt_disable();
        current_thread_info()->status &= ~TS_USEDFPU;
        stts();
        preempt_enable();
}

static inline void user_fpu_begin(void)
{
        preempt_disable();
        if (!user_has_fpu()) {
                clts();
                current_thread_info()->status |= TS_USEDFPU;
        }
        preempt_enable();
}

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
        WARN_ON_ONCE(!(task_thread_info(tsk)->status & TS_USEDFPU));
        preempt_disable();
        __save_init_fpu(tsk);
        stts();
        preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __unlazy_fpu(tsk);
        preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __clear_fpu(tsk);
        preempt_enable();
}

/*
 * i387 state interaction
 */
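/*
 * Note: the fsave image stores cwd/swd in 32-bit fields while fxsave
 * uses 16-bit fields, hence the casts below.
 */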
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.fpu.state->fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}

static inline bool fpu_allocated(struct fpu *fpu)
{
        return fpu->state != NULL;
}

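/*
 * Lazily allocate the full xstate buffer from task_xstate_cachep.
 * fxsave/fxrstor require 16-byte alignment, which the WARN_ON below
 * sanity-checks.
 */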
static inline int fpu_alloc(struct fpu *fpu)
{
        if (fpu_allocated(fpu))
                return 0;
        fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
        if (!fpu->state)
                return -ENOMEM;
        WARN_ON((unsigned long)fpu->state & 15);
        return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
        if (fpu->state) {
                kmem_cache_free(task_xstate_cachep, fpu->state);
                fpu->state = NULL;
        }
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
        memcpy(dst->state, src->state, xstate_size);
}

extern void fpu_finit(struct fpu *fpu);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_I387_H */