 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/reboot.h>
#include <linux/syscalls.h>

#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
	"multi_uncorrectable",

const char *ras_block_string[] = {

#define ras_err_str(i) (ras_error_string[ffs(i)])
#define ras_block_str(i) (ras_block_string[i])

#define AMDGPU_RAS_FLAG_INIT_BY_VBIOS		1
#define AMDGPU_RAS_FLAG_INIT_NEED_RESET		2
#define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS)

/* inject address is 52 bits */
#define	RAS_UMC_INJECT_ADDR_LIMIT	(0x1ULL << 52)

enum amdgpu_ras_retire_page_reservation {
	AMDGPU_RAS_RETIRE_PAGE_RESERVED,
	AMDGPU_RAS_RETIRE_PAGE_PENDING,
	AMDGPU_RAS_RETIRE_PAGE_FAULT,

atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0);
80 static bool amdgpu_ras_check_bad_page(struct amdgpu_device
*adev
,
83 void amdgpu_ras_set_error_query_ready(struct amdgpu_device
*adev
, bool ready
)
85 if (adev
&& amdgpu_ras_get_context(adev
))
86 amdgpu_ras_get_context(adev
)->error_query_ready
= ready
;
89 bool amdgpu_ras_get_error_query_ready(struct amdgpu_device
*adev
)
91 if (adev
&& amdgpu_ras_get_context(adev
))
92 return amdgpu_ras_get_context(adev
)->error_query_ready
;
97 static ssize_t
amdgpu_ras_debugfs_read(struct file
*f
, char __user
*buf
,
98 size_t size
, loff_t
*pos
)
100 struct ras_manager
*obj
= (struct ras_manager
*)file_inode(f
)->i_private
;
101 struct ras_query_if info
= {
107 if (amdgpu_ras_error_query(obj
->adev
, &info
))
110 s
= snprintf(val
, sizeof(val
), "%s: %lu\n%s: %lu\n",
112 "ce", info
.ce_count
);
117 s
= min_t(u64
, s
, size
);
120 if (copy_to_user(buf
, &val
[*pos
], s
))
128 static const struct file_operations amdgpu_ras_debugfs_ops
= {
129 .owner
= THIS_MODULE
,
130 .read
= amdgpu_ras_debugfs_read
,
132 .llseek
= default_llseek
135 static int amdgpu_ras_find_block_id_by_name(const char *name
, int *block_id
)
139 for (i
= 0; i
< ARRAY_SIZE(ras_block_string
); i
++) {
141 if (strcmp(name
, ras_block_str(i
)) == 0)
147 static int amdgpu_ras_debugfs_ctrl_parse_data(struct file
*f
,
148 const char __user
*buf
, size_t size
,
149 loff_t
*pos
, struct ras_debug_if
*data
)
151 ssize_t s
= min_t(u64
, 64, size
);
164 memset(str
, 0, sizeof(str
));
165 memset(data
, 0, sizeof(*data
));
167 if (copy_from_user(str
, buf
, s
))
170 if (sscanf(str
, "disable %32s", block_name
) == 1)
172 else if (sscanf(str
, "enable %32s %8s", block_name
, err
) == 2)
174 else if (sscanf(str
, "inject %32s %8s", block_name
, err
) == 2)
176 else if (str
[0] && str
[1] && str
[2] && str
[3])
	/* ASCII string, but no command matched */
181 if (amdgpu_ras_find_block_id_by_name(block_name
, &block_id
))
184 data
->head
.block
= block_id
;
185 /* only ue and ce errors are supported */
186 if (!memcmp("ue", err
, 2))
187 data
->head
.type
= AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE
;
188 else if (!memcmp("ce", err
, 2))
189 data
->head
.type
= AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE
;
196 if (sscanf(str
, "%*s %*s %*s %u %llu %llu",
197 &sub_block
, &address
, &value
) != 3)
198 if (sscanf(str
, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
199 &sub_block
, &address
, &value
) != 3)
201 data
->head
.sub_block_index
= sub_block
;
202 data
->inject
.address
= address
;
203 data
->inject
.value
= value
;
206 if (size
< sizeof(*data
))
209 if (copy_from_user(data
, buf
, sizeof(*data
)))
/**
 * DOC: AMDGPU RAS debugfs control interface
 *
 * It accepts struct ras_debug_if, which has two members.
 *
 * First member: ras_debug_if::head or ras_debug_if::inject.
 *
 * head is used to indicate which IP block will be under control.
 *
 * head has four members: block, type, sub_block_index and name.
 * block: which IP will be under control.
 * type: what kind of error will be enabled/disabled/injected.
 * sub_block_index: some IPs have subcomponents, e.g. GFX, SDMA.
 * name: the name of the IP.
 *
 * inject has two members in addition to those of head: address and value.
 * As their names indicate, an inject operation writes the value
 * to the address.
 *
 * The second member: struct ras_debug_if::op.
 * It has three kinds of operations.
 *
 * - 0: disable RAS on the block. Take ::head as its data.
 * - 1: enable RAS on the block. Take ::head as its data.
 * - 2: inject errors on the block. Take ::inject as its data.
 *
 * How to use the interface?
 *
 * In your code, copy struct ras_debug_if, initialize it and write the
 * struct to the control node, or use the bash command form below:
 *
 * .. code-block:: bash
 *
 *	echo op block [error [sub_block address value]] > .../ras/ras_ctrl
 *
 *	op: disable, enable, inject
 *		disable: only block is needed
 *		enable: block and error are needed
 *		inject: error, sub_block, address and value are needed
 *	block: umc, sdma, gfx, etc.
 *		see ras_block_string[] for details
 *	error: ue, ce
 *		ue: multi_uncorrectable
 *		ce: single_correctable
 *	sub_block: sub block index, pass 0 if there is no sub block
 *
 * Here are some examples of bash commands:
 *
 * .. code-block:: bash
 *
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *
 * How to check the result?
 *
 * For disable/enable, please check the ras features at
 * /sys/class/drm/card[0/1/2...]/device/ras/features
 *
 * For inject, please check the corresponding error count at
 * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * Operations are only allowed on blocks which are supported.
 * Please check the ras mask at /sys/module/amdgpu/parameters/ras_mask
 * to see which blocks support RAS on a particular ASIC.
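 *
 * For example (a sketch assuming card0 and a block, such as umc, that
 * supports RAS on your ASIC; adjust the card index and block name):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/features
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 */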
291 static ssize_t
amdgpu_ras_debugfs_ctrl_write(struct file
*f
, const char __user
*buf
,
292 size_t size
, loff_t
*pos
)
294 struct amdgpu_device
*adev
= (struct amdgpu_device
*)file_inode(f
)->i_private
;
295 struct ras_debug_if data
;
298 if (!amdgpu_ras_get_error_query_ready(adev
)) {
299 dev_warn(adev
->dev
, "RAS WARN: error injection "
300 "currently inaccessible\n");
304 ret
= amdgpu_ras_debugfs_ctrl_parse_data(f
, buf
, size
, pos
, &data
);
308 if (!amdgpu_ras_is_supported(adev
, data
.head
.block
))
313 ret
= amdgpu_ras_feature_enable(adev
, &data
.head
, 0);
316 ret
= amdgpu_ras_feature_enable(adev
, &data
.head
, 1);
319 if ((data
.inject
.address
>= adev
->gmc
.mc_vram_size
) ||
320 (data
.inject
.address
>= RAS_UMC_INJECT_ADDR_LIMIT
)) {
325 /* umc ce/ue error injection for a bad page is not allowed */
326 if ((data
.head
.block
== AMDGPU_RAS_BLOCK__UMC
) &&
327 amdgpu_ras_check_bad_page(adev
, data
.inject
.address
)) {
328 dev_warn(adev
->dev
, "RAS WARN: 0x%llx has been marked "
329 "as bad before error injection!\n",
330 data
.inject
.address
);
	/* data.inject.address is an offset rather than an absolute gpu address */
335 ret
= amdgpu_ras_error_inject(adev
, &data
.inject
);
/**
 * DOC: AMDGPU RAS debugfs EEPROM table reset interface
 *
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in VRAM. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
 *
 * .. code-block:: bash
 *
 *	echo 1 > ../ras/ras_eeprom_reset
 *
 * will reset the EEPROM table to 0 entries.
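 *
 * For example, a test sequence might look like the sketch below (assuming
 * DRI instance 0 and card0; paths vary per system):
 *
 * .. code-block:: bash
 *
 *	# inject a umc uncorrectable error, which may retire a page
 *	echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl
 *	# list the pages currently recorded as bad
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *	# clear the persistent EEPROM table after testing
 *	echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset
 */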
364 static ssize_t
amdgpu_ras_debugfs_eeprom_write(struct file
*f
, const char __user
*buf
,
365 size_t size
, loff_t
*pos
)
367 struct amdgpu_device
*adev
= (struct amdgpu_device
*)file_inode(f
)->i_private
;
370 ret
= amdgpu_ras_eeprom_reset_table(&adev
->psp
.ras
.ras
->eeprom_control
);
372 return ret
== 1 ? size
: -EIO
;
375 static const struct file_operations amdgpu_ras_debugfs_ctrl_ops
= {
376 .owner
= THIS_MODULE
,
378 .write
= amdgpu_ras_debugfs_ctrl_write
,
379 .llseek
= default_llseek
382 static const struct file_operations amdgpu_ras_debugfs_eeprom_ops
= {
383 .owner
= THIS_MODULE
,
385 .write
= amdgpu_ras_debugfs_eeprom_write
,
386 .llseek
= default_llseek
/**
 * DOC: AMDGPU RAS sysfs Error Count Interface
 *
 * It allows the user to read the error count for each IP block on the GPU
 * through /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count
 *
 * It outputs multiple lines which report the uncorrected (ue) and
 * corrected (ce) error counts.
 *
 * The format of one line is as follows:
 *
 * .. code-block:: bash
 *
 *	ue: <count>
 *	ce: <count>
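 *
 * For example, to read the counts for the umc block (a sketch assuming
 * card0; substitute the card index and block name for your system):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/umc_err_count
 */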
410 static ssize_t
amdgpu_ras_sysfs_read(struct device
*dev
,
411 struct device_attribute
*attr
, char *buf
)
413 struct ras_manager
*obj
= container_of(attr
, struct ras_manager
, sysfs_attr
);
414 struct ras_query_if info
= {
418 if (!amdgpu_ras_get_error_query_ready(obj
->adev
))
419 return snprintf(buf
, PAGE_SIZE
,
420 "Query currently inaccessible\n");
422 if (amdgpu_ras_error_query(obj
->adev
, &info
))
425 return snprintf(buf
, PAGE_SIZE
, "%s: %lu\n%s: %lu\n",
427 "ce", info
.ce_count
);
432 #define get_obj(obj) do { (obj)->use++; } while (0)
433 #define alive_obj(obj) ((obj)->use)
435 static inline void put_obj(struct ras_manager
*obj
)
437 if (obj
&& --obj
->use
== 0)
438 list_del(&obj
->node
);
439 if (obj
&& obj
->use
< 0) {
440 DRM_ERROR("RAS ERROR: Unbalance obj(%s) use\n", obj
->head
.name
);
444 /* make one obj and return it. */
445 static struct ras_manager
*amdgpu_ras_create_obj(struct amdgpu_device
*adev
,
446 struct ras_common_if
*head
)
448 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
449 struct ras_manager
*obj
;
454 if (head
->block
>= AMDGPU_RAS_BLOCK_COUNT
)
457 obj
= &con
->objs
[head
->block
];
458 /* already exist. return obj? */
464 list_add(&obj
->node
, &con
->head
);
470 /* return an obj equal to head, or the first when head is NULL */
471 struct ras_manager
*amdgpu_ras_find_obj(struct amdgpu_device
*adev
,
472 struct ras_common_if
*head
)
474 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
475 struct ras_manager
*obj
;
482 if (head
->block
>= AMDGPU_RAS_BLOCK_COUNT
)
485 obj
= &con
->objs
[head
->block
];
487 if (alive_obj(obj
)) {
488 WARN_ON(head
->block
!= obj
->head
.block
);
492 for (i
= 0; i
< AMDGPU_RAS_BLOCK_COUNT
; i
++) {
494 if (alive_obj(obj
)) {
495 WARN_ON(i
!= obj
->head
.block
);
505 void amdgpu_ras_parse_status_code(struct amdgpu_device
* adev
,
506 const char* invoke_type
,
507 const char* block_name
,
508 enum ta_ras_status ret
)
511 case TA_RAS_STATUS__SUCCESS
:
513 case TA_RAS_STATUS__ERROR_RAS_NOT_AVAILABLE
:
515 "RAS WARN: %s %s currently unavailable\n",
521 "RAS ERROR: %s %s error failed ret 0x%X\n",
528 /* feature ctl begin */
529 static int amdgpu_ras_is_feature_allowed(struct amdgpu_device
*adev
,
530 struct ras_common_if
*head
)
532 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
534 return con
->hw_supported
& BIT(head
->block
);
537 static int amdgpu_ras_is_feature_enabled(struct amdgpu_device
*adev
,
538 struct ras_common_if
*head
)
540 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
542 return con
->features
& BIT(head
->block
);
546 * if obj is not created, then create one.
547 * set feature enable flag.
549 static int __amdgpu_ras_feature_enable(struct amdgpu_device
*adev
,
550 struct ras_common_if
*head
, int enable
)
552 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
553 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, head
);
	/* If the hardware does not support ras, then do not create the obj.
	 * But if the hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs
	 * to do the corresponding initialization.
	 * The IP checks con->support to see if it needs to disable ras.
561 if (!amdgpu_ras_is_feature_allowed(adev
, head
))
563 if (!(!!enable
^ !!amdgpu_ras_is_feature_enabled(adev
, head
)))
568 obj
= amdgpu_ras_create_obj(adev
, head
);
572 /* In case we create obj somewhere else */
575 con
->features
|= BIT(head
->block
);
577 if (obj
&& amdgpu_ras_is_feature_enabled(adev
, head
)) {
578 con
->features
&= ~BIT(head
->block
);
586 /* wrapper of psp_ras_enable_features */
587 int amdgpu_ras_feature_enable(struct amdgpu_device
*adev
,
588 struct ras_common_if
*head
, bool enable
)
590 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
591 union ta_ras_cmd_input
*info
;
597 info
= kzalloc(sizeof(union ta_ras_cmd_input
), GFP_KERNEL
);
602 info
->disable_features
= (struct ta_ras_disable_features_input
) {
603 .block_id
= amdgpu_ras_block_to_ta(head
->block
),
604 .error_type
= amdgpu_ras_error_to_ta(head
->type
),
607 info
->enable_features
= (struct ta_ras_enable_features_input
) {
608 .block_id
= amdgpu_ras_block_to_ta(head
->block
),
609 .error_type
= amdgpu_ras_error_to_ta(head
->type
),
613 /* Do not enable if it is not allowed. */
614 WARN_ON(enable
&& !amdgpu_ras_is_feature_allowed(adev
, head
));
	/* Are we already in the state we are going to set? */
616 if (!(!!enable
^ !!amdgpu_ras_is_feature_enabled(adev
, head
))) {
621 if (!amdgpu_ras_intr_triggered()) {
622 ret
= psp_ras_enable_features(&adev
->psp
, info
, enable
);
624 amdgpu_ras_parse_status_code(adev
,
625 enable
? "enable":"disable",
626 ras_block_str(head
->block
),
627 (enum ta_ras_status
)ret
);
628 if (ret
== TA_RAS_STATUS__RESET_NEEDED
)
638 __amdgpu_ras_feature_enable(adev
, head
, enable
);
645 /* Only used in device probe stage and called only once. */
646 int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device
*adev
,
647 struct ras_common_if
*head
, bool enable
)
649 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
655 if (con
->flags
& AMDGPU_RAS_FLAG_INIT_BY_VBIOS
) {
		/* There is no harm in issuing a ras TA cmd regardless of
		 * the current ras state.
		 * If current state == target state, it will do nothing.
		 * But sometimes it requests the driver to reset and repost
		 * with error code -EAGAIN.
663 ret
= amdgpu_ras_feature_enable(adev
, head
, 1);
		/* With an old ras TA, we might fail to enable ras.
		 * Log it and just set up the object.
		 * TODO: remove this workaround in the future.
668 if (ret
== -EINVAL
) {
669 ret
= __amdgpu_ras_feature_enable(adev
, head
, 1);
672 "RAS INFO: %s setup object\n",
673 ras_block_str(head
->block
));
		/* set up the object, then issue a ras TA disable cmd. */
677 ret
= __amdgpu_ras_feature_enable(adev
, head
, 1);
681 ret
= amdgpu_ras_feature_enable(adev
, head
, 0);
684 ret
= amdgpu_ras_feature_enable(adev
, head
, enable
);
689 static int amdgpu_ras_disable_all_features(struct amdgpu_device
*adev
,
692 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
693 struct ras_manager
*obj
, *tmp
;
695 list_for_each_entry_safe(obj
, tmp
, &con
->head
, node
) {
697 * aka just release the obj and corresponding flags
700 if (__amdgpu_ras_feature_enable(adev
, &obj
->head
, 0))
703 if (amdgpu_ras_feature_enable(adev
, &obj
->head
, 0))
708 return con
->features
;
711 static int amdgpu_ras_enable_all_features(struct amdgpu_device
*adev
,
714 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
715 int ras_block_count
= AMDGPU_RAS_BLOCK_COUNT
;
717 const enum amdgpu_ras_error_type default_ras_type
=
718 AMDGPU_RAS_ERROR__NONE
;
720 for (i
= 0; i
< ras_block_count
; i
++) {
721 struct ras_common_if head
= {
723 .type
= default_ras_type
,
724 .sub_block_index
= 0,
726 strcpy(head
.name
, ras_block_str(i
));
			 * bypass psp. vbios enables ras for us,
			 * so just create the obj.
732 if (__amdgpu_ras_feature_enable(adev
, &head
, 1))
735 if (amdgpu_ras_feature_enable(adev
, &head
, 1))
740 return con
->features
;
742 /* feature ctl end */
744 /* query/inject/cure begin */
745 int amdgpu_ras_error_query(struct amdgpu_device
*adev
,
746 struct ras_query_if
*info
)
748 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, &info
->head
);
749 struct ras_err_data err_data
= {0, 0, 0, NULL
};
755 switch (info
->head
.block
) {
756 case AMDGPU_RAS_BLOCK__UMC
:
757 if (adev
->umc
.funcs
->query_ras_error_count
)
758 adev
->umc
.funcs
->query_ras_error_count(adev
, &err_data
);
759 /* umc query_ras_error_address is also responsible for clearing
762 if (adev
->umc
.funcs
->query_ras_error_address
)
763 adev
->umc
.funcs
->query_ras_error_address(adev
, &err_data
);
765 case AMDGPU_RAS_BLOCK__SDMA
:
766 if (adev
->sdma
.funcs
->query_ras_error_count
) {
767 for (i
= 0; i
< adev
->sdma
.num_instances
; i
++)
768 adev
->sdma
.funcs
->query_ras_error_count(adev
, i
,
772 case AMDGPU_RAS_BLOCK__GFX
:
773 if (adev
->gfx
.funcs
->query_ras_error_count
)
774 adev
->gfx
.funcs
->query_ras_error_count(adev
, &err_data
);
776 case AMDGPU_RAS_BLOCK__MMHUB
:
777 if (adev
->mmhub
.funcs
->query_ras_error_count
)
778 adev
->mmhub
.funcs
->query_ras_error_count(adev
, &err_data
);
780 case AMDGPU_RAS_BLOCK__PCIE_BIF
:
781 if (adev
->nbio
.funcs
->query_ras_error_count
)
782 adev
->nbio
.funcs
->query_ras_error_count(adev
, &err_data
);
784 case AMDGPU_RAS_BLOCK__XGMI_WAFL
:
785 amdgpu_xgmi_query_ras_error_count(adev
, &err_data
);
791 obj
->err_data
.ue_count
+= err_data
.ue_count
;
792 obj
->err_data
.ce_count
+= err_data
.ce_count
;
794 info
->ue_count
= obj
->err_data
.ue_count
;
795 info
->ce_count
= obj
->err_data
.ce_count
;
797 if (err_data
.ce_count
) {
798 dev_info(adev
->dev
, "%ld correctable hardware errors "
799 "detected in %s block, no user "
800 "action is needed.\n",
801 obj
->err_data
.ce_count
,
802 ras_block_str(info
->head
.block
));
804 if (err_data
.ue_count
) {
805 dev_info(adev
->dev
, "%ld uncorrectable hardware errors "
806 "detected in %s block\n",
807 obj
->err_data
.ue_count
,
808 ras_block_str(info
->head
.block
));
814 /* Trigger XGMI/WAFL error */
815 int amdgpu_ras_error_inject_xgmi(struct amdgpu_device
*adev
,
816 struct ta_ras_trigger_error_input
*block_info
)
820 if (amdgpu_dpm_set_df_cstate(adev
, DF_CSTATE_DISALLOW
))
821 dev_warn(adev
->dev
, "Failed to disallow df cstate");
823 if (amdgpu_dpm_allow_xgmi_power_down(adev
, false))
824 dev_warn(adev
->dev
, "Failed to disallow XGMI power down");
826 ret
= psp_ras_trigger_error(&adev
->psp
, block_info
);
828 if (amdgpu_ras_intr_triggered())
831 if (amdgpu_dpm_allow_xgmi_power_down(adev
, true))
832 dev_warn(adev
->dev
, "Failed to allow XGMI power down");
	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
		dev_warn(adev->dev, "Failed to allow df cstate");
840 /* wrapper of psp_ras_trigger_error */
841 int amdgpu_ras_error_inject(struct amdgpu_device
*adev
,
842 struct ras_inject_if
*info
)
844 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, &info
->head
);
845 struct ta_ras_trigger_error_input block_info
= {
846 .block_id
= amdgpu_ras_block_to_ta(info
->head
.block
),
847 .inject_error_type
= amdgpu_ras_error_to_ta(info
->head
.type
),
848 .sub_block_index
= info
->head
.sub_block_index
,
849 .address
= info
->address
,
850 .value
= info
->value
,
857 /* Calculate XGMI relative offset */
858 if (adev
->gmc
.xgmi
.num_physical_nodes
> 1) {
860 amdgpu_xgmi_get_relative_phy_addr(adev
,
864 switch (info
->head
.block
) {
865 case AMDGPU_RAS_BLOCK__GFX
:
866 if (adev
->gfx
.funcs
->ras_error_inject
)
867 ret
= adev
->gfx
.funcs
->ras_error_inject(adev
, info
);
871 case AMDGPU_RAS_BLOCK__UMC
:
872 case AMDGPU_RAS_BLOCK__MMHUB
:
873 case AMDGPU_RAS_BLOCK__PCIE_BIF
:
874 ret
= psp_ras_trigger_error(&adev
->psp
, &block_info
);
876 case AMDGPU_RAS_BLOCK__XGMI_WAFL
:
877 ret
= amdgpu_ras_error_inject_xgmi(adev
, &block_info
);
880 dev_info(adev
->dev
, "%s error injection is not supported yet\n",
881 ras_block_str(info
->head
.block
));
885 amdgpu_ras_parse_status_code(adev
,
887 ras_block_str(info
->head
.block
),
888 (enum ta_ras_status
)ret
);
893 int amdgpu_ras_error_cure(struct amdgpu_device
*adev
,
894 struct ras_cure_if
*info
)
896 /* psp fw has no cure interface for now. */
900 /* get the total error counts on all IPs */
901 unsigned long amdgpu_ras_query_error_count(struct amdgpu_device
*adev
,
904 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
905 struct ras_manager
*obj
;
906 struct ras_err_data data
= {0, 0};
911 list_for_each_entry(obj
, &con
->head
, node
) {
912 struct ras_query_if info
= {
916 if (amdgpu_ras_error_query(adev
, &info
))
919 data
.ce_count
+= info
.ce_count
;
920 data
.ue_count
+= info
.ue_count
;
923 return is_ce
? data
.ce_count
: data
.ue_count
;
925 /* query/inject/cure end */
930 static int amdgpu_ras_badpages_read(struct amdgpu_device
*adev
,
931 struct ras_badpage
**bps
, unsigned int *count
);
933 static char *amdgpu_ras_badpage_flags_str(unsigned int flags
)
936 case AMDGPU_RAS_RETIRE_PAGE_RESERVED
:
938 case AMDGPU_RAS_RETIRE_PAGE_PENDING
:
940 case AMDGPU_RAS_RETIRE_PAGE_FAULT
:
/**
 * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface
 *
 * It allows the user to read the bad pages of VRAM on the GPU through
 * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages
 *
 * It outputs multiple lines, and each line stands for one GPU page.
 *
 * The format of one line is below,
 * gpu pfn : gpu page size : flags
 *
 * gpu pfn and gpu page size are printed in hex format.
 * flags can be one of the characters below:
 *
 * R: reserved, this GPU page is reserved and not able to be used.
 *
 * P: pending for reserve, this GPU page is marked as bad and will be
 * reserved in the next window of page_reserve.
 *
 * F: unable to reserve, this GPU page can't be reserved.
 *
 * .. code-block:: bash
 *
 *	0x00000001 : 0x00001000 : R
 *	0x00000002 : 0x00001000 : P
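 *
 * To dump the current list (a sketch assuming card0; substitute the card
 * index for your system):
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 */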
976 static ssize_t
amdgpu_ras_sysfs_badpages_read(struct file
*f
,
977 struct kobject
*kobj
, struct bin_attribute
*attr
,
978 char *buf
, loff_t ppos
, size_t count
)
980 struct amdgpu_ras
*con
=
981 container_of(attr
, struct amdgpu_ras
, badpages_attr
);
982 struct amdgpu_device
*adev
= con
->adev
;
983 const unsigned int element_size
=
984 sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
985 unsigned int start
= div64_ul(ppos
+ element_size
- 1, element_size
);
986 unsigned int end
= div64_ul(ppos
+ count
- 1, element_size
);
988 struct ras_badpage
*bps
= NULL
;
989 unsigned int bps_count
= 0;
991 memset(buf
, 0, count
);
993 if (amdgpu_ras_badpages_read(adev
, &bps
, &bps_count
))
996 for (; start
< end
&& start
< bps_count
; start
++)
997 s
+= scnprintf(&buf
[s
], element_size
+ 1,
998 "0x%08x : 0x%08x : %1s\n",
1001 amdgpu_ras_badpage_flags_str(bps
[start
].flags
));
1008 static ssize_t
amdgpu_ras_sysfs_features_read(struct device
*dev
,
1009 struct device_attribute
*attr
, char *buf
)
1011 struct amdgpu_ras
*con
=
1012 container_of(attr
, struct amdgpu_ras
, features_attr
);
1014 return scnprintf(buf
, PAGE_SIZE
, "feature mask: 0x%x\n", con
->features
);
1017 static int amdgpu_ras_sysfs_create_feature_node(struct amdgpu_device
*adev
)
1019 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1020 struct attribute
*attrs
[] = {
1021 &con
->features_attr
.attr
,
1024 struct bin_attribute
*bin_attrs
[] = {
1025 &con
->badpages_attr
,
1028 struct attribute_group group
= {
1031 .bin_attrs
= bin_attrs
,
1034 con
->features_attr
= (struct device_attribute
) {
1039 .show
= amdgpu_ras_sysfs_features_read
,
1042 con
->badpages_attr
= (struct bin_attribute
) {
1044 .name
= "gpu_vram_bad_pages",
1049 .read
= amdgpu_ras_sysfs_badpages_read
,
1052 sysfs_attr_init(attrs
[0]);
1053 sysfs_bin_attr_init(bin_attrs
[0]);
1055 return sysfs_create_group(&adev
->dev
->kobj
, &group
);
1058 static int amdgpu_ras_sysfs_remove_feature_node(struct amdgpu_device
*adev
)
1060 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1061 struct attribute
*attrs
[] = {
1062 &con
->features_attr
.attr
,
1065 struct bin_attribute
*bin_attrs
[] = {
1066 &con
->badpages_attr
,
1069 struct attribute_group group
= {
1072 .bin_attrs
= bin_attrs
,
1075 sysfs_remove_group(&adev
->dev
->kobj
, &group
);
1080 int amdgpu_ras_sysfs_create(struct amdgpu_device
*adev
,
1081 struct ras_fs_if
*head
)
1083 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, &head
->head
);
1085 if (!obj
|| obj
->attr_inuse
)
1090 memcpy(obj
->fs_data
.sysfs_name
,
1092 sizeof(obj
->fs_data
.sysfs_name
));
1094 obj
->sysfs_attr
= (struct device_attribute
){
1096 .name
= obj
->fs_data
.sysfs_name
,
1099 .show
= amdgpu_ras_sysfs_read
,
1101 sysfs_attr_init(&obj
->sysfs_attr
.attr
);
1103 if (sysfs_add_file_to_group(&adev
->dev
->kobj
,
1104 &obj
->sysfs_attr
.attr
,
1110 obj
->attr_inuse
= 1;
1115 int amdgpu_ras_sysfs_remove(struct amdgpu_device
*adev
,
1116 struct ras_common_if
*head
)
1118 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, head
);
1120 if (!obj
|| !obj
->attr_inuse
)
1123 sysfs_remove_file_from_group(&adev
->dev
->kobj
,
1124 &obj
->sysfs_attr
.attr
,
1126 obj
->attr_inuse
= 0;
1132 static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device
*adev
)
1134 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1135 struct ras_manager
*obj
, *tmp
;
1137 list_for_each_entry_safe(obj
, tmp
, &con
->head
, node
) {
1138 amdgpu_ras_sysfs_remove(adev
, &obj
->head
);
1141 amdgpu_ras_sysfs_remove_feature_node(adev
);
/**
 * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors
 *
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically
 * in that event.
 *
 * The following file in debugfs provides that interface:
 * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot
 *
 * Usage:
 *
 * .. code-block:: bash
 *
 *	echo true > .../ras/auto_reboot
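 *
 * To read back the current setting (a sketch assuming DRI instance 0;
 * substitute the index for your system):
 *
 * .. code-block:: bash
 *
 *	cat /sys/kernel/debug/dri/0/ras/auto_reboot
 */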
1166 static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device
*adev
)
1168 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1169 struct drm_minor
*minor
= adev
->ddev
->primary
;
1171 con
->dir
= debugfs_create_dir("ras", minor
->debugfs_root
);
1172 debugfs_create_file("ras_ctrl", S_IWUGO
| S_IRUGO
, con
->dir
,
1173 adev
, &amdgpu_ras_debugfs_ctrl_ops
);
1174 debugfs_create_file("ras_eeprom_reset", S_IWUGO
| S_IRUGO
, con
->dir
,
1175 adev
, &amdgpu_ras_debugfs_eeprom_ops
);
	 * After an uncorrectable error occurs, GPU recovery is usually
	 * scheduled. But because of the known problem of GPU recovery
	 * failing to bring the GPU back, the interface below provides a
	 * direct way for the user to reboot the system automatically when
	 * an ERREVENT_ATHUB_INTERRUPT is generated. In that case the
	 * normal GPU recovery routine will never be called.
1185 debugfs_create_bool("auto_reboot", S_IWUGO
| S_IRUGO
, con
->dir
,
1189 void amdgpu_ras_debugfs_create(struct amdgpu_device
*adev
,
1190 struct ras_fs_if
*head
)
1192 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1193 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, &head
->head
);
1195 if (!obj
|| obj
->ent
)
1200 memcpy(obj
->fs_data
.debugfs_name
,
1202 sizeof(obj
->fs_data
.debugfs_name
));
1204 obj
->ent
= debugfs_create_file(obj
->fs_data
.debugfs_name
,
1205 S_IWUGO
| S_IRUGO
, con
->dir
, obj
,
1206 &amdgpu_ras_debugfs_ops
);
1209 void amdgpu_ras_debugfs_create_all(struct amdgpu_device
*adev
)
1211 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1212 struct ras_manager
*obj
;
1213 struct ras_fs_if fs_info
;
	 * it won't be called in the resume path, so there is no need to
	 * check the suspend and gpu reset status.
1222 amdgpu_ras_debugfs_create_ctrl_node(adev
);
1224 list_for_each_entry(obj
, &con
->head
, node
) {
1225 if (amdgpu_ras_is_supported(adev
, obj
->head
.block
) &&
1226 (obj
->attr_inuse
== 1)) {
1227 sprintf(fs_info
.debugfs_name
, "%s_err_inject",
1228 ras_block_str(obj
->head
.block
));
1229 fs_info
.head
= obj
->head
;
1230 amdgpu_ras_debugfs_create(adev
, &fs_info
);
1235 void amdgpu_ras_debugfs_remove(struct amdgpu_device
*adev
,
1236 struct ras_common_if
*head
)
1238 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, head
);
1240 if (!obj
|| !obj
->ent
)
1243 debugfs_remove(obj
->ent
);
1248 static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device
*adev
)
1250 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1251 struct ras_manager
*obj
, *tmp
;
1253 list_for_each_entry_safe(obj
, tmp
, &con
->head
, node
) {
1254 amdgpu_ras_debugfs_remove(adev
, &obj
->head
);
1257 debugfs_remove_recursive(con
->dir
);
1264 static int amdgpu_ras_fs_init(struct amdgpu_device
*adev
)
1266 amdgpu_ras_sysfs_create_feature_node(adev
);
1271 static int amdgpu_ras_fs_fini(struct amdgpu_device
*adev
)
1273 amdgpu_ras_debugfs_remove_all(adev
);
1274 amdgpu_ras_sysfs_remove_all(adev
);
1280 static void amdgpu_ras_interrupt_handler(struct ras_manager
*obj
)
1282 struct ras_ih_data
*data
= &obj
->ih_data
;
1283 struct amdgpu_iv_entry entry
;
1285 struct ras_err_data err_data
= {0, 0, 0, NULL
};
1287 while (data
->rptr
!= data
->wptr
) {
1289 memcpy(&entry
, &data
->ring
[data
->rptr
],
1290 data
->element_size
);
1293 data
->rptr
= (data
->aligned_element_size
+
1294 data
->rptr
) % data
->ring_size
;
		/* Let the IP handle its data; maybe we need to get the output
		 * from the callback to update the error type/count, etc.
1300 ret
= data
->cb(obj
->adev
, &err_data
, &entry
);
		/* A ue will trigger an interrupt, and in that case
		 * we need to do a reset to recover the whole system.
		 * But leave it to the IP to do that recovery; here we just dispatch
1306 if (ret
== AMDGPU_RAS_SUCCESS
) {
1307 /* these counts could be left as 0 if
1308 * some blocks do not count error number
1310 obj
->err_data
.ue_count
+= err_data
.ue_count
;
1311 obj
->err_data
.ce_count
+= err_data
.ce_count
;
1317 static void amdgpu_ras_interrupt_process_handler(struct work_struct
*work
)
1319 struct ras_ih_data
*data
=
1320 container_of(work
, struct ras_ih_data
, ih_work
);
1321 struct ras_manager
*obj
=
1322 container_of(data
, struct ras_manager
, ih_data
);
1324 amdgpu_ras_interrupt_handler(obj
);
1327 int amdgpu_ras_interrupt_dispatch(struct amdgpu_device
*adev
,
1328 struct ras_dispatch_if
*info
)
1330 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, &info
->head
);
1331 struct ras_ih_data
*data
= &obj
->ih_data
;
1336 if (data
->inuse
== 0)
1339 /* Might be overflow... */
1340 memcpy(&data
->ring
[data
->wptr
], info
->entry
,
1341 data
->element_size
);
1344 data
->wptr
= (data
->aligned_element_size
+
1345 data
->wptr
) % data
->ring_size
;
1347 schedule_work(&data
->ih_work
);
1352 int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device
*adev
,
1353 struct ras_ih_if
*info
)
1355 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, &info
->head
);
1356 struct ras_ih_data
*data
;
1361 data
= &obj
->ih_data
;
1362 if (data
->inuse
== 0)
1365 cancel_work_sync(&data
->ih_work
);
1368 memset(data
, 0, sizeof(*data
));
1374 int amdgpu_ras_interrupt_add_handler(struct amdgpu_device
*adev
,
1375 struct ras_ih_if
*info
)
1377 struct ras_manager
*obj
= amdgpu_ras_find_obj(adev
, &info
->head
);
1378 struct ras_ih_data
*data
;
	/* in case we register the IH before enabling the ras feature */
1382 obj
= amdgpu_ras_create_obj(adev
, &info
->head
);
1388 data
= &obj
->ih_data
;
	/* add the callback, etc. */
1390 *data
= (struct ras_ih_data
) {
1393 .element_size
= sizeof(struct amdgpu_iv_entry
),
1398 INIT_WORK(&data
->ih_work
, amdgpu_ras_interrupt_process_handler
);
1400 data
->aligned_element_size
= ALIGN(data
->element_size
, 8);
1401 /* the ring can store 64 iv entries. */
1402 data
->ring_size
= 64 * data
->aligned_element_size
;
1403 data
->ring
= kmalloc(data
->ring_size
, GFP_KERNEL
);
1415 static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device
*adev
)
1417 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1418 struct ras_manager
*obj
, *tmp
;
1420 list_for_each_entry_safe(obj
, tmp
, &con
->head
, node
) {
1421 struct ras_ih_if info
= {
1424 amdgpu_ras_interrupt_remove_handler(adev
, &info
);
/* traverse all IPs except NBIO to query error counters */
1432 static void amdgpu_ras_log_on_err_counter(struct amdgpu_device
*adev
)
1434 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1435 struct ras_manager
*obj
;
1440 list_for_each_entry(obj
, &con
->head
, node
) {
1441 struct ras_query_if info
= {
		 * The PCIE_BIF IP has a separate isr for the ras controller
		 * interrupt, and the specific ras counter query will be
		 * done in that isr. So skip such blocks in the common
		 * sync flood interrupt isr path.
1451 if (info
.head
.block
== AMDGPU_RAS_BLOCK__PCIE_BIF
)
1454 amdgpu_ras_error_query(adev
, &info
);
1458 /* recovery begin */
/* return 0 on success.
 * the caller needs to free bps.
1463 static int amdgpu_ras_badpages_read(struct amdgpu_device
*adev
,
1464 struct ras_badpage
**bps
, unsigned int *count
)
1466 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1467 struct ras_err_handler_data
*data
;
1471 if (!con
|| !con
->eh_data
|| !bps
|| !count
)
1474 mutex_lock(&con
->recovery_lock
);
1475 data
= con
->eh_data
;
1476 if (!data
|| data
->count
== 0) {
1482 *bps
= kmalloc(sizeof(struct ras_badpage
) * data
->count
, GFP_KERNEL
);
1488 for (; i
< data
->count
; i
++) {
1489 (*bps
)[i
] = (struct ras_badpage
){
1490 .bp
= data
->bps
[i
].retired_page
,
1491 .size
= AMDGPU_GPU_PAGE_SIZE
,
1492 .flags
= AMDGPU_RAS_RETIRE_PAGE_RESERVED
,
1495 if (data
->last_reserved
<= i
)
1496 (*bps
)[i
].flags
= AMDGPU_RAS_RETIRE_PAGE_PENDING
;
1497 else if (data
->bps_bo
[i
] == NULL
)
1498 (*bps
)[i
].flags
= AMDGPU_RAS_RETIRE_PAGE_FAULT
;
1501 *count
= data
->count
;
1503 mutex_unlock(&con
->recovery_lock
);
1507 static void amdgpu_ras_do_recovery(struct work_struct
*work
)
1509 struct amdgpu_ras
*ras
=
1510 container_of(work
, struct amdgpu_ras
, recovery_work
);
1511 struct amdgpu_device
*remote_adev
= NULL
;
1512 struct amdgpu_device
*adev
= ras
->adev
;
1513 struct list_head device_list
, *device_list_handle
= NULL
;
1514 struct amdgpu_hive_info
*hive
= amdgpu_get_xgmi_hive(adev
, false);
1516 /* Build list of devices to query RAS related errors */
1517 if (hive
&& adev
->gmc
.xgmi
.num_physical_nodes
> 1)
1518 device_list_handle
= &hive
->device_list
;
1520 INIT_LIST_HEAD(&device_list
);
1521 list_add_tail(&adev
->gmc
.xgmi
.head
, &device_list
);
1522 device_list_handle
= &device_list
;
1525 list_for_each_entry(remote_adev
, device_list_handle
, gmc
.xgmi
.head
) {
1526 amdgpu_ras_log_on_err_counter(remote_adev
);
1529 if (amdgpu_device_should_recover_gpu(ras
->adev
))
1530 amdgpu_device_gpu_recover(ras
->adev
, 0);
1531 atomic_set(&ras
->in_recovery
, 0);
1534 /* alloc/realloc bps array */
1535 static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device
*adev
,
1536 struct ras_err_handler_data
*data
, int pages
)
1538 unsigned int old_space
= data
->count
+ data
->space_left
;
1539 unsigned int new_space
= old_space
+ pages
;
1540 unsigned int align_space
= ALIGN(new_space
, 512);
1541 void *bps
= kmalloc(align_space
* sizeof(*data
->bps
), GFP_KERNEL
);
1542 struct amdgpu_bo
**bps_bo
=
1543 kmalloc(align_space
* sizeof(*data
->bps_bo
), GFP_KERNEL
);
1545 if (!bps
|| !bps_bo
) {
1552 memcpy(bps
, data
->bps
,
1553 data
->count
* sizeof(*data
->bps
));
1557 memcpy(bps_bo
, data
->bps_bo
,
1558 data
->count
* sizeof(*data
->bps_bo
));
1559 kfree(data
->bps_bo
);
1563 data
->bps_bo
= bps_bo
;
1564 data
->space_left
+= align_space
- old_space
;
1568 /* it deal with vram only. */
1569 int amdgpu_ras_add_bad_pages(struct amdgpu_device
*adev
,
1570 struct eeprom_table_record
*bps
, int pages
)
1572 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1573 struct ras_err_handler_data
*data
;
1576 if (!con
|| !con
->eh_data
|| !bps
|| pages
<= 0)
1579 mutex_lock(&con
->recovery_lock
);
1580 data
= con
->eh_data
;
1584 if (data
->space_left
<= pages
)
1585 if (amdgpu_ras_realloc_eh_data_space(adev
, data
, pages
)) {
1590 memcpy(&data
->bps
[data
->count
], bps
, pages
* sizeof(*data
->bps
));
1591 data
->count
+= pages
;
1592 data
->space_left
-= pages
;
1595 mutex_unlock(&con
->recovery_lock
);
1601 * write error record array to eeprom, the function should be
1602 * protected by recovery_lock
1604 static int amdgpu_ras_save_bad_pages(struct amdgpu_device
*adev
)
1606 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1607 struct ras_err_handler_data
*data
;
1608 struct amdgpu_ras_eeprom_control
*control
;
1611 if (!con
|| !con
->eh_data
)
1614 control
= &con
->eeprom_control
;
1615 data
= con
->eh_data
;
1616 save_count
= data
->count
- control
->num_recs
;
1617 /* only new entries are saved */
1619 if (amdgpu_ras_eeprom_process_recods(control
,
1620 &data
->bps
[control
->num_recs
],
1623 dev_err(adev
->dev
, "Failed to save EEPROM table data!");
1631 * read error record array in eeprom and reserve enough space for
1632 * storing new bad pages
1634 static int amdgpu_ras_load_bad_pages(struct amdgpu_device
*adev
)
1636 struct amdgpu_ras_eeprom_control
*control
=
1637 &adev
->psp
.ras
.ras
->eeprom_control
;
1638 struct eeprom_table_record
*bps
= NULL
;
1641 /* no bad page record, skip eeprom access */
1642 if (!control
->num_recs
)
1645 bps
= kcalloc(control
->num_recs
, sizeof(*bps
), GFP_KERNEL
);
1649 if (amdgpu_ras_eeprom_process_recods(control
, bps
, false,
1650 control
->num_recs
)) {
1651 dev_err(adev
->dev
, "Failed to load EEPROM table records!");
1656 ret
= amdgpu_ras_add_bad_pages(adev
, bps
, control
->num_recs
);
 * check if an address belongs to a bad page
 *
 * Note: this check is only for the umc block
1668 static bool amdgpu_ras_check_bad_page(struct amdgpu_device
*adev
,
1671 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1672 struct ras_err_handler_data
*data
;
1676 if (!con
|| !con
->eh_data
)
1679 mutex_lock(&con
->recovery_lock
);
1680 data
= con
->eh_data
;
1684 addr
>>= AMDGPU_GPU_PAGE_SHIFT
;
1685 for (i
= 0; i
< data
->count
; i
++)
1686 if (addr
== data
->bps
[i
].retired_page
) {
1692 mutex_unlock(&con
->recovery_lock
);
1696 /* called in gpu recovery/init */
1697 int amdgpu_ras_reserve_bad_pages(struct amdgpu_device
*adev
)
1699 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1700 struct ras_err_handler_data
*data
;
1702 struct amdgpu_bo
*bo
= NULL
;
1705 if (!con
|| !con
->eh_data
)
1708 mutex_lock(&con
->recovery_lock
);
1709 data
= con
->eh_data
;
1712 /* reserve vram at driver post stage. */
1713 for (i
= data
->last_reserved
; i
< data
->count
; i
++) {
1714 bp
= data
->bps
[i
].retired_page
;
		/* There are two cases in which a reserve error should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
1721 if (amdgpu_bo_create_kernel_at(adev
, bp
<< AMDGPU_GPU_PAGE_SHIFT
,
1722 AMDGPU_GPU_PAGE_SIZE
,
1723 AMDGPU_GEM_DOMAIN_VRAM
,
1725 dev_warn(adev
->dev
, "RAS WARN: reserve vram for "
1726 "retired page %llx fail\n", bp
);
1728 data
->bps_bo
[i
] = bo
;
1729 data
->last_reserved
= i
+ 1;
	/* continue to save bad pages to eeprom even if reserve_vram fails */
1734 ret
= amdgpu_ras_save_bad_pages(adev
);
1736 mutex_unlock(&con
->recovery_lock
);
1740 /* called when driver unload */
1741 static int amdgpu_ras_release_bad_pages(struct amdgpu_device
*adev
)
1743 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1744 struct ras_err_handler_data
*data
;
1745 struct amdgpu_bo
*bo
;
1748 if (!con
|| !con
->eh_data
)
1751 mutex_lock(&con
->recovery_lock
);
1752 data
= con
->eh_data
;
1756 for (i
= data
->last_reserved
- 1; i
>= 0; i
--) {
1757 bo
= data
->bps_bo
[i
];
1759 amdgpu_bo_free_kernel(&bo
, NULL
, NULL
);
1761 data
->bps_bo
[i
] = bo
;
1762 data
->last_reserved
= i
;
1765 mutex_unlock(&con
->recovery_lock
);
1769 int amdgpu_ras_recovery_init(struct amdgpu_device
*adev
)
1771 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1772 struct ras_err_handler_data
**data
;
1776 data
= &con
->eh_data
;
1780 *data
= kmalloc(sizeof(**data
), GFP_KERNEL
| __GFP_ZERO
);
1786 mutex_init(&con
->recovery_lock
);
1787 INIT_WORK(&con
->recovery_work
, amdgpu_ras_do_recovery
);
1788 atomic_set(&con
->in_recovery
, 0);
1791 ret
= amdgpu_ras_eeprom_init(&con
->eeprom_control
);
1795 if (con
->eeprom_control
.num_recs
) {
1796 ret
= amdgpu_ras_load_bad_pages(adev
);
1799 ret
= amdgpu_ras_reserve_bad_pages(adev
);
1807 amdgpu_ras_release_bad_pages(adev
);
1809 kfree((*data
)->bps
);
1810 kfree((*data
)->bps_bo
);
1812 con
->eh_data
= NULL
;
1814 dev_warn(adev
->dev
, "Failed to initialize ras recovery!\n");
1819 static int amdgpu_ras_recovery_fini(struct amdgpu_device
*adev
)
1821 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1822 struct ras_err_handler_data
*data
= con
->eh_data
;
1824 /* recovery_init failed to init it, fini is useless */
1828 cancel_work_sync(&con
->recovery_work
);
1829 amdgpu_ras_release_bad_pages(adev
);
1831 mutex_lock(&con
->recovery_lock
);
1832 con
->eh_data
= NULL
;
1834 kfree(data
->bps_bo
);
1836 mutex_unlock(&con
->recovery_lock
);
1842 /* return 0 if ras will reset gpu and repost.*/
1843 int amdgpu_ras_request_reset_on_boot(struct amdgpu_device
*adev
,
1846 struct amdgpu_ras
*ras
= amdgpu_ras_get_context(adev
);
1851 ras
->flags
|= AMDGPU_RAS_FLAG_INIT_NEED_RESET
;
/*
 * check the hardware's ras ability, which will be saved in hw_supported.
 * if the hardware does not support ras, we can skip some ras initialization
 * and forbid some ras operations from the IP.
 * if software itself (say, a boot parameter) limits the ras ability, we still
 * need to allow the IP to do some limited operations, like disable. In such
 * a case, we have to initialize ras as normal, but need to check in each
 * function whether the operation is allowed or not.
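 *
 * For example, booting with something like amdgpu.ras_mask=0x1 on the kernel
 * command line (a sketch; the exact mask value depends on which block bits
 * are wanted) limits which blocks the driver will enable; the effective mask
 * can be read back from /sys/module/amdgpu/parameters/ras_mask.
 */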
1864 static void amdgpu_ras_check_supported(struct amdgpu_device
*adev
,
1865 uint32_t *hw_supported
, uint32_t *supported
)
1870 if (amdgpu_sriov_vf(adev
) || !adev
->is_atom_fw
||
1871 (adev
->asic_type
!= CHIP_VEGA20
&&
1872 adev
->asic_type
!= CHIP_ARCTURUS
))
1875 if (amdgpu_atomfirmware_mem_ecc_supported(adev
)) {
1876 dev_info(adev
->dev
, "HBM ECC is active.\n");
1877 *hw_supported
|= (1 << AMDGPU_RAS_BLOCK__UMC
|
1878 1 << AMDGPU_RAS_BLOCK__DF
);
		dev_info(adev->dev, "HBM ECC is not present.\n");
1882 if (amdgpu_atomfirmware_sram_ecc_supported(adev
)) {
1883 dev_info(adev
->dev
, "SRAM ECC is active.\n");
1884 *hw_supported
|= ~(1 << AMDGPU_RAS_BLOCK__UMC
|
1885 1 << AMDGPU_RAS_BLOCK__DF
);
		dev_info(adev->dev, "SRAM ECC is not present.\n");
1889 /* hw_supported needs to be aligned with RAS block mask. */
1890 *hw_supported
&= AMDGPU_RAS_BLOCK_MASK
;
1892 *supported
= amdgpu_ras_enable
== 0 ?
1893 0 : *hw_supported
& amdgpu_ras_mask
;
1896 int amdgpu_ras_init(struct amdgpu_device
*adev
)
1898 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
1904 con
= kmalloc(sizeof(struct amdgpu_ras
) +
1905 sizeof(struct ras_manager
) * AMDGPU_RAS_BLOCK_COUNT
,
1906 GFP_KERNEL
|__GFP_ZERO
);
1910 con
->objs
= (struct ras_manager
*)(con
+ 1);
1912 amdgpu_ras_set_context(adev
, con
);
1914 amdgpu_ras_check_supported(adev
, &con
->hw_supported
,
1916 if (!con
->hw_supported
) {
1917 amdgpu_ras_set_context(adev
, NULL
);
1923 INIT_LIST_HEAD(&con
->head
);
	/* Might need to get this flag from vbios. */
1925 con
->flags
= RAS_DEFAULT_FLAGS
;
1927 if (adev
->nbio
.funcs
->init_ras_controller_interrupt
) {
1928 r
= adev
->nbio
.funcs
->init_ras_controller_interrupt(adev
);
1933 if (adev
->nbio
.funcs
->init_ras_err_event_athub_interrupt
) {
1934 r
= adev
->nbio
.funcs
->init_ras_err_event_athub_interrupt(adev
);
1939 amdgpu_ras_mask
&= AMDGPU_RAS_BLOCK_MASK
;
1941 if (amdgpu_ras_fs_init(adev
))
1944 dev_info(adev
->dev
, "RAS INFO: ras initialized successfully, "
1945 "hardware ability[%x] ras_mask[%x]\n",
1946 con
->hw_supported
, con
->supported
);
1949 amdgpu_ras_set_context(adev
, NULL
);
1955 /* helper function to handle common stuff in ip late init phase */
1956 int amdgpu_ras_late_init(struct amdgpu_device
*adev
,
1957 struct ras_common_if
*ras_block
,
1958 struct ras_fs_if
*fs_info
,
1959 struct ras_ih_if
*ih_info
)
1963 /* disable RAS feature per IP block if it is not supported */
1964 if (!amdgpu_ras_is_supported(adev
, ras_block
->block
)) {
1965 amdgpu_ras_feature_enable_on_boot(adev
, ras_block
, 0);
1969 r
= amdgpu_ras_feature_enable_on_boot(adev
, ras_block
, 1);
1972 /* request gpu reset. will run again */
1973 amdgpu_ras_request_reset_on_boot(adev
,
1976 } else if (adev
->in_suspend
|| adev
->in_gpu_reset
) {
		/* in the resume phase, if we fail to enable ras,
		 * clean up all ras fs nodes, and disable ras */
1984 /* in resume phase, no need to create ras fs node */
1985 if (adev
->in_suspend
|| adev
->in_gpu_reset
)
1989 r
= amdgpu_ras_interrupt_add_handler(adev
, ih_info
);
1994 r
= amdgpu_ras_sysfs_create(adev
, fs_info
);
2000 amdgpu_ras_sysfs_remove(adev
, ras_block
);
2003 amdgpu_ras_interrupt_remove_handler(adev
, ih_info
);
2005 amdgpu_ras_feature_enable(adev
, ras_block
, 0);
2009 /* helper function to remove ras fs node and interrupt handler */
2010 void amdgpu_ras_late_fini(struct amdgpu_device
*adev
,
2011 struct ras_common_if
*ras_block
,
2012 struct ras_ih_if
*ih_info
)
2014 if (!ras_block
|| !ih_info
)
2017 amdgpu_ras_sysfs_remove(adev
, ras_block
);
2019 amdgpu_ras_interrupt_remove_handler(adev
, ih_info
);
2020 amdgpu_ras_feature_enable(adev
, ras_block
, 0);
/* do some init work after IP late init, as a dependency.
 * it runs in the resume/gpu reset/boot-up cases.
2026 void amdgpu_ras_resume(struct amdgpu_device
*adev
)
2028 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
2029 struct ras_manager
*obj
, *tmp
;
2034 if (con
->flags
& AMDGPU_RAS_FLAG_INIT_BY_VBIOS
) {
		/* Set up all other IPs which are not implemented. One tricky
		 * thing is that the IP's actual ras error type should be
		 * MULTI_UNCORRECTABLE, but since the driver does not handle
		 * it, ERROR_NONE makes sense anyway.
2040 amdgpu_ras_enable_all_features(adev
, 1);
		/* We enable ras on all hw_supported blocks, but the boot
		 * parameter might disable some of them, and one or more IPs
		 * might not be implemented yet. So we disable those on their
		 * behalf.
2046 list_for_each_entry_safe(obj
, tmp
, &con
->head
, node
) {
2047 if (!amdgpu_ras_is_supported(adev
, obj
->head
.block
)) {
2048 amdgpu_ras_feature_enable(adev
, &obj
->head
, 0);
				/* there should not be any references. */
2050 WARN_ON(alive_obj(obj
));
2055 if (con
->flags
& AMDGPU_RAS_FLAG_INIT_NEED_RESET
) {
2056 con
->flags
&= ~AMDGPU_RAS_FLAG_INIT_NEED_RESET
;
		/* set up the ras obj state as disabled.
		 * for the init_by_vbios case.
		 * if we want to enable ras, just enable it in a normal way.
		 * If we want to disable it, we need to set up the ras obj as
		 * enabled, then issue another TA disable cmd.
		 * See feature_enable_on_boot
2064 amdgpu_ras_disable_all_features(adev
, 1);
2065 amdgpu_ras_reset_gpu(adev
);
2069 void amdgpu_ras_suspend(struct amdgpu_device
*adev
)
2071 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
2076 amdgpu_ras_disable_all_features(adev
, 0);
2077 /* Make sure all ras objects are disabled. */
2079 amdgpu_ras_disable_all_features(adev
, 1);
2082 /* do some fini work before IP fini as dependence */
2083 int amdgpu_ras_pre_fini(struct amdgpu_device
*adev
)
2085 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
	/* Need to disable ras on all IPs here before ip [hw/sw]fini */
2091 amdgpu_ras_disable_all_features(adev
, 0);
2092 amdgpu_ras_recovery_fini(adev
);
2096 int amdgpu_ras_fini(struct amdgpu_device
*adev
)
2098 struct amdgpu_ras
*con
= amdgpu_ras_get_context(adev
);
2103 amdgpu_ras_fs_fini(adev
);
2104 amdgpu_ras_interrupt_remove_all(adev
);
2106 WARN(con
->features
, "Feature mask is not cleared");
2109 amdgpu_ras_disable_all_features(adev
, 1);
2111 amdgpu_ras_set_context(adev
, NULL
);
2117 void amdgpu_ras_global_ras_isr(struct amdgpu_device
*adev
)
2119 uint32_t hw_supported
, supported
;
2121 amdgpu_ras_check_supported(adev
, &hw_supported
, &supported
);
2125 if (atomic_cmpxchg(&amdgpu_ras_in_intr
, 0, 1) == 0) {
		dev_info(adev->dev, "uncorrectable hardware error "
			 "(ERREVENT_ATHUB_INTERRUPT) detected!\n");
2129 amdgpu_ras_reset_gpu(adev
);