/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu_reset.h"
#include "aldebaran.h"
#include "sienna_cichlid.h"
#include "smu_v13_0_10.h"

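/**
 * amdgpu_reset_init - register ASIC-specific reset handlers
 * @adev: amdgpu device pointer
 *
 * Selects the reset handler implementation that matches the device's
 * SMU (MP1) IP version and registers it with the device. ASICs without
 * a dedicated implementation fall through and return 0.
 *
 * Returns 0 on success, negative error code on failure.
 */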
int amdgpu_reset_init(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
		ret = aldebaran_reset_init(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_init(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_init(adev);
		break;
	default:
		break;
	}

	return ret;
}

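/**
 * amdgpu_reset_fini - tear down ASIC-specific reset handlers
 * @adev: amdgpu device pointer
 *
 * Counterpart of amdgpu_reset_init(); releases the reset handler state
 * set up for the matching SMU (MP1) IP version.
 *
 * Returns 0 on success, negative error code on failure.
 */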
int amdgpu_reset_fini(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 6):
		ret = aldebaran_reset_fini(adev);
		break;
	case IP_VERSION(11, 0, 7):
		ret = sienna_cichlid_reset_fini(adev);
		break;
	case IP_VERSION(13, 0, 10):
		ret = smu_v13_0_10_reset_fini(adev);
		break;
	default:
		break;
	}

	return ret;
}

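/**
 * amdgpu_reset_prepare_hwcontext - prepare hardware context for reset
 * @adev: amdgpu device pointer
 * @reset_context: parameters describing the pending reset
 *
 * Looks up a reset handler able to service @reset_context and invokes
 * its prepare_hwcontext() callback.
 *
 * Returns -EOPNOTSUPP if no suitable handler is registered, otherwise
 * the return value of the handler's prepare_hwcontext() callback.
 */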
int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev,
				   struct amdgpu_reset_context *reset_context)
{
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -EOPNOTSUPP;

	return reset_handler->prepare_hwcontext(adev->reset_cntl,
						reset_context);
}

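/**
 * amdgpu_reset_perform_reset - execute the reset and restore state
 * @adev: amdgpu device pointer
 * @reset_context: parameters describing the pending reset
 *
 * Performs the actual reset through the matching handler and, only on
 * success, restores the hardware context afterwards.
 *
 * Returns -EOPNOTSUPP if no suitable handler is registered, otherwise
 * the first error from the handler callbacks, or 0 on success.
 */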
int amdgpu_reset_perform_reset(struct amdgpu_device *adev,
			       struct amdgpu_reset_context *reset_context)
{
	int ret;
	struct amdgpu_reset_handler *reset_handler = NULL;

	if (adev->reset_cntl && adev->reset_cntl->get_reset_handler)
		reset_handler = adev->reset_cntl->get_reset_handler(
			adev->reset_cntl, reset_context);
	if (!reset_handler)
		return -EOPNOTSUPP;

	ret = reset_handler->perform_reset(adev->reset_cntl, reset_context);
	if (ret)
		return ret;

	return reset_handler->restore_hwcontext(adev->reset_cntl,
						reset_context);
}

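/**
 * amdgpu_reset_destroy_reset_domain - free a reset domain
 * @ref: refcount embedded in the domain to destroy
 *
 * kref release callback: destroys the domain's workqueue, if any, and
 * frees the domain itself. Called when the last reference is dropped.
 */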
void amdgpu_reset_destroy_reset_domain(struct kref *ref)
{
	struct amdgpu_reset_domain *reset_domain = container_of(ref,
								struct amdgpu_reset_domain,
								refcount);
	if (reset_domain->wq)
		destroy_workqueue(reset_domain->wq);

	kvfree(reset_domain);
}

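/**
 * amdgpu_reset_create_reset_domain - allocate and initialize a reset domain
 * @type: scope of the domain (single device or XGMI hive)
 * @wq_name: name for the domain's single-threaded reset workqueue
 *
 * Returns the new domain with its refcount initialized to one, or NULL
 * if allocating the domain or its workqueue fails.
 */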
struct amdgpu_reset_domain *amdgpu_reset_create_reset_domain(enum amdgpu_reset_domain_type type,
							     char *wq_name)
{
	struct amdgpu_reset_domain *reset_domain;

	reset_domain = kvzalloc(sizeof(struct amdgpu_reset_domain), GFP_KERNEL);
	if (!reset_domain) {
		DRM_ERROR("Failed to allocate amdgpu_reset_domain!");
		return NULL;
	}

	reset_domain->type = type;
	kref_init(&reset_domain->refcount);

	reset_domain->wq = create_singlethread_workqueue(wq_name);
	if (!reset_domain->wq) {
		DRM_ERROR("Failed to allocate wq for amdgpu_reset_domain!");
		amdgpu_reset_put_reset_domain(reset_domain);
		return NULL;
	}

	atomic_set(&reset_domain->in_gpu_reset, 0);
	atomic_set(&reset_domain->reset_res, 0);
	init_rwsem(&reset_domain->sem);

	return reset_domain;
}

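/**
 * amdgpu_device_lock_reset_domain - take exclusive ownership of the domain
 * @reset_domain: domain covering the devices being reset
 *
 * Marks the domain as being in GPU reset and takes the domain semaphore
 * for writing, blocking out readers that need stable hardware state.
 */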
void amdgpu_device_lock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 1);
	down_write(&reset_domain->sem);
}

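/**
 * amdgpu_device_unlock_reset_domain - release exclusive ownership
 * @reset_domain: domain covering the devices that were reset
 *
 * Clears the in-reset flag and releases the domain semaphore taken by
 * amdgpu_device_lock_reset_domain().
 */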
void amdgpu_device_unlock_reset_domain(struct amdgpu_reset_domain *reset_domain)
{
	atomic_set(&reset_domain->in_gpu_reset, 0);
	up_write(&reset_domain->sem);
}