/*
 * Copyright 2011-2017 Blender Foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

CCL_NAMESPACE_BEGIN

#if defined(__BRANCHED_PATH__) && defined(__VOLUME__)

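/* Initialize the shared branched path indirect loop state for this ray and
 * flag it as generating indirect light from a volume scatter event. */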
ccl_device_inline void kernel_split_branched_path_volume_indirect_light_init(KernelGlobals *kg,
                                                                             int ray_index)
{
  kernel_split_branched_path_indirect_loop_init(kg, ray_index);

  ADD_RAY_FLAG(kernel_split_state.ray_state, ray_index, RAY_BRANCHED_VOLUME_INDIRECT);
}

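/* Take the remaining volume samples for a ray in the branched indirect loop.
 * Returns true while the ray still has an indirect path to trace (or is
 * waiting on samples shared with other threads), false once all samples are
 * done and the main path can continue. */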
ccl_device_noinline bool kernel_split_branched_path_volume_indirect_light_iter(KernelGlobals *kg,
                                                                               int ray_index)
{
  SplitBranchedState *branched_state = &kernel_split_state.branched_state[ray_index];

  ShaderData *sd = kernel_split_sd(sd, ray_index);
  PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
  ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);

  /* GPU: no decoupled ray marching, scatter probabilistically */
  int num_samples = kernel_data.integrator.volume_samples;
  float num_samples_inv = 1.0f / num_samples;

  Ray volume_ray = branched_state->ray;
  volume_ray.t = (!IS_STATE(&branched_state->ray_state, 0, RAY_HIT_BACKGROUND)) ?
                     branched_state->isect.t :
                     FLT_MAX;

  float step_size = volume_stack_step_size(kg, branched_state->path_state.volume_stack);

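  /* Loop over the remaining volume samples, resuming from the sample index
   * saved when the previous indirect path was started. */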
  for (int j = branched_state->next_sample; j < num_samples; j++) {
    ccl_global PathState *ps = &kernel_split_state.path_state[ray_index];
    *ps = branched_state->path_state;

    ccl_global Ray *pray = &kernel_split_state.ray[ray_index];
    *pray = branched_state->ray;

    ccl_global float3 *tp = &kernel_split_state.throughput[ray_index];
    *tp = branched_state->throughput * num_samples_inv;

    /* branch RNG state */
    path_state_branch(ps, j, num_samples);

    /* integrate along volume segment with distance sampling */
    VolumeIntegrateResult result = kernel_volume_integrate(
        kg, ps, sd, &volume_ray, L, tp, step_size);

#  ifdef __VOLUME_SCATTER__
    if (result == VOLUME_PATH_SCATTERED) {
      /* direct lighting */
      kernel_path_volume_connect_light(kg, sd, emission_sd, *tp, &branched_state->path_state, L);

      /* indirect light bounce */
      if (!kernel_path_volume_bounce(kg, sd, tp, ps, &L->state, pray)) {
        continue;
      }

      /* start the indirect path */
      branched_state->next_closure = 0;
      branched_state->next_sample = j + 1;

      /* Attempting to share too many samples is slow for volumes as it causes us to
       * loop here more and have many calls to kernel_volume_integrate which evaluates
       * shaders. The many expensive shader evaluations cause the work load to become
       * unbalanced and many threads to become idle in this kernel. Limiting the
       * number of shared samples here helps quite a lot.
       */
      if (branched_state->shared_sample_count < 2) {
        if (kernel_split_branched_indirect_start_shared(kg, ray_index)) {
          continue;
        }
      }

      return true;
    }
#  endif
  }

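  /* All samples have been taken. If other threads are still working on
   * samples shared from this ray, keep it alive until they finish. */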
  branched_state->next_sample = num_samples;

  branched_state->waiting_on_shared_samples = (branched_state->shared_sample_count > 0);
  if (branched_state->waiting_on_shared_samples) {
    return true;
  }

  kernel_split_branched_path_indirect_loop_end(kg, ray_index);

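  /* Attenuate the main path throughput by the volume transmittance along the
   * segment before the path continues past it. */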
  /* todo: avoid this calculation using decoupled ray marching */
  float3 throughput = kernel_split_state.throughput[ray_index];
  kernel_volume_shadow(
      kg, emission_sd, &kernel_split_state.path_state[ray_index], &volume_ray, &throughput);
  kernel_split_state.throughput[ray_index] = throughput;

  return false;
}

#endif /* __BRANCHED_PATH__ && __VOLUME__ */

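/* Split kernel volume stage: for each active ray (or ray that hit the
 * background), integrate the volume along the current segment, handle
 * scatter events, and service the branched volume indirect light queue. */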
ccl_device void kernel_do_volume(KernelGlobals *kg)
{
#ifdef __VOLUME__
  /* We will empty this queue in this kernel. */
  if (ccl_global_id(0) == 0 && ccl_global_id(1) == 0) {
    kernel_split_params.queue_index[QUEUE_ACTIVE_AND_REGENERATED_RAYS] = 0;
#  ifdef __BRANCHED_PATH__
    kernel_split_params.queue_index[QUEUE_VOLUME_INDIRECT_ITER] = 0;
#  endif /* __BRANCHED_PATH__ */
  }

  int ray_index = ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0);

  if (*kernel_split_params.use_queues_flag) {
    ray_index = get_ray_index(kg,
                              ray_index,
                              QUEUE_ACTIVE_AND_REGENERATED_RAYS,
                              kernel_split_state.queue_data,
                              kernel_split_params.queue_size,
                              1);
  }

  ccl_global char *ray_state = kernel_split_state.ray_state;

  PathRadiance *L = &kernel_split_state.path_radiance[ray_index];
  ccl_global PathState *state = &kernel_split_state.path_state[ray_index];

  if (IS_STATE(ray_state, ray_index, RAY_ACTIVE) ||
      IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND)) {
    ccl_global float3 *throughput = &kernel_split_state.throughput[ray_index];
    ccl_global Ray *ray = &kernel_split_state.ray[ray_index];
    ccl_global Intersection *isect = &kernel_split_state.isect[ray_index];
    ShaderData *sd = kernel_split_sd(sd, ray_index);
    ShaderData *emission_sd = AS_SHADER_DATA(&kernel_split_state.sd_DL_shadow[ray_index]);

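    /* Background rays have no surface hit, so the volume segment below
     * extends to infinity. */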
    bool hit = !IS_STATE(ray_state, ray_index, RAY_HIT_BACKGROUND);

    /* Sanitize volume stack. */
    if (!hit) {
      kernel_volume_clean_stack(kg, state->volume_stack);
    }
    /* volume attenuation, emission, scatter */
    if (state->volume_stack[0].shader != SHADER_NONE) {
      Ray volume_ray = *ray;
      volume_ray.t = (hit) ? isect->t : FLT_MAX;

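      /* Regular path tracing, and rays already inside a branched indirect
       * bounce, take a single probabilistic volume sample here. In branched
       * path tracing, other rays instead split into multiple volume samples
       * through the indirect light loop below. */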
#  ifdef __BRANCHED_PATH__
      if (!kernel_data.integrator.branched ||
          IS_FLAG(ray_state, ray_index, RAY_BRANCHED_INDIRECT)) {
#  endif /* __BRANCHED_PATH__ */
        float step_size = volume_stack_step_size(kg, state->volume_stack);

        {
          /* integrate along volume segment with distance sampling */
          VolumeIntegrateResult result = kernel_volume_integrate(
              kg, state, sd, &volume_ray, L, throughput, step_size);

#  ifdef __VOLUME_SCATTER__
          if (result == VOLUME_PATH_SCATTERED) {
            /* direct lighting */
            kernel_path_volume_connect_light(kg, sd, emission_sd, *throughput, state, L);

            /* indirect light bounce */
            if (kernel_path_volume_bounce(kg, sd, throughput, state, &L->state, ray)) {
              ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
            }
            else {
              kernel_split_path_end(kg, ray_index);
            }
          }
#  endif /* __VOLUME_SCATTER__ */
        }

#  ifdef __BRANCHED_PATH__
      }
      else {
        kernel_split_branched_path_volume_indirect_light_init(kg, ray_index);

        if (kernel_split_branched_path_volume_indirect_light_iter(kg, ray_index)) {
          ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
        }
      }
#  endif /* __BRANCHED_PATH__ */
    }
  }

#  ifdef __BRANCHED_PATH__
  /* Iteration loop: pick up rays queued for another branched volume indirect light sample. */
  ray_index = get_ray_index(kg,
                            ccl_global_id(1) * ccl_global_size(0) + ccl_global_id(0),
                            QUEUE_VOLUME_INDIRECT_ITER,
                            kernel_split_state.queue_data,
                            kernel_split_params.queue_size,
                            1);

  if (IS_STATE(ray_state, ray_index, RAY_VOLUME_INDIRECT_NEXT_ITER)) {
    /* for render passes, sum and reset indirect light pass variables
     * for the next samples */
    path_radiance_sum_indirect(&kernel_split_state.path_radiance[ray_index]);
    path_radiance_reset_indirect(&kernel_split_state.path_radiance[ray_index]);

    if (kernel_split_branched_path_volume_indirect_light_iter(kg, ray_index)) {
      ASSIGN_RAY_STATE(ray_state, ray_index, RAY_REGENERATED);
    }
  }
#  endif /* __BRANCHED_PATH__ */

#endif /* __VOLUME__ */
}

CCL_NAMESPACE_END