3a9b5dc8 |
#ifdef ADAPTIVE_SAMPLING

#extension GL_ARB_shader_image_load_store : require

#extension GL_ARB_shader_image_size : enable

//! OpenGL image used for accumulating rendering result.
//! Each screen pixel occupies a 3x2 tile of texels (see main()).
volatile restrict layout(size1x32) uniform image2D uRenderImage;

//! OpenGL image storing variance of sampled pixels blocks.
volatile restrict layout(size1x32) uniform iimage2D uVarianceImage;

#else // ADAPTIVE_SAMPLING

//! Input image.
uniform sampler2D uInputTexture;

//! Ray tracing depth image.
uniform sampler2D uDepthTexture;

#endif // ADAPTIVE_SAMPLING

//! Number of accumulated frames.
uniform int uAccumFrames;

//! Is debug mode enabled for importance screen sampling.
uniform int uDebugAdaptive;

//! Exposure value for tone mapping.
uniform float uExposure;

#ifdef TONE_MAPPING_FILMIC

//! White point value for filmic tone mapping.
uniform float uWhitePoint;

#endif // TONE_MAPPING_FILMIC

//! Output pixel color.
out vec4 OutColor;

//! RGB weight factors to calculate luminance (Rec. 709 coefficients).
#define LUMA vec3 (0.2126f, 0.7152f, 0.0722f)

//! Scale factor used to quantize visual error (error is accumulated
//! into an integer image via imageAtomicAdd).
#define SCALE_FACTOR 1.0e6f
eb85ed36 |
// =======================================================================
// function : ToneMappingFilmic
// purpose  : Applies a filmic tone-mapping curve to the input radiance
//            and normalizes the result by the mapped white point.
// =======================================================================
vec4 ToneMappingFilmic (vec4 theColor, float theWhitePoint)
{
  // pack the white point into the 4th lane so that the very same curve
  // is evaluated for it in parallel with the RGB channels
  vec4 anInput = vec4 (theColor.rgb, theWhitePoint);

  // rational approximation of the filmic response curve
  vec4 aSlope  = anInput * 1.425f + vec4 (0.05f);
  vec4 aNumer  = anInput * aSlope + vec4 (0.004f);
  vec4 aDenom  = anInput * (aSlope + vec4 (0.55f)) + vec4 (0.0491f);
  vec4 aMapped = aNumer / aDenom - vec4 (0.0821f);

  // divide by the mapped white point so that white maps to 1.0
  return vec4 (aMapped.rgb / aMapped.www, 1.0);
}
59 | |
3a9b5dc8 |
// =======================================================================
// function : main
// purpose  : Resolves the ray-tracing / path-tracing result into the
//            output color and depth, optionally applying exposure,
//            filmic tone mapping and gamma correction.
// =======================================================================
void main (void)
{
#ifndef ADAPTIVE_SAMPLING

  // plain mode: accumulated result is available as a regular texture
  vec4 aColor = texelFetch (uInputTexture, ivec2 (gl_FragCoord.xy), 0);

#ifdef PATH_TRACING
  float aDepth = aColor.w; // path tracing uses averaged depth
#else
  float aDepth = texelFetch (uDepthTexture, ivec2 (gl_FragCoord.xy), 0).r;
#endif

  gl_FragDepth = aDepth;

#else // ADAPTIVE_SAMPLING

  ivec2 aPixel = ivec2 (gl_FragCoord.xy);

  vec4 aColor = vec4 (0.0);

  // fetch accumulated color and total number of samples;
  // each screen pixel occupies a 3x2 texel tile in uRenderImage:
  // (0,0) = R, (1,0) = G, (1,1) = B, (0,1) = sample count
  aColor.x = imageLoad (uRenderImage, ivec2 (3 * aPixel.x + 0,
                                             2 * aPixel.y + 0)).x;
  aColor.y = imageLoad (uRenderImage, ivec2 (3 * aPixel.x + 1,
                                             2 * aPixel.y + 0)).x;
  aColor.z = imageLoad (uRenderImage, ivec2 (3 * aPixel.x + 1,
                                             2 * aPixel.y + 1)).x;
  aColor.w = imageLoad (uRenderImage, ivec2 (3 * aPixel.x + 0,
                                             2 * aPixel.y + 1)).x;

  // calculate normalization factor (the max() guards against division
  // by zero before the first sample has been accumulated)
  float aSampleWeight = 1.f / max (1.0, aColor.w);

  // calculate averaged depth value (tile texel (2,1))
  gl_FragDepth = imageLoad (uRenderImage, ivec2 (3 * aPixel.x + 2,
                                                 2 * aPixel.y + 1)).x * aSampleWeight;

  // calculate averaged radiance for all samples and even samples only;
  // tile texel (2,0) holds the half (even-sample) radiance sum, so it is
  // scaled by 2 to be comparable with the full average
  float aHalfRad = imageLoad (uRenderImage, ivec2 (3 * aPixel.x + 2,
                                                   2 * aPixel.y + 0)).x * aSampleWeight * 2.f;

  float aAverRad = dot (aColor.rgb, LUMA) * aSampleWeight;

  // apply our 'tone mapping' operator (gamma correction and clamping)
  aHalfRad = min (1.f, sqrt (aHalfRad));
  aAverRad = min (1.f, sqrt (aAverRad));

  // calculate visual error as squared difference between the full and
  // the half (even-samples-only) tone-mapped averages
  float anError = (aAverRad - aHalfRad) * (aAverRad - aHalfRad);

  // accumulate visual error of the current screen block; until the first
  // 40 samples the full SCALE_FACTOR (maximum error) is written instead
  // of the estimate, forcing the block to keep receiving samples
  imageAtomicAdd (uVarianceImage, ivec2 (aPixel / vec2 (BLOCK_SIZE)), int (mix (SCALE_FACTOR, anError * SCALE_FACTOR, aColor.w > 40.f)));

  if (uDebugAdaptive == 0) // normal rendering
  {
    aColor = vec4 (aColor.rgb * aSampleWeight, 1.0);
  }
  else // showing number of samples
  {
    vec2 aRatio = vec2 (1.f, 1.f);

#ifdef GL_ARB_shader_image_size
    // normalize the overlay by the render image size relative to a
    // reference 512x512 target (tile layout is 3x2 texels per pixel)
    aRatio = vec2 (imageSize (uRenderImage)) / vec2 (3.f * 512.f, 2.f * 512.f);
#endif

    // dim the color and add a green overlay proportional to sample count
    aColor = vec4 (0.5f * aColor.rgb * aSampleWeight + vec3 (0.f, sqrt (aRatio.x * aRatio.y) * aColor.w / uAccumFrames * 0.35f, 0.f), 1.0);
  }

#endif // ADAPTIVE_SAMPLING

#ifdef PATH_TRACING

  aColor *= pow (2, uExposure); // exposure expressed in powers of two

#ifdef TONE_MAPPING_FILMIC
  aColor = ToneMappingFilmic (aColor, uWhitePoint);
#endif // TONE_MAPPING_FILMIC

  // apply gamma correction (we use gamma = 2)
  OutColor = vec4 (sqrt (aColor.rgb), 0.f);

#else // not PATH_TRACING

  OutColor = aColor;

#endif
}