Compare commits

...

10 Commits

Author SHA1 Message Date
Peder Bergebakken Sundt 2e252fb664 Add presentation slides 2019-04-08 20:07:48 +02:00
Peder Bergebakken Sundt b2c33fcade Switch to older method of binding textures in GL, for backward compatibility 2019-04-08 15:25:13 +02:00
Peder Bergebakken Sundt 420c052854 Add report build dependencies 2019-04-07 20:44:10 +02:00
Peder Bergebakken Sundt 55f9249363 Finish creating the delivery report 2019-04-07 20:29:45 +02:00
Peder Bergebakken Sundt 349a7934c4 Add fog events to the day-night cycle 2019-04-07 20:26:38 +02:00
Peder Bergebakken Sundt d73da5b2a1 Add in first draft of the final delivery 2019-04-07 00:18:43 +02:00
Peder Bergebakken Sundt 95d6981461 Add support for fog into the scene shader 2019-04-07 00:03:03 +02:00
Peder Bergebakken Sundt ccdf4ab3c9 Improve assimp compiletime 2019-04-07 00:02:33 +02:00
Peder Bergebakken Sundt 44dbde8148 rename report into log 2019-04-04 14:47:37 +02:00
Peder Bergebakken Sundt 9c889c2d9a Add report, in its current unfinished state 2019-04-02 23:51:16 +02:00
53 changed files with 1015 additions and 28 deletions

4
.gitignore vendored

@ -32,3 +32,7 @@
*.app
build/
report/pd-images/
report/*_combined.md
report/*_combined.md5_hash
report/*_combined_out.pdf

CMakeLists.txt

@ -41,7 +41,12 @@ option (SFML_BUILD_NETWORK OFF)
add_subdirectory(lib/SFML)
# assimp
set(BUILD_ASSIMP_TOOLS ON)
set(BUILD_ASSIMP_TOOLS OFF)
set(ASSIMP_BUILD_ASSIMP_TOOLS OFF)
set(ASSIMP_BUILD_SAMPLES OFF)
set(ASSIMP_BUILD_TESTS OFF)
set(ASSIMP_COVERALLS OFF)
set(ASSIMP_NO_EXPORT ON)
set(ASSIMP_BUILD_STATIC_LIB ON)
add_subdirectory(lib/assimp)
link_directories("lib/assimp/build/code")

67
report/build_report.sh Executable file

@ -0,0 +1,67 @@
#!/usr/bin/env bash
here="$(pwd)"
echo Building log_combined.md...
(
cat log_part1_intro.md; echo;
cat log_part2_hills.md; echo;
cat log_part3_models.md; echo;
cat log_part4_optimizations.md; echo;
cat log_part5_scene.md; echo;
cat log_part6_effect.md; echo;
cat log_part7_daylight.md; echo;
) | sed -e "s/ i / I /g" | sed -e "s/ i'm / I'm /g" > log_combined.md
echo Building delivery_combined.md...
(
cat delivery_part1.md; echo;
cat delivery_part2.md; echo;
cat delivery_part3.md; echo;
cat delivery_part4.md; echo;
cat delivery_part5.md; echo;
) | sed -e "s/ i / I /g" | sed -e "s/ i'm / I'm /g" > delivery_combined.md
#ENGINE=pdflatex
#ENGINE=lualatex
ENGINE=xelatex
VARIABLES="$VARIABLES --filter pandoc-codeblock-include"
VARIABLES="$VARIABLES --filter pandoc-imagine"
VARIABLES="$VARIABLES --filter pandoc-crossref"
#VARIABLES="$VARIABLES --variable classoption=twocolumn"
VARIABLES="$VARIABLES --table-of-contents"
VARIABLES="$VARIABLES --number-sections"
#VARIABLES="$VARIABLES --number-offset=0,0"
VARIABLES="$VARIABLES --variable papersize=a4paper"
VARIABLES="$VARIABLES --variable geometry:margin=2cm"
VARIABLES="$VARIABLES --variable links-as-notes=true"
VARIABLES="$VARIABLES --highlight-style=pygments" # the default
#VARIABLES="$VARIABLES --highlight-style=haddock" # kinda nice for python at least
#VARIABLES="$VARIABLES --highlight-style=tango"
#VARIABLES="$VARIABLES --highlight-style=espresso"
#VARIABLES="$VARIABLES --highlight-style=zenburn"
#VARIABLES="$VARIABLES --highlight-style=kate"
#VARIABLES="$VARIABLES --highlight-style=monochrome"
#VARIABLES="$VARIABLES --highlight-style=breezedark"
ls -1 *.md | grep -v "part" |
( while read source; do
(
base="$(basename $source)"
dest="$(echo $base | rev | cut -c4- | rev)_out"
cd "$(dirname $source)"
if [ "$(md5sum "$source")" != "$(cat ${source}5_hash 2>/dev/null)" ]; then
md5sum "$source" > "${source}5_hash"
echo "Converting $source into $(dirname $source)/${dest}.pdf ..."
pandoc "$base" --pdf-engine="$ENGINE" $VARIABLES -o "$dest.pdf"
fi
) &
done
wait )

34
report/delivery_part1.md Normal file

@ -0,0 +1,34 @@
% TDT4230 Final assignment report
% Peder Bergebakken Sundt
% 7th of April 2019
\small
```{.shebang im_out="stdout"}
#!/usr/bin/env bash
printf "time for some intricate graphics surgery!\n" | cowsay -f surgery | head -n -5 | sed -e "s/^/ /"
```
\normalsize
\newpage
# The project
For this project, we're supposed to investigate a more advanced or complex visualisation method in detail by implementing it ourselves using C++ and OpenGL 4.3+. I'll be doing it by myself.
The idea I have in mind for the scene I want to create is a field of grass with trees spread about it, where a car is driving along the ups and downs of the hills. I then plan to throw every effect I can at it to make it look good.
I want to look more into effects one can apply to a scene of different materials. In detail, I plan to implement:
Phong lighting,
texturing,
normal mapping,
displacement mapping,
importing model meshes with transformations and materials from external files,
reflections,
fog and
rim backlights.
I also want to implement some post-processing effects:
chromatic aberration,
depth of field,
vignette and
noise / grain.

58
report/delivery_part2.md Normal file

@ -0,0 +1,58 @@
# How does the implementation achieve its goal?
The final implementation has four types defined:
`SceneNode`, `Mesh`, `PNGImage`, and `Material`. The `Material` struct references several `PNGImage`s and stores colors and rules for how to be applied to a `SceneNode`. Each `SceneNode` references a `Mesh`, stores all material properties applied to the node, which shader it should be rendered with, and a list of child `SceneNode`s.
Each mesh can be UV mapped. Each vertex has a UV coordinate assigned to it, which is passed along with the vertex position into the shaders. Texturing meshes is done by looking up the pixel color from a diffuse texture, using the interpolated UV coordinates. This diffuse color is used as the 'basecolor' in further calculations.
## Normal mapping
Normals are defined in two places: one normal vector per vertex in the mesh, and an optional tangential normal map texture. The normal vector is combined with its tangent and bitangent vectors (the tangents in the U and V directions respectively) into a TBN transformation matrix, which the tangential normal vector fetched from the normal map can be transformed with. This allows us to define the normal vector along the different surfaces of the mesh.
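In GLSL this boils down to something like the following sketch (the texture and variable names here are assumptions):

```cpp
// build the TBN basis from the interpolated vectors and apply the normal map
mat3 TBN = mat3(normalize(tangent), normalize(bitangent), normalize(normal));
vec3 mapped = texture(normalTex, UV).rgb * 2.0 - 1.0; // [0,1] -> [-1,1]
vec3 nnormal = normalize(TBN * mapped);
```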
## Displacement mapping
Displacement mapping is done in the vertex shader. A displacement texture is sampled using the UV coordinates; it describes how far to offset the vertex along its normal vector. This is further controlled with a displacement coefficient uniform passed into the vertex shader. See @fig:img-fine-plane and @fig:img-displacement-normals.
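A minimal sketch of the vertex-shader side (names are assumptions, except `displacementCoefficient`, which appears in the uniform listings below):

```cpp
float d = texture(displaceTex, UV).r * 2.0 - 1.0; // displacement in [-1,1]
vec3 displaced = position + normal * d * displacementCoefficient;
gl_Position = MVP * vec4(displaced, 1.0);
```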
## Phong lighting
The Phong lighting model is implemented in the fragment shader. The model describes four light components: the diffuse component, the emissive component, the specular component and the ambient component. Each of these components has a color/intensity assigned to it, which is stored in the `SceneNode`/`Material`.
The colors are computed using the normal vector computed as described above. The basecolor is multiplied with the sum of the diffuse and the emissive colors, and the specular color is then added on top. I chose to combine the ambient and emissive into one single component, since I don't need the distinction in my case. I did however make the small change of multiplying the emissive color with the color of the first light in the scene. This allows me to 'tint' the emissive component as well.
I have two types of nodes in the scene for lights: point lights and spot lights. Each light has a color associated with it, as well as a position and three attenuation factors. The final attenuation is computed from these three factors as $\frac{1}{x + y\cdot |L| + z\cdot |L|^2}$.
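In GLSL this amounts to something like the sketch below (the per-light struct layout is an assumption):

```cpp
vec3 L = lights[i].position - vertex; // vector from fragment to light
float dist = length(L);
float attenuation = 1.0 / (lights[i].att.x
                         + lights[i].att.y * dist
                         + lights[i].att.z * dist * dist); // dist*dist == |L|^2
```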
## Loading models
Models are imported using the library `assimp`. It is a huge and bulky library which takes decades to compile, but it gets the job done. Each model file is actually a whole 'scene'. I first traverse the materials defined in this scene and store them in my own material structs. I then traverse the textures in the scene and load them into `PNGImage` structs. I then traverse all the meshes stored in the scene and store those. Lastly I traverse the nodes in the scene, creating my own nodes, applying the transformations, materials, textures and meshes referenced by each node. Finally I rotate the root node to account for me using a coordinate system where z points skyward.
## Reflections
Reflections are implemented in the fragment shader, using the vector pointing from the camera to the fragment (F) and the normal vector. I reflect the F vector along the normal vector and normalize the result. Computing the dot product between the normalized reflection and any other unit vector gives me the cosine of the angle between the two. Computing this cosine northward and skyward allows me to map the reflection onto a sphere and retrieve the UV coordinates used to fetch the reflected color value from a reflection texture map (see @fig:img-reflection and @fig:img-reflection-map).
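A sketch of the mapping in GLSL (which axes count as 'northward' and 'skyward' here is an assumption, as is the texture name):

```cpp
vec3 r = normalize(reflect(normalize(vertex), nnormal)); // vertex: camera-to-fragment
vec2 refUV = vec2(dot(r, vec3(1, 0, 0)),  // cosine northward
                  dot(r, vec3(0, 1, 0)))  // cosine skyward
             * 0.5 + 0.5;                 // [-1,1] -> [0,1]
vec3 reflection = texture(reflectionTex, refUV).rgb;
```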
## Fog
Fog is an easy effect to implement. I originally planned to implement it as a post-processing effect, but moved it, as discussed in @sec:learn, to the fragment shader.
The z component of the fragment position in MVP space is transformed into linear space and then multiplied by a fog strength uniform. This new value is used as the mix factor between the fragment color and the fog color. (See @fig:img-fog)
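The relevant scene-shader lines (they also appear in the shader diff at the bottom of this comparison) look roughly like this:

```cpp
const float near = 0.1;
const float far = 5000.0;
float linearDepth() { // linearize gl_FragCoord.z
    float z = gl_FragCoord.z * 2.0 - 1.0;
    return (2.0 * near * far) / (far + near - z * (far - near));
}
// in main(): mix toward the fog color based on linear depth
float fog = linearDepth() / 1500;
if (fog_strength > 0.05) c.rgb = mix(c.rgb, fog_color, pow(fog, 1.2) * fog_strength);
```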
## Rim backlights
To make objects pop a bit more, one can apply a rim backlight color. The effect tries to create an edge/rim/silhouette light around an object: the more the surface normal points away from the camera, the more it lights up, at maximum brightness 90 degrees away from the camera and decreasing the more it faces the camera. I compute the dot product between the normalized vector from the camera to the fragment and the normal vector, which gives me the cosine of the angle between the two: a value of 1 when pointing away from the camera, 0 at 90 degrees, and -1 when facing the camera. Adding a "strength" value to this will skew it more towards the camera. Dividing it by the same strength value and clamping it yields the rim light strength (see @fig:img-rim-lights).
## Post processing
Post-processing is achieved by rendering the whole scene not to the window, but to an internal framebuffer. This framebuffer is then used as a texture covering a single quad which is rendered to the window. This in-between step allows me to apply different kinds of effects using the separate fragment shader applied to the quad; effects which rely on being able to access neighboring pixels' depth and color values.
### Depth of Field / Blur
Using this post-processing shader, I can apply blur to the scene. Depth of field is a selective blur, keeping just a certain distance range in focus. I first transform the depth buffer (see @fig:img-depth-map) to be 0 around the point of focus and tend towards 1 otherwise. I then use this focus value as the range of my blur. The blur is simply the average of a selection of neighboring pixels. See @fig:img-depth-of-field for results.
### Chromatic aberration
Light refracts differently depending on the wavelength (see @fig:img-what-is). By scaling the three color components by different amounts, I can recreate this effect. This scaling is further multiplied by the focus value computed above, to avoid aberration near the vertical line in @fig:img-what-is.
### Vignette
The vignette effect is simply a darkening of the image the further away from the center one gets. One can simply use the Euclidean distance to compute this. See @fig:img-vingette.
### Noise / Grain
GLSL doesn't have a random number generator built in, but I found one online. I modified it to use the UV vector and a time uniform as its seed. This generator is used to add noise to the image. The noise is multiplied with the focus value for a dramatic effect.

41
report/delivery_part3.md Normal file

@ -0,0 +1,41 @@
# Notable problems encountered on the way, and how I solved them
## General difficulties
A lot of time was spent cleaning up and modifying the gloom base project. A lot of time was also spent working with `assimp` and getting the internal framebuffer to render correctly. `assimp` and `OpenGL` aren't the most verbose debugging companions out there.
I learned that the handedness of face culling and normal maps isn't the same everywhere. Luckily `assimp` supports flipping faces. When reading the grass texture, I had to flip the R and G color components of the normal map to make it look right. See @fig:img-wrong-handedness and @fig:img-flipped-handedness.
## The slope of the displacement map
The scrolling field of grass is actually just a static plane mesh of 100x100 vertices, with a perlin noise displacement map applied to it (I use a UV offset uniform to make the field scroll, and the map is mirrored on repeat to avoid sharp edges, see @fig:img-gl-mirror). You can however see in @fig:img-fine-plane that the old normals don't mesh with the now displaced geometry. I therefore had to recalculate the normals using the slope of the displacement, rotating the TBN matrix and normal vectors in the shader to make them behave nicely with the lighting. Luckily I have both the tangent and bitangent vectors pointing in the U and V directions. Calculating the slope of the displacement in both of these directions allows me to add the normal vector times the slope to the tangent and the bitangent. After normalizing the tangents, I can compute the new normal vector as the cross product of the two. From these I construct the TBN matrix. See @lst:new-tbn for the code.
This did however give me a pretty coarse image, so I moved the computation of the TBN matrix from the vertex shader to the fragment shader. This gives me a slight performance penalty, but I can undo the change in a simplified shader should I need the performance boost. See @fig:img-displacement-normals for results.
## Transparent objects {#sec:trans}
When rendering transparent objects with depth testing enabled, we run into issues as seen in @fig:img-tree-alpha. The depth test is simply a comparison against the depth buffer, which determines whether a fragment should be rendered or not. When a fragment is rendered, the depth buffer is updated with the depth of the rendered fragment. Fragments which would appear behind already rendered fragments are skipped. But non-opaque objects should allow objects behind them to still be visible.
As a first step towards fixing this issue, I split the rendering of the scene into two stages: opaque nodes and transparent nodes. The first stage traverses the scene graph and stores all transparent nodes in a list. Afterwards the list is sorted by distance from the camera, then rendered back to front. This ensures that the transparent meshes furthest away are rendered before the ones in front, which won't trip up the depth test. The results of this can be viewed in @fig:img-tree-sorted.
We still have issues here, however. Faces within the same mesh aren't sorted and can be rendered in the wrong order. This is visible near the top of the tree in @fig:img-tree-sorted. To fix this one could sort all the faces, but this isn't feasible in real-time rendering applications. I then had the idea of disabling the depth test. This looks *better* in this case, but it would mean that opaque objects would always be beneath transparent ones, since the transparent ones are rendered in a second pass afterwards.
I then arrived at the solution of setting `glDepthMask(GL_FALSE)`, which makes the depth buffer read-only. All writes to the depth buffer are ignored. This way, the depth buffer created by the opaque objects can be used while rendering the transparent ones, and since the transparent ones are rendered in sorted order, they *kinda* work out as well. See @fig:img-tree-depth-readonly for the result. The new rendering pipeline is visualized in @fig:render-pipeline.
## Need for optimizations
At some point, when I had over 5000 meshes in the scene, I noticed a performance drop and started to look into optimizations. Resizing the window didn't affect the FPS, so I shouldn't be fragment bound. I assume I'm not vertex bound either, so I had to be bandwidth bound, which makes sense given my single-channel RAM and integrated graphics. Reducing the amount of data sent between the CPU and the GPU became my goal.
After some searching through the code I came across the part where I upload the uniforms for each draw call to GL. (See @lst:uniform-upload)
I first optimized the `s->location()` calls:
It is a lookup from a uniform name string to a location `GLint`. Asking GL directly every time is costly due to the limited bandwidth, and because the compiler is unable to inline dynamically linked functions. Therefore I cache the results per shader. See @lst:location-cache for the fix.
Uploading all these uniforms per GL draw call is inefficient as well.
Most of the uniforms don't change between draw calls. I therefore added caching of the uniforms using a bunch of nasty preprocessing tricks and static variables. See @lst:uniform-cache.
The next step for optimization would be to combine meshes with the same materials into a single mesh. Most of the grass could be optimized this way. Each bundle of grass consists of 64 nodes with the same materials and textures applied. Concatenating the meshes would decrease the scene traversal and other rendering overhead significantly.
I could also specialize the shader for each material. I thought of replacing many of the uniforms with defines and compiling a separate shader for each material, but time is a limited resource.

9
report/delivery_part4.md Normal file

@ -0,0 +1,9 @@
# What I learned about the methods in terms of advantages, limitations, and how to use them effectively {#sec:learn}
Post-processing is a great tool, but it adds complexity to the rendering pipeline. Debugging issues with the framebuffer isn't easy. It does have the advantage of allowing me to skew the window along a sine curve should I want to.
Post-processing is also a cost-saving measure in terms of performance. It allows me to compute some values once per pixel instead of once per fragment, and fragments tend to cover one another. The grain and vignette effects are both possible to implement in the scene shader; doing them in the post-processing step spares computation.
The method I use to render transparent objects works *okay*, as described in @sec:trans, but it does have consequences for the post-processing step later in the pipeline. I now have an incomplete depth buffer to work with, as seen in @fig:img-depth-map, where no grass or leaves show up. This makes adding a fog effect in post create weird artifacts. Fog can however be done in the fragment shader for the scene anyway, with only a slight performance penalty due to overlapping fragments.
One other weakness with the way I render transparent objects is that transparent meshes which cut into each other will be rendered incorrectly. The whole mesh is sorted and rendered, not each face. If I had two transparent ice cubes intersecting one another *(kinda like a Venn diagram)*, then one cube would be rendered on top of the other. This doesn't matter for grass, but more complex and central objects in the scene may suffer from this.

183
report/delivery_part5.md Normal file

@ -0,0 +1,183 @@
# Appendix
![
The segmented plane with the cobble texture, normal map and lighting applied to it.
](images/0-base.png){#fig:img-base}
![
The plane from @fig:img-base with a perlin noise displacement map applied to it
](images/1-perlin-displacement.png){#fig:img-perlin-displacement}
![
First rendering of the downloaded grass texture and normal map
](images/2-wrong-handedness.png){#fig:img-wrong-handedness}
![
Rendering of the downloaded grass texture with flipped normal map handedness
](images/3-flipped-handedness.png){#fig:img-flipped-handedness}
![
The field with grass texture, normal map and displacement map
](images/4-fine-plane.png){#fig:img-fine-plane}
![
How a mirrored-on-repeat texture behaves
](images/5-gl-mirror.jpg){#fig:img-gl-mirror}
```{.cpp #lst:new-tbn caption="Modified TBN matrix to account for the slope of the displacement"}
if (isDisplacementMapped) {
float o = texture(displaceTex, UV).r * 2 - 1;
float u = (texture(displaceTex, UV + vec2(0.0001, 0)).r*2-1 - o) / 0.0004; // magic numbers!
float v = (texture(displaceTex, UV + vec2(0, 0.0001)).r*2-1 - o) / 0.0004; // magic numbers!
TBN = mat3(
normalize(tangent + normal*u),
normalize(bitangent + normal*v),
normalize(cross(tangent + normal*u, bitangent + normal*v))
);
}
```
![
The displaced field with the TBN matrix rotated along the slope of the displacement.
](images/6-displacement-normals.png){#fig:img-displacement-normals}
![
Car mesh loaded from car model, without transformations
](images/7-car-meshes.png){#fig:img-car-meshes}
![
Car mesh loaded from car model with transformations applied.
](images/8-car-transformations.png){#fig:img-car-transformations}
![
Car mesh loaded from car model with transformations applied, rotated to make z point skyward.
](images/9-car-coordinate-system.png){#fig:img-car-coordinate-system}
![
Diffuse colors loaded from car model
](images/10-car-materials.png){#fig:img-car-materials}
![
Diffuse, emissive and specular colors loaded from car model
](images/11-material-colors.png){#fig:img-material-colors}
![
Car model with all colors, with reflection mapping applied.
](images/12-reflection.png){#fig:img-reflection}
![
The reflection map texture applied to the car
](../res/textures/reflection_field.png){#fig:img-reflection-map}
![
Tree model loaded from model file, no texture support yet.
](images/13-tree.png){#fig:img-tree}
![
Tree model loaded from model file, textures applied.
](images/14-tree-alpha.png){#fig:img-tree-alpha}
![
Tree model with textures, transparent meshes rendered last in sorted order.
](images/15-tree-sorted.png){#fig:img-tree-sorted}
![
Tree model with textures, transparent meshes rendered last in sorted order, with depthbuffer in read-only mode.
](images/16-tree-depth-readonly.png){#fig:img-tree-depth-readonly}
```{.dot include="images/rendering-pipeline.dot" caption="The scene rendering pipline, handling transparent nodes" #fig:render-pipeline}
```
![
Grass model loaded, cloned and placed randomly throughout the scene
](images/17-low-fps.png){#fig:img-low-fps}
```{.cpp caption="The node uniforms being uploaded to the shader" #lst:uniform-upload}
glUniformMatrix4fv(s->location("MVP") , 1, GL_FALSE, glm::value_ptr(node->MVP));
glUniformMatrix4fv(s->location("MV") , 1, GL_FALSE, glm::value_ptr(node->MV));
glUniformMatrix4fv(s->location("MVnormal"), 1, GL_FALSE, glm::value_ptr(node->MVnormal));
glUniform2fv(s->location("uvOffset") , 1, glm::value_ptr(node->uvOffset));
glUniform3fv(s->location("diffuse_color") , 1, glm::value_ptr(node->diffuse_color));
glUniform3fv(s->location("emissive_color"), 1, glm::value_ptr(node->emissive_color));
glUniform3fv(s->location("specular_color"), 1, glm::value_ptr(node->specular_color));
glUniform1f( s->location("opacity"), node->opacity);
glUniform1f( s->location("shininess"), node->shininess);
glUniform1f( s->location("reflexiveness"), node->reflexiveness);
glUniform1f( s->location("displacementCoefficient"), node->displacementCoefficient);
glUniform1ui(s->location("isTextured"), node->isTextured);
glUniform1ui(s->location("isVertexColored"), node->isVertexColored);
glUniform1ui(s->location("isNormalMapped"), node->isNormalMapped);
glUniform1ui(s->location("isDisplacementMapped"), node->isDisplacementMapped);
glUniform1ui(s->location("isReflectionMapped"), node->isReflectionMapped);
glUniform1ui(s->location("isIlluminated"), node->isIlluminated);
glUniform1ui(s->location("isInverted"), node->isInverted);
```
```{.cpp caption="Function for caching the uniform locations per shader. The commented line is the old implementation." #lst:location-cache}
GLint inline Shader::location(std::string const& name) {
//return glGetUniformLocation(mProgram, name.c_str());
auto it = this->cache.find(name);
if (it == this->cache.end())
return this->cache[name] = glGetUniformLocation(mProgram, name.c_str());
return it->second;
}
```
```{.cpp caption="The uniform caching defines" #lst:uniform-cache}
bool shader_changed = s != prev_s;
#define cache(x) static decltype(node->x) cached_ ## x; \
if (shader_changed || cached_ ## x != node->x) { cached_ ## x = node->x;
#define um4fv(x) cache(x) glUniformMatrix4fv(s->location(#x), 1, GL_FALSE, glm::value_ptr(node->x)); }
#define u2fv(x) cache(x) glUniform2fv( s->location(#x), 1, glm::value_ptr(node->x)); }
#define u3fv(x) cache(x) glUniform3fv( s->location(#x), 1, glm::value_ptr(node->x)); }
#define u1f(x) cache(x) glUniform1f( s->location(#x), node->x); }
#define u1ui(x) cache(x) glUniform1ui( s->location(#x), node->x); }
```
![
Car, trees and grass combined into a night driving scene. Two yellow spot lights attached to the head lights, two yellow point lights attached to the head lights, two red point lights attached to the rear lights.
](images/18-night-scene-lights.png){#fig:img-night-scene-lights}
![
A pink rim backlight applied to the car with strength of 0.3.
](images/19-rim-lights.png){#fig:img-rim-lights}
![
Visualisation of the transformed depth buffer, transformed into a 'point of focus' buffer. z=0 at the depth of the car, tends toward 1 otherwise.
](images/20-depth-map.png){#fig:img-depth-map}
![
Depth of field applied to the scene
](images/21-depth-of-field.png){#fig:img-depth-of-field}
![
The vignette effect, applied to a white frame buffer
](images/22-vingette.png){#fig:img-vingette}
![
The chromatic aberration effect. F is the point of focus. The transformed depthbuffer is centered around the vertical line crossing F.
](images/23.5-what-is.jpg){#fig:img-what-is}
![
Chromatic aberration applied to the scene, where the aberration coefficients have been multiplied by 3.
](images/23-chromatic-aberration.png){#fig:img-chromatic-aberration}
![
Noise/grain applied to the frame buffer.
](images/24-noise.png){#fig:img-noise}
![
Noise/grain, multiplied by the depthbuffer/point of focus, applied to the frame buffer.
](images/25-all-effects.png){#fig:img-all-effects}
![
The same scene, during the day. Spotlights have been turned off.
](images/26-day.png){#fig:img-day}
![
The early-morning scene with some strong fog applied. The code was later changed to have the fog affect the background color as well.
](images/27-fog.png){#fig:img-fog}
```{.dot include="images/effect-order.dot" caption="A high-level graph representing the fragment shader for the scene" #fig:effect-order}
```

BIN
report/images/0-base.png Normal file (binary, 1.1 MiB, not shown)
BIN
report/images/13-tree.png Normal file (binary, 3.0 MiB, not shown)
BIN
report/images/24-noise.png Normal file (binary, 4.9 MiB, not shown)
BIN
report/images/26-day.png Normal file (binary, 4.3 MiB, not shown)
BIN
report/images/27-fog.png Normal file (binary, 3.9 MiB, not shown)
BIN
24 more binary image files added (27 KiB to 4.2 MiB each; file names and contents not shown)

33
report/images/effect-order.dot Normal file

@ -0,0 +1,33 @@
digraph asd {
//rankdir=LR;
dpi=600;
ratio=0.4;
node [fontname=arial, shape=rectangle, style=filled, fillcolor="#ddddee"]
normal [ label="compute_normal()" ]
base [ label="vec4(1, 1, 1, object_opacity);"]
vertex_color [ label="vertex_color" ]
texture [ label="texture(diffuse_texture, UV)" ]
invert [ label="if (inverted)\l color.rgb = 1 - color.rgb" ]
phong [ label="color = phong(color)" ]
reflecton [ label="reflection()" ]
fog [ label="linearDepth() * fog_color" ]
rim [ label="compute_rim_light()" ]
multiply [shape=ellipse, fillcolor = "#ffccaa"]
out [shape=ellipse, fillcolor = "#ccffaa"]
normal -> phong;
normal -> reflecton;
normal -> rim;
base -> multiply;
vertex_color -> multiply [label="if(has_vert_colors)"];
texture -> multiply [label="if(has_texture)"];
multiply -> invert;
invert -> phong;
rim -> out [label="mix"];
phong -> out [label="mix"];
reflecton -> out [label="mix"];
fog -> out [label="mix"];
}

17
report/images/rendering-pipeline.dot Normal file

@ -0,0 +1,17 @@
digraph asd {
//rankdir=LR;
dpi=600;
ratio=0.55;
node [fontname=arial, shape=rectangle, style=filled, fillcolor="#ddddff"]
null [ label="updateNodes(rootNode);" ]
0 [ label="renderNodes(rootNode, only_opaque=true);" ]
1 [ label="std::sort(transparent_nodes);" ]
2 [ label="glDepthMask(GL_FALSE);" ]
3 [ label="for (Node* n : transparent_nodes)\l renderNodes(n, no_recursion=true);\l" ]
4 [ label="glDepthMask(GL_TRUE);" ]
5 [ label="renderNodes(hudNode);" ]
null->0
0->1 [label="create vector of the\lskipped transparent nodes"]
1->2->3->4->5
}

7
report/install_dependencies.sh Executable file

@ -0,0 +1,7 @@
#!/usr/bin/env bash
sudo pacman -S pandoc
sudo pacman -S pandoc-crossref
sudo pacman -S graphviz
pip install --user pandoc-imagine
pip install --user pandoc-codeblock-include

26
report/log_part1_intro.md Normal file

@ -0,0 +1,26 @@
% TDT4230 - Final assignment log
% Peder Bergebakken Sundt
% 4th of April 2019
\newpage
```{.shebang im_out="stdout"}
#!/usr/bin/env bash
printf "time for some intricate graphics surgery!\n" | cowsay -f surgery | head -n -1 | sed -e "s/^/ /"
```
# The task
todo
# Getting started
First I had to clean out all the cruft from the glowbox scene in the program. While at it, I removed a lot of the unnecessary code and made a lot of helpers which keep track of VAO and buffer IDs. Now, these are generated and cached automatically, causing me a lot less hassle.
I then went through and added the ability to use multiple shaders. Each `SceneNode` can specify a shader.
If the shader is a `nullptr`, then it is inherited from the parent.
When changing shaders I had to make sure all the uniforms are passed properly. This was done through the use of a few helper structs and great misuse of the C++ preprocessor to make all the uniforms named. Now the program simply uses the uniform name instead of hardcoding all the location numbers.
When support for multiple shaders was in place, I revamped the camera transforms to make sure I'm able to move the camera intuitively, should I choose to do so.
This was done with the help of `glm::lookAt()`. While at it I changed the coordinate system to make Z point skyward, the only sensible way.
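The view matrix then boils down to a single call (a sketch; the variable names here are assumptions):

```c++
// z is up; glm::lookAt builds a sane view matrix no matter where the camera is
glm::mat4 view = glm::lookAt(
    cameraPosition,      // eye
    cameraTarget,        // point to look at
    glm::vec3(0, 0, 1)); // up: +z, skyward
```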

104
report/log_part2_hills.md Normal file

@ -0,0 +1,104 @@
# Creating the hills
To make my plane of grass I first needed a plane. I added a generator function in `glutils.cpp` which makes a segmented plane for me with as many vertices and faces as specified. This was needed since I plan to add a displacement map capability, but only do so in the vertex shader; I thus need a lot of vertices. The other route would be to make a tessellation shader which divides the faces into smaller faces and use it on the plane, which would save memory bandwidth. But making the plane segmented from the start is the easier option.
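A sketch of what such a generator might look like (the `Mesh` field names here are assumptions):

```c++
Mesh generateSegmentedPlane(uint32_t segments, float size, float uvScale) {
    Mesh m;
    float step = size / segments;
    // (segments+1)^2 vertices in a flat grid, z pointing up
    for (uint32_t y = 0; y <= segments; y++)
    for (uint32_t x = 0; x <= segments; x++) {
        m.vertices.push_back(glm::vec3(x * step, y * step, 0.0f));
        m.normals.push_back(glm::vec3(0, 0, 1));
        m.textureCoordinates.push_back(glm::vec2(x, y) * uvScale / float(segments));
    }
    // two triangles per grid cell
    for (uint32_t y = 0; y < segments; y++)
    for (uint32_t x = 0; x < segments; x++) {
        uint32_t i = y * (segments + 1) + x;
        m.indices.insert(m.indices.end(),
            {i, i + 1, i + segments + 1,
             i + 1, i + segments + 2, i + segments + 1});
    }
    return m;
}
```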
I added the plane to the scene and set the cobble texture on it.
It didn't look right, so I went through the shader again to make sure the lighting was correct. I had failed to pass the specular shininess factor properly as a uniform. I had also failed to account for the specular component being negative (before raising it to the power of the shininess). I added this check and now the lighting looks correct.
![](images/0-base.png)
While at it I added the ability to specify all the color components and the attenuation per light source. With this I'm able to create a sun far away without being bothered by attenuation.
Now for the displacement of the plane. I created a `PNGImage` generator function which generates a perlin noise texture with the specified number of layers and scales.
This I registered as the displacement texture for the plane. In the vertex shader I added an `isDisplacementMapped` uniform flag; when set, the shader offsets each vertex along its normal vector by the amount read from the displacement texture.
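The generator is roughly along these lines (a sketch; the `PNGImage` helpers are assumptions):

```c++
#include <glm/gtc/noise.hpp> // glm::perlin

PNGImage makePerlinNoise(uint32_t w, uint32_t h, uint32_t layers, float scale) {
    PNGImage img(w, h);
    for (uint32_t y = 0; y < h; y++)
    for (uint32_t x = 0; x < w; x++) {
        float v = 0, amplitude = 0.5, frequency = scale;
        for (uint32_t l = 0; l < layers; l++) {
            v += amplitude * glm::perlin(glm::vec2(x, y) * frequency);
            amplitude *= 0.5; frequency *= 2; // each layer finer and weaker
        }
        img.setPixelGray(x, y, v * 0.5f + 0.5f); // map [-1,1] to [0,1]
    }
    return img;
}
```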
![](images/1-perlin-displacement.png)
```{.shebang im_out="stdout"}
#!/usr/bin/env bash
echo A M A S I N G | boxes -d unicornsay -a c -p h10
```
I've yet to derive new normals from the displacement, so the hills currently won't be shaded correctly. I currently don't have a plan for how to fix this. Perhaps use the bitangents to calculate the slope of the displacement in the vertex shader?
At this point I went online and found myself a grass texture and normal map.
![](images/2-wrong-handedness.png)
```{.shebang im_out="stdout"}
#!/usr/bin/env bash
printf " Something's wrong...! \n" | cowsay -f head-in | sed -e "s/^/ /"
```
Apparently, the direction of the normal map colors isn't the same everywhere. I therefore added a flag to the image loader function which will flip the handedness (inverting the R and G channels).
![](images/3-flipped-handedness.png)
*Much better*
Now we'll up the granularity by decreasing the UV step per vertex along the plane, and enable the displacement map:
![](images/4-fine-plane.png)
## Scrolling the field
Now, how can we scroll this plane?
I decided the easiest way would be to add a uniform variable called `uvOffset` to the vertex shader. Now I can simply scroll the plane by adding this offset to all the UV coordinates in the vertex shader before passing them to the fragment shader:
```c++
/*vec2*/plainNode->uvOffset += /*vec2*/speed * timeDelta;
```
The code above works since I added in some operator overloads for `vec2`, `vec3`, and `vec4` with scalars.
Now we unfortunately see steep cuts where the perlin noise texture repeats. This we simply fix by mirroring the texture on repeat with `GL_MIRRORED_REPEAT`:
![](images/5-gl-mirror.jpg)
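Enabling this is just a texture parameter per axis, applied when creating the texture:

```c++
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
```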
Another solution for making the perlin noise repeatable is to pass the repeat size into the `glm::gtx::perlin` function as I create the texture. But for now I chose the quick and dirty solution. It also has the added effect of creating a less repetitive texture: it repeats from 0-2 instead of 0-1.
At this point I was stuck with a bug where the coordinates of the lights were doubled. After two days of debugging I found the line where I pass the light position into the uniform used by the fragment shader:
```c++
lights[id].position = vec3(node->MV * vec4(node->position, 1.0));
```
Which *should* have been
```c++
lights[id].position = vec3(node->MV * vec4(vec3(0.0), 1.0));
```
*...yeah.*
## Normal mapping the displacement
After that goober, I moved on to rotating the normals according to the displacement map. After some playing around I landed on this solution in GLSL:
\small
```c++
if (isDisplacementMapped) {
float o = texture(displaceTex, UV).r * 2 - 1;
float u = (texture(displaceTex, UV + vec2(0.0001, 0)).r*2-1 - o) / 0.0004; // magic numbers!
float v = (texture(displaceTex, UV + vec2(0, 0.0001)).r*2-1 - o) / 0.0004; // magic numbers!
TBN = mat3(
normalize(tangent + normal*u),
normalize(bitangent + normal*v),
normalize(cross(tangent + normal*u, bitangent + normal*v))
);
}
```
\normalsize
Here I find the slope of the displacement map along the `U` and `V` directions. `o` is the 'origin', and `u` and `v` are the tangent and bitangent slopes *(derived with the help of a few magic numbers which really should be made into uniforms, since they only apply to the plane as of now)*. Using these slopes I can simply add the normal vector multiplied by the slope to the tangent and the bitangent and normalize them, giving me the new tangents. From here I can derive the new normal vector by simply computing the cross product of the two tangents.
This did however give me a pretty coarse image, so I moved the computation of the TBN matrix from the vertex shader to the fragment shader. This gives me a slight performance penalty, but I can undo the change in a simplified shader should I need the performance boost later. Here we can see how the displacement affects the normals along the displaced plane:
![](images/6-displacement-normals.png)
```{.shebang im_out="stdout"}
#!/usr/bin/env bash
echo Windows XP background incoming? | boxes -d whirly -a c -p h15
```

124
report/log_part3_models.md Normal file

@ -0,0 +1,124 @@
# Loading models
Now I want to load the car model I found online. After downloading the car model from `sketchfab.com`, I added `assimp` as a submodule to the project repository. I then linked in the library using CMake. From here I had to make a few helper functions to read the models and parse them into the already established structs and classes.
*This took some time.*
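The import itself boils down to a single `ReadFile` call (a sketch; the exact post-processing flag set here is an assumption):

```c++
#include <assimp/Importer.hpp>
#include <assimp/scene.h>
#include <assimp/postprocess.h>
#include <stdexcept>

Assimp::Importer importer;
const aiScene* scene = importer.ReadFile(path,
    aiProcess_Triangulate          // only triangles, please
    | aiProcess_CalcTangentSpace   // tangents and bitangents for normal mapping
    | aiProcess_GenSmoothNormals); // in case the model lacks normals
if (!scene || !scene->mRootNode)
    throw std::runtime_error(importer.GetErrorString());
```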
Here I finally loaded all the vertices and faces:
![](images/7-car-meshes.png)
Here I applied all the model transformations for each sub mesh:
![](images/8-car-transformations.png)
Here I accounted for the difference in coordinate systems. I prefer to have the Z component point skyward.
![](images/9-car-coordinate-system.png)
## The material system
Now it's time to load the properties from the models. To do this I had to implement a material system. Here I loaded all the materials, the material diffuse colors and the vertex colors. To make this work I had to expand the shader to accept colors per vertex.
![](images/10-car-materials.png)
Formalizing the materials as a struct allows me to apply it recursively to a `SceneNode` and its children should I wish to do so. I also need to expand the model loader to read textures, but I'll cross that bridge when I get to it.
But for now, it's time for a revamp of the lighting system, yet again!
Instead of having a diffuse, specular and emissive color component per light source, I'll have a single color per light and instead store the color components per `SceneNode`. This allows me to set material properties per node in the scene. The lights will now only store the basecolor and attenuation coefficients.
![](images/11-material-colors.png)
In the image above the shader uses the emissive and specular colors per node in the scene. Seems like the car model is expecting a metal effect on top. The emissive colors are pretty bright.
This is a good excuse to create a reflection map shader!
![](images/12-reflection.png)
This car looks *nice*.
I opted to implement the reflection shader by reflecting the vector from the camera to the fragment by the normal vector. I then map the reflection vector into UV coordinates as if it covers a sphere. I then found some 360° images online to use as the reflection map texture:
![](../res/textures/reflection_field.png)
The color extracted from the reflection map can then be mixed with the diffuse and emissive colors in two ways in my shader:
```cpp
basecolor = (reflexiveness < 0)
? basecolor * mix(vec3(0.0), reflection, -reflexiveness)
: mix(basecolor, reflection, reflexiveness);
```
A `reflexiveness` value between 0 and -1 will multiply the reflection with the base color, after having mixed the reflection with white using `-reflexiveness` as the interpolation factor.
A `reflexiveness` value between 0 and 1 will instead mix the reflection with the basecolor directly using `reflexiveness` as the interpolation factor.
I've yet to define *'basecolor'*. The basecolor is the emissive and diffuse parts of the Phong model. This is roughly how I compute them. $a(m)$ is the attenuation:
$$\begin{aligned}
a(m) &= x + y\cdot |L_m| + z\cdot |L_m|^2 \\
\mathit{basecolor}_{frag} &= k_e + k_d \sum_{m\in lights}{\frac{\hat{L}_m \cdot \hat{N}}{a(m)}} \\
\mathit{finalcolor}_{frag} &= \mathit{reflection}(\mathit{basecolor}) + \sum_{m\in lights}{\frac{k_s(\hat{R}_m\cdot \hat{V})^\alpha}{a(m)}}
\end{aligned}$$
The specular component is added in after having mixed with the reflection.
## Textures
The scene is supposed to have both trees and grass loaded as well, let's try loading in the tree model I found online:
![](images/13-tree.png)
Textures aren't loaded yet; I knew this day would come sooner or later. Thanks to all the work that went into the material system, loading textures was a quick fix *(after receiving a thorough beating from `assimp`)*.
![](images/14-tree-alpha.png)
## Transparency
Seems like the transparent textures are rendered before the field, making the field fail the depth test. Seems like I have to take all nodes with any partial opacity and render them last, sorted by distance from the camera. Hang on a minute...
![](images/15-tree-sorted.png)
Huh, it *kinda* works, since all transparent objects are now rendered last in sorted order. But within a single mesh we still have depth priority issues. The topmost leaves on the tree pictured above were rendered before the lower ones.
One idea I tried out was disabling the depth test while rendering the transparent objects. For this scene it looked OK, but if we were to have an opaque object in front of the tree, we would see the leaves on top.
No opaque object, being rendered earlier than the transparent ones, would be able to occlude it.
After some reading and searching around I found this neat option in OpenGL:
```c++
glDepthMask(GL_FALSE);
```
This will make the depth buffer read-only, locking the depth buffer in place. If I first render all opaque objects while the depth buffer is writeable, then lock the depth buffer and render the transparent objects in sorted order, I get a nice compromise. Opaque objects will be able to occlude transparent ones, due to the depth test, but transparent ones don't affect the depth buffer. Since the transparent objects are sorted by distance from the camera, they will be rendered on top of one another in the correct order anyway. The only issue is with transparent meshes which overlap. There is still some weirdness in the tree leaves since it is a single mesh, but nothing too noticeable in leaves and bushes.
![](images/16-tree-depth-readonly.png)
Neato!
The current rendering pipeline:
```dot
digraph asd {
//rankdir=LR;
dpi=600;
ratio=0.7;
node [fontname=arial, shape=rectangle, style=filled, fillcolor="#dddddd"]
null [ label="updateNodes(rootNode)" ]
0 [ label="renderNodes(rootNode, only_opaque=true)" ]
1 [ label="std::sort(transparent_nodes)" ]
2 [ label="glDepthMask(GL_FALSE)" ]
3 [ label="for (Node* n : transparent_nodes)\l renderNodes(n, no_recursion=true)\l" ]
4 [ label="glDepthMask(GL_TRUE)" ]
5 [ label="renderNodes(hudNode)" ]
null->0
0->1 [label="create vector of the\lskipped transparent nodes"]
1->2->3->4->5
}
```
Now we are rendering something which looks *kinda* nice. I then had the model loader load my grass model, and duplicated it roughly a hundred times. This model has a single grass mesh which it uses 64 times in a "cluster" of grass. All of these use transparent textures. Since I decided to add quite a few of these into the scene graph, I started to notice some issues with performance. After I added a few helpful print statements, I was delighted to learn that I'm trying to render **5455** nodes with geometry.
![](images/17-low-fps.png)

68
report/log_part4_optimizations.md Normal file

@ -0,0 +1,68 @@
# Optimizations
So at this point I started to look into some optimizations. I tried resizing my window to see if I was fragment bound or not. This didn't make a significant difference. I therefore had to be either vertex bound or bandwidth bound. Being vertex bound but not fragment bound with this little geometry makes little sense, so I started to look into reducing the amount of bandwidth between the CPU and the GPU, and the amount of data the shader uses (since I'm running on an integrated graphics processor using the same RAM as the CPU).
After some searching through the code I came over the part where I upload the uniforms for each draw call to gl:
\small
```c++
glUniformMatrix4fv(s->location("MVP") , 1, GL_FALSE, glm::value_ptr(node->MVP));
glUniformMatrix4fv(s->location("MV") , 1, GL_FALSE, glm::value_ptr(node->MV));
glUniformMatrix4fv(s->location("MVnormal"), 1, GL_FALSE, glm::value_ptr(node->MVnormal));
glUniform2fv(s->location("uvOffset") , 1, glm::value_ptr(node->uvOffset));
glUniform3fv(s->location("diffuse_color") , 1, glm::value_ptr(node->diffuse_color));
glUniform3fv(s->location("emissive_color"), 1, glm::value_ptr(node->emissive_color));
glUniform3fv(s->location("specular_color"), 1, glm::value_ptr(node->specular_color));
glUniform1f( s->location("opacity"), node->opacity);
glUniform1f( s->location("shininess"), node->shininess);
glUniform1f( s->location("reflexiveness"), node->reflexiveness);
glUniform1f( s->location("displacementCoefficient"), node->displacementCoefficient);
glUniform1ui(s->location("isTextured"), node->isTextured);
glUniform1ui(s->location("isVertexColored"), node->isVertexColored);
glUniform1ui(s->location("isNormalMapped"), node->isNormalMapped);
glUniform1ui(s->location("isDisplacementMapped"), node->isDisplacementMapped);
glUniform1ui(s->location("isReflectionMapped"), node->isReflectionMapped);
glUniform1ui(s->location("isIlluminated"), node->isIlluminated);
glUniform1ui(s->location("isInverted"), node->isInverted);
```
\normalsize
*Yeah...* I think I could optimize this. The `s->location` function is a lookup from a uniform name string to a location `GLint` ID. I believe calling GL for this lookup is costly, so I'll cache the results per shader and make the compiler inline the caching lookup function (not possible with the GL functions, since their code is dynamically linked; my code, on the other hand, is subject to compile-time optimizations). The cached function is shown below. The commented line is the old implementation:
```c++
GLint inline Shader::location(std::string const& name) {
//return glGetUniformLocation(mProgram, name.c_str());
auto it = cache.find(name);
if (it == cache.end())
return cache[name] = glGetUniformLocation(mProgram, name.c_str());
return it->second;
}
```
This boosted me from 11 FPS up to around 14 FPS, pretty neat!
Next up: avoiding reuploading unchanged information to the shader time and time again for every object in the scene.
Most of the time, objects are pretty similar, only differing in transformation/position. Most of the uniforms remain unchanged between draw calls.
I replaced all the `glUniformX` methods with these defines shown below, which performs caching for me using static cache variables:
\small
```c++
bool shader_changed = s != prev_s;
#define cache(x) static decltype(node->x) cached_ ## x; \
if (shader_changed || cached_ ## x != node->x) \
{ cached_ ## x = node->x;
#define um4fv(x) cache(x) glUniformMatrix4fv(s->location(#x), 1, GL_FALSE, glm::value_ptr(node->x)); }
#define u2fv(x) cache(x) glUniform2fv( s->location(#x), 1, glm::value_ptr(node->x)); }
#define u3fv(x) cache(x) glUniform3fv( s->location(#x), 1, glm::value_ptr(node->x)); }
#define u1f(x) cache(x) glUniform1f( s->location(#x), node->x); }
#define u1ui(x) cache(x) glUniform1ui( s->location(#x), node->x); }
```
\normalsize
This is a bigger CPU workload, but it optimizes communication with the graphics processor, which is well suited for high bandwidth streaming but not for smaller back and forth communication.
This caching bumped me up from 14 FPS to 21 on my less-than-ideal integrated graphics chip. The precomputed `shader_changed` bool value alone was responsible for 2 FPS: `s` and `prev_s` are static, making the compiler unable to reuse the comparison like I do here.
Caching the `glBindTextureUnit` calls done when selecting textures further sped up the rendering by another 4.3 FPS.
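That cache is as simple as remembering the last texture per unit (a sketch, with hypothetical names):

```c++
// skip redundant glBindTextureUnit calls; assumes at most 16 units in use
static GLuint bound_texture[16] = {};
inline void bindTextureUnit(GLuint unit, GLuint texture) {
    if (bound_texture[unit] != texture) {
        glBindTextureUnit(unit, texture);
        bound_texture[unit] = texture;
    }
}
```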
Perhaps restructuring the `renderNode` function to use iteration instead of recursion would be the next logical improvement. I don't need that kind of performance as of now. I believe I'm currently bandwidth bound anyway, since disabling the tangents and bitangents gives an 8 FPS improvement. That's integrated graphics for you.

15
report/log_part5_scene.md Normal file

@ -0,0 +1,15 @@
# Creating the scene
For now, I decided to change the scene into what I imagine the end result to be like. The idea is to have a car drive along the moving field, following the slope of the hills. The car should have some headlights and backlights as well.
To be able to do this, I had to implement bilinear filtering on the `PNGImage` struct, to be able to read the displacement map at the car's position, so I can calculate the slope.
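A sketch of the bilinear sampling (assuming a hypothetical single-channel `at(x, y)` accessor):

```c++
#include <algorithm> // std::min

float PNGImage::sampleBilinear(float u, float v) const {
    float x = u * (width - 1), y = v * (height - 1);
    uint32_t x0 = uint32_t(x), y0 = uint32_t(y);
    uint32_t x1 = std::min(x0 + 1, width - 1), y1 = std::min(y0 + 1, height - 1);
    float fx = x - x0, fy = y - y0; // fractional position within the texel
    float top    = at(x0, y0) * (1 - fx) + at(x1, y0) * fx;
    float bottom = at(x0, y1) * (1 - fx) + at(x1, y1) * fx;
    return top * (1 - fy) + bottom * fy;
}
```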
I then made the trees and the grass scroll along the movement of the offset displacement map, wrapping around when leaving the edge of the field.
I then added a check to see whether any of the trees or grass which wrapped around are in the path of the car. If they are, I make them invisible. This ensures there are no obstacles in the path of the car, avoiding unseemly clipping.
I then changed the lighting to night time. To have this make sense, I tweaked the shader so that the color of the first light is multiplied with the emissive component of the objects. This allows me to use the first light to set the "ambient tone" of the scene. Will be handy.
The scene is pretty dark now, so I added some spot lights attached to the front lights of the car, which are transformed along with the car.
They look a bit mechanical alone, so I added in four point lights as well: two attached to the front lights, and two attached to the back lights with a red hue. This blends together pretty well.
![](images/18-night-scene-lights.png)

119
report/log_part6_effect.md Normal file

@ -0,0 +1,119 @@
# Adding effects
The car however blends into the background now. To make it pop a bit more, I added support for a rim light color in the shader.
I don't know how these are usually done, but I simply opted for the intuitive implementation I thought out.
The more the surface normal points away from the camera, the more it should be lit up. When the surface points 90 degrees away from the camera it should be at maximum brightness, decreasing the more it points toward the camera. Using the dot product between the normalized position vector and the normalized normal vector in MV space gives me a cosine. Now I have a value of 1 when pointing away from the camera, 0 when pointing 90 degrees to the side, and a value of -1 when pointing towards the camera. Adding a "strength" uniform value to this will skew it towards the camera. I then divide it by this same strength value and clamp it between 0 and 1 to have it be 0 when pointing towards the camera and 1 when pointing to the side. This value is multiplied by a rim light color component.
```c++
c.rgb += backlight_color * clamp((dot(normalize(vertex), normal)
+ strength) / strength, 0, 1);
```
Below you can see an exaggerated use of the effect:
![](images/19-rim-lights.png)
Setting the rim light color to a weak white color makes the car pop just so slightly more from the surroundings.
## Post processing
Now is the time to add in the post-processing step to the rendering pipeline.
The idea is to render the scene not directly to the window, but to an internal frame buffer. Then I can, using two triangles, render this framebuffer to the window as a texture using a separate shader, henceforth referred to as the post shader. The post shader has the ability to combine the color and depth buffer values of not just a single pixel, but also its neighbors. This allows me to implement effects such as blur and depth of field.
This took quite a lot of time. GL isn't the most verbose debugging companion to work with, and I was blessed with a lot of issues and weird behaviors with the framebuffers. After a few days I got it working again.
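The framebuffer setup ended up roughly like this (a sketch; the variable names are assumptions):

```c++
#include <cassert>

GLuint framebufferID, colorTex, depthTex;
glGenFramebuffers(1, &framebufferID);
glBindFramebuffer(GL_FRAMEBUFFER, framebufferID);

glGenTextures(1, &colorTex); // color attachment, sampled by the post shader
glBindTexture(GL_TEXTURE_2D, colorTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA8, windowWidth, windowHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, colorTex, 0);

glGenTextures(1, &depthTex); // depth attachment, also sampled by the post shader
glBindTexture(GL_TEXTURE_2D, depthTex);
glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT24, windowWidth, windowHeight, 0, GL_DEPTH_COMPONENT, GL_FLOAT, nullptr);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depthTex, 0);

assert(glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE);
```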
```c++
#version 430 core
layout(binding = 0) uniform sampler2D framebuffer;
layout(binding = 1) uniform sampler2D depthbuffer;
layout(location = 0) out vec4 color_out;
uniform uint windowWidth;
uniform uint windowHeight;
void main() {
vec2 dx = vec2(1,0) * 1.0/windowWidth;
vec2 dy = vec2(0,1) * 1.0/windowHeight;
vec2 UV = gl_FragCoord.xy / vec2(windowWidth, windowHeight);
color_out = texture(framebuffer, UV);
}
```
This simple fragment shader allows me to make a depth of field effect. Here is a visualization of the depth buffer, after transforming it to be 0 around the focal distance where the car is, and tend towards a value of 1 otherwise. This "focus value" is stored as `z` in the shader, should you start wondering when reading the code further below:
![](images/20-depth-map.png)
Here we can see that the transparent objects (grass and leaves), which were rendered with the depth buffer in read-only mode, don't show up in the depth buffer. Due to this I'm unable to use this depth buffer for fog, since it would look weird. But if I want fog I can do that in the normal shader anyway. This depth map will suffice in my dark scene for effects such as depth of field.
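The transform itself is only a few lines in the post shader (a sketch; `focus_distance` and `focus_range` are hypothetical uniforms):

```c++
// same linearization as the scene shader, applied to the sampled depth
float d = texture(depthbuffer, UV).r * 2.0 - 1.0;
float depth = (2.0 * near * far) / (far + near - d * (far - near));
float z = clamp(abs(depth - focus_distance) / focus_range, 0.0, 1.0); // 0 at the car
```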
## Depth of Field
Depth of field is a blur dependent on distance from the focal point. I planned on computing a weighted average for each pixel, where the weight per neighbor is its depth. This computation proved expensive however, due to all the accesses to the depth buffer. I therefore arrived at this simplified model instead:
```c++
int radius = int(5*z); // z=0 in focus, otherwise -> 1
vec3 color = vec3(0);
for (int x = -radius; x <= radius; x++)
for (int y = -radius; y <= radius; y++)
color += texture(framebuffer, UV + x*dx + y*dy).rgb;
color /= pow(2*radius+1, 2);
color_out.rgb = color;
```
Resulting in:
![](images/21-depth-of-field.png)
A weakness with this depth of field effect, though, is that the edges of objects don't blur, only the surfaces. At a distance it looks okay, but elements in the foreground, in front of the point of focus, will look weird.
I could try replacing the trees with non-transparent ones for a more detailed effect.
## Vignette
Next up I wanted to try adding a simple vignette to the mix. Simply computing the Euclidean distance from the center of the screen is doable with the UV coordinates:
```c++
color_out = vec4(color.rgb * (1-pow(length((UV-0.5)*1.2), 3)), color.a);
```
When `color.rgb` is all white, we get the output:
![](images/22-vingette.png)
Which demonstrates the effect of the vignette quite clearly.
## Chromatic Aberration
Chromatic aberration is the effect of different frequencies of light refracting differently. This is demonstrated in the figure below:
![](images/23.5-what-is.jpg)
As we can see, the further away we are from the focal point, the more aberration we ought to have. The aberration also gets worse the closer to the edge of the lens we move. I therefore added the aberration as a modification to the depth of field effect, where I read the color values from the framebuffer anyway. I now scale the UV vector into the framebuffer according to the depth buffer and the aberration factor per color component:
```c++
for (int x = -radius; x <= radius; x++)
for (int y = -radius; y <= radius; y++){
vec2 p = UV + x*dx + y*dy;
color.r += texture(framebuffer, (p-0.5)*(1+z*chomatic_aberration_r) + 0.5).r;
color.g += texture(framebuffer, (p-0.5)*(1+z*chomatic_aberration_g) + 0.5).g;
color.b += texture(framebuffer, (p-0.5)*(1+z*chomatic_aberration_b) + 0.5).b;
}
```
An exaggerated example:
![](images/23-chromatic-aberration.png)
## Grain
Next up I wanted to add some grain. GLSL doesn't have a random number generation function built in, but I found one online based around using a UV vector. I further modified it to use a time uniform as well, making it vary per frame.
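A variant of the classic one-liner found online, with the time term added (a sketch):

```c++
float random(vec2 st) { // the magic constants come from the snippet found online
    return fract(sin(dot(st + time, vec2(12.9898, 78.233))) * 43758.5453123);
}
```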
![](images/24-noise.png)
The grain in the middle of the screen, however, doesn't look good, so here again I opted to add more grain where the scene is out of focus, using the depth buffer. I could also have made it part of the vignette, but I think using the depth buffer value looks better for this scene. I simply do this:
```c++
color += (random(UV)-0.5) * z * 0.2;
```
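For reference, the widely circulated hash I based `random` on looks roughly like this (a sketch; the constants are the classic ones floating around online, and the `time` uniform is the modification mentioned above):
```c++
uniform float time; // varies per frame

float random(vec2 uv) {
    // classic fract/sin pseudo-random hash: cheap, deterministic per (uv, time)
    return fract(sin(dot(uv + time, vec2(12.9898, 78.233))) * 43758.5453);
}
```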
All these effects come together into this final product:
![](images/25-all-effects.png)
View File
@ -0,0 +1,32 @@
# Day-night cycle
```{.shebang im_out="stdout"}
#!/usr/bin/env bash
echo At this point I\'m only playing around, but it\'s all for that sick-ass demo, am I right? | cowsay -f cheese | sed -e "s/^/ /"
```
Now that all the groundwork is in place, I can start adding timestamped events to the scene:
camera movement, changes in lighting, etc. I started by adding a day-night cycle, then expanded these timestamps to control the car headlights:
```cpp
struct seq_t { double t; vec3 light_c; vec3 bg_c; bool has_headlights; };
static const vector<seq_t> sequence = {
{ 0, vec3(0.2 , 0.2 , 0.7), vec3(0.05, 0.1 , 0.15), 1}, // night
{ 9, vec3(0.4 , 0.4 , 0.8), vec3(0.15, 0.15, 0.35), 1}, // dusk
{10, vec3(1.0 , 0.6 , 0.4), vec3(0.8 , 0.4 , 0.2 ), 1}, // sunrise
{11, vec3(0.9 , 0.7 , 0.5), vec3(0.8 , 0.6 , 0.2 ), 1}, // sunrise2
{12, vec3(0.85, 0.85, 0.9), vec3(0.3 , 0.5 , 0.8 ), 0}, // morning
{18, vec3(1.0 , 1.0 , 1.0), vec3(0.35, 0.6 , 0.9 ), 0}, // noon
{24, vec3(0.7 , 0.9 , 1.0), vec3(0.3 , 0.5 , 0.8 ), 0}, // evening
{25, vec3(0.9 , 0.7 , 0.5), vec3(0.8 , 0.6 , 0.2 ), 0}, // sundown
{26, vec3(1.0 , 0.6 , 0.4), vec3(0.8 , 0.4 , 0.2 ), 1}, // sunset
{27, vec3(0.5 , 0.5 , 0.8), vec3(0.35, 0.15, 0.35), 1}, // dusk
{36, vec3(0.2 , 0.2 , 0.7), vec3(0.05, 0.1 , 0.15), 1}, // night
};
```
Interpolating between these points per frame made it look quite nice:
![](images/26-day.png)
Now that light once again shines on the scene, we can see the post-processing effects that much better!
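The per-frame interpolation itself is plain linear blending between the two keyframes surrounding the current time. A minimal sketch (assuming `t_max` is the last keyframe's timestamp, 36 here):
```cpp
double t = fmod(timeAcc, t_max); // wrap the scene clock around the cycle
static size_t pos = 0;
// advance until the current time lies between keyframes pos and pos+1
while (sequence[(pos+1) % sequence.size()].t < t || sequence[pos].t > t)
    pos = (pos+1) % sequence.size();
const seq_t& a = sequence[pos];
const seq_t& b = sequence[(pos+1) % sequence.size()];
float f = float((t - a.t) / (b.t - a.t)); // interpolation factor in [0,1]
vec3 light_color = a.light_c * (1-f) + b.light_c * f;
vec3 bg_color    = a.bg_c    * (1-f) + b.bg_c    * f;
glClearColor(bg_color.r, bg_color.g, bg_color.b, 1.0f);
```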
BIN
report/presentation.pdf Normal file
Binary file not shown.
9
res/models/sources Normal file
View File
@ -0,0 +1,9 @@
# Model Sources
Thanks!
https://sketchfab.com/3d-models/fur-tree-41fa3210e50944eaa489c148e5e2ccc7#download
https://sketchfab.com/3d-models/beetle-42c85b6e79b949eea51fd5d6c37a457f
https://sketchfab.com/3d-models/single-grass-52b3808c8a4f48188ff9a1217f2d84aa
View File
@ -27,6 +27,9 @@ uniform vec3 specular_color;
uniform vec3 emissive_color;
uniform vec3 backlight_color;
uniform vec3 fog_color;
uniform float fog_strength;
uniform bool isIlluminated;
uniform bool isTextured;
uniform bool isVertexColored;
@ -138,7 +141,15 @@ vec3 phong(vec3 basecolor, vec3 nnormal) {
return basecolor + specular_color * specular_component;
}
const float near = 0.1;
const float far = 5000.0;
float linearDepth() {
float z = gl_FragCoord.z * 2.0 - 1.0;
return (2.0 * near * far) / (far + near - z * (far - near));
}
void main() {
vec3 nnormal = get_nnormal(); // normalized normal
vec4 c = vec4(vec3(1.0), opacity);
if (isVertexColored) c *= color;
@ -152,8 +163,9 @@ void main() {
}
if (backlight_strength > 0.05)
c.rgb += backlight_color * clamp((dot(normalize(vertex), nnormal) + backlight_strength) / backlight_strength, 0, 1);
//c.rgb = diffuse_color;
//c.rgb = emissive_color;
//c.rgb = specular_color;
float fog = linearDepth()/1500;
if (fog_strength > 0.05) c.rgb = mix(c.rgb, fog_color, pow(fog,1.2)*fog_strength);
color_out = c;
}
View File
@ -30,7 +30,6 @@ sf::SoundBuffer* buffer;
Gloom::Shader* current_shader = nullptr;
Gloom::Shader* prev_shader = nullptr; // The last shader to glDrawElements
// the framebuffer we render the scene to before post-processing
GLuint framebufferID = 0;
GLuint framebufferTextureID = 0;
@ -240,20 +239,25 @@ void renderNode(SceneNode* node, Gloom::Shader* parent_shader, vector<NodeDistSh
#define u3fv(x) cache(x) glUniform3fv( s->location(#x), 1, glm::value_ptr(node->x)); }
#define u1f(x) cache(x) glUniform1f( s->location(#x), node->x); }
#define u1ui(x) cache(x) glUniform1ui( s->location(#x), node->x); }
#define ubtu(n,i,x) init_cache(x) if(node->i) { if_cache(x) glBindTextureUnit(n, node->x); } } else cached_##x = -1;
//#define ubtu(n,i,x) init_cache(x) if(node->i) { if_cache(x) glBindTextureUnit(n, node->x); } } else cached_##x = -1;
#define ubtu(n,i,x) init_cache(x) if(node->i) { if_cache(x) glActiveTexture(GL_TEXTURE0+n); glBindTexture(GL_TEXTURE_2D, node->x); } } else cached_##x = -1;
switch(node->nodeType) {
case GEOMETRY:
if (transparent_nodes!=nullptr && node->has_transparancy()) {
// defer to sorted pass later on
//transparent_nodes->emplace_back(node, s, glm::length(vec3(node->MVP[3])));
//transparent_nodes->push_back({node, s, glm::length(vec3(node->MVP[3]))});
transparent_nodes->emplace_back(node, s, glm::length(vec3(node->MVP*vec4(0,0,0,1))));
//transparent_nodes->push_back({node, s, glm::length(vec3(node->MVP*vec4(0,0,0,1)))});
}
else if(node->vertexArrayObjectID != -1) {
if (node->opacity <= 0.05) break;
// load uniforms
// load scene uniforms
if (shader_changed) { // guaranteed at start of every frame, due to post_shader
glUniform3fv(s->location("fog_color"), 1, glm::value_ptr(fog_color));
glUniform1f( s->location("fog_strength"), fog_strength);
}
// load material uniforms
um4fv(MVP);
um4fv(MV);
um4fv(MVnormal);
@ -364,8 +368,12 @@ void renderFrame(GLFWwindow* window, int windowWidth, int windowHeight) {
glUniform1f(post_shader->location("time"), t);
glUniform1ui(post_shader->location("windowWidth"), windowWidth);
glUniform1ui(post_shader->location("windowHeight"), windowHeight);
glBindTextureUnit(0, framebufferTextureID);
glBindTextureUnit(1, framebufferDepthTextureID);
//glBindTextureUnit(0, framebufferTextureID);
//glBindTextureUnit(1, framebufferDepthTextureID);
glActiveTexture(GL_TEXTURE0);
glBindTexture(GL_TEXTURE_2D, framebufferTextureID);
glActiveTexture(GL_TEXTURE1);
glBindTexture(GL_TEXTURE_2D, framebufferDepthTextureID);
glBindVertexArray(postVAO);
glDrawElements(GL_TRIANGLES, 6 /*vertices*/, GL_UNSIGNED_INT, nullptr);
prev_shader = post_shader;
View File
@ -30,6 +30,9 @@ vec3 cameraPosition = vec3(420, -120, 190);
vec3 cameraLookAt = vec3(460, 220, 0);
vec3 cameraUpward = vec3(0, 0, 1);
vec3 fog_color = vec3(1.0);
float fog_strength = 0;
const size_t N_GRASS = 150;
const size_t N_TREES = 30;
const size_t DISPLACEMENT = 30;
@ -252,19 +255,20 @@ void step_scene(double timeDelta) {
// time of day events
{
struct seq_t { double t; vec3 light_c; vec3 bg_c; bool has_headlights; };
struct seq_t { double t; vec3 light_c; vec3 bg_c; bool has_headlights; vec3 fog_c; float fog; };
static const vector<seq_t> sequence = {
{ 0, vec3(0.2 , 0.2 , 0.7), vec3(0.05, 0.1 , 0.15), 1}, // night
{ 9, vec3(0.4 , 0.4 , 0.8), vec3(0.15, 0.15, 0.35), 1}, // dusk
{10, vec3(1.0 , 0.6 , 0.4), vec3(0.8 , 0.4 , 0.2 ), 1}, // sunrise
{11, vec3(0.9 , 0.7 , 0.5), vec3(0.8 , 0.6 , 0.2 ), 1}, // sunrise2
{12, vec3(0.85, 0.85, 0.9), vec3(0.3 , 0.5 , 0.8 ), 0}, // morning
{18, vec3(1.0 , 1.0 , 1.0), vec3(0.35, 0.6 , 0.9 ), 0}, // noon
{24, vec3(0.7 , 0.9 , 1.0), vec3(0.3 , 0.5 , 0.8 ), 0}, // evening
{25, vec3(0.9 , 0.7 , 0.5), vec3(0.8 , 0.6 , 0.2 ), 0}, // sundown
{26, vec3(1.0 , 0.6 , 0.4), vec3(0.8 , 0.4 , 0.2 ), 1}, // sunset
{27, vec3(0.5 , 0.5 , 0.8), vec3(0.35, 0.15, 0.35), 1}, // dusk
{36, vec3(0.2 , 0.2 , 0.7), vec3(0.05, 0.1 , 0.15), 1}, // night
{ 0, vec3(0.2 , 0.2 , 0.7), vec3(0.05, 0.1 , 0.15), 1, vec3(0.2 , 0.2 , 0.7), 0.0}, // night
{ 1, vec3(0.2 , 0.2 , 0.7), vec3(0.05, 0.1 , 0.15), 1, vec3(0.2 , 0.2 , 0.7), 0.6}, // night
{ 9, vec3(0.4 , 0.4 , 0.8), vec3(0.15, 0.15, 0.35), 1, vec3(0.5 , 0.5 , 0.8), 0.7}, // dusk
{10, vec3(1.0 , 0.6 , 0.4), vec3(0.8 , 0.4 , 0.2 ), 1, vec3(1.0 , 0.7 , 0.6), 0.8}, // sunrise
{11, vec3(0.9 , 0.7 , 0.5), vec3(0.8 , 0.6 , 0.2 ), 1, vec3(1.0 , 0.85, 0.7), 0.9}, // sunrise2
{12, vec3(0.85, 0.85, 0.9), vec3(0.3 , 0.5 , 0.8 ), 0, vec3(0.85, 0.85, 0.9), 0.8}, // morning
{18, vec3(1.0 , 1.0 , 1.0), vec3(0.35, 0.6 , 0.9 ), 0, vec3(1.0 , 1.0 , 1.0), 0.0}, // noon
{24, vec3(0.7 , 0.9 , 1.0), vec3(0.3 , 0.5 , 0.8 ), 0, vec3(1.0 , 1.0 , 1.0), 0.0}, // evening
{25, vec3(0.9 , 0.7 , 0.5), vec3(0.8 , 0.6 , 0.2 ), 0, vec3(1.0 , 1.0 , 1.0), 0.0}, // sundown
{26, vec3(1.0 , 0.6 , 0.4), vec3(0.8 , 0.4 , 0.2 ), 1, vec3(1.0 , 1.0 , 1.0), 0.0}, // sunset
{27, vec3(0.5 , 0.5 , 0.8), vec3(0.35, 0.15, 0.35), 1, vec3(1.0 , 1.0 , 1.0), 0.0}, // dusk
{36, vec3(0.2 , 0.2 , 0.7), vec3(0.05, 0.1 , 0.15), 1, vec3(1.0 , 1.0 , 1.0), 0.0}, // night
};
assert(sequence.front().light_c == sequence.back().light_c);
assert(sequence.front().bg_c == sequence.back().bg_c);
@ -272,15 +276,20 @@ void step_scene(double timeDelta) {
static const size_t seq_size = sequence.size();
static size_t /*current*/ seq_pos = 0;
double t = fmod(timeAcc, t_max);
double t = fmod(timeAcc+34, t_max);
while (sequence[(seq_pos+1) % seq_size].t < t || sequence[seq_pos].t > t)
seq_pos = (seq_pos+1) % seq_size;
const seq_t& seq_a = sequence[seq_pos];
const seq_t& seq_b = sequence[(seq_pos+1) % seq_size];
double /*interpolation */f/*actor*/ = (t - seq_a.t) / (seq_b.t - seq_a.t);
lightNode[0]->light_color = vec3(seq_a.light_c * (1-f) + seq_b.light_c * f);
vec3 bg_color = vec3(seq_a.bg_c * (1-f) + seq_b.bg_c * f);
double /*interpolation */f/*actor*/ = (t - seq_a.t) / (seq_b.t - seq_a.t);
lightNode[0]->light_color = seq_a.light_c * (1-f) + seq_b.light_c * f;
vec3 bg_color = seq_a.bg_c * (1-f) + seq_b.bg_c * f;
fog_color = seq_a.fog_c * (1-f) + seq_b.fog_c * f;
fog_strength = seq_a.fog * (1-f) + seq_b.fog * f;
bg_color = glm::mix(bg_color, fog_color, fog_strength);
glClearColor(bg_color.r, bg_color.g, bg_color.b, 1.0f);
/*interpolation */f/*actor*/ = t/t_max;
View File
@ -9,6 +9,9 @@ extern SceneNode* rootNode;
extern SceneNode* hudNode;
extern SceneNode* lightNode[N_LIGHTS];
extern vec3 fog_color;
extern float fog_strength;
extern glm::vec3 cameraPosition;
extern glm::vec3 cameraLookAt;
extern glm::vec3 cameraUpward;