Moved illumination into own files

This commit is contained in:
Martin Opat 2024-12-30 12:09:45 +01:00
parent 479c450fef
commit e983b3e3a8
2 changed files with 22 additions and 163 deletions

View File

@@ -7,9 +7,8 @@
#include "shading.h"
// Raycast + phong, TODO: Consider wrapping in a class
__global__ void raycastKernel(float* volumeData, unsigned char* framebuffer, int d_volumeWidth, int d_volumeHeight, int d_volumeDepth) {
__global__ void raycastKernel(float* volumeData, unsigned char* framebuffer) {
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
if (px >= IMAGE_WIDTH || py >= IMAGE_HEIGHT) return;
@@ -30,9 +29,9 @@ __global__ void raycastKernel(float* volumeData, unsigned char* framebuffer, int
v *= tanHalfFov;
// Find ray direction
Vec3 cameraRight = (cameraDir.cross(cameraUp)).normalize();
cameraUp = (cameraRight.cross(cameraDir)).normalize();
Vec3 rayDir = (cameraDir + cameraRight*u + cameraUp*v).normalize();
Vec3 cameraRight = (d_cameraDir.cross(d_cameraUp)).normalize();
d_cameraUp = (cameraRight.cross(d_cameraDir)).normalize();
Vec3 rayDir = (d_cameraDir + cameraRight*u + d_cameraUp*v).normalize();
// Intersect (for simplicity just a 3D box from 0 to 1 in all dimensions) - TODO: Think about whether this is the best way to do this
float tNear = 0.0f;
@@ -56,9 +55,9 @@ __global__ void raycastKernel(float* volumeData, unsigned char* framebuffer, int
}
};
intersectAxis(cameraPos.x, rayDir.x);
intersectAxis(cameraPos.y, rayDir.y);
intersectAxis(cameraPos.z, rayDir.z);
intersectAxis(d_cameraPos.x, rayDir.x);
intersectAxis(d_cameraPos.y, rayDir.y);
intersectAxis(d_cameraPos.z, rayDir.z);
if (tNear > tFar) continue; // No intersection
if (tNear < 0.0f) tNear = 0.0f;
@@ -68,30 +67,30 @@ __global__ void raycastKernel(float* volumeData, unsigned char* framebuffer, int
float tCurrent = tNear;
while (tCurrent < tFar && alphaAccum < alphaAcumLimit) {
Point3 pos = cameraPos + rayDir * tCurrent;
Point3 pos = d_cameraPos + rayDir * tCurrent;
// Convert to volume indices
float fx = pos.x * (d_volumeWidth - 1);
float fy = pos.y * (d_volumeHeight - 1);
float fz = pos.z * (d_volumeDepth - 1);
float fx = pos.x * (VOLUME_WIDTH - 1);
float fy = pos.y * (VOLUME_HEIGHT - 1);
float fz = pos.z * (VOLUME_DEPTH - 1);
int ix = (int)roundf(fx);
int iy = (int)roundf(fy);
int iz = (int)roundf(fz);
// Sample
float density = sampleVolumeNearest(volumeData, d_volumeWidth, d_volumeHeight, d_volumeDepth, ix, iy, iz);
float density = sampleVolumeNearest(volumeData, VOLUME_WIDTH, VOLUME_HEIGHT, VOLUME_DEPTH, ix, iy, iz);
// Basic transfer function. TODO: Move to a separate file, and then improve
float alphaSample = density * 0.1f;
// float alphaSample = 1.0f - expf(-density * 0.1f);
Color3 baseColor = Color3(density, 0.1f*density, 1.f - density); // TODO: Implement a proper transfer function
Color3 baseColor = Color3::init(density, 0.1f*density, 1.f - density); // TODO: Implement a proper transfer function
// If density ~ 0, skip shading
if (density > minAllowedDensity) {
Vec3 grad = computeGradient(volumeData, d_volumeWidth, d_volumeHeight, d_volumeDepth, ix, iy, iz);
Vec3 grad = computeGradient(volumeData, VOLUME_WIDTH, VOLUME_HEIGHT, VOLUME_DEPTH, ix, iy, iz);
Vec3 normal = -grad.normalize();
Vec3 lightDir = (lightPos - pos).normalize();
Vec3 lightDir = (d_lightPos - pos).normalize();
Vec3 viewDir = -rayDir.normalize();
// Apply Phong

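The rewritten kernel now reads the camera and light from device-side symbols (d_cameraPos, d_cameraDir, d_cameraUp, d_lightPos) and from the compile-time VOLUME_*/IMAGE_* constants instead of kernel parameters. The declarations behind those symbols, and the copyConstantsToDevice() helper called later in main, are not part of this diff; the following is only a sketch of how consts.h (or a companion source) might provide them, with the types inferred from their use in the kernel:

    // Sketch only -- not shown in this commit.
    // Device-side copies of the camera/light setup read by raycastKernel.
    // d_cameraUp is re-orthogonalized inside the kernel, so it is declared
    // __device__ rather than __constant__ here.
    __constant__ Point3 d_cameraPos;
    __constant__ Vec3   d_cameraDir;
    __device__   Vec3   d_cameraUp;
    __constant__ Point3 d_lightPos;

    // Upload the host-side values from consts.h once, before the kernel launch.
    void copyConstantsToDevice() {
        Vec3 dir = cameraDir.normalize();
        Vec3 up  = cameraUp.normalize();
        cudaMemcpyToSymbol(d_cameraPos, &cameraPos, sizeof(d_cameraPos));
        cudaMemcpyToSymbol(d_cameraDir, &dir,       sizeof(d_cameraDir));
        cudaMemcpyToSymbol(d_cameraUp,  &up,        sizeof(d_cameraUp));
        cudaMemcpyToSymbol(d_lightPos,  &lightPos,  sizeof(d_lightPos));
    }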
View File

@@ -10,142 +10,11 @@
#include "objs/sphere.h"
#include "img/handler.h"
#include "consts.h"
#include "illumination/illumination.h"
__constant__ int d_volumeWidth;
__constant__ int d_volumeHeight;
__constant__ int d_volumeDepth;
static float* d_volume = nullptr;
// ----------------------------------------------------------------------------------------------------
__device__ Vec3 phongShading(const Vec3& normal, const Vec3& lightDir, const Vec3& viewDir, const Vec3& baseColor) {
Vec3 ambient = baseColor * ambientStrength;
double diff = fmax(normal.dot(lightDir), 0.0);
Vec3 diffuse = baseColor * (diffuseStrength * diff);
Vec3 reflectDir = (normal * (2.0 * normal.dot(lightDir)) - lightDir).normalize();
double spec = pow(fmax(viewDir.dot(reflectDir), 0.0), shininess);
Vec3 specular = Vec3(1.0, 1.0, 1.0) * (specularStrength * spec);
return ambient + diffuse + specular;
}
// Raycast + phong
__global__ void raycastKernel(float* volumeData, unsigned char* framebuffer, int imageWidth, int imageHeight, Vec3 cameraPos, Vec3 cameraDir, Vec3 cameraUp, float fov, float stepSize, Vec3 lightPos) {
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
if (px >= imageWidth || py >= imageHeight) return;
float accumR = 0.0f;
float accumG = 0.0f;
float accumB = 0.0f;
// Multiple samples per pixel
for (int s = 0; s < SAMPLES_PER_PIXEL; s++) {
// Map to [-1, 1]
float u = ((px + 0.5f) / imageWidth ) * 2.0f - 1.0f;
float v = ((py + 0.5f) / imageHeight) * 2.0f - 1.0f;
// TODO: Move this (and all similar transformation code) to its own separate file
float tanHalfFov = tanf(fov * 0.5f);
u *= tanHalfFov;
v *= tanHalfFov;
// Find ray direction
Vec3 cameraRight = (cameraDir.cross(cameraUp)).normalize();
cameraUp = (cameraRight.cross(cameraDir)).normalize();
Vec3 rayDir = (cameraDir + cameraRight*u + cameraUp*v).normalize();
// Intersect (for simplicity just a 3D box from 0 to 1 in all dimensions) - TODO: Think about whether this is the best way to do this
float tNear = 0.0f;
float tFar = 1e6f;
auto intersectAxis = [&](float start, float dirVal) {
if (fabsf(dirVal) < epsilon) {
if (start < 0.f || start > 1.f) {
tNear = 1e9f;
tFar = -1e9f;
}
} else {
float t0 = (0.0f - start) / dirVal;
float t1 = (1.0f - start) / dirVal;
if (t0>t1) {
float tmp=t0;
t0=t1;
t1=tmp;
}
if (t0>tNear) tNear = t0;
if (t1<tFar ) tFar = t1;
}
};
intersectAxis(cameraPos.x, rayDir.x);
intersectAxis(cameraPos.y, rayDir.y);
intersectAxis(cameraPos.z, rayDir.z);
if (tNear > tFar) continue; // No intersection
if (tNear < 0.0f) tNear = 0.0f;
float colorR = 0.0f, colorG = 0.0f, colorB = 0.0f;
float alphaAccum = 0.0f;
float tCurrent = tNear;
while (tCurrent < tFar && alphaAccum < alphaAcumLimit) {
Vec3 pos = cameraPos + rayDir * tCurrent;
// Convert to volume indices
float fx = pos.x * (d_volumeWidth - 1);
float fy = pos.y * (d_volumeHeight - 1);
float fz = pos.z * (d_volumeDepth - 1);
int ix = (int)roundf(fx);
int iy = (int)roundf(fy);
int iz = (int)roundf(fz);
// Sample
float density = sampleVolumeNearest(volumeData, d_volumeWidth, d_volumeHeight, d_volumeDepth, ix, iy, iz);
// Basic transfer function. TODO: Move to a separate file, and then improve
float alphaSample = density * 0.1f;
// float alphaSample = 1.0f - expf(-density * 0.1f);
Vec3 baseColor = Vec3(density, 0.1f*density, 1.f - density); // TODO: Implement a proper transfer function
// If density ~ 0, skip shading
if (density > minAllowedDensity) {
Vec3 grad = computeGradient(volumeData, d_volumeWidth, d_volumeHeight, d_volumeDepth, ix, iy, iz);
Vec3 normal = -grad.normalize();
Vec3 lightDir = (lightPos - pos).normalize();
Vec3 viewDir = -rayDir.normalize();
// Apply Phong
Vec3 shadedColor = phongShading(normal, lightDir, viewDir, baseColor);
// Compose
colorR += (1.0f - alphaAccum) * shadedColor.x * alphaSample;
colorG += (1.0f - alphaAccum) * shadedColor.y * alphaSample;
colorB += (1.0f - alphaAccum) * shadedColor.z * alphaSample;
alphaAccum += (1.0f - alphaAccum) * alphaSample;
}
tCurrent += stepSize;
}
accumR += colorR;
accumG += colorG;
accumB += colorB;
}
// Average samples
accumR /= (float)SAMPLES_PER_PIXEL;
accumG /= (float)SAMPLES_PER_PIXEL;
accumB /= (float)SAMPLES_PER_PIXEL;
// Final colour
int fbIndex = (py * imageWidth + px) * 3;
framebuffer[fbIndex + 0] = (unsigned char)(fminf(accumR, 1.f) * 255);
framebuffer[fbIndex + 1] = (unsigned char)(fminf(accumG, 1.f) * 255);
framebuffer[fbIndex + 2] = (unsigned char)(fminf(accumB, 1.f) * 255);
}
void getTemperature(std::vector<float>& temperatureData, int idx = 0) {
std::string path = "data/trimmed";
@@ -183,6 +52,7 @@ int main(int argc, char** argv) {
getSpeed(data);
// TODO: Eventually remove debug below (i.e., eliminate for-loop etc.)
// Generate debug volume data
float* hostVolume = new float[VOLUME_WIDTH * VOLUME_HEIGHT * VOLUME_DEPTH];
// generateVolume(hostVolume, VOLUME_WIDTH, VOLUME_HEIGHT, VOLUME_DEPTH);
@@ -192,7 +62,7 @@ int main(int argc, char** argv) {
if (data[i] + epsilon >= infty) hostVolume[i] = 0.0f;
}
// Min-max normalization
// Min-max normalization
float minVal = *std::min_element(hostVolume, hostVolume + VOLUME_WIDTH * VOLUME_HEIGHT * VOLUME_DEPTH);
float maxVal = *std::max_element(hostVolume, hostVolume + VOLUME_WIDTH * VOLUME_HEIGHT * VOLUME_DEPTH);
for (int i = 0; i < VOLUME_WIDTH * VOLUME_HEIGHT * VOLUME_DEPTH; i++) {
@@ -204,17 +74,15 @@ int main(int argc, char** argv) {
cudaMalloc((void**)&d_volume, volumeSize);
cudaMemcpy(d_volume, hostVolume, volumeSize, cudaMemcpyHostToDevice);
int w = VOLUME_WIDTH, h = VOLUME_HEIGHT, d = VOLUME_DEPTH;
cudaMemcpyToSymbol(d_volumeWidth, &w, sizeof(int));
cudaMemcpyToSymbol(d_volumeHeight, &h, sizeof(int));
cudaMemcpyToSymbol(d_volumeDepth, &d, sizeof(int));
// Allocate framebuffer
unsigned char* d_framebuffer;
size_t fbSize = IMAGE_WIDTH * IMAGE_HEIGHT * 3 * sizeof(unsigned char);
cudaMalloc((void**)&d_framebuffer, fbSize);
cudaMemset(d_framebuffer, 0, fbSize);
// Copy external constants from consts.h to cuda
copyConstantsToDevice();
// Launch kernel
dim3 blockSize(16, 16); // TODO: Figure out a good size for parallelization
dim3 gridSize((IMAGE_WIDTH + blockSize.x - 1)/blockSize.x,
@@ -222,15 +90,7 @@ int main(int argc, char** argv) {
raycastKernel<<<gridSize, blockSize>>>(
d_volume,
d_framebuffer,
IMAGE_WIDTH,
IMAGE_HEIGHT,
cameraPos,
cameraDir.normalize(),
cameraUp.normalize(),
fov,
stepSize,
lightPos
d_framebuffer
);
cudaDeviceSynchronize();
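The phongShading body removed above is what presumably moved into the new shading/illumination headers included at the top of both files. A minimal sketch of the relocated function, adapted from the deleted lines (the Color3 types follow the new kernel's baseColor; the exact header and signature are assumptions, since the new files themselves are not shown in this diff):

    // illumination/illumination.h -- sketch of the relocated Phong shading.
    #pragma once
    #include "consts.h"   // ambientStrength, diffuseStrength, specularStrength, shininess

    __device__ inline Color3 phongShading(const Vec3& normal, const Vec3& lightDir,
                                          const Vec3& viewDir, const Color3& baseColor) {
        // Ambient: a constant fraction of the base colour.
        Color3 ambient = baseColor * ambientStrength;

        // Diffuse: Lambertian falloff against the surface normal.
        float diff = fmaxf(normal.dot(lightDir), 0.0f);
        Color3 diffuse = baseColor * (diffuseStrength * diff);

        // Specular: reflect the light direction about the normal.
        Vec3 reflectDir = (normal * (2.0f * normal.dot(lightDir)) - lightDir).normalize();
        float spec = powf(fmaxf(viewDir.dot(reflectDir), 0.0f), shininess);
        Color3 specular = Color3::init(1.0f, 1.0f, 1.0f) * (specularStrength * spec);

        return ambient + diffuse + specular;
    }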