我正在研究一些非常简单的射线追踪器。相机(简单射线脚轮)的长宽比计算错误
现在我正在试图使透视相机正常工作。
我使用这样的循环来渲染场景(只有两个,硬编码的球体 - I投射光线从它的中心的每个像素,不AA施加):
Camera * camera = new PerspectiveCamera({ 0.0f, 0.0f, 0.0f }/*pos*/,
{ 0.0f, 0.0f, 1.0f }/*direction*/, { 0.0f, 1.0f, 0.0f }/*up*/,
buffer->getSize() /*projectionPlaneSize*/);
Sphere * sphere1 = new Sphere({ 300.0f, 50.0f, 1000.0f }, 100.0f); //center, radius
Sphere * sphere2 = new Sphere({ 100.0f, 50.0f, 1000.0f }, 50.0f);
for(int i = 0; i < buffer->getSize().getX(); i++) {
for(int j = 0; j < buffer->getSize().getY(); j++) {
//for each pixel of buffer (image)
double centerX = i + 0.5;
double centerY = j + 0.5;
Geometries::Ray ray = camera->generateRay(centerX, centerY);
Collision * collision = ray.testCollision(sphere1, sphere2);
if(collision){
//output red
}else{
//output blue
}
}
}
的Camera::generateRay(float x, float y)
是:
Camera::generateRay(float x, float y) {
//position = camera position, direction = camera direction etc.
Point2D xy = fromImageToPlaneSpace({ x, y });
Vector3D imagePoint = right * xy.getX() + up * xy.getY() + position + direction;
Vector3D rayDirection = imagePoint - position;
rayDirection.normalizeIt();
return Geometries::Ray(position, rayDirection);
}
Point2D fromImageToPlaneSpace(Point2D uv) {
float width = projectionPlaneSize.getX();
float height = projectionPlaneSize.getY();
float x = ((2 * uv.getX() - width)/width) * tan(fovX);
float y = ((2 * uv.getY() - height)/height) * tan(fovY);
return Point2D(x, y);
}
的视场:
double fovX = 3.14159265359/4.0;
double fovY = projectionPlaneSize.getY()/projectionPlaneSize.getX() * fovX;
我做什么不对或步没我省略?
它可能是一个精度问题,而不是fromImageToPlaneSpace(...)
?