
I am making a video game in Unity and decided to use ray tracing. I have the code, but as you will see in a second, it doesn't exactly render frame by frame. Here is my ray tracing code; this is the main script attached to the main camera. How can I do real-time ray tracing in Unity with C#?

using UnityEngine; 
using System.Collections; 

public class RayTracer : MonoBehaviour 
{ 

    public Color backgroundColor = Color.black; 
    public float RenderResolution = 1f; 
    public float maxDist = 100f; 
    public int maxRecursion = 4; 


    private Light[] lights; 
    private Texture2D renderTexture; 

    void Awake() 
    { 
     renderTexture = new Texture2D((int)(Screen.width * RenderResolution), (int)(Screen.height * RenderResolution)); 
     lights = FindObjectsOfType(typeof(Light)) as Light[]; 
    } 

    void Start() 
    { 
     RayTrace(); 
    } 

    void OnGUI() 
    { 
     GUI.DrawTexture(new Rect(0, 0, Screen.width, Screen.height), renderTexture); 
    } 

    // Casts one ray through every pixel of the render texture and stores the traced colour. 
    void RayTrace() 
    { 
     for (int x = 0; x < renderTexture.width; x++) 
     { 
      for (int y = 0; y < renderTexture.height; y++) 
      { 
       Color color = Color.black; 
       Ray ray = GetComponent<Camera>().ScreenPointToRay(new Vector3(x/RenderResolution, y/RenderResolution, 0)); 

       renderTexture.SetPixel(x, y, TraceRay(ray, color, 0)); 
      } 
     } 

     renderTexture.Apply(); 
    } 

    // Recursively traces a ray: surface colour, then lighting, reflection and transparency. 
    Color TraceRay(Ray ray, Color color, int recursiveLevel) 
    { 

     if (recursiveLevel < maxRecursion) 
     { 
      RaycastHit hit; 
      if (Physics.Raycast(ray, out hit, maxDist)) 
      { 
       Vector3 viewVector = ray.direction; 
       Vector3 pos = hit.point + hit.normal * 0.0001f; // nudge along the normal to avoid self-intersection 
       Vector3 normal = hit.normal; 

       RayTracerObject rto = hit.collider.gameObject.GetComponent<RayTracerObject>(); 
       //Does the object we hit have that script? 
       if (rto == null) 
       { 
        var GO = hit.collider.gameObject; 
        Debug.Log("Raycast hit failure! On " + GO.name + " position " + GO.transform.position.ToString()); 
        return color; //exit out 
       } 

       Material mat = hit.collider.GetComponent<Renderer>().material; 
       if (mat.mainTexture) 
       { 
        color += (mat.mainTexture as Texture2D).GetPixelBilinear(hit.textureCoord.x, hit.textureCoord.y); 
       } 
       else 
       { 
        color += mat.color; 
       } 

       color *= TraceLight(rto, viewVector, pos, normal); 

       if (rto.reflectiveCoeff > 0) 
       { 
        float reflet = 2.0f * Vector3.Dot(viewVector, normal); 
        Ray newRay = new Ray(pos, viewVector - reflet * normal); 
        color += rto.reflectiveCoeff * TraceRay(newRay, color, recursiveLevel + 1); 
       } 

       if (rto.transparentCoeff > 0) 
       { 
        Ray newRay = new Ray(hit.point - hit.normal * 0.0001f, viewVector); 
        color += rto.transparentCoeff * TraceRay(newRay, color, recursiveLevel + 1); 
       } 
      } 
     } 

     return color; 

    } 

    // Ambient light plus the contribution of every enabled light in the scene. 
    Color TraceLight(RayTracerObject rto, Vector3 viewVector, Vector3 pos, Vector3 normal) 
    { 
     Color c = RenderSettings.ambientLight; 

     foreach (Light light in lights) 
     { 
      if (light.enabled) 
      { 
       c += LightTrace(rto, light, viewVector, pos, normal); 
      } 
     } 
     return c; 
    } 

    // Shadow test plus Lambert/Phong/Blinn-Phong terms for a single light. 
    Color LightTrace(RayTracerObject rto, Light light, Vector3 viewVector, Vector3 pos, Vector3 normal) 
    { 


     float dot, distance, contribution; 
     Vector3 direction; 
     switch (light.type) 
     { 
      case LightType.Directional: 
       contribution = 0; 
       direction = -light.transform.forward; 
       dot = Vector3.Dot(direction, normal); 
       if (dot > 0) 
       { 
        if (Physics.Raycast(pos, direction, maxDist)) 
        { 
         return Color.black; 
        } 

        if (rto.lambertCoeff > 0) 
        { 
         contribution += dot * rto.lambertCoeff; 
        } 
        if (rto.reflectiveCoeff > 0) 
        { 
         if (rto.phongCoeff > 0) 
         { 
          float reflet = 2.0f * Vector3.Dot(viewVector, normal); 
          Vector3 phongDir = viewVector - reflet * normal; 
          float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f); 
          phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff; 

          contribution += phongTerm; 
         } 
         if (rto.blinnPhongCoeff > 0) 
         { 
          Vector3 blinnDir = -light.transform.forward - viewVector; 
          float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir)); 
          if (temp != 0.0f) 
          { 
           blinnDir = (1.0f/temp) * blinnDir; 
           float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f); 
           blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff; 

           contribution += blinnTerm; 
          } 
         } 
        } 
       } 
       return light.color * light.intensity * contribution; 
      case LightType.Point: 
       contribution = 0; 
       direction = (light.transform.position - pos).normalized; 
       dot = Vector3.Dot(normal, direction); 
       distance = Vector3.Distance(pos, light.transform.position); 
       if ((distance < light.range) && (dot > 0)) 
       { 
        if (Physics.Raycast(pos, direction, distance)) 
        { 
         return Color.black; 
        } 

        if (rto.lambertCoeff > 0) 
        { 
         contribution += dot * rto.lambertCoeff; 
        } 
        if (rto.reflectiveCoeff > 0) 
        { 
         if (rto.phongCoeff > 0) 
         { 
          float reflet = 2.0f * Vector3.Dot(viewVector, normal); 
          Vector3 phongDir = viewVector - reflet * normal; 
          float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f); 
          phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff; 

          contribution += phongTerm; 
         } 
         if (rto.blinnPhongCoeff > 0) 
         { 
          Vector3 blinnDir = -light.transform.forward - viewVector; 
          float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir)); 
          if (temp != 0.0f) 
          { 
           blinnDir = (1.0f/temp) * blinnDir; 
           float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f); 
           blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff; 

           contribution += blinnTerm; 
          } 
         } 
        } 
       } 
       if (contribution == 0) 
       { 
        return Color.black; 
       } 
       return light.color * light.intensity * contribution; 
      case LightType.Spot: 
       contribution = 0; 
       direction = (light.transform.position - pos).normalized; 
       dot = Vector3.Dot(normal, direction); 
       distance = Vector3.Distance(pos, light.transform.position); 
       if (distance < light.range && dot > 0) 
       { 
        float dot2 = Vector3.Dot(-light.transform.forward, direction); 
        if (dot2 > (1 - light.spotAngle/180)) 
        { 
         if (Physics.Raycast(pos, direction, distance)) 
         { 
          return Color.black; 
         } 
         if (rto.lambertCoeff > 0) 
         { 
          contribution += dot * rto.lambertCoeff; 
         } 
         if (rto.reflectiveCoeff > 0) 
         { 
          if (rto.phongCoeff > 0) 
          { 
           float reflet = 2.0f * Vector3.Dot(viewVector, normal); 
           Vector3 phongDir = viewVector - reflet * normal; 
           float phongTerm = max(Vector3.Dot(phongDir, viewVector), 0.0f); 
           phongTerm = rto.reflectiveCoeff * Mathf.Pow(phongTerm, rto.phongPower) * rto.phongCoeff; 

           contribution += phongTerm; 
          } 
          if (rto.blinnPhongCoeff > 0) 
          { 
           Vector3 blinnDir = -light.transform.forward - viewVector; 
           float temp = Mathf.Sqrt(Vector3.Dot(blinnDir, blinnDir)); 
           if (temp != 0.0f) 
           { 
            blinnDir = (1.0f/temp) * blinnDir; 
            float blinnTerm = max(Vector3.Dot(blinnDir, normal), 0.0f); 
            blinnTerm = rto.reflectiveCoeff * Mathf.Pow(blinnTerm, rto.blinnPhongPower) * rto.blinnPhongCoeff; 

            contribution += blinnTerm; 
           } 
          } 
         } 
        } 
       } 
       if (contribution == 0) 
       { 
        return Color.black; 
       } 
       return light.color * light.intensity * contribution; 
     } 
     return Color.black; 
    } 

    float max(float x0, float x1) 
    { 
     return x0 > x1 ? x0 : x1; 
    } 
} 

And this is the script attached to each of the objects in the scene:

using UnityEngine; 
using System.Collections; 

public class RayTracerObject : MonoBehaviour 
{ 

    public float lambertCoeff = 1f; 

    public float reflectiveCoeff = 0f; 

    public float phongCoeff = 1f; 
    public float phongPower = 2f; 

    public float blinnPhongCoeff = 1f; 
    public float blinnPhongPower = 2f; 

    public float transparentCoeff = 0f; 


    public Color baseColor = Color.gray; 

    void Awake() 
    { 
     if (!GetComponent<Renderer>().material.mainTexture) 
     { 
      GetComponent<Renderer>().material.color = baseColor; 
     } 
    } 
} 

How would I go about doing this, and what would the code look like?

Answer


Whilst ray tracing on the main thread is a perfectly acceptable design, it is probably not what you want in Unity, as it blocks everything else.

Now you could spawn a child thread to perform the ray tracing and have the main thread present the results. The problem, however, is that neither approach makes use of the GPU, which rather defeats the point of using Unity in the first place.
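For illustration, here is a minimal sketch of that worker-thread pattern. Note that Unity's scene APIs (Physics.Raycast, Camera.ScreenPointToRay, and so on) may only be called from the main thread, so the worker would have to trace against its own thread-safe copy of the scene; TraceScene below is a hypothetical stand-in for such a tracer, and all of these names are mine, not part of the question's code:

using System.Threading; 
using UnityEngine; 

public class ThreadedRayTracer : MonoBehaviour 
{ 
    private Texture2D renderTexture; 
    private Color32[] pixels;          // written by the worker, uploaded by the main thread 
    private volatile bool frameReady;  // set when a full frame has been traced 
    private Thread worker; 

    void Awake() 
    { 
        renderTexture = new Texture2D(Screen.width, Screen.height); 
        pixels = new Color32[Screen.width * Screen.height]; 
        worker = new Thread(TraceLoop) { IsBackground = true }; 
        worker.Start(); 
    } 

    // Runs on the worker thread: trace a frame, then wait until the 
    // main thread has consumed it before starting the next one. 
    void TraceLoop() 
    { 
        while (true) 
        { 
            if (!frameReady) 
            { 
                TraceScene(pixels);   // hypothetical thread-safe tracer 
                frameReady = true; 
            } 
            else 
            { 
                Thread.Sleep(1);      // avoid spinning while the frame is pending 
            } 
        } 
    } 

    // Runs on the main thread: upload the finished frame to the texture. 
    void Update() 
    { 
        if (frameReady) 
        { 
            renderTexture.SetPixels32(pixels); 
            renderTexture.Apply(); 
            frameReady = false; 
        } 
    } 

    void OnGUI() 
    { 
        GUI.DrawTexture(new Rect(0, 0, Screen.width, Screen.height), renderTexture); 
    } 

    // Stand-in: must not touch UnityEngine scene APIs from the worker thread. 
    void TraceScene(Color32[] buffer) { /* trace against a copied scene */ } 
} 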

"How to do real-time ray tracing in Unity with C#"

It all depends on what your scene is composed of and how you intend to ray trace it. You could conceivably render something simple at low resolution in real time; however, rendering at a reasonable screen resolution and with a reasonable level of ray bounces, that is, the number of recursive rays cast from reflective or transmissive materials, may be much more difficult. A sketch of the simple case follows.
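The question's script can already be pushed in that direction: lower RenderResolution (say to 0.1) in the inspector and re-trace every frame instead of once in Start(). A hypothetical tweak, only viable for very small scenes with low maxRecursion:

// In the question's RayTracer, replace Start() with an Update() that 
// re-traces every frame; GUI.DrawTexture stretches the small texture 
// back up to full screen size. 
void Update() 
{ 
    RayTrace(); 
} 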

Instead, I would urge you to follow the changing trend in ray tracing, where real-time ray tracing is now performed on the GPU using techniques known as General-Purpose GPU computing (GPGPU). nVidia has a number of talks on this subject, which can be found on YouTube. Here is my sample Unity GPGPU galaxy simulation, which may prove useful as a GPGPU reference.

A sample GPGPU kernel, just to give you an idea of what GPGPU is about:

// File: Galaxy1Compute.compute 

// Each #kernel tells which function to compile; you can have many kernels 
#pragma kernel UpdateStars 

#include "Galaxy.cginc" 

// blackmagic 
#define BLOCKSIZE 128 

RWStructuredBuffer<Star> stars; 

Texture2D HueTexture; 

// refer to http://forum.unity3d.com/threads/163591-Compute-Shader-SamplerState-confusion 
SamplerState samplerHueTexture; 

// time elapsed since last frame 
float deltaTime; 

const float Softening=3e4f; 
#define Softening2 Softening * Softening 

static float G = 6.67300e-11f; 
static float DefaultMass = 1000000.0f; 

// Do a pre-calculation assuming all the stars have the same mass 
static float GMM = G*DefaultMass*DefaultMass; 


[numthreads(BLOCKSIZE,1,1)] 
void UpdateStars (uint3 id : SV_DispatchThreadID) 
{ 
    uint i = id.x; 
    uint numStars, stride; 
    stars.GetDimensions(numStars, stride); 

    float3 position = stars[i].position; 
    float3 velocity = stars[i].velocity; 

    float3 A=float3(0,0,0); 

    [loop] 
    for (uint j = 0; j < numStars; j++) 
    {  
     if (i != j) 
     { 
      float3 D = stars[j].position - stars[i].position; 
      float r = length(D); 
      float f = GMM/(r * r + Softening2); 
      A += f * normalize(D); 
     } 
    } 

    velocity += A * deltaTime; 
    position += velocity * deltaTime; 

    if (i < numStars) 
    { 
     stars[i].velocity = velocity; 
     stars[i].position = position; 
     stars[i].accelMagnitude = length(A); 
    } 
} 
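On the C# side, a kernel like this is dispatched from a regular script. The following is a minimal sketch of the dispatch pattern only; the Star layout, the counts, and the initialisation are my assumptions rather than the actual galaxy project code, and the HueTexture binding is omitted:

using UnityEngine; 

public class GalaxyDispatcher : MonoBehaviour 
{ 
    // Assumed layout matching the kernel's Star struct: 
    // position + velocity + accelMagnitude = 7 floats = 28 bytes. 
    struct Star 
    { 
        public Vector3 position; 
        public Vector3 velocity; 
        public float accelMagnitude; 
    } 

    public ComputeShader galaxyCompute;  // assign Galaxy1Compute.compute in the inspector 
    const int BlockSize = 128;           // must match [numthreads] in the kernel 
    const int NumStars = 16384;          // kept a multiple of BlockSize 

    ComputeBuffer starBuffer; 
    int kernel; 

    void Start() 
    { 
        kernel = galaxyCompute.FindKernel("UpdateStars"); 
        starBuffer = new ComputeBuffer(NumStars, 28);  // element stride in bytes 
        var stars = new Star[NumStars]; 
        // ... initialise star positions and velocities here ... 
        starBuffer.SetData(stars); 
        galaxyCompute.SetBuffer(kernel, "stars", starBuffer); 
    } 

    void Update() 
    { 
        galaxyCompute.SetFloat("deltaTime", Time.deltaTime); 
        galaxyCompute.Dispatch(kernel, NumStars / BlockSize, 1, 1); 
    } 

    void OnDestroy() 
    { 
        starBuffer.Release(); 
    } 
} 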

Additionally, there are some fine books on this subject. Real-Time Volume Graphics, although it covers volumes, does cover casting rays, the essence of ray tracing. The hardest paradigm shift is writing for the GPGPU in the first place; once you understand it, writing a GPGPU ray tracer is an easy step up from a GPGPU volume shader.


A wonderful tome to accompany any ray tracing author is Matt Pharr's Physically Based Rendering book (there is a 2nd edition, but I have not read it yet).
