{- WETT -}
{-# LANGUAGE TupleSections, RecordWildCards, NamedFieldPuns, BangPatterns #-}
module RayTracing where

import Data.Foldable (Foldable(foldl'), minimumBy)
import Data.Ord (comparing)
import Data.Maybe (fromMaybe)
import Data.Functor ((<&>))
import Data.Bifunctor (Bifunctor(bimap, first, second))
import qualified Data.Map.Strict as M (findWithDefault, fromDistinctAscList)
import Control.Parallel.Strategies (evalTuple2, parListChunk, using, rseq)
import Codec.Picture (PngSavable(encodePng), PixelRGB8, Image, generateImage)
import Codec.Picture.Types (PixelRGB8(PixelRGB8))
import qualified Data.ByteString.Lazy as B
import System.IO (Handle)
import System.Random (mkStdGen, RandomGen, Random(randomR))
import Data.List (unfoldr)

-- Unpacking our vectors yields a speedup of about 4x compared to only strict fields, in the samples I tested.
-- With fields that are neither unpacked nor strict it takes significantly longer
-- (I didn't bother to wait for the program to finish).
data Vector3 = V3 {-# UNPACK #-} !Double {-# UNPACK #-} !Double {-# UNPACK #-} !Double
  deriving (Eq, Show)

type RGB = Vector3
type Point = Vector3
type Ray = (Point, Vector3)

fromTuple :: (Double, Double, Double) -> Vector3
fromTuple (x, y, z) = V3 x y z

vZero :: Vector3
vZero = V3 0 0 0

-- | Vector addition.
infixl 6 <+>
(<+>) :: Vector3 -> Vector3 -> Vector3
(V3 x1 y1 z1) <+> (V3 x2 y2 z2) = V3 (x1 + x2) (y1 + y2) (z1 + z2)

-- | Vector subtraction.
infixl 6 >->
(>->) :: Vector3 -> Vector3 -> Vector3
(V3 x1 y1 z1) >-> (V3 x2 y2 z2) = V3 (x1 - x2) (y1 - y2) (z1 - z2)

-- | Vector dot product.
infixl 7 <.>
(<.>) :: Vector3 -> Vector3 -> Double
(V3 x1 y1 z1) <.> (V3 x2 y2 z2) = x1 * x2 + y1 * y2 + z1 * z2

-- | Scalar-vector product.
infixl 7 **>
(**>) :: Double -> Vector3 -> Vector3
l **> (V3 x y z) = l `seq` V3 (l * x) (l * y) (l * z)

-- | Component-wise vector multiplication.
infixl 7 <**>
(<**>) :: Vector3 -> Vector3 -> Vector3
(V3 x1 y1 z1) <**> (V3 x2 y2 z2) = V3 (x1 * x2) (y1 * y2) (z1 * z2)

-- | Vector cross product.
infixl 8 ><
(><) :: Vector3 -> Vector3 -> Vector3
(V3 a1 a2 a3) >< (V3 b1 b2 b3) = V3 (a2 * b3 - a3 * b2) (a3 * b1 - a1 * b3) (a1 * b2 - a2 * b1)

-- | The scalar length of a vector.
norm :: Vector3 -> Double
norm (V3 x y z) = sqrt $ x * x + y * y + z * z

-- | Creates a vector in the same direction as the given vector but with a norm of 1.
normalize :: Vector3 -> Vector3
normalize v = (1 / l) **> v
  where l = norm v

-- | Applies a function to all components of a vector.
mapVector :: (Double -> Double) -> Vector3 -> Vector3
mapVector f (V3 x y z) = V3 (f x) (f y) (f z)

-- | Combines two vectors component-wise with the given function.
liftVector :: (Double -> Double -> Double) -> Vector3 -> Vector3 -> Vector3
liftVector f (V3 x1 y1 z1) (V3 x2 y2 z2) = V3 (f x1 x2) (f y1 y2) (f z1 z2)

directionFromTo :: Point -> Point -> Vector3
directionFromTo from to = normalize $ to >-> from

-- | A Material contains various lighting constants.
data Material = Material
  { kSpecular :: RGB
  , kDiffuse :: RGB
  , kAmbient :: RGB
  , kEmissive :: RGB
  , aShiny :: RGB
  , kReflected :: RGB
  } deriving (Show, Eq)
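-- The following is an illustrative example only (the name and all constants are made up,
-- not taken from any scene in this module): a mostly diffuse red material with a weak
-- specular highlight and a faint mirror component.
exampleMatteRed :: Material
exampleMatteRed = Material
  { kSpecular  = V3 0.2 0.2 0.2
  , kDiffuse   = V3 0.8 0.1 0.1
  , kAmbient   = V3 0.8 0.1 0.1
  , kEmissive  = vZero
  , aShiny     = V3 10 10 10
  , kReflected = V3 0.05 0.05 0.05
  }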
-- | A Beamer is used to implement a light source by providing an (unfiltered) list of beams
-- | from a surface point to the light. For example, a point light would return a one-element list.
-- | Each beam is a tuple (dist, dir), where dir is a normalized direction vector from the given point
-- | to the light and dist is the distance along that direction vector from the point to the light.
type Beamer = Point -> [(Double, Vector3)]

-- | A Light encapsulates a Beamer and lighting constants.
-- | Each beam from the Beamer is weighted as a fraction of ALL beams (including the obstructed ones)
-- | to get the final lighting.
data Light = Light
  { lightBeams :: Beamer
  , iSpecular :: RGB
  , iDiffuse :: RGB
  }

-- | Given a ray, an obstruction returns a (possibly empty) list of surface normals
-- | and distances from the ray's origin to the surface.
type Obstruction = Ray -> [(Double, Vector3)]

type Object = (Material, Obstruction)

data Scene = Scene
  { lights :: [Light]
  , objects :: [Object]
  , iAmbient :: RGB
  , iEmissive :: RGB
  , backgroundColor :: RGB
  }

-- | A Camera takes a resolution (W, H) and a pixel position (x, y) and returns the rays
-- | that should be traced for that pixel. The resulting colours are averaged component-wise.
-- | For example, a camera with a depth-of-field effect would return multiple rays with their origins
-- | distributed slightly around a given camera origin.
type Camera = (Int, Int) -> (Int, Int) -> [Ray]

-- | A list of all pixels (positions) for a given width and height, in sorted order.
getPixels :: (Int, Int) -> [(Int, Int)]
getPixels (width, height) = [(i, j) | i <- [1..width], j <- [1..height]]

solveQuadratic :: Double -> Double -> Double -> [Double]
solveQuadratic a b c
  | disc > 0 = [m + n, m - n]
  | disc == 0 = [m]
  | otherwise = []
  where
    disc = b * b - 4 * a * c
    twoa = 2 * a
    m = -b / twoa
    n = sqrt disc / twoa

pointBeams :: Point -> Point -> [(Double, Vector3)]
pointBeams l o = [(norm $ l >-> o, directionFromTo o l)]

-- | Creates an area light from the given beam spacing (distance between adjacent beams),
-- | the corner of the plane, two vectors, and a width.
-- | The first vector describes the first edge; the second is merely used to determine the plane,
-- | and the given width is used for the second edge.
rectBeams :: Double -> Point -> Vector3 -> Vector3 -> Double -> Point -> [(Double, Vector3)]
rectBeams !spacing !planeOrigin !v1 !v2 width !o = map f points
  where
    edge2 = width **> normalize ((v1 >< v2) >< v1)
    density = 1 / spacing
    count1 = floor (density * norm v1)
    count2 = floor (density * norm edge2)
    edge1N = normalize v1
    edge2N = normalize edge2
    points = [ planeOrigin <+> spacing **> (c1d **> edge1N <+> c2d **> edge2N)
             | c1 <- [0..count1], let c1d = fromInteger c1
             , c2 <- [0..count2], let c2d = fromInteger c2 ]
    f p = (norm $ p >-> o, directionFromTo o p)

-- | A spherical obstruction; combine it with an appropriate material to get a sphere.
sphere :: Point -> Double -> Obstruction
sphere c r (o, d) = zip dists dirVectForDist
  where
    dd = d <.> d
    oc = o >-> c
    twodoc = 2 * (d <.> oc)
    ocr = oc <.> oc - r * r
    dists = solveQuadratic dd twodoc ocr
    dirVectForDist = map (directionFromTo c . (<+> o) . (**> d)) dists

-- | An infinite plane through the first given point, with its surface normal pointing towards the second given point.
plane :: Point -> Point -> Obstruction
plane !planeOrigin !to (!o, !d) = case compare npo 0 of
  EQ -> []
  GT -> [(npo / dn, negSnv)]
  LT -> [(npo / dn, snv)]
  where
    !snv = directionFromTo planeOrigin to
    !negSnv = mapVector negate snv
    dn = d <.> snv
    npo = snv <.> (planeOrigin >-> o)

-- | Exactly the same as plane, but only visible from the direction of the surface normal.
-- Not implemented in terms of plane because I'm lazy (and because it might hurt performance? idk).
oneWayPlane :: Point -> Point -> Obstruction
oneWayPlane !planeOrigin !to (!o, !d) = case compare npo 0 of
  EQ -> []
  GT -> []
  LT -> [(npo / dn, snv)]
  where
    !snv = directionFromTo planeOrigin to
    dn = d <.> snv
    npo = snv <.> (planeOrigin >-> o)
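-- Illustrative usage of an Obstruction on its own (all numbers made up): a unit sphere at the
-- origin, probed with a ray starting at z = -5 and pointing along +z. This should evaluate to
-- a hit at distance 6 with normal V3 0 0 1 and a hit at distance 4 with normal V3 0 0 (-1);
-- trace/traceObj below pick the nearer of the two.
exampleSphereHits :: [(Double, Vector3)]
exampleSphereHits = sphere vZero 1 (V3 0 0 (-5), V3 0 0 1)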
-- | A rectangular surface. Takes as an argument an existing plane constructor,
-- | then additionally keeps only the intersections that lie within the bounds.
boundedPlane :: (Point -> Point -> Obstruction) -> Point -> Vector3 -> Vector3 -> Obstruction
boundedPlane planeF !cornerOrigin !width !height r@(!o, !d) = filter isInBounds pois
  where
    (!scalarWidth, !scalarHeight) = (norm width, norm height)
    (!normalWidth, !normalHeight) = (normalize width, normalize height)
    pois = planeF cornerOrigin (cornerOrigin <+> width >< height) r
    isInBounds (t, _) = 0 <= hPos && hPos <= scalarWidth && 0 <= vPos && vPos <= scalarHeight
      where
        poiRelCorner = o <+> t **> d >-> cornerOrigin
        hPos = normalWidth <.> poiRelCorner
        vPos = normalHeight <.> poiRelCorner

-- | The point defines the first corner. The first vector runs from the first corner to the second corner.
-- | The second vector is only used to define a plane together with the first; its length is not relevant.
-- | Then, in this plane, on a line through the first corner perpendicular to the first vector,
-- | at a distance of the given depth, the third corner is placed.
-- | Finally, the last corner is placed on a line through the first corner perpendicular to both the previous line
-- | and the first vector, at a distance of the given height.
-- | This way it is impossible to incorrectly define a rectangular prism, unless the given vectors are zero or collinear.
rectPrism :: Point -> Vector3 -> Vector3 -> Double -> Double -> Obstruction
rectPrism p1 v1 v2 depth height r = concatMap ($ r) planes
  where
    boundedPlaneF = boundedPlane plane
    vUpN = normalize (v1 >< v2)
    vDepthN = normalize (vUpN >< v1)
    vUp = height **> vUpN
    vDepth = depth **> vDepthN
    planes = [ boundedPlaneF p1 v1 vDepth
             , boundedPlaneF p1 vDepth vUp
             , boundedPlaneF p1 vUp v1
             , boundedPlaneF (p1 <+> vUp) v1 vDepth
             , boundedPlaneF (p1 <+> v1) vDepth vUp
             , boundedPlaneF (p1 <+> vDepth) vUp v1 ]

-- trace :: Scene -> Ray -> Maybe (Double, Vector3)
-- trace Scene {objects} r = case filter ((> 0) . fst) $ concatMap (($ r) . snd) objects of
--   [] -> Nothing
--   xs -> Just $ minimumBy (comparing fst) xs

trace :: Scene -> Ray -> Maybe (Double, Vector3)
trace sc r = fst <$> traceObj sc r

-- | We need this epsilon, as otherwise rays would be obstructed by the very objects they just bounced off of.
-- | Filtering for (> 0) is not quite enough due to numerical inaccuracies.
eps :: Double
eps = 1e-5

-- | Given a Ray, returns Just ((dist, snv), obj) if the ray intersects any objects, where obj is the
-- | closest Obstruction in the Scene, snv is the surface normal where the ray intersects that Obstruction,
-- | and dist is the distance along the Ray to that Obstruction. Otherwise returns Nothing.
traceObj :: Scene -> Ray -> Maybe ((Double, Vector3), Object)
traceObj Scene {objects} r =
  case filter ((> eps) . fst . fst) $ concatMap (\obj -> map (, obj) $ snd obj r) objects of
    [] -> Nothing
    xs -> Just $ minimumBy (comparing (fst . fst)) xs
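-- Illustrative usage of trace (the scene below is made up for demonstration and reuses the
-- exampleMatteRed material defined above): tracing the probe ray from exampleSphereHits against
-- a scene containing only the unit sphere should yield the nearer hit,
-- Just (4.0, V3 0.0 0.0 (-1.0)).
exampleTrace :: Maybe (Double, Vector3)
exampleTrace = trace exampleScene (V3 0 0 (-5), V3 0 0 1)
  where
    exampleScene = Scene
      { lights = []
      , objects = [(exampleMatteRed, sphere vZero 1)]
      , iAmbient = vZero
      , iEmissive = vZero
      , backgroundColor = vZero
      }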
-- | Phong illumination.
-- | b: number of remaining recursive rays to trace.
-- | vo: the first ray (from a recursion standpoint), used to calculate lighting.
-- | (t, snv): the distance and surface normal to the object to illuminate, as returned by trace.
-- | Formulae adapted from https://en.wikipedia.org/w/index.php?title=Phong_reflection_model&oldid=996573952#Description
phongIlluminate :: Int -> Ray -> Scene -> Ray -> (Double, Vector3) -> Material -> RGB
phongIlluminate b vo@(!viewOrigin, _) sc@Scene {..} (o, d) (t, snv) Material {..} =
  kEmissive <**> iEmissive
    <+> kAmbient <**> iAmbient
    <+> foldl' (<+>) vZero (map phong lights)
    <+> kReflected <**> renderRay b vo sc (p, reflect $ mapVector negate d)
  where
    p = o <+> t **> d                  -- point where the ray meets the surface
    vv = directionFromTo p viewOrigin  -- normalized vector to the VIEWER, not to the inbound ray
    reflect lv = (2 * (lv <.> snv) **> snv) >-> lv  -- reflects a vector about the surface normal
    phong Light {..} =
      mapVector (/ fromIntegral (length allBeams))
        (sum (map diffuse rs) **> iDiffuse <**> kDiffuse
          <+> iSpecular <**> kSpecular <**> foldl' (<+>) vZero (map specular rs))
      where
        allBeams = lightBeams p
        rs = [rl | (tl, rl) <- allBeams, maybe True ((> tl) . fst) $ trace sc (p, rl)]
        diffuse = clamp 0 1 . (<.> snv)
        specular lv = mapVector (((reflect lv <.> vv) **) . clamp 0 1) aShiny

-- | Finds the next object and calculates the illumination. Mutually recursive with phongIlluminate.
renderRay :: Int -> Ray -> Scene -> Ray -> RGB
renderRay 0 _ _ _ = vZero
renderRay b vo scene@Scene{..} ray =
  fromMaybe backgroundColor $
    traceObj scene ray <&> second fst <&> uncurry (phongIlluminate (b - 1) vo scene ray)

-- | Creates a basic camera. Arguments:
-- 'from': the viewpoint origin;
-- 'to': the point towards which the camera is looking;
-- 'th': the field of view, in radians, 0 < th < pi;
-- 'vUp': the "up" vector, typically @(0, 1, 0)@.
-- Adapted from https://en.wikipedia.org/w/index.php?title=Ray_tracing_(graphics)&oldid=1000210013#Calculate_rays_for_rectangular_viewport
standardCamera :: Point -> Point -> Double -> Vector3 -> Camera
standardCamera from to th vUp (width, height) (i, j)
  | 0 >= th || th >= pi = error "FOV must be in range (0, pi)"
  | otherwise = [(from, normalize $ p1h <+> fromIntegral (i - 1) **> qx <+> fromIntegral (j - 1) **> qy)]
  where
    t = directionFromTo from to
    v = normalize vUp
    b = t >< v
    gx = tan $ th / 2
    gy = gx * fromIntegral (height - 1) / fromIntegral (width - 1)
    qx = mapVector (/ fromIntegral (width - 1)) (2 * gx **> b)
    qy = mapVector (/ fromIntegral (height - 1)) (2 * gy **> v)
    p1h = t >-> gx **> b >-> gy **> v
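-- Illustrative camera (all numbers made up): positioned at z = -5, looking at the origin,
-- with a 90 degree field of view and +y as "up". Applied to a resolution and a pixel position,
-- e.g. exampleCamera (640, 480) (320, 240), it yields the single ray for that pixel.
exampleCamera :: Camera
exampleCamera = standardCamera (V3 0 0 (-5)) vZero (pi / 2) (V3 0 1 0)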
randomPointsInRadius :: RandomGen g => g -> Double -> [(Double, Double)]
randomPointsInRadius gin r = filter isInRadius $ unfoldr (Just . genPoint) gin
  where
    genPoint g =
      let (x, g') = randomR (-r, r) g in
        let (y, g'') = randomR (-r, r) g' in
          ((x, y), g'')
    !rSq = r * r
    isInRadius (x, y) = x * x + y * y <= rSq

-- | A camera with a depth-of-field effect; it requires too many rays to converge in practice (in my tests).
dofCamera :: Double -> Double -> Int -> Point -> Point -> Double -> Vector3 -> Camera
dofCamera focalDepth focalRadius rayCount from to th vUp (width, height) (i, j) = rays
  where
    -- this part is shared for all pixels
    camera = standardCamera from to th vUp (width, height)
    v = normalize vUp
    !forward = directionFromTo from to
    !right = forward >< v
    !up = right >< forward
    focalPlane = plane (from <+> focalDepth **> forward) from
    -- this part is different for each pixel
    baseRay = head $ camera (i, j) -- (V3 0 0 0, V3 .8 -.4 -.4)
    seed = width * j + i
    gen = mkStdGen seed
    -- where the ray intersects the focal plane
    -- if there is no base ray the FOV is >= 90deg, which is not allowed
    focalPoint = from <+> fst (head $ focalPlane baseRay) **> snd baseRay
    offset (dx, dy) =
      let origin = from <+> (dx **> right) <+> (dy **> up)
      in (origin, directionFromTo origin focalPoint)
    rays = map offset $ take rayCount $ randomPointsInRadius gen focalRadius

vectorAverage :: Traversable t => t Vector3 -> Vector3
vectorAverage vs = mapVector (/ fromIntegral (length vs)) $ foldl' (<+>) vZero vs

-- | Renders the scene.
render :: Int -> Camera -> Scene -> (Int, Int) -> [((Int, Int), RGB)]
render d cam scene size@(w, h) =
  map (\px -> (px, vectorAverage $ map (\vo -> renderRay d vo scene vo) $ camera px)) pixels `using` strat
  where
    camera = cam size
    pixels = getPixels size
    -- Parallelism is surprisingly easy to achieve here. We just need an evaluation strategy that
    -- evaluates our RGB vectors to WHNF; since the constructor arguments are strict, they will be fully evaluated.
    -- We evaluate the pixel tuple too, for good measure.
    -- Then we just wrap it in parListChunk (the 128 chunks are somewhat arbitrary, I haven't really tested that).
    !chunkSize = max 64 ((w * h) `div` 128)
    strat = parListChunk chunkSize (evalTuple2 (evalTuple2 rseq rseq) rseq)

-- | Be especially careful when trying to convert to a bounded type; otherwise overflow may occur before the clamp.
clamp :: Ord a => a -> a -> a -> a
clamp lb up val
  | lb > up = error "lower bound greater than upper bound"
  | val < lb = lb
  | val > up = up
  | otherwise = val

-- | Converts an RGB value to a JuicyPixels pixel.
rgbToPixel :: RGB -> PixelRGB8
rgbToPixel rgb =
  let (V3 r g b) = mapVector (* 255) rgb
  in PixelRGB8 (rgbClampRound r) (rgbClampRound g) (rgbClampRound b)
  where
    rgbClampRound v
      | isNaN v = 255
      -- clamp the double value first to avoid overflow with bounded types
      | otherwise = round $ clamp 0 255 v

-- Uses h - y for the y coordinate because the renderer's rows count up from the bottom,
-- while JuicyPixels' y coordinate starts at the top left.
renderToImage :: (Int, Int) -> [((Int, Int), RGB)] -> Image PixelRGB8
renderToImage (w, h) rs = generateImage (\x y -> M.findWithDefault (error "pixel out of bounds") (x, h - y) m) w h
  where m = M.fromDistinctAscList (map (bimap (first (subtract 1)) rgbToPixel) rs)

hWriteImageAsPng :: (PngSavable a) => Handle -> Image a -> IO ()
hWriteImageAsPng h = B.hPut h . encodePng
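-- Illustrative checks of the 8-bit conversion (input values made up): components are scaled by
-- 255, clamped into 0..255 and rounded; NaN components are mapped to 255.
examplePixels :: [PixelRGB8]
examplePixels =
  [ rgbToPixel (V3 0.5 0.0 1.0)     -- PixelRGB8 128 0 255
  , rgbToPixel (V3 2.0 (-1.0) 0.2)  -- PixelRGB8 255 0 51, out-of-range values clamped
  , rgbToPixel (V3 (0 / 0) 0.0 0.0) -- PixelRGB8 255 0 0, NaN mapped to 255
  ]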
-- | The main rendering function: renders the given scene with the given camera,
-- | tracing for each pixel at most the given number of rays (which should be at least 1).
-- | Writes the result as a PNG to the given handle, which should be open in binary mode.
hRenderAndWrite :: Scene -> Camera -> (Int, Int) -> Int -> Handle -> IO ()
hRenderAndWrite sc cam size b h = do
  r <- do
    putStrLn "started rendering..."
    return $ render b cam sc size
  -- I really wish I knew how to sequence non-IO actions to get an actual progress indicator, oh well...
  img <- do
    putStrLn "started converting to image..."
    return $ renderToImage size r
  putStrLn "started writing file..."
  hWriteImageAsPng h img
  putStrLn "done."

{- TTEW -}
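-- Illustrative end-to-end usage. Everything below (resolution, recursion depth, light position,
-- scene constants) is made up for demonstration: a single matte red sphere lit by one white point
-- light, rendered at 320x240 with a recursion depth of 3, seen through the exampleCamera defined
-- above. The resulting image can be written out with hWriteImageAsPng given a handle opened in
-- binary mode, or the whole pipeline can be run with hRenderAndWrite instead.
exampleImage :: Image PixelRGB8
exampleImage = renderToImage size (render 3 exampleCamera exampleScene size)
  where
    size = (320, 240)
    whiteLight = Light
      { lightBeams = pointBeams (V3 3 3 (-3))
      , iSpecular = V3 1 1 1
      , iDiffuse = V3 1 1 1
      }
    exampleScene = Scene
      { lights = [whiteLight]
      , objects = [(exampleMatteRed, sphere vZero 1)]
      , iAmbient = V3 0.1 0.1 0.1
      , iEmissive = V3 1 1 1
      , backgroundColor = V3 0.05 0.05 0.05
      }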