// CommonMaterial.hlsl
#ifndef UNITY_COMMON_MATERIAL_INCLUDED
#define UNITY_COMMON_MATERIAL_INCLUDED

//-----------------------------------------------------------------------------
// Define constants
//-----------------------------------------------------------------------------

// Default dielectric specular reflectance at normal incidence (F0).
#define DEFAULT_SPECULAR_VALUE 0.04

// The following constants are used for clear coat properties that can't be stored in the GBuffer (with the Lit shader).
#define CLEAR_COAT_IOR 1.5
#define CLEAR_COAT_IETA (1.0 / CLEAR_COAT_IOR) // IETA is the inverse eta, i.e. the ratio of IOR of the two interfaces
#define CLEAR_COAT_F0 0.04 // IORToFresnel0(CLEAR_COAT_IOR)
#define CLEAR_COAT_ROUGHNESS 0.01
#define CLEAR_COAT_PERCEPTUAL_SMOOTHNESS RoughnessToPerceptualSmoothness(CLEAR_COAT_ROUGHNESS)
#define CLEAR_COAT_PERCEPTUAL_ROUGHNESS RoughnessToPerceptualRoughness(CLEAR_COAT_ROUGHNESS)

//-----------------------------------------------------------------------------
// Helper functions for roughness
//-----------------------------------------------------------------------------
  17. real PerceptualRoughnessToRoughness(real perceptualRoughness)
  18. {
  19. return perceptualRoughness * perceptualRoughness;
  20. }
  21. real RoughnessToPerceptualRoughness(real roughness)
  22. {
  23. return sqrt(roughness);
  24. }
  25. real RoughnessToPerceptualSmoothness(real roughness)
  26. {
  27. return 1.0 - sqrt(roughness);
  28. }
  29. real PerceptualSmoothnessToRoughness(real perceptualSmoothness)
  30. {
  31. return (1.0 - perceptualSmoothness) * (1.0 - perceptualSmoothness);
  32. }
  33. real PerceptualSmoothnessToPerceptualRoughness(real perceptualSmoothness)
  34. {
  35. return (1.0 - perceptualSmoothness);
  36. }
// Beckmann to GGX roughness "conversions":
//
// As also noted for NormalVariance in this file, Beckmann microfacet models use a Gaussian distribution of slopes
// and the roughness parameter absorbs constants in the canonical Gaussian formula and is thus not exactly the variance.
// The relationship is:
//
// roughnessBeckmann^2 = 2 variance (where variance is usually denoted sigma^2 but some comp gfx papers use sigma for
// variance or even sigma for roughness itself.)
//
// Microfacet BRDF models with a GGX NDF imply a Cauchy distribution of slopes (also corresponds to the distribution
// of slopes on an ellipsoid). Cauchy distributions don't have second moments, which precludes having a variance,
// but chopping the far tails of GGX and keeping 94% of the mass yields a distribution with a defined variance where
// we can then relate the roughness of GGX to a variance (see Ray Tracing Gems p153 - the reference is wrong though,
// the Conty paper doesn't mention this at all, but it can be found in stats using quantiles):
//
// roughnessGGX^2 = variance / 2
//
// From the two previous, if we want roughly comparable variances of slopes between a Beckmann and a GGX NDF, we can
// equate the variances and get a conversion of their roughnesses:
//
// 2 * roughnessGGX^2 = roughnessBeckmann^2 / 2 <==>
// 4 * roughnessGGX^2 = roughnessBeckmann^2 <==>
// 2 * roughnessGGX = roughnessBeckmann
//
// (Note that the Ray Tracing Gems paper makes an error on p154 writing sqrt(2) * roughnessGGX = roughnessBeckmann;
// their validation study using ray tracing and LEADR - which looks good - is for the *variance to GGX* roughness mapping,
// not the Beckmann to GGX roughness "conversion")
real BeckmannRoughnessToGGXRoughness(real roughnessBeckmann)
{
    return 0.5 * roughnessBeckmann;
}
  68. real PerceptualRoughnessBeckmannToGGX(real perceptualRoughnessBeckmann)
  69. {
  70. //sqrt(a_ggx) = sqrt(0.5) sqrt(a_beckmann)
  71. return sqrt(0.5) * perceptualRoughnessBeckmann;
  72. }
  73. real GGXRoughnessToBeckmannRoughness(real roughnessGGX)
  74. {
  75. return 2.0 * roughnessGGX;
  76. }
  77. real PerceptualRoughnessToPerceptualSmoothness(real perceptualRoughness)
  78. {
  79. return (1.0 - perceptualRoughness);
  80. }
  81. // WARNING: this has been deprecated, and should not be used!
  82. // Using roughness values of 0 leads to INFs and NANs. The only sensible place to use the roughness
  83. // value of 0 is IBL, so we do not modify the perceptual roughness which is used to select the MIP map level.
  84. // Note: making the constant too small results in aliasing.
  85. real ClampRoughnessForAnalyticalLights(real roughness)
  86. {
  87. return max(roughness, 1.0 / 1024.0);
  88. }
  89. // Given that the GGX model is invalid for a roughness of 0.0. This values have been experimentally evaluated to be the limit for the roughness
  90. // for integration.
  91. real ClampRoughnessForRaytracing(real roughness)
  92. {
  93. return max(roughness, 0.001225);
  94. }
  95. real ClampPerceptualRoughnessForRaytracing(real perceptualRoughness)
  96. {
  97. return max(perceptualRoughness, 0.035);
  98. }
  99. void ConvertValueAnisotropyToValueTB(real value, real anisotropy, out real valueT, out real valueB)
  100. {
  101. // Use the parametrization of Sony Imageworks.
  102. // Ref: Revisiting Physically Based Shading at Imageworks, p. 15.
  103. valueT = value * (1 + anisotropy);
  104. valueB = value * (1 - anisotropy);
  105. }
  106. void ConvertAnisotropyToRoughness(real perceptualRoughness, real anisotropy, out real roughnessT, out real roughnessB)
  107. {
  108. real roughness = PerceptualRoughnessToRoughness(perceptualRoughness);
  109. ConvertValueAnisotropyToValueTB(roughness, anisotropy, roughnessT, roughnessB);
  110. }
  111. void ConvertRoughnessTAndAnisotropyToRoughness(real roughnessT, real anisotropy, out real roughness)
  112. {
  113. roughness = roughnessT / (1 + anisotropy);
  114. }
  115. void ConvertRoughnessToAnisotropy(real roughnessT, real roughnessB, out real anisotropy)
  116. {
  117. anisotropy = ((roughnessT - roughnessB) / max(roughnessT + roughnessB, 0.0001));
  118. }
  119. // WARNING: this has been deprecated, and should not be used!
  120. // Same as ConvertAnisotropyToRoughness but
  121. // roughnessT and roughnessB are clamped, and are meant to be used with punctual and directional lights.
  122. void ConvertAnisotropyToClampRoughness(real perceptualRoughness, real anisotropy, out real roughnessT, out real roughnessB)
  123. {
  124. ConvertAnisotropyToRoughness(perceptualRoughness, anisotropy, roughnessT, roughnessB);
  125. roughnessT = ClampRoughnessForAnalyticalLights(roughnessT);
  126. roughnessB = ClampRoughnessForAnalyticalLights(roughnessB);
  127. }
  128. // Use with stack BRDF (clear coat / coat) - This only used same equation to convert from Blinn-Phong spec power to Beckmann roughness
  129. real RoughnessToVariance(real roughness)
  130. {
  131. return 2.0 / Sq(roughness) - 2.0;
  132. }
  133. real VarianceToRoughness(real variance)
  134. {
  135. return sqrt(2.0 / (variance + 2.0));
  136. }
// Normal Map Filtering - This must match HDRP\Editor\AssetProcessors\NormalMapFilteringTexturePostprocessor.cs - highestVarianceAllowed (TODO: Move in core)
#define NORMALMAP_HIGHEST_VARIANCE 0.03125

// Decode a variance value that was stored normalized (against the highest variance
// the asset postprocessor allows) in the w channel of the normal map.
float DecodeVariance(float gradientW)
{
    return gradientW * NORMALMAP_HIGHEST_VARIANCE;
}
  143. // Return modified perceptualSmoothness based on provided variance (get from GeometricNormalVariance + TextureNormalVariance)
  144. float NormalFiltering(float perceptualSmoothness, float variance, float threshold)
  145. {
  146. float roughness = PerceptualSmoothnessToRoughness(perceptualSmoothness);
  147. // Ref: Geometry into Shading - http://graphics.pixar.com/library/BumpRoughness/paper.pdf - equation (3)
  148. float squaredRoughness = saturate(roughness * roughness + min(2.0 * variance, threshold * threshold)); // threshold can be really low, square the value for easier control
  149. return RoughnessToPerceptualSmoothness(sqrt(squaredRoughness));
  150. }
  151. // Reference: Error Reduction and Simplification for Shading Anti-Aliasing
  152. // Specular antialiasing for geometry-induced normal (and NDF) variations: Tokuyoshi / Kaplanyan et al.'s method.
  153. // This is the deferred approximation, which works reasonably well so we keep it for forward too for now.
  154. // screenSpaceVariance should be at most 0.5^2 = 0.25, as that corresponds to considering
  155. // a gaussian pixel reconstruction kernel with a standard deviation of 0.5 of a pixel, thus 2 sigma covering the whole pixel.
  156. float GeometricNormalVariance(float3 geometricNormalWS, float screenSpaceVariance)
  157. {
  158. float3 deltaU = ddx(geometricNormalWS);
  159. float3 deltaV = ddy(geometricNormalWS);
  160. return screenSpaceVariance * (dot(deltaU, deltaU) + dot(deltaV, deltaV));
  161. }
  162. // Return modified perceptualSmoothness
  163. float GeometricNormalFiltering(float perceptualSmoothness, float3 geometricNormalWS, float screenSpaceVariance, float threshold)
  164. {
  165. float variance = GeometricNormalVariance(geometricNormalWS, screenSpaceVariance);
  166. return NormalFiltering(perceptualSmoothness, variance, threshold);
  167. }
// Normal map filtering based on The Order : 1886 SIGGRAPH course notes implementation.
// Basically Toksvig with an intermediate single vMF lobe induced dispersion (Han et al. 2007)
//
// This returns 2 times the variance of the induced "mesoNDF" lobe (an NDF induced from a section of
// the normal map) from the level 0 mip normals covered by the "current texel".
//
// avgNormalLength gives the dispersion information for the covered normals.
//
// Note that hw filtering on the normal map should be trilinear to be conservative, while anisotropic
// risks underfiltering. Could also compute the average normal on the fly with a proper normal map format,
// like Toksvig.
float TextureNormalVariance(float avgNormalLength)
{
    float variance = 0.0;

    // A shortened average normal (< 1) means the covered normals disagree; derive a variance from it.
    if (avgNormalLength < 1.0)
    {
        float avgNormLen2 = avgNormalLength * avgNormalLength;
        // vMF lobe concentration estimated from the average normal length (Han et al. 2007).
        float kappa = (3.0 * avgNormalLength - avgNormalLength * avgNormLen2) / (1.0 - avgNormLen2);

        // Ref: Frequency Domain Normal Map Filtering - http://www.cs.columbia.edu/cg/normalmap/normalmap.pdf (equation 21)
        // The relationship between the standard deviation of a Gaussian distribution and the roughness parameter of a Beckmann distribution
        // is roughness^2 = 2 variance (note: variance is sigma^2)
        // (Ref: Filtering Distributions of Normals for Shading Antialiasing - Equation just after (14))
        // Relationship between gaussian lobe and vMF lobe is 2 * variance = 1 / (2 * kappa) = roughness^2
        // (Equation 36 of Normal map filtering based on The Order : 1886 SIGGRAPH course notes implementation).
        // So to get variance we must use variance = 1 / (4 * kappa)
        variance = 0.25 / kappa;
    }

    return variance;
}
  197. float TextureNormalFiltering(float perceptualSmoothness, float avgNormalLength, float threshold)
  198. {
  199. float variance = TextureNormalVariance(avgNormalLength);
  200. return NormalFiltering(perceptualSmoothness, variance, threshold);
  201. }
  202. // ----------------------------------------------------------------------------
  203. // Helper for Disney parametrization
  204. // ----------------------------------------------------------------------------
  205. float3 ComputeDiffuseColor(float3 baseColor, float metallic)
  206. {
  207. return baseColor * (1.0 - metallic);
  208. }
  209. float3 ComputeFresnel0(float3 baseColor, float metallic, float dielectricF0)
  210. {
  211. return lerp(dielectricF0.xxx, baseColor, metallic);
  212. }
// ----------------------------------------------------------------------------
// Helper for normal blending
// ----------------------------------------------------------------------------

// ref https://www.gamedev.net/topic/678043-how-to-blend-world-space-normals/#entry5287707
// assume compositing in world space
// Note: Using vtxNormal = real3(0, 0, 1) gives the BlendNormalRNM formulation.
// TODO: Untested
// NOTE(review): the quaternion construction divides by sqrt(2 * (dot(vtxNormal, n2) + 1)),
// which goes to 0 when n2 approaches -vtxNormal; presumably callers never hit that case - confirm.
real3 BlendNormalWorldspaceRNM(real3 n1, real3 n2, real3 vtxNormal)
{
    // Build the shortest-arc quaternion rotating vtxNormal onto n2.
    real4 q = real4(cross(vtxNormal, n2), dot(vtxNormal, n2) + 1.0) / sqrt(2.0 * (dot(vtxNormal, n2) + 1));

    // Rotate the normal by that quaternion (expanded quaternion-vector rotation).
    return n1 * (q.w * q.w - dot(q.xyz, q.xyz)) + 2 * q.xyz * dot(q.xyz, n1) + 2 * q.w * cross(q.xyz, n1);
}
  227. // ref http://blog.selfshadow.com/publications/blending-in-detail/
  228. // ref https://gist.github.com/selfshadow/8048308
  229. // Reoriented Normal Mapping
  230. // Blending when n1 and n2 are already 'unpacked' and normalised
  231. // assume compositing in tangent space
  232. real3 BlendNormalRNM(real3 n1, real3 n2)
  233. {
  234. real3 t = n1.xyz + real3(0.0, 0.0, 1.0);
  235. real3 u = n2.xyz * real3(-1.0, -1.0, 1.0);
  236. real3 r = (t / t.z) * dot(t, u) - u;
  237. return r;
  238. }
  239. // assume compositing in tangent space
  240. real3 BlendNormal(real3 n1, real3 n2)
  241. {
  242. return normalize(real3(n1.xy * n2.z + n2.xy * n1.z, n1.z * n2.z));
  243. }
  244. // ----------------------------------------------------------------------------
  245. // Helper for triplanar
  246. // ----------------------------------------------------------------------------
  247. // Ref: http://http.developer.nvidia.com/GPUGems3/gpugems3_ch01.html / http://www.slideshare.net/icastano/cascades-demo-secrets
  248. real3 ComputeTriplanarWeights(real3 normal)
  249. {
  250. // Determine the blend weights for the 3 planar projections.
  251. real3 blendWeights = abs(normal);
  252. // Tighten up the blending zone
  253. blendWeights = (blendWeights - 0.2);
  254. blendWeights = blendWeights * blendWeights * blendWeights; // pow(blendWeights, 3);
  255. // Force weights to sum to 1.0 (very important!)
  256. blendWeights = max(blendWeights, real3(0.0, 0.0, 0.0));
  257. blendWeights /= dot(blendWeights, 1.0);
  258. return blendWeights;
  259. }
  260. // Planar/Triplanar convention for Unity in world space
  261. void GetTriplanarCoordinate(float3 position, out float2 uvXZ, out float2 uvXY, out float2 uvZY)
  262. {
  263. // Caution: This must follow the same rule as what is use for SurfaceGradient triplanar
  264. // TODO: Currently the normal mapping looks wrong without SURFACE_GRADIENT option because we don't handle corretly the tangent space
  265. uvXZ = float2(position.x, position.z);
  266. uvXY = float2(position.x, position.y);
  267. uvZY = float2(position.z, position.y);
  268. }
  269. // ----------------------------------------------------------------------------
  270. // Helper for detail map operation
  271. // ----------------------------------------------------------------------------
  272. real LerpWhiteTo(real b, real t)
  273. {
  274. real oneMinusT = 1.0 - t;
  275. return oneMinusT + b * t;
  276. }
  277. real3 LerpWhiteTo(real3 b, real t)
  278. {
  279. real oneMinusT = 1.0 - t;
  280. return real3(oneMinusT, oneMinusT, oneMinusT) + b * t;
  281. }
  282. #endif // UNITY_COMMON_MATERIAL_INCLUDED