// PMREMGenerator.js
/**
 * @author Emmett Lalish / elalish
 *
 * This class generates a Prefiltered, Mipmapped Radiance Environment Map
 * (PMREM) from a cubeMap environment texture. This allows different levels of
 * blur to be quickly accessed based on material roughness. It is packed into a
 * special CubeUV format that allows us to perform custom interpolation so that
 * we can support nonlinear formats such as RGBE. Unlike a traditional mipmap
 * chain, it only goes down to the LOD_MIN level (above), and then creates extra
 * even more filtered 'mips' at the same LOD_MIN resolution, associated with
 * higher roughness levels. In this way we maintain resolution to smoothly
 * interpolate diffuse lighting while limiting sampling computation.
 */
  14. THREE.PMREMGenerator = ( function () {
  15. const LOD_MIN = 4;
  16. const LOD_MAX = 8;
  17. const SIZE_MAX = Math.pow( 2, LOD_MAX );
  18. // The standard deviations (radians) associated with the extra mips. These are
  19. // chosen to approximate a Trowbridge-Reitz distribution function times the
  20. // geometric shadowing function.
  21. const EXTRA_LOD_SIGMA = [ 0.125, 0.215, 0.35, 0.446, 0.526, 0.582 ];
  22. const TOTAL_LODS = LOD_MAX - LOD_MIN + 1 + EXTRA_LOD_SIGMA.length;
  23. const ENCODINGS = {
  24. [ THREE.LinearEncoding ]: 0,
  25. [ THREE.sRGBEncoding ]: 1,
  26. [ THREE.RGBEEncoding ]: 2,
  27. [ THREE.RGBM7Encoding ]: 3,
  28. [ THREE.RGBM16Encoding ]: 4,
  29. [ THREE.RGBDEncoding ]: 5,
  30. [ THREE.GammaEncoding ]: 6
  31. };
  32. var _flatCamera = new THREE.OrthographicCamera();
  33. var _blurMaterial = getShader();
  34. var { _lodPlanes, _sizeLods, _sigmas } = createPlanes();
  35. var _pingPongRenderTarget = null;
  36. // Golden Ratio
  37. const PHI = ( 1 + Math.sqrt( 5 ) ) / 2;
  38. const INV_PHI = 1 / PHI;
  39. // Vertices of a dodecahedron (except the opposites, which represent the
  40. // same axis), used as axis directions evenly spread on a sphere.
  41. var _axisDirections = [
  42. new THREE.Vector3( 1, 1, 1 ),
  43. new THREE.Vector3( - 1, 1, 1 ),
  44. new THREE.Vector3( 1, 1, - 1 ),
  45. new THREE.Vector3( - 1, 1, - 1 ),
  46. new THREE.Vector3( 0, PHI, - INV_PHI ),
  47. new THREE.Vector3( INV_PHI, 0, PHI ),
  48. new THREE.Vector3( - INV_PHI, 0, PHI ),
  49. new THREE.Vector3( PHI, INV_PHI, 0 ),
  50. new THREE.Vector3( - PHI, INV_PHI, 0 ) ];
  51. var PMREMGenerator = function ( renderer ) {
  52. this.renderer = renderer;
  53. };
  54. PMREMGenerator.prototype = {
  55. constructor: PMREMGenerator,
  56. /**
  57. * Generates a PMREM from a supplied Scene, which can be faster than using an
  58. * image if networking bandwidth is low. Optional near and far planes ensure
  59. * the scene is rendered in its entirety (the cubeCamera is placed at the
  60. * origin).
  61. */
  62. fromScene: function ( scene, near = 0.1, far = 100 ) {
  63. const cubeUVRenderTarget = allocateTargets();
  64. sceneToCubeUV( scene, near, far, cubeUVRenderTarget );
  65. applyPMREM( cubeUVRenderTarget );
  66. _pingPongRenderTarget.dispose();
  67. return cubeUVRenderTarget;
  68. },
  69. /**
  70. * Generates a PMREM from an equirectangular texture, which can be either LDR
  71. * (RGBFormat) or HDR (RGBEFormat).
  72. */
  73. fromEquirectangular: function ( equirectangular ) {
  74. equirectangular.magFilter = THREE.NearestFilter;
  75. equirectangular.minFilter = THREE.NearestFilter;
  76. equirectangular.generateMipmaps = false;
  77. const cubeUVRenderTarget = allocateTargets( equirectangular );
  78. equirectangularToCubeUV( equirectangular, cubeUVRenderTarget );
  79. applyPMREM( cubeUVRenderTarget );
  80. _pingPongRenderTarget.dispose();
  81. return cubeUVRenderTarget;
  82. },
  83. };
  84. function createPlanes() {
  85. var _lodPlanes = [];
  86. var _sizeLods = [];
  87. var _sigmas = [];
  88. let lod = LOD_MAX;
  89. for ( let i = 0; i < TOTAL_LODS; i ++ ) {
  90. const sizeLod = Math.pow( 2, lod );
  91. _sizeLods.push( sizeLod );
  92. let sigma = 1.0 / sizeLod;
  93. if ( i > LOD_MAX - LOD_MIN ) {
  94. sigma = EXTRA_LOD_SIGMA[ i - LOD_MAX + LOD_MIN - 1 ];
  95. } else if ( i == 0 ) {
  96. sigma = 0;
  97. }
  98. _sigmas.push( sigma );
  99. const texelSize = 1.0 / ( sizeLod - 1 );
  100. const min = - texelSize / 2;
  101. const max = 1 + texelSize / 2;
  102. const uv1 = [ min, min, max, min, max, max, min, min, max, max, min, max ];
  103. const cubeFaces = 6;
  104. const vertices = 6;
  105. const positionSize = 3;
  106. const uvSize = 2;
  107. const faceIndexSize = 1;
  108. const position = new Float32Array( positionSize * vertices * cubeFaces );
  109. const uv = new Float32Array( uvSize * vertices * cubeFaces );
  110. const faceIndex = new Float32Array( faceIndexSize * vertices * cubeFaces );
  111. for ( let face = 0; face < cubeFaces; face ++ ) {
  112. const x = ( face % 3 ) * 2 / 3 - 1;
  113. const y = face > 2 ? 0 : - 1;
  114. const coordinates = [
  115. [ x, y, 0 ],
  116. [ x + 2 / 3, y, 0 ],
  117. [ x + 2 / 3, y + 1, 0 ],
  118. [ x, y, 0 ],
  119. [ x + 2 / 3, y + 1, 0 ],
  120. [ x, y + 1, 0 ]
  121. ];
  122. position.set( Array.concat( ...coordinates ),
  123. positionSize * vertices * face );
  124. uv.set( uv1, uvSize * vertices * face );
  125. const fill = [ face, face, face, face, face, face ];
  126. faceIndex.set( fill, faceIndexSize * vertices * face );
  127. }
  128. const planes = new THREE.BufferGeometry();
  129. planes.addAttribute(
  130. 'position', new THREE.BufferAttribute( position, positionSize ) );
  131. planes.addAttribute( 'uv', new THREE.BufferAttribute( uv, uvSize ) );
  132. planes.addAttribute(
  133. 'faceIndex', new THREE.BufferAttribute( faceIndex, faceIndexSize ) );
  134. _lodPlanes.push( planes );
  135. if ( lod > LOD_MIN ) {
  136. lod --;
  137. }
  138. }
  139. return { _lodPlanes, _sizeLods, _sigmas };
  140. }
  141. function allocateTargets( equirectangular ) {
  142. const params = {
  143. magFilter: THREE.NearestFilter,
  144. minFilter: THREE.NearestFilter,
  145. generateMipmaps: false,
  146. type: equirectangular ? equirectangular.type : THREE.UnsignedByteType,
  147. format: equirectangular ? equirectangular.format : THREE.RGBEFormat,
  148. encoding: equirectangular ? equirectangular.encoding : THREE.RGBEEncoding,
  149. depthBuffer: false,
  150. stencilBuffer: false
  151. };
  152. const cubeUVRenderTarget = createRenderTarget(
  153. { ...params, depthBuffer: ( equirectangular ? false : true ) } );
  154. _pingPongRenderTarget = createRenderTarget( params );
  155. return cubeUVRenderTarget;
  156. }
  157. function sceneToCubeUV(
  158. scene, near, far,
  159. cubeUVRenderTarget ) {
  160. const fov = 90;
  161. const aspect = 1;
  162. const cubeCamera = new THREE.PerspectiveCamera( fov, aspect, near, far );
  163. const upSign = [ 1, 1, 1, 1, - 1, 1 ];
  164. const forwardSign = [ 1, 1, - 1, - 1, - 1, 1 ];
  165. const gammaOutput = this.renderer.gammaOutput;
  166. const toneMapping = this.renderer.toneMapping;
  167. const toneMappingExposure = this.renderer.toneMappingExposure;
  168. this.renderer.toneMapping = THREE.LinearToneMapping;
  169. this.renderer.toneMappingExposure = 1.0;
  170. this.renderer.gammaOutput = false;
  171. scene.scale.z *= - 1;
  172. this.renderer.setRenderTarget( cubeUVRenderTarget );
  173. for ( let i = 0; i < 6; i ++ ) {
  174. const col = i % 3;
  175. if ( col == 0 ) {
  176. cubeCamera.up.set( 0, upSign[ i ], 0 );
  177. cubeCamera.lookAt( forwardSign[ i ], 0, 0 );
  178. } else if ( col == 1 ) {
  179. cubeCamera.up.set( 0, 0, upSign[ i ] );
  180. cubeCamera.lookAt( 0, forwardSign[ i ], 0 );
  181. } else {
  182. cubeCamera.up.set( 0, upSign[ i ], 0 );
  183. cubeCamera.lookAt( 0, 0, forwardSign[ i ] );
  184. }
  185. setViewport(
  186. col * SIZE_MAX, i > 2 ? SIZE_MAX : 0, SIZE_MAX, SIZE_MAX );
  187. this.renderer.render( scene, cubeCamera );
  188. }
  189. this.renderer.toneMapping = toneMapping;
  190. this.renderer.toneMappingExposure = toneMappingExposure;
  191. this.renderer.gammaOutput = gammaOutput;
  192. scene.scale.z *= - 1;
  193. }
  194. function equirectangularToCubeUV(
  195. equirectangular, cubeUVRenderTarget ) {
  196. const scene = new THREE.Scene();
  197. scene.add( new THREE.Mesh( _lodPlanes[ 0 ], _blurMaterial ) );
  198. const uniforms = _blurMaterial.uniforms;
  199. uniforms[ 'envMap' ].value = equirectangular;
  200. uniforms[ 'copyEquirectangular' ].value = true;
  201. uniforms[ 'texelSize' ].value.set(
  202. 1.0 / equirectangular.image.width, 1.0 / equirectangular.image.height );
  203. uniforms[ 'inputEncoding' ].value = ENCODINGS[ equirectangular.encoding ];
  204. uniforms[ 'outputEncoding' ].value = ENCODINGS[ equirectangular.encoding ];
  205. this.renderer.setRenderTarget( cubeUVRenderTarget );
  206. setViewport( 0, 0, 3 * SIZE_MAX, 2 * SIZE_MAX );
  207. this.renderer.render( scene, _flatCamera );
  208. }
  209. function createRenderTarget( params ) {
  210. const cubeUVRenderTarget =
  211. new THREE.WebGLRenderTarget( 3 * SIZE_MAX, 3 * SIZE_MAX, params );
  212. cubeUVRenderTarget.texture.mapping = THREE.CubeUVReflectionMapping;
  213. cubeUVRenderTarget.texture.name = 'PMREM.cubeUv';
  214. return cubeUVRenderTarget;
  215. }
  216. function setViewport( x, y, width, height ) {
  217. const dpr = this.threeRenderer.getPixelRatio();
  218. this.threeRenderer.setViewport( x / dpr, y / dpr, width / dpr, height / dpr );
  219. }
  220. function applyPMREM( cubeUVRenderTarget ) {
  221. for ( let i = 1; i < TOTAL_LODS; i ++ ) {
  222. const sigma = Math.sqrt(
  223. _sigmas[ i ] * _sigmas[ i ] -
  224. _sigmas[ i - 1 ] * _sigmas[ i - 1 ] );
  225. const poleAxis =
  226. _axisDirections[ ( i - 1 ) % _axisDirections.length ];
  227. blur( cubeUVRenderTarget, i - 1, i, sigma, poleAxis );
  228. }
  229. }
  230. /**
  231. * This is a two-pass Gaussian blur for a cubemap. Normally this is done
  232. * vertically and horizontally, but this breaks down on a cube. Here we apply
  233. * the blur latitudinally (around the poles), and then longitudinally (towards
  234. * the poles) to approximate the orthogonally-separable blur. It is least
  235. * accurate at the poles, but still does a decent job.
  236. */
  237. function blur(
  238. cubeUVRenderTarget, lodIn, lodOut,
  239. sigma, poleAxis ) {
  240. halfBlur(
  241. cubeUVRenderTarget,
  242. _pingPongRenderTarget,
  243. lodIn,
  244. lodOut,
  245. sigma,
  246. 'latitudinal',
  247. poleAxis );
  248. halfBlur(
  249. _pingPongRenderTarget,
  250. cubeUVRenderTarget,
  251. lodOut,
  252. lodOut,
  253. sigma,
  254. 'longitudinal',
  255. poleAxis );
  256. }
  257. function halfBlur(
  258. targetIn, targetOut, lodIn,
  259. lodOut, sigmaRadians, direction,
  260. poleAxis ) {
  261. if ( direction !== 'latitudinal' && direction !== 'longitudinal' ) {
  262. console.error(
  263. 'blur direction must be either latitudinal or longitudinal!' );
  264. }
  265. // The maximum length of the blur for loop, chosen to equal the number needed
  266. // for GENERATED_SIGMA. Smaller _sigmas will use fewer samples and exit early,
  267. // but not recompile the shader.
  268. const MAX_SAMPLES = 20;
  269. // Number of standard deviations at which to cut off the discrete approximation.
  270. const STANDARD_DEVIATIONS = 3;
  271. const blurScene = new THREE.Scene();
  272. blurScene.add( new THREE.Mesh( _lodPlanes[ lodOut ], _blurMaterial ) );
  273. const blurUniforms = _blurMaterial.uniforms;
  274. const pixels = _sizeLods[ lodIn ] - 1;
  275. const radiansPerPixel = isFinite( sigmaRadians ) ? Math.PI / ( 2 * pixels ) : 2 * Math.PI / ( 2 * MAX_SAMPLES - 1 );
  276. const sigmaPixels = sigmaRadians / radiansPerPixel;
  277. const samples = isFinite( sigmaRadians ) ? 1 + Math.floor( STANDARD_DEVIATIONS * sigmaPixels ) : MAX_SAMPLES;
  278. if ( samples > MAX_SAMPLES ) {
  279. console.warn( `sigmaRadians, ${
  280. sigmaRadians}, is too large and will clip, as it requested ${
  281. samples} samples when the maximum is set to ${MAX_SAMPLES}` );
  282. }
  283. let weights = [];
  284. let sum = 0;
  285. for ( let i = 0; i < MAX_SAMPLES; ++ i ) {
  286. const x = i / sigmaPixels;
  287. const weight = Math.exp( - x * x / 2 );
  288. weights.push( weight );
  289. if ( i == 0 ) {
  290. sum += weight;
  291. } else if ( i < samples ) {
  292. sum += 2 * weight;
  293. }
  294. }
  295. weights = weights.map( w => w / sum );
  296. blurUniforms[ 'envMap' ].value = targetIn.texture;
  297. blurUniforms[ 'copyEquirectangular' ].value = false;
  298. blurUniforms[ 'samples' ].value = samples;
  299. blurUniforms[ 'weights' ].value = weights;
  300. blurUniforms[ 'latitudinal' ].value = direction === 'latitudinal';
  301. if ( poleAxis ) {
  302. blurUniforms[ 'poleAxis' ].value = poleAxis;
  303. }
  304. blurUniforms[ 'dTheta' ].value = radiansPerPixel;
  305. blurUniforms[ 'mipInt' ].value = LOD_MAX - lodIn;
  306. blurUniforms[ 'inputEncoding' ].value = ENCODINGS[ targetIn.texture.encoding ];
  307. blurUniforms[ 'outputEncoding' ].value = ENCODINGS[ targetIn.texture.encoding ];
  308. const outputSize = _sizeLods[ lodOut ];
  309. const x = 3 * Math.max( 0, SIZE_MAX - 2 * outputSize );
  310. const y = ( lodOut === 0 ? 0 : 2 * SIZE_MAX ) +
  311. 2 * outputSize *
  312. ( lodOut > LOD_MAX - LOD_MIN ? lodOut - LOD_MAX + LOD_MIN : 0 );
  313. this.renderer.autoClear = false;
  314. this.renderer.setRenderTarget( targetOut );
  315. setViewport( x, y, 3 * outputSize, 2 * outputSize );
  316. this.renderer.render( blurScene, _flatCamera );
  317. }
  318. function getShader( maxSamples ) {
  319. const weights = new Float32Array( maxSamples );
  320. const texelSize = new THREE.Vector2( 1, 1 );
  321. const poleAxis = new THREE.Vector3( 0, 1, 0 );
  322. var shaderMaterial = new THREE.RawShaderMaterial( {
  323. defines: { 'n': maxSamples },
  324. uniforms: {
  325. 'envMap': { value: null },
  326. 'copyEquirectangular': { value: false },
  327. 'texelSize': { value: texelSize },
  328. 'samples': { value: 1 },
  329. 'weights': { value: weights },
  330. 'latitudinal': { value: false },
  331. 'dTheta': { value: 0 },
  332. 'mipInt': { value: 0 },
  333. 'poleAxis': { value: poleAxis },
  334. 'inputEncoding': { value: ENCODINGS[ THREE.LinearEncoding ] },
  335. 'outputEncoding': { value: ENCODINGS[ THREE.LinearEncoding ] }
  336. },
  337. vertexShader: `
  338. precision mediump float;
  339. precision mediump int;
  340. attribute vec3 position;
  341. attribute vec2 uv;
  342. attribute float faceIndex;
  343. varying vec2 vUv;
  344. varying float vFaceIndex;
  345. void main() {
  346. vUv = uv;
  347. vFaceIndex = faceIndex;
  348. gl_Position = vec4( position, 1.0 );
  349. }
  350. `,
  351. fragmentShader: `
  352. precision mediump float;
  353. precision mediump int;
  354. varying vec2 vUv;
  355. varying float vFaceIndex;
  356. uniform sampler2D envMap;
  357. uniform bool copyEquirectangular;
  358. uniform vec2 texelSize;
  359. uniform int samples;
  360. uniform float weights[n];
  361. uniform bool latitudinal;
  362. uniform float dTheta;
  363. uniform float mipInt;
  364. uniform vec3 poleAxis;
  365. uniform int inputEncoding;
  366. uniform int outputEncoding;
  367. #include <encodings_pars_fragment>
  368. vec4 inputTexelToLinear(vec4 value){
  369. if(inputEncoding == 0){
  370. return value;
  371. }else if(inputEncoding == 1){
  372. return sRGBToLinear(value);
  373. }else if(inputEncoding == 2){
  374. return RGBEToLinear(value);
  375. }else if(inputEncoding == 3){
  376. return RGBMToLinear(value, 7.0);
  377. }else if(inputEncoding == 4){
  378. return RGBMToLinear(value, 16.0);
  379. }else if(inputEncoding == 5){
  380. return RGBDToLinear(value, 256.0);
  381. }else{
  382. return GammaToLinear(value, 2.2);
  383. }
  384. }
  385. vec4 linearToOutputTexel(vec4 value){
  386. if(outputEncoding == 0){
  387. return value;
  388. }else if(outputEncoding == 1){
  389. return LinearTosRGB(value);
  390. }else if(outputEncoding == 2){
  391. return LinearToRGBE(value);
  392. }else if(outputEncoding == 3){
  393. return LinearToRGBM(value, 7.0);
  394. }else if(outputEncoding == 4){
  395. return LinearToRGBM(value, 16.0);
  396. }else if(outputEncoding == 5){
  397. return LinearToRGBD(value, 256.0);
  398. }else{
  399. return LinearToGamma(value, 2.2);
  400. }
  401. }
  402. vec4 envMapTexelToLinear(vec4 color) {
  403. return inputTexelToLinear(color);
  404. }
  405. #define ENVMAP_TYPE_CUBE_UV
  406. #include <cube_uv_reflection_fragment>
  407. #define RECIPROCAL_PI 0.31830988618
  408. #define RECIPROCAL_PI2 0.15915494
  409. void main() {
  410. gl_FragColor = vec4(0.0);
  411. outputDirection = getDirection(vUv, vFaceIndex);
  412. if (copyEquirectangular) {
  413. vec3 direction = normalize(outputDirection);
  414. vec2 uv;
  415. uv.y = asin(clamp(direction.y, -1.0, 1.0)) * RECIPROCAL_PI + 0.5;
  416. uv.x = atan(direction.z, direction.x) * RECIPROCAL_PI2 + 0.5;
  417. vec2 f = fract(uv / texelSize - 0.5);
  418. uv -= f * texelSize;
  419. vec3 tl = envMapTexelToLinear(texture2D(envMap, uv)).rgb;
  420. uv.x += texelSize.x;
  421. vec3 tr = envMapTexelToLinear(texture2D(envMap, uv)).rgb;
  422. uv.y += texelSize.y;
  423. vec3 br = envMapTexelToLinear(texture2D(envMap, uv)).rgb;
  424. uv.x -= texelSize.x;
  425. vec3 bl = envMapTexelToLinear(texture2D(envMap, uv)).rgb;
  426. vec3 tm = mix(tl, tr, f.x);
  427. vec3 bm = mix(bl, br, f.x);
  428. gl_FragColor.rgb = mix(tm, bm, f.y);
  429. } else {
  430. for (int i = 0; i < n; i++) {
  431. if (i >= samples)
  432. break;
  433. for (int dir = -1; dir < 2; dir += 2) {
  434. if (i == 0 && dir == 1)
  435. continue;
  436. vec3 axis = latitudinal ? poleAxis : cross(poleAxis, outputDirection);
  437. if (all(equal(axis, vec3(0.0))))
  438. axis = cross(vec3(0.0, 1.0, 0.0), outputDirection);
  439. axis = normalize(axis);
  440. float theta = dTheta * float(dir * i);
  441. float cosTheta = cos(theta);
  442. // Rodrigues' axis-angle rotation
  443. vec3 sampleDirection = outputDirection * cosTheta
  444. + cross(axis, outputDirection) * sin(theta)
  445. + axis * dot(axis, outputDirection) * (1.0 - cosTheta);
  446. gl_FragColor.rgb +=
  447. weights[i] * bilinearCubeUV(envMap, sampleDirection, mipInt);
  448. }
  449. }
  450. }
  451. gl_FragColor = linearToOutputTexel(gl_FragColor);
  452. }
  453. `,
  454. blending: THREE.NoBlending,
  455. depthTest: false,
  456. depthWrite: false
  457. } );
  458. shaderMaterial.type = 'SphericalGaussianBlur';
  459. return shaderMaterial;
  460. }
  461. return PMREMGenerator;
  462. } )();