// PMREMGenerator.js
/**
 * @author Emmett Lalish / elalish
 *
 * This class generates a Prefiltered, Mipmapped Radiance Environment Map
 * (PMREM) from a cubeMap environment texture. This allows different levels of
 * blur to be quickly accessed based on material roughness. It is packed into a
 * special CubeUV format that allows us to perform custom interpolation so that
 * we can support nonlinear formats such as RGBE. Unlike a traditional mipmap
 * chain, it only goes down to the LOD_MIN level (defined below), and then
 * creates extra even more filtered 'mips' at the same LOD_MIN resolution,
 * associated with higher roughness levels. In this way we maintain resolution
 * to smoothly interpolate diffuse lighting while limiting sampling computation.
 */
  14. import {
  15. BufferAttribute,
  16. BufferGeometry,
  17. CubeUVReflectionMapping,
  18. GammaEncoding,
  19. LinearEncoding,
  20. LinearToneMapping,
  21. Mesh,
  22. NearestFilter,
  23. NoBlending,
  24. OrthographicCamera,
  25. PerspectiveCamera,
  26. RGBDEncoding,
  27. RGBEEncoding,
  28. RGBEFormat,
  29. RGBM16Encoding,
  30. RGBM7Encoding,
  31. RawShaderMaterial,
  32. Scene,
  33. UnsignedByteType,
  34. Vector2,
  35. Vector3,
  36. WebGLRenderTarget,
  37. sRGBEncoding
  38. } from "../../../build/three.module.js";
var PMREMGenerator = ( function () {

// Mip range of the CubeUV chain: face resolutions run from 2^LOD_MAX down
// to 2^LOD_MIN; below that, extra equal-size mips hold higher roughnesses.
const LOD_MIN = 4;
const LOD_MAX = 8;
const SIZE_MAX = Math.pow( 2, LOD_MAX );
// The standard deviations (radians) associated with the extra mips. These are
// chosen to approximate a Trowbridge-Reitz distribution function times the
// geometric shadowing function.
const EXTRA_LOD_SIGMA = [ 0.125, 0.215, 0.35, 0.446, 0.526, 0.582 ];
const TOTAL_LODS = LOD_MAX - LOD_MIN + 1 + EXTRA_LOD_SIGMA.length;
// The maximum length of the blur for loop, chosen to equal the number needed
// for GENERATED_SIGMA. Smaller _sigmas will use fewer samples and exit early,
// but not recompile the shader.
const MAX_SAMPLES = 20;
// Maps three.js texture-encoding constants to the integer codes consumed by
// the inputEncoding/outputEncoding uniforms of the blur shader below.
const ENCODINGS = {
[ LinearEncoding ]: 0,
[ sRGBEncoding ]: 1,
[ RGBEEncoding ]: 2,
[ RGBM7Encoding ]: 3,
[ RGBM16Encoding ]: 4,
[ RGBDEncoding ]: 5,
[ GammaEncoding ]: 6
};
// Module-scoped state shared by every PMREMGenerator instance: one camera,
// one material, and one geometry set, reused across generations.
// NOTE: _getShader and _createPlanes are function declarations defined later
// in this IIFE — they are hoisted, so calling them here is safe.
var _flatCamera = new OrthographicCamera();
var _blurMaterial = _getShader( MAX_SAMPLES );
var { _lodPlanes, _sizeLods, _sigmas } = _createPlanes();
// Set per-run: the scratch target by _allocateTargets, the renderer by the
// constructor.
var _pingPongRenderTarget = null;
var _renderer = null;
// Golden Ratio
const PHI = ( 1 + Math.sqrt( 5 ) ) / 2;
const INV_PHI = 1 / PHI;
// Vertices of a dodecahedron (except the opposites, which represent the
// same axis), used as axis directions evenly spread on a sphere.
var _axisDirections = [
new Vector3( 1, 1, 1 ),
new Vector3( - 1, 1, 1 ),
new Vector3( 1, 1, - 1 ),
new Vector3( - 1, 1, - 1 ),
new Vector3( 0, PHI, INV_PHI ),
new Vector3( 0, PHI, - INV_PHI ),
new Vector3( INV_PHI, 0, PHI ),
new Vector3( - INV_PHI, 0, PHI ),
new Vector3( PHI, INV_PHI, 0 ),
new Vector3( - PHI, INV_PHI, 0 ) ];
/**
 * @param {WebGLRenderer} renderer - renderer used for all internal passes.
 */
var PMREMGenerator = function ( renderer ) {

	// Held module-wide so the helper functions below can render through it.
	_renderer = renderer;

};

PMREMGenerator.prototype = {

	constructor: PMREMGenerator,

	/**
	 * Generates a PMREM from a supplied Scene, which can be faster than using an
	 * image if networking bandwidth is low. Optional sigma specifies a blur radius
	 * in radians to be applied to the scene before PMREM generation. Optional near
	 * and far planes ensure the scene is rendered in its entirety (the cubeCamera
	 * is placed at the origin).
	 *
	 * @returns {WebGLRenderTarget} the CubeUV render target; caller owns it.
	 */
	fromScene: function ( scene, sigma = 0, near = 0.1, far = 100 ) {

		const cubeUVRenderTarget = _allocateTargets();
		_sceneToCubeUV( scene, near, far, cubeUVRenderTarget );
		if ( sigma > 0 ) {

			// Optional pre-blur of the base level before the mip chain is built.
			_blur( cubeUVRenderTarget, 0, 0, sigma );

		}
		_applyPMREM( cubeUVRenderTarget );
		_cleanUp();
		return cubeUVRenderTarget;

	},

	/**
	 * Generates a PMREM from an equirectangular texture, which can be either LDR
	 * (RGBFormat) or HDR (RGBEFormat).
	 *
	 * @returns {WebGLRenderTarget} the CubeUV render target; caller owns it.
	 */
	fromEquirectangular: function ( equirectangular ) {

		// Nearest filtering, no mipmaps: the fragment shader performs its own
		// bilinear interpolation after decoding the (possibly nonlinear) texels.
		equirectangular.magFilter = NearestFilter;
		equirectangular.minFilter = NearestFilter;
		equirectangular.generateMipmaps = false;
		const cubeUVRenderTarget = _allocateTargets( equirectangular );
		_equirectangularToCubeUV( equirectangular, cubeUVRenderTarget );
		_applyPMREM( cubeUVRenderTarget );
		_cleanUp();
		return cubeUVRenderTarget;

	},

};
  119. function _createPlanes() {
  120. var _lodPlanes = [];
  121. var _sizeLods = [];
  122. var _sigmas = [];
  123. let lod = LOD_MAX;
  124. for ( let i = 0; i < TOTAL_LODS; i ++ ) {
  125. const sizeLod = Math.pow( 2, lod );
  126. _sizeLods.push( sizeLod );
  127. let sigma = 1.0 / sizeLod;
  128. if ( i > LOD_MAX - LOD_MIN ) {
  129. sigma = EXTRA_LOD_SIGMA[ i - LOD_MAX + LOD_MIN - 1 ];
  130. } else if ( i == 0 ) {
  131. sigma = 0;
  132. }
  133. _sigmas.push( sigma );
  134. const texelSize = 1.0 / ( sizeLod - 1 );
  135. const min = - texelSize / 2;
  136. const max = 1 + texelSize / 2;
  137. const uv1 = [ min, min, max, min, max, max, min, min, max, max, min, max ];
  138. const cubeFaces = 6;
  139. const vertices = 6;
  140. const positionSize = 3;
  141. const uvSize = 2;
  142. const faceIndexSize = 1;
  143. const position = new Float32Array( positionSize * vertices * cubeFaces );
  144. const uv = new Float32Array( uvSize * vertices * cubeFaces );
  145. const faceIndex = new Float32Array( faceIndexSize * vertices * cubeFaces );
  146. for ( let face = 0; face < cubeFaces; face ++ ) {
  147. const x = ( face % 3 ) * 2 / 3 - 1;
  148. const y = face > 2 ? 0 : - 1;
  149. const coordinates = [
  150. [ x, y, 0 ],
  151. [ x + 2 / 3, y, 0 ],
  152. [ x + 2 / 3, y + 1, 0 ],
  153. [ x, y, 0 ],
  154. [ x + 2 / 3, y + 1, 0 ],
  155. [ x, y + 1, 0 ]
  156. ];
  157. position.set( [].concat( ...coordinates ),
  158. positionSize * vertices * face );
  159. uv.set( uv1, uvSize * vertices * face );
  160. const fill = [ face, face, face, face, face, face ];
  161. faceIndex.set( fill, faceIndexSize * vertices * face );
  162. }
  163. const planes = new BufferGeometry();
  164. planes.addAttribute(
  165. 'position', new BufferAttribute( position, positionSize ) );
  166. planes.addAttribute( 'uv', new BufferAttribute( uv, uvSize ) );
  167. planes.addAttribute(
  168. 'faceIndex', new BufferAttribute( faceIndex, faceIndexSize ) );
  169. _lodPlanes.push( planes );
  170. if ( lod > LOD_MIN ) {
  171. lod --;
  172. }
  173. }
  174. return { _lodPlanes, _sizeLods, _sigmas };
  175. }
  176. function _allocateTargets( equirectangular ) {
  177. const params = {
  178. magFilter: NearestFilter,
  179. minFilter: NearestFilter,
  180. generateMipmaps: false,
  181. type: equirectangular ? equirectangular.type : UnsignedByteType,
  182. format: equirectangular ? equirectangular.format : RGBEFormat,
  183. encoding: equirectangular ? equirectangular.encoding : RGBEEncoding,
  184. depthBuffer: false,
  185. stencilBuffer: false
  186. };
  187. const cubeUVRenderTarget = _createRenderTarget(
  188. { ...params, depthBuffer: ( equirectangular ? false : true ) } );
  189. _pingPongRenderTarget = _createRenderTarget( params );
  190. return cubeUVRenderTarget;
  191. }
  192. function _cleanUp() {
  193. _pingPongRenderTarget.dispose();
  194. _renderer.setRenderTarget( null );
  195. var size = _renderer.getSize();
  196. _renderer.setViewport( 0, 0, size.x, size.y );
  197. }
/**
 * Renders the six faces of a cube camera placed at the origin into the base
 * level of the CubeUV target, with tone mapping and gamma output temporarily
 * disabled so the capture stays linear.
 */
function _sceneToCubeUV(
	scene, near, far,
	cubeUVRenderTarget ) {

	const fov = 90;
	const aspect = 1;
	const cubeCamera = new PerspectiveCamera( fov, aspect, near, far );
	// Per-face sign tables selecting each face's up vector and look direction.
	const upSign = [ 1, 1, 1, 1, - 1, 1 ];
	const forwardSign = [ 1, 1, - 1, - 1, - 1, 1 ];

	// Save renderer state so it can be restored after the capture.
	const gammaOutput = _renderer.gammaOutput;
	const toneMapping = _renderer.toneMapping;
	const toneMappingExposure = _renderer.toneMappingExposure;
	_renderer.toneMapping = LinearToneMapping;
	_renderer.toneMappingExposure = 1.0;
	_renderer.gammaOutput = false;
	// Mirror the scene in z for the capture (undone at the end) — presumably
	// to match the handedness the CubeUV shader expects; verify against
	// cube_uv_reflection_fragment.
	scene.scale.z *= - 1;

	_renderer.setRenderTarget( cubeUVRenderTarget );
	for ( let i = 0; i < 6; i ++ ) {

		// Faces are packed three per row in the atlas; the column decides
		// which world axis the camera faces (x, y, or z).
		const col = i % 3;
		if ( col == 0 ) {

			cubeCamera.up.set( 0, upSign[ i ], 0 );
			cubeCamera.lookAt( forwardSign[ i ], 0, 0 );

		} else if ( col == 1 ) {

			cubeCamera.up.set( 0, 0, upSign[ i ] );
			cubeCamera.lookAt( 0, forwardSign[ i ], 0 );

		} else {

			cubeCamera.up.set( 0, upSign[ i ], 0 );
			cubeCamera.lookAt( 0, 0, forwardSign[ i ] );

		}
		// Faces 0-2 occupy the bottom row of the atlas, faces 3-5 the row above.
		_setViewport(
			col * SIZE_MAX, i > 2 ? SIZE_MAX : 0, SIZE_MAX, SIZE_MAX );
		_renderer.render( scene, cubeCamera );

	}

	// Restore the saved renderer state and un-mirror the scene.
	_renderer.toneMapping = toneMapping;
	_renderer.toneMappingExposure = toneMappingExposure;
	_renderer.gammaOutput = gammaOutput;
	scene.scale.z *= - 1;

}
  235. function _equirectangularToCubeUV(
  236. equirectangular, cubeUVRenderTarget ) {
  237. const scene = new Scene();
  238. scene.add( new Mesh( _lodPlanes[ 0 ], _blurMaterial ) );
  239. const uniforms = _blurMaterial.uniforms;
  240. uniforms[ 'envMap' ].value = equirectangular;
  241. uniforms[ 'copyEquirectangular' ].value = true;
  242. uniforms[ 'texelSize' ].value.set(
  243. 1.0 / equirectangular.image.width, 1.0 / equirectangular.image.height );
  244. uniforms[ 'inputEncoding' ].value = ENCODINGS[ equirectangular.encoding ];
  245. uniforms[ 'outputEncoding' ].value = ENCODINGS[ equirectangular.encoding ];
  246. _renderer.setRenderTarget( cubeUVRenderTarget );
  247. _setViewport( 0, 0, 3 * SIZE_MAX, 2 * SIZE_MAX );
  248. _renderer.render( scene, _flatCamera );
  249. }
  250. function _createRenderTarget( params ) {
  251. const cubeUVRenderTarget =
  252. new WebGLRenderTarget( 3 * SIZE_MAX, 3 * SIZE_MAX, params );
  253. cubeUVRenderTarget.texture.mapping = CubeUVReflectionMapping;
  254. cubeUVRenderTarget.texture.name = 'PMREM.cubeUv';
  255. return cubeUVRenderTarget;
  256. }
  257. function _setViewport( x, y, width, height ) {
  258. const dpr = _renderer.getPixelRatio();
  259. _renderer.setViewport( x / dpr, y / dpr, width / dpr, height / dpr );
  260. }
  261. function _applyPMREM( cubeUVRenderTarget ) {
  262. var autoClear = _renderer.autoClear;
  263. _renderer.autoClear = false;
  264. for ( let i = 1; i < TOTAL_LODS; i ++ ) {
  265. const sigma = Math.sqrt(
  266. _sigmas[ i ] * _sigmas[ i ] -
  267. _sigmas[ i - 1 ] * _sigmas[ i - 1 ] );
  268. const poleAxis =
  269. _axisDirections[ ( i - 1 ) % _axisDirections.length ];
  270. _blur( cubeUVRenderTarget, i - 1, i, sigma, poleAxis );
  271. }
  272. _renderer.autoClear = autoClear;
  273. }
  274. /**
  275. * This is a two-pass Gaussian blur for a cubemap. Normally this is done
  276. * vertically and horizontally, but this breaks down on a cube. Here we apply
  277. * the blur latitudinally (around the poles), and then longitudinally (towards
  278. * the poles) to approximate the orthogonally-separable blur. It is least
  279. * accurate at the poles, but still does a decent job.
  280. */
  281. function _blur(
  282. cubeUVRenderTarget, lodIn, lodOut,
  283. sigma, poleAxis ) {
  284. _halfBlur(
  285. cubeUVRenderTarget,
  286. _pingPongRenderTarget,
  287. lodIn,
  288. lodOut,
  289. sigma,
  290. 'latitudinal',
  291. poleAxis );
  292. _halfBlur(
  293. _pingPongRenderTarget,
  294. cubeUVRenderTarget,
  295. lodOut,
  296. lodOut,
  297. sigma,
  298. 'longitudinal',
  299. poleAxis );
  300. }
/**
 * Performs one directional (latitudinal or longitudinal) Gaussian blur pass,
 * reading level lodIn of targetIn and writing level lodOut of targetOut.
 * A non-finite sigmaRadians requests the widest blur the shader supports.
 */
function _halfBlur(
	targetIn, targetOut, lodIn,
	lodOut, sigmaRadians, direction,
	poleAxis ) {

	if ( direction !== 'latitudinal' && direction !== 'longitudinal' ) {

		console.error(
			'blur direction must be either latitudinal or longitudinal!' );

	}

	// Number of standard deviations at which to cut off the discrete approximation.
	const STANDARD_DEVIATIONS = 3;

	const blurScene = new Scene();
	blurScene.add( new Mesh( _lodPlanes[ lodOut ], _blurMaterial ) );
	const blurUniforms = _blurMaterial.uniforms;

	// Convert the blur radius from radians to source-level pixels and derive
	// how many taps cover STANDARD_DEVIATIONS of the Gaussian.
	const pixels = _sizeLods[ lodIn ] - 1;
	const radiansPerPixel = isFinite( sigmaRadians ) ? Math.PI / ( 2 * pixels ) : 2 * Math.PI / ( 2 * MAX_SAMPLES - 1 );
	const sigmaPixels = sigmaRadians / radiansPerPixel;
	const samples = isFinite( sigmaRadians ) ? 1 + Math.floor( STANDARD_DEVIATIONS * sigmaPixels ) : MAX_SAMPLES;

	if ( samples > MAX_SAMPLES ) {

		console.warn( `sigmaRadians, ${
			sigmaRadians}, is too large and will clip, as it requested ${
			samples} samples when the maximum is set to ${MAX_SAMPLES}` );

	}

	// Normalized Gaussian weights. Index 0 is the center tap; the shader
	// mirrors the remaining taps, hence the factor of 2 in the sum. Weights
	// beyond `samples` are computed but contribute nothing to the sum.
	let weights = [];
	let sum = 0;

	for ( let i = 0; i < MAX_SAMPLES; ++ i ) {

		const x = i / sigmaPixels;
		const weight = Math.exp( - x * x / 2 );
		weights.push( weight );

		if ( i == 0 ) {

			sum += weight;

		} else if ( i < samples ) {

			sum += 2 * weight;

		}

	}

	weights = weights.map( w => w / sum );

	blurUniforms[ 'envMap' ].value = targetIn.texture;
	blurUniforms[ 'copyEquirectangular' ].value = false;
	blurUniforms[ 'samples' ].value = samples;
	blurUniforms[ 'weights' ].value = weights;
	blurUniforms[ 'latitudinal' ].value = direction === 'latitudinal';

	if ( poleAxis ) {

		blurUniforms[ 'poleAxis' ].value = poleAxis;

	}

	blurUniforms[ 'dTheta' ].value = radiansPerPixel;
	blurUniforms[ 'mipInt' ].value = LOD_MAX - lodIn;
	// Both render targets are allocated with the same encoding, so the pass
	// decodes and re-encodes alike.
	blurUniforms[ 'inputEncoding' ].value = ENCODINGS[ targetIn.texture.encoding ];
	blurUniforms[ 'outputEncoding' ].value = ENCODINGS[ targetIn.texture.encoding ];

	// Position of lodOut within the CubeUV atlas: regular mips shrink and
	// stack upward; the extra roughness mips repeat at the smallest size.
	const outputSize = _sizeLods[ lodOut ];
	const x = 3 * Math.max( 0, SIZE_MAX - 2 * outputSize );
	const y = ( lodOut === 0 ? 0 : 2 * SIZE_MAX ) +
		2 * outputSize *
		( lodOut > LOD_MAX - LOD_MIN ? lodOut - LOD_MAX + LOD_MIN : 0 );

	_renderer.setRenderTarget( targetOut );
	_setViewport( x, y, 3 * outputSize, 2 * outputSize );
	_renderer.render( blurScene, _flatCamera );

}
/**
 * Builds the single RawShaderMaterial used for every pass: the
 * equirectangular copy and both blur directions. The maximum tap count is
 * baked in as the compile-time constant `n`; smaller blurs exit the sample
 * loop early via the `samples` uniform instead of recompiling the shader.
 */
function _getShader( maxSamples ) {

	const weights = new Float32Array( maxSamples );
	const texelSize = new Vector2( 1, 1 );
	const poleAxis = new Vector3( 0, 1, 0 );

	var shaderMaterial = new RawShaderMaterial( {

		defines: { 'n': maxSamples },

		uniforms: {
			'envMap': { value: null },
			'copyEquirectangular': { value: false },
			'texelSize': { value: texelSize },
			'samples': { value: 1 },
			'weights': { value: weights },
			'latitudinal': { value: false },
			'dTheta': { value: 0 },
			'mipInt': { value: 0 },
			'poleAxis': { value: poleAxis },
			'inputEncoding': { value: ENCODINGS[ LinearEncoding ] },
			'outputEncoding': { value: ENCODINGS[ LinearEncoding ] }
		},

		// Pass-through vertex shader: the plane geometry is already in NDC.
		vertexShader: `
precision mediump float;
precision mediump int;
attribute vec3 position;
attribute vec2 uv;
attribute float faceIndex;
varying vec2 vUv;
varying float vFaceIndex;
void main() {
vUv = uv;
vFaceIndex = faceIndex;
gl_Position = vec4( position, 1.0 );
}
`,

		// Fragment shader: either a bilinear equirectangular copy or a
		// one-directional Gaussian blur via Rodrigues rotation sampling.
		fragmentShader: `
precision mediump float;
precision mediump int;
varying vec2 vUv;
varying float vFaceIndex;
uniform sampler2D envMap;
uniform bool copyEquirectangular;
uniform vec2 texelSize;
uniform int samples;
uniform float weights[n];
uniform bool latitudinal;
uniform float dTheta;
uniform float mipInt;
uniform vec3 poleAxis;
uniform int inputEncoding;
uniform int outputEncoding;
#include <encodings_pars_fragment>
vec4 inputTexelToLinear(vec4 value){
if(inputEncoding == 0){
return value;
}else if(inputEncoding == 1){
return sRGBToLinear(value);
}else if(inputEncoding == 2){
return RGBEToLinear(value);
}else if(inputEncoding == 3){
return RGBMToLinear(value, 7.0);
}else if(inputEncoding == 4){
return RGBMToLinear(value, 16.0);
}else if(inputEncoding == 5){
return RGBDToLinear(value, 256.0);
}else{
return GammaToLinear(value, 2.2);
}
}
vec4 linearToOutputTexel(vec4 value){
if(outputEncoding == 0){
return value;
}else if(outputEncoding == 1){
return LinearTosRGB(value);
}else if(outputEncoding == 2){
return LinearToRGBE(value);
}else if(outputEncoding == 3){
return LinearToRGBM(value, 7.0);
}else if(outputEncoding == 4){
return LinearToRGBM(value, 16.0);
}else if(outputEncoding == 5){
return LinearToRGBD(value, 256.0);
}else{
return LinearToGamma(value, 2.2);
}
}
vec4 envMapTexelToLinear(vec4 color) {
return inputTexelToLinear(color);
}
#define ENVMAP_TYPE_CUBE_UV
#include <cube_uv_reflection_fragment>
#define RECIPROCAL_PI 0.31830988618
#define RECIPROCAL_PI2 0.15915494
void main() {
gl_FragColor = vec4(0.0);
vec3 outputDirection = getDirection(vUv, vFaceIndex);
if (copyEquirectangular) {
vec3 direction = normalize(outputDirection);
vec2 uv;
uv.y = asin(clamp(direction.y, -1.0, 1.0)) * RECIPROCAL_PI + 0.5;
uv.x = atan(direction.z, direction.x) * RECIPROCAL_PI2 + 0.5;
vec2 f = fract(uv / texelSize - 0.5);
uv -= f * texelSize;
vec3 tl = envMapTexelToLinear(texture2D(envMap, uv)).rgb;
uv.x += texelSize.x;
vec3 tr = envMapTexelToLinear(texture2D(envMap, uv)).rgb;
uv.y += texelSize.y;
vec3 br = envMapTexelToLinear(texture2D(envMap, uv)).rgb;
uv.x -= texelSize.x;
vec3 bl = envMapTexelToLinear(texture2D(envMap, uv)).rgb;
vec3 tm = mix(tl, tr, f.x);
vec3 bm = mix(bl, br, f.x);
gl_FragColor.rgb = mix(tm, bm, f.y);
} else {
for (int i = 0; i < n; i++) {
if (i >= samples)
break;
for (int dir = -1; dir < 2; dir += 2) {
if (i == 0 && dir == 1)
continue;
vec3 axis = latitudinal ? poleAxis : cross(poleAxis, outputDirection);
if (all(equal(axis, vec3(0.0))))
axis = cross(vec3(0.0, 1.0, 0.0), outputDirection);
axis = normalize(axis);
float theta = dTheta * float(dir * i);
float cosTheta = cos(theta);
// Rodrigues' axis-angle rotation
vec3 sampleDirection = outputDirection * cosTheta
+ cross(axis, outputDirection) * sin(theta)
+ axis * dot(axis, outputDirection) * (1.0 - cosTheta);
gl_FragColor.rgb +=
weights[i] * bilinearCubeUV(envMap, sampleDirection, mipInt);
}
}
}
gl_FragColor = linearToOutputTexel(gl_FragColor);
}
`,

		blending: NoBlending,
		depthTest: false,
		depthWrite: false

	} );

	shaderMaterial.type = 'SphericalGaussianBlur';

	return shaderMaterial;

}
// The IIFE returns the constructor, keeping all helpers module-private.
return PMREMGenerator;

} )();

export { PMREMGenerator };