Three.js drawImage from a multi-view WebGLRenderer canvas to multiple canvases

Date: 2022-06-01 04:23:11

My problem is that I want to have only one WebGLRenderer, but have many camera views that can be placed in many separate canvases. In the example below I have 9 views in one canvas, each with its own camera, scene, and meshes, which are then drawn onto their own canvases using the ctx.drawImage method. This method works, but drawImage is far too slow to even reach 10 fps, let alone the desired 60+ fps.

Is there any way around this problem that doesn't involve the slow drawImage method, or is there a way to speed up the whole process?

Thanks for the help. The example code is below.

http://jsfiddle.net/QD8M2/

<!doctype html>

<html lang="en">
<head>
    <meta charset="utf-8">

    <title>Three.js Test</title>

    <style>
        body {
            margin: 0;
            padding: 0;
            overflow: hidden;
        }
    </style>

    <!--[if lt IE 9]>
    <script src="http://html5shiv.googlecode.com/svn/trunk/html5.js"></script>
    <![endif]-->
</head>

<body>
    <script src="./three.min.js"></script>
    <script>
        var renderer;
        var windowWidth, windowHeight;
        var numberMeshes = 1;
        var dpr = window.devicePixelRatio || 1;

        var viewDemensions = {x: 3, y: 3};

        var views = [];

        init();
        animate();

        function init() {
            for (var i = 0; i < viewDemensions.x; i++) {
                for (var j = 0; j < viewDemensions.y; j++) {
                    var obj = {};

                    obj.left = i/viewDemensions.x;
                    obj.bottom = j/viewDemensions.y;
                    obj.width = 1/viewDemensions.x;
                    obj.height = 1/viewDemensions.y;

                    obj.canvas = document.createElement('canvas');
                    obj.context = obj.canvas.getContext('2d');

                    document.body.appendChild(obj.canvas);

                    var camera = new THREE.PerspectiveCamera(75, 100/100, 1, 10000);
                    camera.position.z = 1000;
                    obj.camera = camera;

                    var scene = new THREE.Scene();

                    var geometry = new THREE.SphereGeometry(100, 10, 10);

                    obj.meshes = [];

                    for (var k = 0; k < numberMeshes; k++) {
                        var material = new THREE.MeshBasicMaterial({ color: 0xffffff*Math.random(), wireframe: true });
                        var mesh = new THREE.Mesh(geometry, material);
                        var scale = 2*Math.random();
                        mesh.scale.set(scale, scale, scale);
                        scene.add(mesh);
                        obj.meshes.push(mesh);
                    }

                    obj.scene = scene;

                    views.push(obj);
                }
            }

            renderer = new THREE.WebGLRenderer({
                preserveDrawingBuffer: true
            });

            // document.body.appendChild(renderer.domElement);
        }

        function updateSize() {
            if (windowWidth != window.innerWidth || windowHeight != window.innerHeight) {
                windowWidth  = window.innerWidth;
                windowHeight = window.innerHeight;
                renderer.setSize (windowWidth, windowHeight);
            }
        }

        function animate() {

            updateSize();

            for (var i = 0; i < views.length; i++) {
                var view = views[i];
                var left   = Math.floor(view.left*windowWidth) * dpr;
                var bottom = Math.floor(view.bottom*windowHeight) * dpr;
                var width  = Math.floor(view.width*windowWidth) * dpr;
                var height = Math.floor(view.height*windowHeight) * dpr;
                view.canvas.width = width;
                view.canvas.height = height;
                view.canvas.style.width = Math.floor(view.width*windowWidth) + 'px';
                view.canvas.style.height = Math.floor(view.height*windowHeight) + 'px';
                view.context.scale(dpr, dpr);
                view.camera.aspect = width/height;
                view.camera.updateProjectionMatrix();
                renderer.setViewport(left, bottom, width, height);
                renderer.setScissor(left, bottom, width, height);
                renderer.enableScissorTest(true);
                renderer.setClearColor(new THREE.Color().setRGB(0.5, 0.5, 0.5));
                for (var j = 0; j < numberMeshes; j++) {
                    view.meshes[j].rotation.x += 0.03*Math.random();
                    view.meshes[j].rotation.y += 0.05*Math.random();
                }
                renderer.render(view.scene, view.camera);
                // Copy this view's region of the shared WebGL canvas onto its own 2D canvas (the slow part)
                view.context.drawImage(renderer.domElement, left, bottom, width, height, 0, 0, view.width*windowWidth, view.height*windowHeight);
            }

            requestAnimationFrame(animate);
        }
    </script>
</body>
</html>

2 Answers

#1


1  

After some discussion in the comments: using an FBO for the different views, and then using those textures as inputs positioned in the different views, might be suitable for your case. Please check. Note that this does not involve drawing to a buffer, reading the pixels back, and then applying them to a canvas.

EDIT1: Added pseudocode using Three.js

Create offscreen target

rtTexture = new THREE.WebGLRenderTarget( window.innerWidth, window.innerHeight, ..);

Create screen, material, and mesh

mtlScreen = new THREE.ShaderMaterial( { uniforms: { tDiffuse: { type: "t", value: rtTexture } } } );

mtl = new THREE.MeshBasicMaterial( { map: rtTexture } );

mesh = new THREE.Mesh( plane, mtlScreen );

scene.add( mesh );

Now render to the offscreen target first, then to the display

renderer.render( sceneRTT, cameraRTT, rtTexture, ..);

renderer.render( scene, camera );

Refer to the standard three.js example for the full code - https://github.com/prabindh/three.js/blob/master/examples/webgl_rtt.html - and I posted a short slide deck on this at http://www.slideshare.net/prabindh/render-to-texture-with-threejs

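Putting the pseudocode together, here is a minimal self-contained sketch of the render-to-texture flow. It assumes the legacy (~r60-era) renderer.render(scene, camera, renderTarget, forceClear) signature used in this answer; newer three.js releases replace that with renderer.setRenderTarget() and expose the target's texture as rtTexture.texture, so treat this as a sketch rather than a drop-in implementation:

var renderer = new THREE.WebGLRenderer();
renderer.setSize(window.innerWidth, window.innerHeight);
document.body.appendChild(renderer.domElement);

// Offscreen target that the first pass renders into
var rtTexture = new THREE.WebGLRenderTarget(window.innerWidth, window.innerHeight, {
    minFilter: THREE.LinearFilter,
    magFilter: THREE.NearestFilter,
    format: THREE.RGBFormat
});

// Scene and camera for the offscreen (RTT) pass
var sceneRTT = new THREE.Scene();
var cameraRTT = new THREE.PerspectiveCamera(75, window.innerWidth/window.innerHeight, 1, 10000);
cameraRTT.position.z = 1000;
var sphere = new THREE.Mesh(
    new THREE.SphereGeometry(100, 10, 10),
    new THREE.MeshBasicMaterial({ color: 0xff0000, wireframe: true })
);
sceneRTT.add(sphere);

// On-screen scene: a full-screen quad textured with the offscreen result
var scene = new THREE.Scene();
var camera = new THREE.OrthographicCamera(-0.5, 0.5, 0.5, -0.5, -1, 1);
var plane = new THREE.PlaneGeometry(1, 1);
var mtl = new THREE.MeshBasicMaterial({ map: rtTexture }); // legacy three.js accepted the target itself as a texture
scene.add(new THREE.Mesh(plane, mtl));

function render() {
    sphere.rotation.y += 0.02;
    renderer.render(sceneRTT, cameraRTT, rtTexture, true); // first pass: into the FBO
    renderer.render(scene, camera);                        // second pass: onto the canvas
    requestAnimationFrame(render);
}
render();

The point for the question above is that the second pass need not be a single full-screen quad: it could be one textured quad per view, all drawn by the single renderer.
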
Approach With GLES2:

Quick setup for FBO with GLES2 (trivial change to WebGL):

    glGenFramebuffers(NUM_FBO, fboId);
    glGenTextures(NUM_FBO, fboTextureId);
    glGenTextures(1, &regularTextureId);

Then comes setting up for drawing to offscreen buffers:

    GL_CHECK(glBindTexture(GL_TEXTURE_2D, 0));
    GL_CHECK(glBindTexture(GL_TEXTURE_2D, fboTextureId[i]));
    GL_CHECK(glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, globals->inTextureWidth, globals->inTextureHeight, 0, GL_RGBA, GL_UNSIGNED_BYTE, NULL));
    GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
    GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));

    GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, fboId[i]));
    GL_CHECK(glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, fboTextureId[i], 0));

Then draw to offscreen buffer:

    // Bind regular texture
    GL_CHECK(glBindTexture(GL_TEXTURE_2D, 0));
    GL_CHECK(glBindTexture(GL_TEXTURE_2D, regularTextureId));
    add_texture(globals->inTextureWidth, globals->inTextureHeight, globals->textureData, globals->inPixelFormat);
    GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
    GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
    // Draw with regular draw calls to the FBO
    GL_CHECK(_test17(globals, numObjectsPerSide, 1));

Now use this as a texture input, and draw to the regular display:

    GL_CHECK(glBindTexture(GL_TEXTURE_2D, fboTextureId[i]));

    GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR));
    GL_CHECK(glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR));
    // Draw to the display buffer here

    // Now get back the display framebuffer and unbind the FBO
    GL_CHECK(glBindFramebuffer(GL_FRAMEBUFFER, 0));

https://github.com/prabindh/sgxperf/blob/master/sgxperf_test17.cpp

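Since the answer describes the GLES2-to-WebGL change as trivial, here is a hedged sketch of the same FBO setup in raw WebGL; names such as fboTexture and fbo are illustrative and not taken from the linked sgxperf code:

// Hypothetical WebGL translation of the GLES2 setup above.
// 'gl' is an existing WebGLRenderingContext; the size is illustrative.
var width = 256, height = 256;

// Texture that will receive the offscreen rendering
var fboTexture = gl.createTexture();
gl.bindTexture(gl.TEXTURE_2D, fboTexture);
gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, width, height, 0, gl.RGBA, gl.UNSIGNED_BYTE, null);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR);

// Framebuffer with that texture as its color attachment
var fbo = gl.createFramebuffer();
gl.bindFramebuffer(gl.FRAMEBUFFER, fbo);
gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, fboTexture, 0);

// ...normal draw calls issued here land in fboTexture...

// Return to the default (display) framebuffer and use the
// texture as input for the on-screen pass
gl.bindFramebuffer(gl.FRAMEBUFFER, null);
gl.bindTexture(gl.TEXTURE_2D, fboTexture);
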
#2


0  

Here is the solution I came up with. It is not quite as professional as prabindh's answer, but it is the only way I could figure out to solve my problem.

Basically, I used a WebGLRenderTarget to render each different view/scene with the renderer and then copied the pixels out onto a canvas using readPixels. I tried to use web workers to improve performance, but I couldn't get them to play nicely with the image writing. My code is below.

If you meant something completely different, prabindh, I would love some help on how to make this faster and/or better for my current use case.

var renderer;
var gl;
var times = [];
var windowWidth, windowHeight;
var numberMeshes = 10;
var dpr = window.devicePixelRatio || 1;

var viewDemensions = {x: 2, y: 2};

var views = [];

init();
animate();

function init() {
    renderer = new THREE.WebGLRenderer({preserveDrawingBuffer: true});
    renderer.autoClear = false;

    gl = renderer.getContext();

    for (var i = 0; i < viewDemensions.x; i++) {
        for (var j = 0; j < viewDemensions.y; j++) {
            var obj = {};

            obj.left = i/viewDemensions.x;
            obj.bottom = j/viewDemensions.y;
            obj.width = 1/viewDemensions.x;
            obj.height = 1/viewDemensions.y;

            obj.canvas = document.createElement('canvas');
            obj.context = obj.canvas.getContext('2d');

            document.body.appendChild(obj.canvas);

            var camera = new THREE.PerspectiveCamera(75, 100/100, 1, 10000);
            camera.position.z = 1000;
            obj.camera = camera;

            var scene = new THREE.Scene();

            var geometry = new THREE.SphereGeometry(100, 10, 10);

            obj.meshes = [];

            for (var k = 0; k < numberMeshes; k++) {
                var material = new THREE.MeshBasicMaterial({ color: 0xffffff*Math.random(), wireframe: true });
                var mesh = new THREE.Mesh(geometry, material);
                var scale = 2*Math.random();
                mesh.scale.set(scale, scale, scale);
                scene.add(mesh);
                obj.meshes.push(mesh);
            }

            obj.scene = scene;

            obj.widthVal = 100;
            obj.heightVal = 100;

            obj.imageData = obj.context.getImageData(0,0,obj.widthVal,obj.heightVal);

            obj.pixels = new Uint8Array(obj.imageData.data.length);

            // obj.ww = new Worker("ww.js");

            // obj.frames = [];
            // obj.prevFrame = 0;

            // obj.ww.onmessage = function (event) {
            //     var i = event.data.i;
            //     var imageData = event.data.imageData;
            //     views[i].context.putImageData(imageData,0,0);
            // };

            obj.target = new THREE.WebGLRenderTarget( 100, 100, { minFilter: THREE.LinearFilter, magFilter: THREE.NearestFilter, format: THREE.RGBFormat } );

            views.push(obj);
        }
    }
}

function updateSize() {
    if (windowWidth != window.innerWidth || windowHeight != window.innerHeight) {
        windowWidth  = window.innerWidth;
        windowHeight = window.innerHeight;
    }
}

function animate() {

    updateSize();

    var i, j;
    var view, width, height;
    var sWidth, sHeight;
    var mesh;

    for (i = 0; i < views.length; i++) {
        view = views[i];
        // if (!view.lock) {
        for (j = 0; j < view.meshes.length; j++) {
            mesh = view.meshes[j];
            mesh.rotation.x += 0.03;
            mesh.rotation.y += 0.05;
        }

        sWidth = Math.floor(view.width*windowWidth);
        sHeight = Math.floor(view.height*windowHeight);

        width  = sWidth * dpr;
        height = sHeight * dpr;

        var same = true;

        if (view.widthVal != width || view.heightVal != height) {
            same = false;
            view.widthVal = width;
            view.heightVal = height;
        }

        view.canvas.width = width;
        view.canvas.height = height;
        view.canvas.style.width = sWidth + 'px';
        view.canvas.style.height = sHeight + 'px';
        view.context.scale(dpr, dpr);

        view.camera.aspect = width/height;
        view.camera.updateProjectionMatrix();
        renderer.setSize(sWidth, sHeight);
        view.target.width = width;
        view.target.height = height;
        renderer.render(view.scene, view.camera, view.target, true); // render this view into its offscreen target

        if (!same) {
            view.imageData = view.context.createImageData(width,height);
            view.pixels = new Uint8Array(view.imageData.data.length);
        }

        // Read the rendered pixels back from the currently bound target framebuffer
        gl.readPixels(0, 0, width, height, gl.RGBA, gl.UNSIGNED_BYTE, view.pixels);

        // view.ww.postMessage({imageData: imageData, pixels: pixels, i:i});

        view.imageData.data.set(view.pixels);

        view.context.putImageData(view.imageData,0,0);
    }

    requestAnimationFrame(animate);

}

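For reference, here is a hedged sketch of what the commented-out ww.js worker might have looked like, matching the message shape in the commented code above. putImageData itself must still run on the main thread, and whether this round-trip beats copying on the main thread is exactly what proved questionable:

// ww.js -- hypothetical worker matching the commented-out code above.
// Copies the raw pixels from gl.readPixels into the ImageData and posts
// the result back; the main thread then calls putImageData with it.
self.onmessage = function (event) {
    var imageData = event.data.imageData; // structured-cloned ImageData
    var pixels = event.data.pixels;       // Uint8Array of RGBA bytes
    imageData.data.set(pixels);
    self.postMessage({ i: event.data.i, imageData: imageData });
};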